+ // No more data will come in, so re-create the last chunk accordingly
+ uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
+ memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);
+
+ delete[] current_chunk_;
+ current_chunk_ = resized_chunk;
+
+ data_chunks_.pop_back();
+ data_chunks_.push_back(resized_chunk);
+}
+
+// Appends exactly one sample (unit_size_ bytes, copied from 'data') to
+// the segment. Relies on the class invariant that current_chunk_ always
+// has room for at least one more sample; re-establishes that invariant
+// before returning by eagerly allocating the next chunk when full.
+// Thread-safe: all member state is mutated under mutex_.
+void Segment::append_single_sample(void *data)
+{
+ lock_guard<recursive_mutex> lock(mutex_);
+
+ // There will always be space for at least one sample in
+ // the current chunk, so we do not need to test for space
+
+ // Copy one sample to the first unused slot of the current chunk.
+ memcpy(current_chunk_ + (used_samples_ * unit_size_),
+ data, unit_size_);
+ used_samples_++;
+ unused_samples_--;
+
+ // Chunk just became full: allocate a fresh chunk now so the
+ // "space for at least one sample" invariant holds on the next call.
+ // NOTE(review): 'new' can throw std::bad_alloc here with the lock
+ // held — presumably acceptable to callers; confirm at call sites.
+ if (unused_samples_ == 0) {
+ current_chunk_ = new uint8_t[chunk_size_];
+ data_chunks_.push_back(current_chunk_);
+ used_samples_ = 0;
+ unused_samples_ = chunk_size_ / unit_size_;
+ }
+
+ sample_count_++;
+}
+
+void Segment::append_samples(void* data, uint64_t samples)
+{
+ lock_guard<recursive_mutex> lock(mutex_);
+
+ if (unused_samples_ >= samples) {
+ // All samples fit into the current chunk
+ memcpy(current_chunk_ + (used_samples_ * unit_size_),
+ data, (samples * unit_size_));
+ used_samples_ += samples;
+ unused_samples_ -= samples;
+ } else {
+ // Only a part of the samples fit, split data up between chunks
+ memcpy(current_chunk_ + (used_samples_ * unit_size_),
+ data, (unused_samples_ * unit_size_));
+ const uint64_t remaining_samples = samples - unused_samples_;
+
+ // If we're out of memory, this will throw std::bad_alloc
+ current_chunk_ = new uint8_t[chunk_size_];
+ data_chunks_.push_back(current_chunk_);
+ memcpy(current_chunk_, (uint8_t*)data + (unused_samples_ * unit_size_),
+ (remaining_samples * unit_size_));
+
+ used_samples_ = remaining_samples;
+ unused_samples_ = (chunk_size_ / unit_size_) - remaining_samples;
+ }
+
+ if (unused_samples_ == 0) {