#include "segment.hpp"
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <vector>
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <new>
+using std::bad_alloc;
using std::lock_guard;
+using std::min;
using std::recursive_mutex;
-using std::vector;
namespace pv {
namespace data {
-Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
+const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10MiB */
+
+Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
+ segment_id_(segment_id),
sample_count_(0),
start_time_(0),
samplerate_(samplerate),
- unit_size_(unit_size)
+ unit_size_(unit_size),
+ iterator_count_(0),
+ mem_optimization_requested_(false),
+ is_complete_(false)
{
lock_guard<recursive_mutex> lock(mutex_);
assert(unit_size_ > 0);
// Determine the number of samples we can fit in one chunk
// without exceeding MaxChunkSize
- chunk_size_ = std::min(MaxChunkSize,
- (MaxChunkSize / unit_size_) * unit_size_);
+ chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
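+ // For example, with the default MaxChunkSize of 10 MiB and a
+ // unit_size_ of 3, this yields (10485760 / 3) * 3 = 10485759 bytes:
+ // the largest multiple of the unit size that fits, so samples never
+ // straddle a chunk boundary.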
// Create the initial chunk
current_chunk_ = new uint8_t[chunk_size_];
data_chunks_.push_back(current_chunk_);
used_samples_ = 0;
unused_samples_ = chunk_size_ / unit_size_;
}
+uint32_t Segment::segment_id() const
+{
+ return segment_id_;
+}
+
+void Segment::set_complete()
+{
+ is_complete_ = true;
+}
+
+bool Segment::is_complete() const
+{
+ return is_complete_;
+}
+
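+// Shrinks the last, partially filled chunk once no more data will come
+// in. If raw-sample iterators are still open, the shrink is deferred
+// and end_raw_sample_iteration() re-runs it when the last iterator is
+// closed.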
+void Segment::free_unused_memory()
+{
+ lock_guard<recursive_mutex> lock(mutex_);
+
+ // Do not mess with the data chunks if we have iterators pointing at them
+ if (iterator_count_ > 0) {
+ mem_optimization_requested_ = true;
+ return;
+ }
+
+ if (current_chunk_) {
+ // No more data will come in, so re-create the last chunk accordingly
+ uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
+ memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);
+
+ delete[] current_chunk_;
+ current_chunk_ = resized_chunk;
+
+ data_chunks_.pop_back();
+ data_chunks_.push_back(resized_chunk);
+ }
+}
+
void Segment::append_single_sample(void *data)
{
lock_guard<recursive_mutex> lock(mutex_);
// There will always be space for at least one sample in
// the current chunk, so we do not need to test for space
- memcpy(current_chunk_ + (used_samples_ * unit_size_),
- data, unit_size_);
+ memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
used_samples_++;
unused_samples_--;

if (unused_samples_ == 0) {
// If we're out of memory, this will throw std::bad_alloc
current_chunk_ = new uint8_t[chunk_size_];
data_chunks_.push_back(current_chunk_);
used_samples_ = 0;
unused_samples_ = chunk_size_ / unit_size_;
}

sample_count_++;
}

void Segment::append_samples(void* data, uint64_t samples)
{
lock_guard<recursive_mutex> lock(mutex_);
- if (unused_samples_ >= samples) {
- // All samples fit into the current chunk
- memcpy(current_chunk_ + (used_samples_ * unit_size_),
- data, (samples * unit_size_));
- used_samples_ += samples;
- unused_samples_ -= samples;
- } else {
- // Only a part of the samples fit, split data up between chunks
- memcpy(current_chunk_ + (used_samples_ * unit_size_),
- data, (unused_samples_ * unit_size_));
- const uint64_t remaining_samples = samples - unused_samples_;
-
- // If we're out of memory, this will throw std::bad_alloc
- current_chunk_ = new uint8_t[chunk_size_];
- data_chunks_.push_back(current_chunk_);
- memcpy(current_chunk_, (uint8_t*)data + (unused_samples_ * unit_size_),
- (remaining_samples * unit_size_));
-
- used_samples_ = remaining_samples;
- unused_samples_ = (chunk_size_ / unit_size_) - remaining_samples;
- }
-
- if (unused_samples_ == 0) {
- // If we're out of memory, this will throw std::bad_alloc
- current_chunk_ = new uint8_t[chunk_size_];
- data_chunks_.push_back(current_chunk_);
- used_samples_ = 0;
- unused_samples_ = chunk_size_ / unit_size_;
- }
+ const uint8_t* data_byte_ptr = (uint8_t*)data;
+ uint64_t remaining_samples = samples;
+ uint64_t data_offset = 0;
+
+ do {
+ uint64_t copy_count = 0;
+
+ if (remaining_samples <= unused_samples_) {
+ // All samples fit into the current chunk
+ copy_count = remaining_samples;
+ } else {
+ // Only a part of the samples fit, fill up current chunk
+ copy_count = unused_samples_;
+ }
+
+ uint8_t* dest = &(current_chunk_[used_samples_ * unit_size_]);
+ const uint8_t* src = &(data_byte_ptr[data_offset]);
+ memcpy(dest, src, copy_count * unit_size_);
+
+ used_samples_ += copy_count;
+ unused_samples_ -= copy_count;
+ remaining_samples -= copy_count;
+ data_offset += (copy_count * unit_size_);
+
+ if (unused_samples_ == 0) {
+ // Detach from the current chunk first: it is still owned by
+ // data_chunks_ and must not be freed by the catch handler below
+ current_chunk_ = nullptr;
+
+ try {
+ // If we're out of memory, allocating a chunk will throw
+ // std::bad_alloc. To give the application some usable memory
+ // to work with in case chunk allocation fails, we allocate
+ // extra memory here and free it again once the chunk allocation
+ // has succeeded. This way, memory allocation fails early enough
+ // for PV to remain alive; otherwise, PV would crash in some
+ // random memory-allocating part of the application.
+ current_chunk_ = new uint8_t[chunk_size_];
+
+ const uint64_t dummy_size = 2 * chunk_size_;
+ auto dummy_chunk = new uint8_t[dummy_size];
+ memset(dummy_chunk, 0xFF, dummy_size);
+ delete[] dummy_chunk;
+ } catch (bad_alloc&) {
+ delete[] current_chunk_; // nullptr if the chunk allocation itself threw
+ current_chunk_ = nullptr;
+ throw;
+ }
+
+ data_chunks_.push_back(current_chunk_);
+ used_samples_ = 0;
+ unused_samples_ = chunk_size_ / unit_size_;
+ }
+ } while (remaining_samples > 0);
sample_count_ += samples;
}
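+
+// A minimal caller-side sketch (illustrative; not part of this file,
+// and the surrounding acquisition code is assumed):
+//
+//   try {
+//       segment->append_samples(buf, sample_count);
+//   } catch (const std::bad_alloc&) {
+//       // Stop feeding data; the early failure above leaves PV with
+//       // enough memory to keep running and notify the user.
+//   }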
-uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
+void Segment::get_raw_samples(uint64_t start, uint64_t count,
+ uint8_t* dest) const
{
assert(start < sample_count_);
assert(start + count <= sample_count_);
assert(count > 0);
+ assert(dest != nullptr);
lock_guard<recursive_mutex> lock(mutex_);
- uint8_t* dest = new uint8_t[count * unit_size_];
uint8_t* dest_ptr = dest;
uint64_t chunk_num = (start * unit_size_) / chunk_size_;
uint64_t chunk_offs = (start * unit_size_) % chunk_size_;
while (count > 0) {
const uint8_t* chunk = data_chunks_[chunk_num];
- uint64_t copy_size = std::min(count * unit_size_,
+ uint64_t copy_size = min(count * unit_size_,
chunk_size_ - chunk_offs);
memcpy(dest_ptr, chunk + chunk_offs, copy_size);

dest_ptr += copy_size;
count -= (copy_size / unit_size_);

chunk_num++;
chunk_offs = 0;
}
-
- return dest;
}
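+
+// Usage sketch (illustrative): the caller now owns the destination
+// buffer. Assuming the segment exposes a unit_size() accessor:
+//
+//   std::vector<uint8_t> buf(count * segment->unit_size());
+//   segment->get_raw_samples(start, count, buf.data());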
-SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
+SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
SegmentRawDataIterator* it = new SegmentRawDataIterator;
assert(start < sample_count_);
+ iterator_count_++;
+
it->sample_index = start;
it->chunk_num = (start * unit_size_) / chunk_size_;
it->chunk_offs = (start * unit_size_) % chunk_size_;
it->chunk = data_chunks_[it->chunk_num];
it->value = it->chunk + it->chunk_offs;

return it;
}
-void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
+void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
- lock_guard<recursive_mutex> lock(mutex_);
-
+ // Fail gracefully if we are asked to deliver data we don't have
if (it->sample_index > sample_count_)
- {
- // Fail gracefully if we are asked to deliver data we don't have
return;
- } else {
- it->sample_index += increase;
- it->chunk_offs += (increase * unit_size_);
- }
+
+ it->sample_index += increase;
+ it->chunk_offs += (increase * unit_size_);
if (it->chunk_offs > (chunk_size_ - 1)) {
it->chunk_num++;
it->chunk_offs -= chunk_size_;
it->chunk = data_chunks_[it->chunk_num];
}

it->value = it->chunk + it->chunk_offs;
}
-void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
+void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
delete it;
-}
+ iterator_count_--;
+
+ if ((iterator_count_ == 0) && mem_optimization_requested_) {
+ mem_optimization_requested_ = false;
+ free_unused_memory();
+ }
+}
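+
+// Iterator lifecycle sketch (illustrative; the processing step is a
+// hypothetical placeholder):
+//
+//   SegmentRawDataIterator* it = segment->begin_raw_sample_iteration(0);
+//   for (uint64_t done = 0; done < total; done += step) {
+//       process(it->value, step); // hypothetical consumer
+//       segment->continue_raw_sample_iteration(it, step);
+//   }
+//   segment->end_raw_sample_iteration(it); // may run the deferred shrink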
} // namespace data
} // namespace pv