/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "segment.hpp"

#include <algorithm>
#include <cassert>
#include <cstring>
using std::lock_guard;
using std::recursive_mutex;

namespace pv {
namespace data {
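
// Sample data is stored as a series of fixed-size chunks rather than one
// contiguous block, so that appending samples never requires reallocating
// and copying everything captured so far.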
const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024;  /* 10 MiB */

Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = std::min(MaxChunkSize,
		(MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}
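
// Shrink the last chunk so it only occupies the space its samples need.
// If iterators are currently active, the operation is deferred and runs
// when the last iterator is released (see end_raw_sample_iteration()).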
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}
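
// Append a single sample of unit_size_ bytes, starting a new chunk when
// the current one is full.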
void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_),
		data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}
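
// Append a block of samples, splitting it across the chunk boundary if
// needed. Note that as written, this handles at most one chunk rollover
// per call, so a single append must not exceed one chunk's worth of
// samples.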
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	if (unused_samples_ >= samples) {
		// All samples fit into the current chunk
		memcpy(current_chunk_ + (used_samples_ * unit_size_),
			data, (samples * unit_size_));
		used_samples_ += samples;
		unused_samples_ -= samples;
	} else {
		// Only a part of the samples fit, split data up between chunks
		memcpy(current_chunk_ + (used_samples_ * unit_size_),
			data, (unused_samples_ * unit_size_));
		const uint64_t remaining_samples = samples - unused_samples_;

		// If we're out of memory, this will throw std::bad_alloc
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		memcpy(current_chunk_, (uint8_t*)data + (unused_samples_ * unit_size_),
			(remaining_samples * unit_size_));

		used_samples_ = remaining_samples;
		unused_samples_ = (chunk_size_ / unit_size_) - remaining_samples;
	}

	if (unused_samples_ == 0) {
		// If we're out of memory, this will throw std::bad_alloc
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_ += samples;
}
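
// Copy 'count' samples, beginning at sample index 'start', into a newly
// allocated buffer. The caller takes ownership and must release the
// buffer with delete[].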
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);

	lock_guard<recursive_mutex> lock(mutex_);

	uint8_t* dest = new uint8_t[count * unit_size_];
	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = std::min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}

	return dest;
}
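
// The raw iteration API below gives zero-copy, chunk-wise read access to
// the sample data. Typical usage (sketch):
//
//   SegmentRawDataIterator* it = segment.begin_raw_sample_iteration(0);
//   // ... read from it->value ...
//   segment.continue_raw_sample_iteration(it, samples_to_advance);
//   segment.end_raw_sample_iteration(it);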
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}
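
// Advance the iterator by 'increase' samples. As with append_samples(),
// this handles at most one chunk rollover per call.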
void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index > sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	if (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}
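
// Release an iterator and perform any memory optimization that was
// deferred while iterators were active.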
void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

} // namespace data
} // namespace pv