Segment: Do not alter chunks when there are active iterators
[pulseview.git] / pv / data / segment.cpp
/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <mutex>
#include <vector>

using std::lock_guard;
using std::recursive_mutex;
using std::vector;
namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024;  /* 10 MiB */

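// Construct an empty segment. chunk_size_ is rounded down to a whole
// multiple of unit_size_ so that no sample ever straddles a chunk
// boundary. For example, with unit_size_ = 3 the 10 MiB maximum yields
// chunk_size_ = (10485760 / 3) * 3 = 10485759 bytes.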
Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = std::min(MaxChunkSize,
		(MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

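// Shrink the last chunk down to the size actually used. This is meant
// to be called once acquisition has finished; if iterators are still
// active, the optimization is deferred until end_raw_sample_iteration()
// releases the last one.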
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}

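// Append one sample of unit_size_ bytes. The invariant relied on below
// holds because both append paths allocate a fresh chunk the moment the
// current one fills up, so current_chunk_ always has room for at least
// one more sample on entry.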
void Segment::append_single_sample(void* data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_),
		data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

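// Append a block of samples, splitting the data across as many chunks
// as needed. Each pass of the loop copies at most the free space left
// in the current chunk, so a write larger than one chunk can never
// overrun a chunk buffer.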
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = static_cast<const uint8_t*>(data);
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		// Copy as many samples as fit into the current chunk
		const uint64_t copy_count =
			std::min(remaining_samples, unused_samples_);

		memcpy(current_chunk_ + (used_samples_ * unit_size_),
			data_byte_ptr + data_offset, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += copy_count * unit_size_;

		if (unused_samples_ == 0) {
			// If we're out of memory, this will throw std::bad_alloc
			current_chunk_ = new uint8_t[chunk_size_];
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

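// Copy 'count' samples starting at sample index 'start' into a newly
// allocated buffer. Ownership of the buffer passes to the caller, who
// must release it with delete[].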
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
	lock_guard<recursive_mutex> lock(mutex_);

	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);

	uint8_t* dest = new uint8_t[count * unit_size_];
	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		const uint64_t copy_size = std::min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}

	return dest;
}

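/*
 * Raw-data iteration API. Clients walk the sample data in place, one
 * sample at a time, without copying. A hypothetical caller (not part
 * of this file) might use it like this:
 *
 *     SegmentRawDataIterator* it = segment->begin_raw_sample_iteration(0);
 *     while (processed < total) {
 *         process(it->value);  // one sample of unit_size() bytes
 *         segment->continue_raw_sample_iteration(it, 1);
 *         processed++;
 *     }
 *     segment->end_raw_sample_iteration(it);
 *
 * While any iterator is active, free_unused_memory() only sets
 * mem_optimization_requested_; the chunks are left untouched until
 * end_raw_sample_iteration() releases the last iterator.
 */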
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	lock_guard<recursive_mutex> lock(mutex_);

	assert(start < sample_count_);

	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index > sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	// Advance across chunk boundaries; a loop is used so that
	// increases spanning more than one chunk are handled as well
	while (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

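// Release an iterator obtained from begin_raw_sample_iteration(). When
// the last active iterator goes away, any deferred memory optimization
// is carried out, now that nothing points into the chunks anymore.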
void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	lock_guard<recursive_mutex> lock(mutex_);

	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

} // namespace data
} // namespace pv