129 #ifndef VIGRA_MULTI_ARRAY_CHUNKED_HXX
130 #define VIGRA_MULTI_ARRAY_CHUNKED_HXX
135 #include "multi_fwd.hxx"
136 #include "multi_handle.hxx"
137 #include "multi_array.hxx"
138 #include "memory.hxx"
139 #include "metaprogramming.hxx"
140 #include "threading.hxx"
141 #include "compression.hxx"
151 # include "windows.h"
156 # include <sys/stat.h>
157 # include <sys/mman.h>
161 #ifdef VIGRA_CHECK_BOUNDS
162 #define VIGRA_ASSERT_INSIDE(diff) \
163 vigra_precondition(this->isInside(diff), "Index out of bounds")
165 #define VIGRA_ASSERT_INSIDE(diff)
172 #define VIGRA_NO_SPARSE_FILE
// Converts the calling thread's last Windows error code (GetLastError)
// into a std::runtime_error whose text is `message` followed by the
// system-formatted error description obtained via FormatMessage.
// NOTE(review): this extract is truncated — the FormatMessage() call head,
// the lpMsgBuf declaration, braces, and (presumably) a LocalFree(lpMsgBuf)
// cleanup were dropped during extraction; confirm against the upstream file.
178 void winErrorToException(std::string message =
"")
181 DWORD dw = GetLastError();
184 FORMAT_MESSAGE_ALLOCATE_BUFFER |
185 FORMAT_MESSAGE_FROM_SYSTEM |
186 FORMAT_MESSAGE_IGNORE_INSERTS,
189 MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
193 message += (
char*)lpMsgBuf;
196 throw std::runtime_error(message);
// Creates a fresh temporary file via the Win32 API GetTempFileName() (with
// prefix "vigra") and returns its name. When `path` is empty, the system
// temp directory from GetTempPath() is used. Failures in either Win32 call
// are converted to exceptions through winErrorToException().
// NOTE(review): truncated extract — braces and the branch assigning
// default_path to `path` were dropped by extraction; confirm upstream.
200 std::string winTempFileName(std::string path =
"")
204 TCHAR default_path[MAX_PATH];
205 if(!GetTempPath(MAX_PATH, default_path))
206 winErrorToException(
"winTempFileName(): ");
210 TCHAR name[MAX_PATH];
211 if(!GetTempFileName(path.c_str(), TEXT(
"vigra"), 0, name))
212 winErrorToException(
"winTempFileName(): ");
214 return std::string(name);
// Returns the Windows allocation granularity (dwAllocationGranularity),
// i.e. the alignment required for file-mapping offsets.
// NOTE(review): the declaration of the SYSTEM_INFO variable `info` was
// dropped by extraction; confirm against the upstream file.
218 std::size_t winClusterSize()
221 ::GetSystemInfo(&info);
222 return info.dwAllocationGranularity;
// Platform-dependent alignment required for memory mapping. The two
// definitions below originate from opposite branches of a platform #ifdef
// (the preprocessor lines were dropped in extraction): Win32 allocation
// granularity vs. the POSIX page size from sysconf(_SC_PAGE_SIZE).
230 std::size_t mmap_alignment = winClusterSize();
232 std::size_t mmap_alignment = sysconf(_SC_PAGE_SIZE);
237 template <
unsigned int N,
class T>
238 class IteratorChunkHandle;
242 template <
unsigned int N>
245 template <
class T,
int M>
246 static void chunkIndex(TinyVector<T, M>
const & p,
247 TinyVector<T, M>
const & bits,
248 TinyVector<T, M> & index)
250 typedef std::size_t UI;
251 ChunkIndexing<N-1>::chunkIndex(p, bits, index);
252 index[N-1] = (UI)p[N-1] >> bits[N-1];
255 template <
class T,
int M>
256 static std::size_t chunkOffset(TinyVector<T, M>
const & p,
257 TinyVector<T, M>
const & bits,
258 TinyVector<T, M>
const & strides)
260 typedef std::size_t UI;
261 return ChunkIndexing<N-1>::chunkOffset(p, bits, strides) +
262 ((UI)p[N-1] >> bits[N-1]) * strides[N-1];
265 template <
class T,
int M>
266 static std::size_t offsetInChunk(TinyVector<T, M>
const & p,
267 TinyVector<T, M>
const & mask,
268 TinyVector<T, M>
const & strides)
270 typedef std::size_t UI;
271 return ChunkIndexing<N-1>::offsetInChunk(p, mask, strides) +
272 ((UI)p[N-1] & (UI)mask[N-1]) * strides[N-1];
277 struct ChunkIndexing<1>
279 template <
class T,
int M>
280 static void chunkIndex(TinyVector<T, M>
const & p,
281 TinyVector<T, M>
const & bits,
282 TinyVector<T, M> & index)
284 typedef std::size_t UI;
285 index[0] = (UI)p[0] >> bits[0];
288 template <
class T,
int M>
289 static std::size_t chunkOffset(TinyVector<T, M>
const & p,
290 TinyVector<T, M>
const & bits,
291 TinyVector<T, M>
const & strides)
293 typedef std::size_t UI;
294 return ((UI)p[0] >> bits[0]) * strides[0];
297 template <
class T,
int M>
298 static std::size_t offsetInChunk(TinyVector<T, M>
const & p,
299 TinyVector<T, M>
const & mask,
300 TinyVector<T, M>
const & strides)
302 typedef std::size_t UI;
303 return ((UI)p[0] & (UI)mask[0]) * strides[0];
// Rounds an array shape up to whole chunks: for every dimension k the
// result is ceil(shape[k] / chunk_shape[k]), computed as
// (shape[k] + mask[k]) >> bits[k] with mask = chunk_shape - 1 and
// bits = log2(chunk_shape). `shape` is taken by value and mutated in place.
// NOTE(review): this extract is truncated — the closing lines (presumably
// `return shape;` and the closing brace) were dropped by extraction;
// confirm against the upstream file before relying on this fragment.
307 template <
class T,
int M>
308 inline TinyVector<T, M>
309 computeChunkArrayShape(TinyVector<T, M> shape,
310 TinyVector<T, M>
const & bits,
311 TinyVector<T, M>
const & mask)
313 for(
int k=0; k<M; ++k)
314 shape[k] = (shape[k] + mask[k]) >> bits[k];
// Heuristic default for the chunk-cache capacity: the maximum over all
// axis pairs (k, j) of shape[k] * shape[j], i.e. enough cached chunks to
// cover one full slice of the chunk array along any coordinate plane.
// NOTE(review): truncated extract — the return-type line, the
// initialization of `res` (presumably `std::size_t res = 0;`) and the
// final `return res;` were dropped by extraction; confirm upstream.
318 template <
class T,
int M>
320 defaultCacheSize(TinyVector<T, M>
const & shape)
323 for(
int k=0; k<M-1; ++k)
324 for(
int j=k+1; j<M; ++j)
325 res = std::max(res, shape[k]*shape[j]);
// Fragment of ChunkBase<N, T>: the minimal per-chunk record storing the
// chunk's element strides (and, in the full class, a data pointer).
// NOTE(review): the class head, the `pointer` typedef, the constructor
// bodies and the data-pointer member were dropped by extraction; this
// fragment shows only the typedefs, a constructor signature taking the
// strides plus an optional data pointer, and the strides_ member.
331 template <
unsigned int N,
class T>
335 typedef typename MultiArrayShape<N>::type shape_type;
336 typedef T value_type;
344 ChunkBase(shape_type
const & strides, pointer p = 0)
349 typename MultiArrayShape<N>::type strides_;
// SharedChunkHandle<N, T>: reference-counting handle to one chunk.
// chunk_state_ holds the reference count when non-negative, or one of the
// negative sentinel states below; pointer_ refers to the chunk's
// ChunkBase record.
// NOTE(review): constructor heads, braces and the destructor were dropped
// by extraction — confirm details against the upstream file.
353 template <
unsigned int N,
class T>
354 class SharedChunkHandle
357 typedef typename MultiArrayShape<N>::type shape_type;
// Sentinel values stored in chunk_state_ (a value >= 0 is a refcount).
359 static const long chunk_asleep = -2;
360 static const long chunk_uninitialized = -3;
361 static const long chunk_locked = -4;
362 static const long chunk_failed = -5;
368 chunk_state_ = chunk_uninitialized;
// Copy construction copies the chunk pointer but resets the state to
// chunk_uninitialized — the reference count is deliberately not shared.
371 SharedChunkHandle(SharedChunkHandle
const & rhs)
372 : pointer_(rhs.pointer_)
375 chunk_state_ = chunk_uninitialized;
// Accessor forwarding to the underlying chunk's strides.
378 shape_type
const & strides()
const
380 return pointer_->strides_;
383 ChunkBase<N, T> * pointer_;
384 mutable threading::atomic_long chunk_state_;
// Assignment is declared private/undefined: handles are non-assignable.
387 SharedChunkHandle & operator=(SharedChunkHandle
const & rhs);
390 template <
unsigned int N,
class T>
391 class ChunkedArrayBase
394 enum ActualDimension{ actual_dimension = (N == 0) ? 1 : N };
395 typedef typename MultiArrayShape<N>::type shape_type;
396 typedef T value_type;
397 typedef value_type * pointer;
398 typedef value_type & reference;
399 typedef ChunkBase<N, T> Chunk;
406 ChunkedArrayBase(shape_type
const & shape, shape_type
const & chunk_shape)
408 , chunk_shape_(
prod(chunk_shape) > 0 ? chunk_shape : detail::ChunkShape<N, T>::defaultShape())
411 virtual ~ChunkedArrayBase()
414 virtual void unrefChunk(IteratorChunkHandle<N, T> * h)
const = 0;
416 virtual pointer chunkForIterator(shape_type
const & point,
417 shape_type & strides, shape_type & upper_bound,
418 IteratorChunkHandle<N, T> * h) = 0;
420 virtual pointer chunkForIterator(shape_type
const & point,
421 shape_type & strides, shape_type & upper_bound,
422 IteratorChunkHandle<N, T> * h)
const = 0;
424 virtual std::string backend()
const = 0;
426 virtual shape_type chunkArrayShape()
const = 0;
428 virtual bool isReadOnly()
const
438 shape_type
const & shape()
const
448 shape_type
const & chunkShape()
const
455 return chunk_shape_[d];
458 bool isInside(shape_type
const & p)
const
460 for(
int d=0; d<N; ++d)
461 if(p[d] < 0 || p[d] >= shape_[d])
466 shape_type shape_, chunk_shape_;
469 template <
unsigned int N,
class T>
// Type-erased base class for the proxy objects that release chunk
// references when a view into a ChunkedArray is destroyed; destruction
// happens polymorphically through this base.
struct ChunkUnrefProxyBase
{
    virtual ~ChunkUnrefProxyBase() {}
};
477 template <
unsigned int N,
class T_MaybeConst>
478 class MultiArrayView<N, T_MaybeConst, ChunkedArrayTag>
479 :
public ChunkedArrayBase<N, typename UnqualifiedType<T_MaybeConst>::type>
482 enum ActualDimension { actual_dimension = (N==0) ? 1 : N };
483 typedef typename UnqualifiedType<T_MaybeConst>::type T;
484 typedef T value_type;
485 typedef T_MaybeConst & reference;
486 typedef const value_type &const_reference;
487 typedef T_MaybeConst * pointer;
488 typedef const value_type *const_pointer;
490 typedef difference_type key_type;
491 typedef difference_type size_type;
492 typedef difference_type shape_type;
494 typedef ChunkIterator<actual_dimension, T_MaybeConst> chunk_iterator;
495 typedef ChunkIterator<actual_dimension, T const> chunk_const_iterator;
496 typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T_MaybeConst>, T_MaybeConst&, T_MaybeConst*> iterator;
497 typedef StridedScanOrderIterator<actual_dimension, ChunkedMemory<T const>, T
const &, T
const *> const_iterator;
498 typedef MultiArrayView<N, T_MaybeConst, ChunkedArrayTag> view_type;
499 typedef MultiArrayView<N, T const, ChunkedArrayTag> const_view_type;
500 typedef ChunkedArrayTag StrideTag;
501 typedef ChunkBase<N, T> Chunk;
503 typedef MultiArray<N, Chunk> ChunkHolder;
506 :
public ChunkUnrefProxyBase
508 UnrefProxy(
int size, ChunkedArray<N, T> * array)
516 array_->unrefChunks(chunks_);
519 ArrayVector<SharedChunkHandle<N, T> *> chunks_;
520 ChunkedArray<N, T> * array_;
523 virtual shape_type chunkArrayShape()
const
525 return chunks_.shape();
528 shape_type chunkStart(shape_type
const & global_start)
const
530 shape_type chunk_start(SkipInitialization);
531 detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
535 shape_type chunkStop(shape_type global_stop)
const
537 global_stop -= shape_type(1);
538 shape_type chunk_stop(SkipInitialization);
539 detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
540 chunk_stop += shape_type(1);
544 virtual void unrefChunk(IteratorChunkHandle<N, T> *)
const {}
546 virtual T* chunkForIterator(shape_type
const & point,
547 shape_type & strides, shape_type & upper_bound,
548 IteratorChunkHandle<N, T> * h)
550 return const_cast<MultiArrayView
const *
>(
this)->chunkForIterator(point, strides, upper_bound, h);
553 virtual T* chunkForIterator(shape_type
const & point,
554 shape_type & strides, shape_type & upper_bound,
555 IteratorChunkHandle<N, T> * h)
const
557 shape_type global_point = point + h->offset_;
559 if(!this->isInside(global_point))
561 upper_bound = point + this->chunk_shape_;
565 global_point += offset_;
566 shape_type coffset = offset_ + h->offset_;
568 shape_type chunkIndex = chunkStart(global_point);
569 Chunk
const * chunk = &chunks_[chunkIndex];
570 strides = chunk->strides_;
571 upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - coffset;
572 std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
573 return const_cast<T*
>(chunk->pointer_ + offset);
576 virtual std::string backend()
const
578 return "MultiArrayView<ChunkedArrayTag>";
582 : ChunkedArrayBase<N, T>()
585 MultiArrayView(shape_type
const & shape, shape_type
const & chunk_shape)
586 : ChunkedArrayBase<N, T>(shape, chunk_shape)
589 MultiArrayView & operator=(MultiArrayView
const & rhs)
595 ChunkedArrayBase<N, T>::operator=(rhs);
596 chunks_ = rhs.chunks_;
597 offset_ = rhs.offset_;
604 vigra_precondition(this->shape() == rhs.shape(),
605 "MultiArrayView::operator=(): shape mismatch.");
606 iterator i = begin(), ie = end();
607 const_iterator j = rhs.begin();
608 for(; i != ie; ++i, ++j)
615 #define VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(op) \
616 template<class U, class C1> \
617 MultiArrayView & operator op(MultiArrayView<N, U, C1> const & rhs) \
619 vigra_precondition(this->shape() == rhs.shape(), \
620 "MultiArrayView::operator" #op "(): shape mismatch."); \
621 iterator i = begin(), ie = end(); \
622 typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin(); \
623 for(; i != ie; ++i, ++j) \
624 *i op detail::RequiresExplicitCast<value_type>::cast(*j); \
628 MultiArrayView & operator op(value_type const & v) \
632 iterator i = begin(), ie = end(); \
633 for(; i != ie; ++i) \
639 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(=)
640 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(+=)
641 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(-=)
642 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(*=)
643 VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN(/=)
645 #undef VIGRA_CHUNKED_ARRAY_VIEW_ASSIGN
694 reference operator[](shape_type point)
696 VIGRA_ASSERT_INSIDE(point);
698 Chunk * chunk = chunks_.data() +
699 detail::ChunkIndexing<N>::chunkOffset(point, bits_, chunks_.stride());
700 return *(chunk->pointer_ +
701 detail::ChunkIndexing<N>::offsetInChunk(point, mask_, chunk->strides_));
704 const_reference operator[](shape_type
const & point)
const
706 return const_cast<MultiArrayView *
>(
this)->
operator[](point);
710 MultiArrayView <N-M, T, ChunkedArrayTag>
711 operator[](
const TinyVector<MultiArrayIndex, M> &d)
const
716 reference operator[](difference_type_1 d)
718 return operator[](scanOrderIndexToCoordinate(d));
721 const_reference operator[](difference_type_1 d)
const
723 return operator[](scanOrderIndexToCoordinate(d));
726 difference_type scanOrderIndexToCoordinate(difference_type_1 d)
const
728 difference_type coord(SkipInitialization);
729 detail::ScanOrderToCoordinate<actual_dimension>::exec(d, this->shape_, coord);
735 difference_type_1 coordinateToScanOrderIndex(
const difference_type &d)
const
737 return detail::CoordinateToScanOrder<actual_dimension>::exec(this->shape_, d);
825 MultiArrayView & init(
const U & init)
827 return operator=(init);
830 template <
class U,
class CN>
831 void copy(
const MultiArrayView <N, U, CN>& rhs)
836 template <
class T2,
class C2>
837 void swapData(MultiArrayView <N, T2, C2> rhs)
841 vigra_precondition(this->shape() == rhs.shape(),
842 "MultiArrayView::swapData(): shape mismatch.");
843 iterator i = begin(), ie = end();
844 typename MultiArrayView<N, T2, C2>::iterator j = rhs.begin();
845 for(; i != ie; ++i, ++j)
849 bool isUnstrided(
unsigned int dimension = N-1)
const
851 if(chunks_.size() > 1)
853 difference_type s = vigra::detail::defaultStride<actual_dimension>(this->shape());
854 for(
unsigned int k = 0; k <= dimension; ++k)
855 if(chunks_.data()->strides_[k] != s[k])
860 MultiArrayView<N-1, value_type, ChunkedArrayTag>
863 MultiArrayView<N-1, value_type, ChunkedArrayTag> res(this->shape_.dropIndex(m), this->chunk_shape_.dropIndex(m));
864 res.offset_ = offset_.dropIndex(m);
865 res.bits_ = bits_.dropIndex(m);
866 res.mask_ = mask_.dropIndex(m);
867 res.chunks_.reshape(chunks_.shape().dropIndex(m));
870 typedef std::size_t UI;
871 UI start = offset_[m] + d;
872 UI chunk_start = start >> bits_[m];
873 UI startInChunk = start - chunk_start * this->chunk_shape_[m];
875 MultiArrayView<N-1, Chunk> view(chunks_.bindAt(m, chunk_start));
876 MultiCoordinateIterator<N-1> i(view.shape()),
877 end(i.getEndIterator());
880 res.chunks_[*i].pointer_ = view[*i].pointer_ + startInChunk*view[*i].strides_[m];
881 res.chunks_[*i].strides_ = view[*i].strides_.dropIndex(m);
887 template <
unsigned int M>
888 MultiArrayView <N-1, value_type, ChunkedArrayTag>
889 bind (difference_type_1 d)
const
894 MultiArrayView <N-1, value_type, ChunkedArrayTag>
895 bindOuter (difference_type_1 d)
const
897 return bindAt(N-1, d);
900 template <
int M,
class Index>
901 MultiArrayView <N-M, value_type, ChunkedArrayTag>
902 bindOuter(
const TinyVector <Index, M> &d)
const
904 return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
907 template <
class Index>
908 MultiArrayView <N-1, value_type, ChunkedArrayTag>
909 bindOuter(
const TinyVector <Index, 1> &d)
const
911 return bindAt(N-1, d[0]);
914 MultiArrayView <N-1, value_type, ChunkedArrayTag>
915 bindInner (difference_type_1 d)
const
920 template <
int M,
class Index>
921 MultiArrayView <N-M, value_type, ChunkedArrayTag>
922 bindInner(
const TinyVector <Index, M> &d)
const
924 return bindAt(0, d[0]).bindInner(d.dropIndex(0));
927 template <
class Index>
928 MultiArrayView <N-1, value_type, ChunkedArrayTag>
929 bindInner(
const TinyVector <Index, 1> &d)
const
931 return bindAt(0, d[0]);
960 checkSubarrayBounds(shape_type
const & start, shape_type
const & stop,
961 std::string message)
const
963 message +=
": subarray out of bounds.";
970 MultiArrayView<N, value_type, ChunkedArrayTag>
971 subarray(shape_type start, shape_type stop)
973 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::subarray()");
976 shape_type chunk_start(chunkStart(start));
978 MultiArrayView<N, value_type, ChunkedArrayTag> view(stop-start, this->chunk_shape_);
979 view.chunks_ = chunks_.subarray(chunk_start, chunkStop(stop));
980 view.offset_ = start - chunk_start * this->chunk_shape_;
983 view.unref_ = unref_;
1002 MultiArrayView <N, value_type, ChunkedArrayTag>
1008 MultiArrayView <N, value_type, ChunkedArrayTag>
1009 transpose(
const difference_type &permutation)
const
1011 MultiArrayView<N, value_type, ChunkedArrayTag>
1012 view(vigra::transpose(this->shape_, permutation), vigra::transpose(this->chunk_shape_, permutation));
1013 view.chunks_ = chunks_.transpose(permutation);
1017 view.unref_ = unref_;
1019 iend = view.chunks_.end();
1020 for(; i != iend; ++i)
1021 i->strides_ = vigra::transpose(i->strides_, permutation);
1055 template <
class U,
class C1>
1056 bool operator==(MultiArrayView<N, U, C1>
const & rhs)
const
1058 if(this->shape() != rhs.shape())
1060 const_iterator i = begin(), ie = end();
1061 typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin();
1062 for(; i != ie; ++i, ++j)
1068 template <
class U,
class C1>
1069 bool operator!=(MultiArrayView<N, U, C1>
const & rhs)
const
1164 bool hasData ()
const
1166 return chunks_.hasData();
1171 return createCoupledIterator(*
this);
1176 return begin().getEndIterator();
1179 const_iterator cbegin()
const
1181 return createCoupledIterator(const_cast<MultiArrayView const &>(*
this));
1184 const_iterator cend()
const
1186 return cbegin().getEndIterator();
1189 const_iterator begin()
const
1191 return createCoupledIterator(*
this);
1194 const_iterator end()
const
1196 return begin().getEndIterator();
1199 chunk_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
1201 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
1202 return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1205 chunk_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
1207 return chunk_begin(start, stop).getEndIterator();
1210 chunk_const_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
const
1212 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_begin()");
1213 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1216 chunk_const_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
const
1218 return chunk_begin(start, stop).getEndIterator();
1221 chunk_const_iterator chunk_cbegin(shape_type
const & start, shape_type
const & stop)
const
1223 checkSubarrayBounds(start, stop,
"MultiArrayView<N-1, T, ChunkedArrayTag>::chunk_cbegin()");
1224 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
1227 chunk_const_iterator chunk_cend(shape_type
const & start, shape_type
const & stop)
const
1229 return chunk_cbegin(start, stop).getEndIterator();
1237 MultiArray<N, Chunk> chunks_;
1238 shape_type offset_, bits_, mask_;
1239 VIGRA_SHARED_PTR<ChunkUnrefProxyBase> unref_;
1242 template <
unsigned int N,
class T>
1243 typename MultiArrayView<N, T, ChunkedArrayTag>::iterator
1244 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag> & m)
1246 typedef typename MultiArrayView<N, T, ChunkedArrayTag>::iterator IteratorType;
1247 typedef typename IteratorType::handle_type P1;
1248 typedef typename P1::base_type P0;
1250 return IteratorType(P1(m,
1254 template <
unsigned int N,
class T>
1255 typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator
1256 createCoupledIterator(MultiArrayView<N, T, ChunkedArrayTag>
const & m)
1258 typedef typename MultiArrayView<N, T, ChunkedArrayTag>::const_iterator IteratorType;
1259 typedef typename IteratorType::handle_type P1;
1260 typedef typename P1::base_type P0;
1262 return IteratorType(P1(m,
1266 class ChunkedArrayOptions
1269 ChunkedArrayOptions()
1272 , compression_method(DEFAULT_COMPRESSION)
1275 ChunkedArrayOptions & fillValue(
double v)
1281 ChunkedArrayOptions fillValue(
double v)
const
1283 return ChunkedArrayOptions(*this).fillValue(v);
1286 ChunkedArrayOptions & cacheMax(
int v)
1292 ChunkedArrayOptions cacheMax(
int v)
const
1294 return ChunkedArrayOptions(*this).cacheMax(v);
1297 ChunkedArrayOptions & compression(CompressionMethod v)
1299 compression_method = v;
1303 ChunkedArrayOptions compression(CompressionMethod v)
const
1305 return ChunkedArrayOptions(*this).compression(v);
1310 CompressionMethod compression_method;
1413 template <
unsigned int N,
class T>
1415 :
public ChunkedArrayBase<N, T>
1418 typedef ChunkedArrayBase<N, T> base_type;
1419 typedef typename MultiArrayShape<N>::type shape_type;
1420 typedef typename shape_type::value_type difference_type_1;
1421 typedef T value_type;
1422 typedef value_type * pointer;
1423 typedef value_type
const * const_pointer;
1424 typedef value_type & reference;
1425 typedef value_type
const & const_reference;
1426 typedef ChunkIterator<N, T> chunk_iterator;
1427 typedef ChunkIterator<N, T const> chunk_const_iterator;
1428 typedef StridedScanOrderIterator<N, ChunkedMemory<T>, reference, pointer> iterator;
1429 typedef StridedScanOrderIterator<N, ChunkedMemory<T const>, const_reference, const_pointer> const_iterator;
1430 typedef SharedChunkHandle<N, T> Handle;
1431 typedef ChunkBase<N, T> Chunk;
1432 typedef MultiArrayView<N, T, ChunkedArrayTag> view_type;
1433 typedef MultiArrayView<N, T const, ChunkedArrayTag> const_view_type;
1434 typedef std::queue<Handle*> CacheType;
1436 static const long chunk_asleep = Handle::chunk_asleep;
1437 static const long chunk_uninitialized = Handle::chunk_uninitialized;
1438 static const long chunk_locked = Handle::chunk_locked;
1439 static const long chunk_failed = Handle::chunk_failed;
1441 explicit ChunkedArray(shape_type
const & shape,
1442 shape_type
const & chunk_shape = shape_type(),
1443 ChunkedArrayOptions
const & options = ChunkedArrayOptions())
1444 : ChunkedArrayBase<N, T>(shape, chunk_shape)
1445 , bits_(initBitMask(this->chunk_shape_))
1446 , mask_(this->chunk_shape_ -shape_type(1))
1447 , cache_max_size_(options.cache_max)
1448 , chunk_lock_(new threading::mutex())
1449 , fill_value_(T(options.fill_value))
1450 , fill_scalar_(options.fill_value)
1451 , handle_array_(detail::computeChunkArrayShape(shape, bits_, mask_))
1453 , overhead_bytes_(handle_array_.size()*sizeof(Handle))
1455 fill_value_chunk_.pointer_ = &fill_value_;
1456 fill_value_handle_.pointer_ = &fill_value_chunk_;
1457 fill_value_handle_.chunk_state_.store(1);
1460 static shape_type initBitMask(shape_type
const & chunk_shape)
1463 for(
unsigned int k=0; k<N; ++k)
1467 "ChunkedArray: chunk_shape elements must be powers of 2.");
1473 virtual ~ChunkedArray()
1478 int cacheSize()
const
1480 return cache_.size();
1483 std::size_t dataBytes()
const
1488 virtual shape_type chunkArrayShape()
const
1490 return handle_array_.shape();
1493 virtual std::size_t dataBytes(Chunk * c)
const = 0;
1495 std::size_t dataBytesPerChunk()
const
1497 return prod(this->chunk_shape_)*
sizeof(T);
1500 std::size_t overheadBytes()
const
1502 return overhead_bytes_;
1505 virtual std::size_t overheadBytesPerChunk()
const = 0;
1507 shape_type chunkStart(shape_type
const & global_start)
const
1509 shape_type chunk_start(SkipInitialization);
1510 detail::ChunkIndexing<N>::chunkIndex(global_start, bits_, chunk_start);
1514 shape_type chunkStop(shape_type global_stop)
const
1516 global_stop -= shape_type(1);
1517 shape_type chunk_stop(SkipInitialization);
1518 detail::ChunkIndexing<N>::chunkIndex(global_stop, bits_, chunk_stop);
1519 chunk_stop += shape_type(1);
1523 shape_type chunkShape(shape_type
const & chunk_index)
const
1525 return min(this->chunk_shape_,
1526 this->shape_ - chunk_index*this->chunk_shape_);
1529 using base_type::chunkShape;
1532 checkSubarrayBounds(shape_type
const & start, shape_type
const & stop,
1533 std::string message)
const
1535 message +=
": subarray out of bounds.";
1536 vigra_precondition(
allLessEqual(shape_type(), start) &&
1542 template <
class U,
class C1>
1543 bool operator==(MultiArrayView<N, U, C1>
const & rhs)
const
1545 if(this->shape() != rhs.shape())
1547 const_iterator i = begin(), ie = end();
1548 typename MultiArrayView<N, U, C1>::const_iterator j = rhs.begin();
1549 for(; i != ie; ++i, ++j)
1555 template <
class U,
class C1>
1556 bool operator!=(MultiArrayView<N, U, C1>
const & rhs)
const
1561 virtual pointer loadChunk(Chunk ** chunk, shape_type
const & chunk_index) = 0;
1563 virtual bool unloadHandle(Handle * handle,
bool destroy =
false)
1565 if(handle == &fill_value_handle_)
1567 return unloadChunk(handle->pointer_, destroy);
1570 virtual bool unloadChunk(Chunk * chunk,
bool destroy =
false) = 0;
1572 Handle * lookupHandle(shape_type
const & index)
1574 return &handle_array_[index];
1577 virtual void unrefChunk(IteratorChunkHandle<N, T> * h)
const
1579 unrefChunk(h->chunk_);
1583 void unrefChunk(Handle * chunk)
const
1587 long rc = chunk->chunk_state_.fetch_sub(1);
1588 #ifdef VIGRA_CHECK_BOUNDS
1589 vigra_invariant(rc >= 0,
1590 "ChunkedArray::unrefChunk(): chunk refcount got negative!");
1595 void unrefChunks(ArrayVector<Handle*>
const & chunks)
1597 for(
unsigned int k=0; k<chunks.size(); ++k)
1598 unrefChunk(chunks[k]);
1600 if(cacheMaxSize() > 0)
1602 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1603 cleanCache(cache_.size());
1607 long acquireRef(Handle * handle)
const
1615 long rc = handle->chunk_state_.load(threading::memory_order_acquire);
1620 if(handle->chunk_state_.compare_exchange_weak(rc, rc+1, threading::memory_order_seq_cst))
1627 if(rc == chunk_failed)
1629 vigra_precondition(
false,
1630 "ChunkedArray::acquireRef() attempt to access failed chunk.");
1632 else if(rc == chunk_locked)
1635 threading::this_thread::yield();
1636 rc = handle->chunk_state_.load(threading::memory_order_acquire);
1638 else if(handle->chunk_state_.compare_exchange_weak(rc, chunk_locked, threading::memory_order_seq_cst))
1646 pointer getChunk(Handle * handle,
bool isConst,
bool insertInCache, shape_type
const & chunk_index)
const
1648 ChunkedArray *
self =
const_cast<ChunkedArray *
>(
this);
1650 long rc = acquireRef(handle);
1652 return handle->pointer_->pointer_;
1654 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1657 T * p =
self->loadChunk(&handle->pointer_, chunk_index);
1658 Chunk * chunk = handle->pointer_;
1659 if(!isConst && rc == chunk_uninitialized)
1660 std::fill(p, p +
prod(chunkShape(chunk_index)), this->fill_value_);
1662 self->data_bytes_ += dataBytes(chunk);
1664 if(cacheMaxSize() > 0 && insertInCache)
1667 self->cache_.push(handle);
1671 self->cleanCache(2);
1673 handle->chunk_state_.store(1, threading::memory_order_release);
1678 handle->chunk_state_.store(chunk_failed);
1684 chunkForIteratorImpl(shape_type
const & point,
1685 shape_type & strides, shape_type & upper_bound,
1686 IteratorChunkHandle<N, T> * h,
1689 ChunkedArray *
self =
const_cast<ChunkedArray *
>(
this);
1691 unrefChunk(h->chunk_);
1694 shape_type global_point = point + h->offset_;
1696 if(!this->isInside(global_point))
1698 upper_bound = point + this->chunk_shape_;
1702 shape_type chunkIndex(chunkStart(global_point));
1704 bool insertInCache =
true;
1705 Handle * handle =
self->lookupHandle(chunkIndex);
1706 if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
1708 handle = &
self->fill_value_handle_;
1709 insertInCache =
false;
1712 pointer p = getChunk(handle, isConst, insertInCache, chunkIndex);
1713 strides = handle->strides();
1714 upper_bound = (chunkIndex + shape_type(1)) * this->chunk_shape_ - h->offset_;
1715 std::size_t offset = detail::ChunkIndexing<N>::offsetInChunk(global_point, mask_, strides);
1720 virtual pointer chunkForIterator(shape_type
const & point,
1721 shape_type & strides, shape_type & upper_bound,
1722 IteratorChunkHandle<N, T> * h)
1724 return chunkForIteratorImpl(point, strides, upper_bound, h,
false);
1727 virtual pointer chunkForIterator(shape_type
const & point,
1728 shape_type & strides, shape_type & upper_bound,
1729 IteratorChunkHandle<N, T> * h)
const
1731 return chunkForIteratorImpl(point, strides, upper_bound, h,
true);
1736 long releaseChunk(Handle * handle,
bool destroy =
false)
1739 bool mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
1740 if(!mayUnload && destroy)
1743 mayUnload = handle->chunk_state_.compare_exchange_strong(rc, chunk_locked);
1750 vigra_invariant(handle != &fill_value_handle_,
1751 "ChunkedArray::releaseChunk(): attempt to release fill_value_handle_.");
1752 Chunk * chunk = handle->pointer_;
1753 this->data_bytes_ -= dataBytes(chunk);
1754 int didDestroy = unloadChunk(chunk, destroy);
1755 this->data_bytes_ += dataBytes(chunk);
1757 handle->chunk_state_.store(chunk_uninitialized);
1759 handle->chunk_state_.store(chunk_asleep);
1763 handle->chunk_state_.store(chunk_failed);
1771 void cleanCache(
int how_many = -1)
1774 how_many = cache_.size();
1775 for(; cache_.size() > cacheMaxSize() && how_many > 0; --how_many)
1777 Handle * handle = cache_.front();
1779 long rc = releaseChunk(handle);
1781 cache_.push(handle);
1792 void releaseChunks(shape_type
const & start, shape_type
const & stop,
bool destroy =
false)
1794 checkSubarrayBounds(start, stop,
"ChunkedArray::releaseChunks()");
1796 MultiCoordinateIterator<N> i(chunkStart(start), chunkStop(stop)),
1797 end(i.getEndIterator());
1798 for(; i != end; ++i)
1800 shape_type chunkOffset = *i * this->chunk_shape_;
1802 !
allLessEqual(min(chunkOffset+this->chunk_shape_, this->shape()), stop))
1808 Handle * handle = this->lookupHandle(*i);
1809 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1810 releaseChunk(handle, destroy);
1814 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
1815 int cache_size = cache_.size();
1816 for(
int k=0; k < cache_size; ++k)
1818 Handle * handle = cache_.front();
1820 if(handle->chunk_state_.load() >= 0)
1821 cache_.push(handle);
1825 template <
class U,
class Str
ide>
1827 checkoutSubarray(shape_type
const & start,
1828 MultiArrayView<N, U, Stride> & subarray)
const
1830 shape_type stop = start + subarray.shape();
1832 checkSubarrayBounds(start, stop,
"ChunkedArray::checkoutSubarray()");
1834 chunk_const_iterator i = chunk_cbegin(start, stop);
1835 for(; i.isValid(); ++i)
1837 subarray.subarray(i.chunkStart()-start, i.chunkStop()-start) = *i;
1841 template <
class U,
class Str
ide>
1843 commitSubarray(shape_type
const & start,
1844 MultiArrayView<N, U, Stride>
const & subarray)
1846 shape_type stop = start + subarray.shape();
1848 vigra_precondition(!this->isReadOnly(),
1849 "ChunkedArray::commitSubarray(): array is read-only.");
1850 checkSubarrayBounds(start, stop,
"ChunkedArray::commitSubarray()");
1852 chunk_iterator i = chunk_begin(start, stop);
1853 for(; i.isValid(); ++i)
1855 *i = subarray.subarray(i.chunkStart()-start, i.chunkStop()-start);
1859 template <
class View>
1860 void subarrayImpl(shape_type
const & start, shape_type
const & stop,
1864 vigra_precondition(isConst || !this->isReadOnly(),
1865 "ChunkedArray::subarray(): array is read-only.");
1866 checkSubarrayBounds(start, stop,
"ChunkedArray::subarray()");
1867 shape_type chunk_start(chunkStart(start)), chunk_stop(chunkStop(stop));
1869 view.shape_ = stop-start;
1870 view.chunk_shape_ = this->chunk_shape_;
1871 view.chunks_.reshape(chunk_stop-chunk_start);
1872 view.offset_ = start - chunk_start * this->chunk_shape_;
1876 typedef typename View::UnrefProxy Unref;
1877 ChunkedArray*
self =
const_cast<ChunkedArray*
>(
this);
1878 Unref * unref =
new Unref(view.chunks_.size(),
self);
1879 view.unref_ = VIGRA_SHARED_PTR<Unref>(unref);
1881 MultiCoordinateIterator<N> i(chunk_start, chunk_stop),
1882 end(i.getEndIterator());
1883 for(; i != end; ++i)
1885 Handle * handle =
self->lookupHandle(*i);
1887 if(isConst && handle->chunk_state_.load() == chunk_uninitialized)
1888 handle = &self->fill_value_handle_;
1892 pointer p = getChunk(handle, isConst,
true, *i);
1894 ChunkBase<N, T> * mini_chunk = &view.chunks_[*i - chunk_start];
1895 mini_chunk->pointer_ = p;
1896 mini_chunk->strides_ = handle->strides();
1897 unref->chunks_[i.scanOrderIndex()] = handle;
1902 subarray(shape_type
const & start, shape_type
const & stop)
1905 subarrayImpl(start, stop, view,
false);
1910 subarray(shape_type
const & start, shape_type
const & stop)
const
1912 const_view_type view;
1913 subarrayImpl(start, stop, view,
true);
1918 const_subarray(shape_type
const & start, shape_type
const & stop)
const
1920 const_view_type view;
1921 subarrayImpl(start, stop, view,
true);
1925 value_type getItem(shape_type
const & point)
const
1927 vigra_precondition(this->isInside(point),
1928 "ChunkedArray::getItem(): index out of bounds.");
1930 ChunkedArray *
self =
const_cast<ChunkedArray*
>(
this);
1931 shape_type chunk_index(chunkStart(point));
1932 Handle * handle =
self->lookupHandle(chunk_index);
1933 if(handle->chunk_state_.load() == chunk_uninitialized)
1935 pointer p =
self->getChunk(handle,
true,
false, chunk_index);
1936 value_type res = *(p +
1937 detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides()));
1938 self->unrefChunk(handle);
1942 void setItem(shape_type
const & point, value_type
const & v)
1944 vigra_precondition(!this->isReadOnly(),
1945 "ChunkedArray::setItem(): array is read-only.");
1946 vigra_precondition(this->isInside(point),
1947 "ChunkedArray::setItem(): index out of bounds.");
1949 shape_type chunk_index(chunkStart(point));
1950 Handle * handle = lookupHandle(chunk_index);
1951 pointer p = getChunk(handle,
false,
false, chunk_index);
1952 *(p + detail::ChunkIndexing<N>::offsetInChunk(point, mask_, handle->strides())) = v;
1956 MultiArrayView<N-1, T, ChunkedArrayTag>
1959 shape_type start, stop(this->shape());
1962 return subarray(start, stop).bindAt(m, 0);
1965 template <
unsigned int M>
1966 MultiArrayView <N-1, T, ChunkedArrayTag>
1967 bind (difference_type_1 d)
const
1969 return bindAt(M, d);
1972 MultiArrayView <N-1, T, ChunkedArrayTag>
1973 bindOuter (difference_type_1 d)
const
1975 return bindAt(N-1, d);
1978 template <
int M,
class Index>
1979 MultiArrayView <N-M, T, ChunkedArrayTag>
1980 bindOuter(
const TinyVector <Index, M> &d)
const
1982 return bindAt(N-1, d[M-1]).bindOuter(d.dropIndex(M-1));
1985 template <
class Index>
1986 MultiArrayView <N-1, T, ChunkedArrayTag>
1987 bindOuter(
const TinyVector <Index, 1> &d)
const
1989 return bindAt(N-1, d[0]);
1992 MultiArrayView <N-1, T, ChunkedArrayTag>
1993 bindInner (difference_type_1 d)
const
1995 return bindAt(0, d);
1998 template <
int M,
class Index>
1999 MultiArrayView <N-M, T, ChunkedArrayTag>
2000 bindInner(
const TinyVector <Index, M> &d)
const
2002 return bindAt(0, d[0]).bindInner(d.dropIndex(0));
2005 template <
class Index>
2006 MultiArrayView <N-1, T, ChunkedArrayTag>
2007 bindInner(
const TinyVector <Index, 1> &d)
const
2009 return bindAt(0, d[0]);
2012 std::size_t cacheMaxSize()
const
2014 if(cache_max_size_ < 0)
2015 const_cast<int &
>(cache_max_size_) = detail::defaultCacheSize(this->chunkArrayShape());
2016 return cache_max_size_;
// Set the cache-size limit. If the cache currently holds more chunks than
// the new limit, surplus entries are evicted under chunk_lock_.
// NOTE(review): cache_max_size_ is declared as int while c is std::size_t —
// a very large c would narrow; confirm intent upstream.
2019 void setCacheMaxSize(std::size_t c)
2021 cache_max_size_ = c;
2022 if(c < cache_.size())
2024 threading::lock_guard<threading::mutex> guard(*chunk_lock_);
// NOTE(review): the statement that actually shrinks the cache (original
// lines following 2024) is missing from this extraction — restore from the
// upstream file before compiling.
2031 return createCoupledIterator(*
this);
2036 return begin().getEndIterator();
2039 const_iterator cbegin()
const
2041 return createCoupledIterator(const_cast<ChunkedArray const &>(*
this));
2044 const_iterator cend()
const
2046 return cbegin().getEndIterator();
2049 const_iterator begin()
const
2051 return createCoupledIterator(*
this);
2054 const_iterator end()
const
2056 return begin().getEndIterator();
2059 chunk_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
2061 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_begin()");
2062 return chunk_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2065 chunk_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
2067 return chunk_begin(start, stop).getEndIterator();
2070 chunk_const_iterator chunk_begin(shape_type
const & start, shape_type
const & stop)
const
2072 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_begin()");
2073 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2076 chunk_const_iterator chunk_end(shape_type
const & start, shape_type
const & stop)
const
2078 return chunk_begin(start, stop).getEndIterator();
2081 chunk_const_iterator chunk_cbegin(shape_type
const & start, shape_type
const & stop)
const
2083 checkSubarrayBounds(start, stop,
"ChunkedArray::chunk_cbegin()");
2084 return chunk_const_iterator(
this, start, stop, chunkStart(start), chunkStop(stop), this->chunk_shape_);
2087 chunk_const_iterator chunk_cend(shape_type
const & start, shape_type
const & stop)
const
2089 return chunk_cbegin(start, stop).getEndIterator();
// Per-dimension chunk-shape log2 and bit masks used by
// detail::ChunkIndexing to map a global coordinate to (chunk, offset).
2092 shape_type bits_, mask_;
// Cache-size limit; negative means "compute default lazily" (cacheMaxSize()).
2093 int cache_max_size_;
// Guards cache manipulation (see setCacheMaxSize()).
2094 VIGRA_SHARED_PTR<threading::mutex> chunk_lock_;
// Shared chunk/handle standing in for chunks that were never written;
// getItem() returns fill_value_ for such chunks.
2096 Chunk fill_value_chunk_;
2097 Handle fill_value_handle_;
2098 value_type fill_value_;
// presumably the fill value as double for untyped access — TODO confirm.
2099 double fill_scalar_;
// One SharedChunkHandle per chunk, indexed by chunk coordinate.
2100 MultiArray<N, Handle> handle_array_;
// Bookkeeping: payload bytes vs. management overhead (maintained by backends).
2101 std::size_t data_bytes_, overhead_bytes_;
2106 template <
unsigned int N,
class T>
2107 typename ChunkedArray<N, T>::iterator
2108 createCoupledIterator(ChunkedArray<N, T> & m)
2111 typedef typename IteratorType::handle_type P1;
2112 typedef typename P1::base_type P0;
2114 return IteratorType(P1(m,
2118 template <
unsigned int N,
class T>
2119 typename ChunkedArray<N, T>::const_iterator
2120 createCoupledIterator(ChunkedArray<N, T>
const & m)
2122 typedef typename ChunkedArray<N, T>::const_iterator IteratorType;
2123 typedef typename IteratorType::handle_type P1;
2124 typedef typename P1::base_type P0;
2126 return IteratorType(P1(m,
// ChunkedArrayFull: degenerate chunked-array backend that keeps the entire
// array in one contiguous MultiArray, i.e. a single chunk covering everything.
// NOTE(review): this extraction dropped brace/blank lines (embedded original
// line numbers below are non-contiguous); tokens are preserved verbatim,
// only comments were added.
2130 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2131 class ChunkedArrayFull
2132 :
public ChunkedArray<N, T>,
2133 public MultiArray<N, T, Alloc>
// Typedefs re-exported from the MultiArray storage base.
2137 typedef MultiArray<N, T, Alloc> Storage;
2138 typedef typename Storage::value_type value_type;
2139 typedef typename Storage::pointer pointer;
2140 typedef typename Storage::const_pointer const_pointer;
2141 typedef typename Storage::reference reference;
2142 typedef typename Storage::const_reference const_reference;
2143 typedef typename Storage::difference_type difference_type;
2144 typedef typename Storage::difference_type shape_type;
2145 typedef typename Storage::key_type key_type;
2146 typedef typename Storage::size_type size_type;
2147 typedef typename Storage::difference_type_1 difference_type_1;
2148 typedef typename Storage::iterator iterator;
2149 typedef typename Storage::const_iterator const_iterator;
2150 typedef typename Storage::view_type view_type;
2152 typedef typename ChunkedArray<N, T>::Chunk Chunk;
// The single chunk's shape equals the whole array shape (loop body elided
// in this extraction).
2154 static shape_type computeChunkShape(shape_type s)
2156 for(
int k=0; k<N; ++k)
// Expose the dense MultiArray API directly — no chunk indirection needed.
2161 using Storage::subarray;
2162 using Storage::bindOuter;
2163 using Storage::bindInner;
2164 using Storage::bind;
2165 using Storage::bindAt;
2166 using Storage::isInside;
2167 using Storage::shape;
2168 using Storage::size;
2169 using Storage::begin;
2172 #ifndef DOXYGEN // doxygen doesn't understand this
2173 using Storage::operator==;
2174 using Storage::operator!=;
// Constructor: one chunk backed by the dense storage; handle 0 is pinned
// permanently (state 1) and points at chunk_.
2177 explicit ChunkedArrayFull(shape_type
const & shape,
2178 ChunkedArrayOptions
const & options = ChunkedArrayOptions(),
2179 Alloc
const & alloc = Alloc())
2180 : ChunkedArray<N, T>(shape, computeChunkShape(shape), options.cacheMax(0)),
2181 Storage(shape, this->fill_value_, alloc),
2182 upper_bound_(shape),
2183 chunk_(detail::defaultStride(shape), this->data())
2185 this->handle_array_[0].pointer_ = &chunk_;
2186 this->handle_array_[0].chunk_state_.store(1);
2187 this->data_bytes_ = size()*
sizeof(T);
2188 this->overhead_bytes_ = overheadBytesPerChunk();
// Copy constructor.
// NOTE(review): defaultStride(shape) here passes the inherited member
// function name 'shape' — presumably meant rhs.shape(); confirm upstream.
2191 ChunkedArrayFull(ChunkedArrayFull
const & rhs)
2192 : ChunkedArray<N, T>(rhs),
2194 upper_bound_(rhs.upper_bound_),
2195 chunk_(detail::defaultStride(shape), this->data())
2197 this->handle_array_[0].pointer_ = &chunk_;
2198 this->handle_array_[0].chunk_state_.store(1);
// Assignment delegates to both bases; chunk_ keeps pointing at this->data().
2201 ChunkedArrayFull & operator=(ChunkedArrayFull
const & rhs)
2205 ChunkedArray<N, T>::operator=(rhs);
2206 Storage::operator=(rhs);
2207 upper_bound_ = rhs.upper_bound_;
// Exactly one chunk in every dimension.
2215 virtual shape_type chunkArrayShape()
const
2217 return shape_type(1);
// Loading the (only) chunk just exposes the dense data pointer.
2220 virtual pointer loadChunk(ChunkBase<N, T> **, shape_type
const &)
2222 return this->data();
// Nothing to unload — data lives in the MultiArray base.
2225 virtual bool unloadChunk(ChunkBase<N, T> *,
bool )
// NOTE(review): returns the element count, not bytes — other backends
// multiply by sizeof(T); confirm against upstream whether '* sizeof(T)'
// was lost in extraction.
2230 virtual std::size_t dataBytes(Chunk * c)
const
2232 return prod(this->shape());
2235 virtual std::size_t overheadBytesPerChunk()
const
2237 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
// const iterator support: translate the iterator-local point to a global
// one; out-of-bounds points get a synthetic upper bound (elided branch).
2240 virtual pointer chunkForIterator(shape_type
const & point,
2241 shape_type & strides, shape_type & upper_bound,
2242 IteratorChunkHandle<N, T> * h)
const
2244 shape_type global_point = point + h->offset_;
2246 if(!this->isInside(global_point))
2248 upper_bound = point + this->chunk_shape_;
2252 strides = this->stride();
2253 upper_bound = upper_bound_;
2254 return const_cast<pointer
>(&Storage::operator[](global_point));
// Non-const variant of the above.
2257 virtual pointer chunkForIterator(shape_type
const & point,
2258 shape_type & strides, shape_type & upper_bound,
2259 IteratorChunkHandle<N, T> * h)
2261 shape_type global_point = point + h->offset_;
2263 if(!this->isInside(global_point))
2265 upper_bound = point + this->chunk_shape_;
2269 strides = this->stride();
2270 upper_bound = upper_bound_;
2271 return &Storage::operator[](global_point);
2274 virtual std::string backend()
const
2276 return "ChunkedArrayFull";
// Cached copy of shape() used as the iterator upper bound.
2279 shape_type upper_bound_;
// ChunkedArrayLazy: backend that heap-allocates each chunk on first access
// and never evicts (cacheMax forced to 0 — chunks stay resident until the
// array is destroyed or the chunk is explicitly destroyed).
// NOTE(review): extraction dropped brace/blank lines; tokens verbatim.
2283 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2284 class ChunkedArrayLazy
2285 :
public ChunkedArray<N, T>
// Inner chunk type: a plain allocator-backed buffer of prod(shape) elements.
2290 :
public ChunkBase<N, T>
2293 typedef typename MultiArrayShape<N>::type shape_type;
2294 typedef T value_type;
2295 typedef value_type * pointer;
2296 typedef value_type & reference;
2298 Chunk(shape_type
const & shape, Alloc
const & alloc = Alloc())
2299 : ChunkBase<N, T>(detail::defaultStride(shape))
2300 , size_(
prod(shape))
// allocate(): lazy, idempotent — value-initializes on first call only.
2311 if(this->pointer_ == 0)
2312 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
2313 return this->pointer_;
// deallocate() body (destroys and frees the buffer).
2318 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
// Chunks are non-assignable (declared, not defined).
2326 Chunk & operator=(Chunk
const &);
2329 typedef MultiArray<N, SharedChunkHandle<N, T> > ChunkStorage;
2330 typedef typename ChunkStorage::difference_type shape_type;
2331 typedef T value_type;
2332 typedef value_type * pointer;
2333 typedef value_type & reference;
2335 explicit ChunkedArrayLazy(shape_type
const & shape,
2336 shape_type
const & chunk_shape=shape_type(),
2337 ChunkedArrayOptions
const & options = ChunkedArrayOptions(),
2338 Alloc
const & alloc = Alloc())
2339 : ChunkedArray<N, T>(shape, chunk_shape, options.cacheMax(0))
// Destructor: delete every chunk ever materialized.
2345 typename ChunkStorage::iterator i = this->handle_array_.begin(),
2346 end = this->handle_array_.end();
2347 for(; i != end; ++i)
2350 delete static_cast<Chunk*
>(i->pointer_);
// loadChunk: create the chunk object on first use, then (re)allocate its
// buffer; accounts the Chunk object as overhead.
2355 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
2359 *p =
new Chunk(this->chunkShape(index));
2360 this->overhead_bytes_ +=
sizeof(Chunk);
2362 return static_cast<Chunk *
>(*p)->allocate();
// unloadChunk: only frees the buffer when 'destroy' is requested
// (surrounding branch elided in this extraction).
2365 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool destroy)
2368 static_cast<Chunk *
>(chunk)->deallocate();
2372 virtual std::string backend()
const
2374 return "ChunkedArrayLazy";
// Payload bytes: 0 for never-allocated chunks, else size_*sizeof(T).
2377 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
2379 return c->pointer_ == 0
2381 :
static_cast<Chunk*
>(c)->size_*
sizeof(T);
2384 virtual std::size_t overheadBytesPerChunk()
const
2386 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
// ChunkedArrayCompressed: backend that compresses chunks when they are
// evicted from the cache (unloadChunk) and decompresses them on reload.
// Default codec is LZ4 when none was requested.
// NOTE(review): extraction dropped brace/blank lines; tokens verbatim.
2392 template <
unsigned int N,
class T,
class Alloc = std::allocator<T> >
2393 class ChunkedArrayCompressed
2394 :
public ChunkedArray<N, T>
// Inner chunk: holds either an uncompressed buffer (pointer_) or a
// compressed byte vector (compressed_) — never both (see invariants below).
2399 :
public ChunkBase<N, T>
2402 typedef typename MultiArrayShape<N>::type shape_type;
2403 typedef T value_type;
2404 typedef value_type * pointer;
2405 typedef value_type & reference;
2407 Chunk(shape_type
const & shape)
2408 : ChunkBase<N, T>(detail::defaultStride(shape))
2410 , size_(
prod(shape))
// allocate(): lazy, idempotent value-initialization.
2420 if(this->pointer_ == 0)
2421 this->pointer_ = detail::alloc_initialize_n<T>(size_, T(), alloc_);
2422 return this->pointer_;
// deallocate(): drop both representations.
2427 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
2429 compressed_.clear();
// compress(): serialize the uncompressed buffer into compressed_, then
// free the raw buffer. Invariant: not already compressed.
2432 void compress(CompressionMethod method)
2434 if(this->pointer_ != 0)
2436 vigra_invariant(compressed_.size() == 0,
2437 "ChunkedArrayCompressed::Chunk::compress(): compressed and uncompressed pointer are both non-zero.");
2439 ::vigra::compress((
char const *)this->pointer_, size_*
sizeof(T), compressed_, method);
2442 detail::destroy_dealloc_n(this->pointer_, size_, alloc_);
// uncompress(): restore the raw buffer from compressed_ (or allocate a
// fresh zero-initialized buffer if the chunk was never written).
2449 if(this->pointer_ == 0)
2451 if(compressed_.size())
2453 this->pointer_ = alloc_.allocate((
typename Alloc::size_type)size_);
2455 ::vigra::uncompress(compressed_.data(), compressed_.size(),
2456 (
char*)this->pointer_, size_*
sizeof(T), method);
2457 compressed_.clear();
2461 this->pointer_ = allocate();
2466 vigra_invariant(compressed_.size() == 0,
2467 "ChunkedArrayCompressed::Chunk::uncompress(): compressed and uncompressed pointer are both non-zero.");
2469 return this->pointer_;
// Compressed representation of the chunk payload.
2472 ArrayVector<char> compressed_;
2477 Chunk & operator=(Chunk
const &);
2480 typedef MultiArray<N, SharedChunkHandle<N, T> > ChunkStorage;
2481 typedef typename ChunkStorage::difference_type shape_type;
2482 typedef T value_type;
2483 typedef value_type * pointer;
2484 typedef value_type & reference;
2486 explicit ChunkedArrayCompressed(shape_type
const & shape,
2487 shape_type
const & chunk_shape=shape_type(),
2488 ChunkedArrayOptions
const & options = ChunkedArrayOptions())
2489 : ChunkedArray<N, T>(shape, chunk_shape, options),
2490 compression_method_(options.compression_method)
2492 if(compression_method_ == DEFAULT_COMPRESSION)
2493 compression_method_ = LZ4;
2496 ~ChunkedArrayCompressed()
2498 typename ChunkStorage::iterator i = this->handle_array_.begin(),
2499 end = this->handle_array_.end();
2500 for(; i != end; ++i)
2503 delete static_cast<Chunk*
>(i->pointer_);
// loadChunk: create on first use, then decompress into a raw buffer.
2508 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
2512 *p =
new Chunk(this->chunkShape(index));
2513 this->overhead_bytes_ +=
sizeof(Chunk);
2515 return static_cast<Chunk *
>(*p)->uncompress(compression_method_);
// unloadChunk: destroy => free both representations; otherwise compress.
2518 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool destroy)
2521 static_cast<Chunk *
>(chunk)->deallocate();
2523 static_cast<Chunk *
>(chunk)->
compress(compression_method_);
// backend(): reports the configured codec (case labels elided here).
2527 virtual std::string backend()
const
2529 switch(compression_method_)
2532 return "ChunkedArrayCompressed<ZLIB>";
2534 return "ChunkedArrayCompressed<ZLIB_NONE>";
2536 return "ChunkedArrayCompressed<ZLIB_FAST>";
2538 return "ChunkedArrayCompressed<ZLIB_BEST>";
2540 return "ChunkedArrayCompressed<LZ4>";
// Payload bytes: compressed size when evicted, raw size when resident.
2546 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
2548 return c->pointer_ == 0
2549 ?
static_cast<Chunk*
>(c)->compressed_.size()
2550 :
static_cast<Chunk*
>(c)->size_*
sizeof(T);
2553 virtual std::size_t overheadBytesPerChunk()
const
2555 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
2558 CompressionMethod compression_method_;
// ChunkedArrayTmpFile: backend that keeps chunk data in an anonymous
// temporary file and maps/unmaps chunks on demand (Win32 file mapping or
// POSIX mmap). On platforms with sparse-file support each chunk has a
// precomputed file offset; otherwise the file grows as chunks are created.
// NOTE(review): extraction dropped brace/blank and #else/#endif lines;
// tokens verbatim, comments only added.
2561 template <
unsigned int N,
class T>
2562 class ChunkedArrayTmpFile
2563 :
public ChunkedArray<N, T>
// Platform-specific file handle type (Win32 HANDLE vs. POSIX fd).
2567 typedef HANDLE FileHandle;
2569 typedef int FileHandle;
// Inner chunk: a view into the mapped temp file at [offset_, offset_+alloc_size_).
2573 :
public ChunkBase<N, T>
2576 typedef typename MultiArrayShape<N>::type shape_type;
2577 typedef T value_type;
2578 typedef value_type * pointer;
2579 typedef value_type & reference;
2581 Chunk(shape_type
const & shape,
2582 std::size_t offset,
size_t alloc_size,
2584 : ChunkBase<N, T>(detail::defaultStride(shape))
2586 , alloc_size_(alloc_size)
// map(): lazily map this chunk's file region into memory.
2597 if(this->pointer_ == 0)
// Win32 path: MapViewOfFile takes the 64-bit offset as hi/lo DWORDs.
2600 static const std::size_t bits =
sizeof(DWORD)*8,
2601 mask = (std::size_t(1) << bits) - 1;
2602 this->pointer_ = (pointer)MapViewOfFile(file_, FILE_MAP_ALL_ACCESS,
2603 std::size_t(offset_) >> bits, offset_ & mask, alloc_size_);
2604 if(this->pointer_ == 0)
2605 winErrorToException(
"ChunkedArrayChunk::map(): ");
// POSIX path (fd/offset arguments elided in this extraction).
// NOTE(review): mmap failure is MAP_FAILED ((void*)-1), not 0 — the null
// check below may not catch errors; confirm upstream.
2607 this->pointer_ = (pointer)mmap(0, alloc_size_, PROT_READ | PROT_WRITE, MAP_SHARED,
2609 if(this->pointer_ == 0)
2610 throw std::runtime_error(
"ChunkedArrayChunk::map(): mmap() failed.");
2613 return this->pointer_;
// unmap(): release the mapping (platform-specific).
2618 if(this->pointer_ != 0)
2621 ::UnmapViewOfFile(this->pointer_);
2623 munmap(this->pointer_, alloc_size_);
2629 std::size_t offset_, alloc_size_;
2633 Chunk & operator=(Chunk
const &);
2636 typedef MultiArray<N, SharedChunkHandle<N, T> > ChunkStorage;
2637 typedef MultiArray<N, std::size_t> OffsetStorage;
2638 typedef typename ChunkStorage::difference_type shape_type;
2639 typedef T value_type;
2640 typedef value_type * pointer;
2641 typedef value_type & reference;
// Round a chunk's byte size up to the platform mapping granularity
// (mmap_alignment is a power of two, so mask arithmetic works).
2643 static std::size_t computeAllocSize(shape_type
const & shape)
2645 std::size_t size =
prod(shape)*
sizeof(T);
2646 std::size_t mask = mmap_alignment - 1;
2647 return (size + mask) & ~mask;
2650 explicit ChunkedArrayTmpFile(shape_type
const & shape,
2651 shape_type
const & chunk_shape=shape_type(),
2652 ChunkedArrayOptions
const & options = ChunkedArrayOptions(),
2653 std::string
const & path =
"")
2654 : ChunkedArray<N, T>(shape, chunk_shape, options)
2655 #ifndef VIGRA_NO_SPARSE_FILE
2656 , offset_array_(this->chunkArrayShape())
// Without sparse files: start with room for 4 chunks and grow on demand.
2661 #ifdef VIGRA_NO_SPARSE_FILE
2662 file_capacity_ = 4*
prod(this->chunk_shape_)*
sizeof(T);
// With sparse files: precompute every chunk's offset; the file's logical
// size is the sum of all aligned chunk sizes (offset bookkeeping elided).
2665 typename OffsetStorage::iterator i = offset_array_.begin(),
2666 end = offset_array_.end();
2667 std::size_t size = 0;
2668 for(; i != end; ++i)
2671 size += computeAllocSize(this->chunkShape(i.point()));
2673 file_capacity_ = size;
2674 this->overhead_bytes_ += offset_array_.size()*
sizeof(std::size_t);
// Win32: create an auto-deleting temp file, mark it sparse, and create a
// file mapping covering file_capacity_ bytes.
2680 file_ = ::CreateFile(winTempFileName(path).c_str(), GENERIC_READ | GENERIC_WRITE,
2681 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE, NULL);
2682 if (file_ == INVALID_HANDLE_VALUE)
2683 winErrorToException(
"ChunkedArrayTmpFile(): ");
2687 if(!::DeviceIoControl(file_, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, &dwTemp, NULL))
2688 winErrorToException(
"ChunkedArrayTmpFile(): ");
2694 static const std::size_t bits =
sizeof(LONG)*8, mask = (std::size_t(1) << bits) - 1;
2695 mappedFile_ = CreateFileMapping(file_, NULL, PAGE_READWRITE,
2696 file_capacity_ >> bits, file_capacity_ & mask, NULL);
2698 winErrorToException(
"ChunkedArrayTmpFile(): ");
// POSIX: tmpfile() auto-deletes on close; extend to capacity by writing
// one byte at the end.
2700 mappedFile_ = file_ = fileno(tmpfile());
2702 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to open file.");
2703 lseek(file_, file_capacity_-1, SEEK_SET);
2704 if(write(file_,
"0", 1) == -1)
2705 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
// Destructor: delete chunk objects, then close the OS handles (Win32 branch
// shown; POSIX close elided in this extraction).
2709 ~ChunkedArrayTmpFile()
2711 typename ChunkStorage::iterator i = this->handle_array_.begin(),
2712 end = this->handle_array_.end();
2713 for(; i != end; ++i)
2716 delete static_cast<Chunk*
>(i->pointer_);
2720 ::CloseHandle(mappedFile_);
2721 ::CloseHandle(file_);
// loadChunk: determine the chunk's file offset (append-and-grow without
// sparse files, precomputed otherwise), create the Chunk, and map it.
2727 virtual pointer loadChunk(ChunkBase<N, T> ** p, shape_type
const & index)
2731 shape_type shape = this->chunkShape(index);
2732 std::size_t chunk_size = computeAllocSize(shape);
2733 #ifdef VIGRA_NO_SPARSE_FILE
2734 std::size_t offset = file_size_;
2735 if(offset + chunk_size > file_capacity_)
// Grow by at least 20% to amortize the resize cost.
2737 file_capacity_ = max<std::size_t>(offset+chunk_size, file_capacity_ * 120 / 100);
2738 if(lseek(file_, file_capacity_-1, SEEK_SET) == -1)
2739 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to reset file size.");
2740 if(write(file_,
"0", 1) == -1)
2741 throw std::runtime_error(
"ChunkedArrayTmpFile(): unable to resize file.");
2743 file_size_ += chunk_size;
2745 std::size_t offset = offset_array_[index];
2747 *p =
new Chunk(shape, offset, chunk_size, mappedFile_);
2748 this->overhead_bytes_ +=
sizeof(Chunk);
2750 return static_cast<Chunk*
>(*p)->map();
// unloadChunk: just unmap — data persists in the file.
2753 virtual bool unloadChunk(ChunkBase<N, T> * chunk,
bool )
2755 static_cast<Chunk *
>(chunk)->unmap();
2759 virtual std::string backend()
const
2761 return "ChunkedArrayTmpFile";
2764 virtual std::size_t dataBytes(ChunkBase<N,T> * c)
const
2766 return c->pointer_ == 0
2768 :
static_cast<Chunk*
>(c)->alloc_size_;
// Per-chunk overhead includes the offset table entry when sparse files
// are available.
2771 virtual std::size_t overheadBytesPerChunk()
const
2773 #ifdef VIGRA_NO_SPARSE_FILE
2774 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>);
2776 return sizeof(Chunk) +
sizeof(SharedChunkHandle<N, T>) +
sizeof(std::size_t);
2780 #ifndef VIGRA_NO_SPARSE_FILE
2781 OffsetStorage offset_array_;
2783 FileHandle file_, mappedFile_;
2784 std::size_t file_size_, file_capacity_;
// ChunkIterator: random-access iterator over the chunks intersecting a ROI.
// It walks chunk coordinates via MultiCoordinateIterator and privately
// derives from MultiArrayView so that dereferencing yields a view onto the
// current chunk's intersection with the ROI.
// NOTE(review): extraction dropped brace/blank lines and several statement
// lines (class name line, some bodies); tokens are verbatim, comments added.
2787 template<
unsigned int N,
class U>
2789 :
public MultiCoordinateIterator<N>
2790 ,
private MultiArrayView<N, typename UnqualifiedType<U>::type>
2793 typedef typename UnqualifiedType<U>::type T;
2794 typedef MultiCoordinateIterator<N> base_type;
2795 typedef MultiArrayView<N, T> base_type2;
2797 typedef typename base_type::shape_type shape_type;
2798 typedef typename base_type::difference_type difference_type;
2799 typedef ChunkIterator iterator;
2800 typedef std::random_access_iterator_tag iterator_category;
2802 typedef MultiArrayView<N, T> value_type;
2803 typedef MultiArrayView<N, T> & reference;
2804 typedef MultiArrayView<N, T>
const & const_reference;
2805 typedef MultiArrayView<N, T> * pointer;
2806 typedef MultiArrayView<N, T>
const * const_pointer;
// Const-ness of the iterated array is derived from U at compile time.
2808 typedef typename IfBool<UnqualifiedType<U>::isConst,
2809 ChunkedArrayBase<N, T>
const,
2810 ChunkedArrayBase<N, T> >::type array_type;
2811 typedef IteratorChunkHandle<N, T> Chunk;
// Construct from ROI [start, end) and chunk-coordinate range
// [chunk_start, chunk_end); start_/stop_ are stored relative to the
// handle's offset.
2819 ChunkIterator(array_type * array,
2820 shape_type
const & start, shape_type
const & end,
2821 shape_type
const & chunk_start, shape_type
const & chunk_end,
2822 shape_type
const & chunk_shape)
2823 : base_type(chunk_start, chunk_end)
2825 , chunk_(chunk_start * chunk_shape)
2826 , start_(start - chunk_.offset_)
2827 , stop_(end - chunk_.offset_)
2828 , chunk_shape_(chunk_shape)
// Copy construction/assignment (stop_ copy elided in this extraction —
// presumably present upstream; confirm).
2833 ChunkIterator(ChunkIterator
const & rhs)
2836 , array_(rhs.array_)
2837 , chunk_(rhs.chunk_)
2838 , start_(rhs.start_)
2840 , chunk_shape_(rhs.chunk_shape_)
2845 ChunkIterator & operator=(ChunkIterator
const & rhs)
2849 base_type::operator=(rhs);
2850 array_ = rhs.array_;
2851 chunk_ = rhs.chunk_;
2852 start_ = rhs.start_;
2854 chunk_shape_ = rhs.chunk_shape_;
// Dereferencing exposes the private MultiArrayView base as the value
// (bodies elided here).
2860 reference operator*()
2865 const_reference operator*()
const
2870 pointer operator->()
2875 const_pointer operator->()
const
2882 return *(ChunkIterator(*
this) += i);
2885 value_type operator[](
const shape_type &coordOffset)
const
2887 return *(ChunkIterator(*
this) += coordOffset);
// getChunk() (name elided): fetch the current chunk's pointer/strides from
// the array and clip the view shape to the ROI.
2894 shape_type array_point = max(start_, this->point()*chunk_shape_),
2895 upper_bound(SkipInitialization);
2896 this->m_ptr = array_->chunkForIterator(array_point, this->m_stride, upper_bound, &chunk_);
2897 this->m_shape = min(upper_bound, stop_) - array_point;
// Global coordinates of the current view's start/stop.
2901 shape_type chunkStart()
const
2903 return max(start_, this->point()*chunk_shape_) + chunk_.offset_;
2906 shape_type chunkStop()
const
2908 return chunkStart() + this->m_shape;
// Standard random-access iterator boilerplate, delegating coordinate
// movement to MultiCoordinateIterator (several bodies elided).
2911 ChunkIterator & operator++()
2913 base_type::operator++();
2918 ChunkIterator operator++(
int)
2920 ChunkIterator res(*
this);
2932 ChunkIterator &
operator+=(
const shape_type &coordOffset)
2939 ChunkIterator & operator--()
2941 base_type::operator--();
2946 ChunkIterator operator--(
int)
2948 ChunkIterator res(*
this);
2958 ChunkIterator &
operator-=(
const shape_type &coordOffset)
2963 ChunkIterator getEndIterator()
const
2965 ChunkIterator res(*
this);
2966 static_cast<base_type &
>(res) = base_type::getEndIterator();
2973 return ChunkIterator(*
this) += d;
2978 return ChunkIterator(*
this) -= d;
2981 ChunkIterator
operator+(
const shape_type &coordOffset)
const
2983 return ChunkIterator(*
this) += coordOffset;
2986 ChunkIterator
operator-(
const shape_type &coordOffset)
const
2988 return ChunkIterator(*
this) -= coordOffset;
2996 #ifndef DOXYGEN // doxygen doesn't understand this
2997 using base_type::operator==;
2998 using base_type::operator!=;
3000 using base_type::shape;
3002 array_type * array_;
// ROI bounds (relative to chunk_.offset_), chunk geometry, scratch point.
3004 shape_type start_, stop_, chunk_shape_, array_point_;
3009 #undef VIGRA_ASSERT_INSIDE