xchunked_array.hpp
  1. /***************************************************************************
  2. * Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
  3. * Copyright (c) QuantStack *
  4. * *
  5. * Distributed under the terms of the BSD 3-Clause License. *
  6. * *
  7. * The full license is in the file LICENSE, distributed with this software. *
  8. ****************************************************************************/
  9. #ifndef XTENSOR_CHUNKED_ARRAY_HPP
  10. #define XTENSOR_CHUNKED_ARRAY_HPP
  11. #include <array>
  12. #include <vector>
  13. #include "xarray.hpp"
  14. #include "xchunked_assign.hpp"
  15. namespace xt
  16. {
  17. /**
  18. * @defgroup xt_xchunked_array
  19. *
  20. * Chunked array container.
  21. * Defined in ``xtensor/xchunked_array.hpp``.
  22. */
  23. /******************************
  24. * xchunked_array declaration *
  25. ******************************/
    // Forward declaration of the chunked array container.
    template <class chunk_storage>
    class xchunked_array;

    // Inner types required by the xcontainer-style CRTP machinery.
    // chunk_storage is the container holding the chunks (e.g. xarray<xarray<T>>);
    // its value_type is the type of a single chunk.
    template <class chunk_storage>
    struct xcontainer_inner_types<xchunked_array<chunk_storage>>
    {
        using chunk_type = typename chunk_storage::value_type;
        // Element references are references into an individual chunk.
        using const_reference = typename chunk_type::const_reference;
        using reference = typename chunk_type::reference;
        using size_type = std::size_t;
        using storage_type = chunk_type;
        using temporary_type = xchunked_array<chunk_storage>;
    };

    // Inner types required by the xiterable CRTP base: chunked arrays are
    // traversed with index-based steppers (no linear iteration is possible
    // over non-contiguous chunked storage).
    template <class chunk_storage>
    struct xiterable_inner_types<xchunked_array<chunk_storage>>
    {
        using chunk_type = typename chunk_storage::value_type;
        using inner_shape_type = typename chunk_type::shape_type;
        using const_stepper = xindexed_stepper<xchunked_array<chunk_storage>, true>;
        using stepper = xindexed_stepper<xchunked_array<chunk_storage>, false>;
    };
    /**
     * @class xchunked_array
     * @brief Array container whose data is split into a grid of chunks.
     *
     * Each chunk is an array of type ``chunk_storage::value_type``; the chunks
     * themselves are stored in a ``chunk_storage`` container whose shape is the
     * "grid shape". Element access translates a global index into a pair of
     * (chunk index, index inside the chunk).
     *
     * @tparam chunk_storage The container holding the chunks
     *         (e.g. ``xarray<xarray<T>>`` for in-memory chunked arrays).
     */
    template <class chunk_storage>
    class xchunked_array : public xaccessible<xchunked_array<chunk_storage>>,
                           public xiterable<xchunked_array<chunk_storage>>,
                           public xchunked_semantic<xchunked_array<chunk_storage>>
    {
    public:

        using chunk_storage_type = chunk_storage;
        using chunk_type = typename chunk_storage::value_type;
        using grid_shape_type = typename chunk_storage::shape_type;
        using const_reference = typename chunk_type::const_reference;
        using reference = typename chunk_type::reference;
        using self_type = xchunked_array<chunk_storage>;
        using semantic_base = xchunked_semantic<self_type>;
        using iterable_base = xconst_iterable<self_type>;
        using const_stepper = typename iterable_base::const_stepper;
        using stepper = typename iterable_base::stepper;
        using inner_types = xcontainer_inner_types<self_type>;
        using size_type = typename inner_types::size_type;
        using storage_type = typename inner_types::storage_type;
        using value_type = typename storage_type::value_type;
        using pointer = value_type*;
        using const_pointer = const value_type*;
        using difference_type = std::ptrdiff_t;
        using shape_type = typename chunk_type::shape_type;
        using temporary_type = typename inner_types::temporary_type;
        using bool_load_type = xt::bool_load_type<value_type>;
        // A chunked array never exposes a static layout nor contiguous storage.
        static constexpr layout_type static_layout = layout_type::dynamic;
        static constexpr bool contiguous_layout = false;
        using chunk_iterator = xchunk_iterator<self_type>;
        using const_chunk_iterator = xchunk_iterator<const self_type>;

        // Builds a chunked array from a chunk container, a global shape and a
        // chunk shape. NOTE: both shape arguments share the single deduced
        // type S, so they must have the same type and value category.
        template <class S>
        xchunked_array(
            chunk_storage_type&& chunks,
            S&& shape,
            S&& chunk_shape,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        ~xchunked_array() = default;

        xchunked_array(const xchunked_array&) = default;
        xchunked_array& operator=(const xchunked_array&) = default;

        xchunked_array(xchunked_array&&) = default;
        xchunked_array& operator=(xchunked_array&&) = default;

        // Builds a chunked array from an expression; the chunk shape is taken
        // from the expression itself when it is chunked, from its full shape
        // otherwise (see detail::chunk_helper).
        template <class E>
        xchunked_array(
            const xexpression<E>& e,
            chunk_storage_type&& chunks,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        // Builds a chunked array from an expression with an explicit chunk shape.
        template <class E, class S>
        xchunked_array(
            const xexpression<E>& e,
            chunk_storage_type&& chunks,
            S&& chunk_shape,
            layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
        );

        // Extended copy semantics: assigns an expression chunk by chunk.
        template <class E>
        xchunked_array& operator=(const xexpression<E>& e);

        size_type dimension() const noexcept;
        const shape_type& shape() const noexcept;
        layout_type layout() const noexcept;
        bool is_contiguous() const noexcept;

        // Element access: global indexes are dispatched to the owning chunk.
        template <class... Idxs>
        reference operator()(Idxs... idxs);

        template <class... Idxs>
        const_reference operator()(Idxs... idxs) const;

        template <class It>
        reference element(It first, It last);

        template <class It>
        const_reference element(It first, It last) const;

        // Broadcasting / stepper interface used by the assignment machinery.
        template <class S>
        bool broadcast_shape(S& s, bool reuse_cache = false) const;

        template <class S>
        bool has_linear_assign(const S& strides) const noexcept;

        template <class S>
        stepper stepper_begin(const S& shape) noexcept;
        template <class S>
        stepper stepper_end(const S& shape, layout_type) noexcept;

        template <class S>
        const_stepper stepper_begin(const S& shape) const noexcept;
        template <class S>
        const_stepper stepper_end(const S& shape, layout_type) const noexcept;

        // Chunk-level accessors.
        const shape_type& chunk_shape() const noexcept;
        size_type grid_size() const noexcept;
        const grid_shape_type& grid_shape() const noexcept;

        chunk_storage_type& chunks();
        const chunk_storage_type& chunks() const;

        // Iteration over whole chunks (not over individual elements).
        chunk_iterator chunk_begin();
        chunk_iterator chunk_end();

        const_chunk_iterator chunk_begin() const;
        const_chunk_iterator chunk_end() const;

        const_chunk_iterator chunk_cbegin() const;
        const_chunk_iterator chunk_cend() const;

    private:

        // Pair of (chunk indexes, indexes inside the chunk), one slot per
        // dimension, for a statically known number of indexes.
        template <class... Idxs>
        using indexes_type = std::
            pair<std::array<std::size_t, sizeof...(Idxs)>, std::array<std::size_t, sizeof...(Idxs)>>;

        // Per-dimension (chunk index, index inside chunk) pairs.
        template <class... Idxs>
        using chunk_indexes_type = std::array<std::pair<std::size_t, std::size_t>, sizeof...(Idxs)>;

        template <std::size_t N>
        using static_indexes_type = std::pair<std::array<std::size_t, N>, std::array<std::size_t, N>>;

        using dynamic_indexes_type = std::pair<std::vector<std::size_t>, std::vector<std::size_t>>;

        // Resizes the chunk grid so it covers `shape` with chunks of
        // `chunk_shape` (a partial edge chunk is added per dimension if needed).
        template <class S1, class S2>
        void resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

        template <class... Idxs>
        indexes_type<Idxs...> get_indexes(Idxs... idxs) const;

        template <class Idx>
        std::pair<std::size_t, std::size_t> get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const;

        template <std::size_t... dims, class... Idxs>
        chunk_indexes_type<Idxs...> get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const;

        // Transposes an array of pairs into a pair of arrays.
        template <class T, std::size_t N>
        static_indexes_type<N> unpack(const std::array<T, N>& arr) const;

        template <class It>
        dynamic_indexes_type get_indexes_dynamic(It first, It last) const;

        shape_type m_shape;           // global shape of the array
        shape_type m_chunk_shape;     // shape of the chunks
        chunk_storage_type m_chunks;  // grid of chunks
    };
    // Returns true when the expression behind e is chunked, i.e. exposes a
    // chunk_shape() member (see detail::chunk_helper).
    template <class E>
    constexpr bool is_chunked(const xexpression<E>& e);

    // Type-only variant of is_chunked.
    template <class E>
    constexpr bool is_chunked();
  167. /**
  168. * Creates an in-memory chunked array.
  169. *
  170. * This function returns an uninitialized ``xt::xchunked_array<xt::xarray<T>>``.
  171. *
  172. * @ingroup xt_xchunked_array
  173. *
  174. * @tparam T The type of the elements (e.g. double)
  175. * @tparam L The layout_type of the array
  176. *
  177. * @param shape The shape of the array
  178. * @param chunk_shape The shape of a chunk
  179. * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
  180. *
  181. * @return returns a ``xt::xchunked_array<xt::xarray<T>>`` with the given shape, chunk shape and memory
  182. * layout.
  183. */
    template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT, class S>
    xchunked_array<xarray<xarray<T>>>
    chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

    // Overload accepting initializer lists for both the global shape and the
    // chunk shape.
    template <class T, layout_type L = XTENSOR_DEFAULT_LAYOUT, class S>
    xchunked_array<xarray<xarray<T>>> chunked_array(
        std::initializer_list<S> shape,
        std::initializer_list<S> chunk_shape,
        layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT
    );
  193. /**
  194. * Creates an in-memory chunked array.
  195. *
  196. * This function returns a ``xt::xchunked_array<xt::xarray<T>>`` initialized from an expression.
  197. *
  198. * @ingroup xt_xchunked_array
  199. *
  200. * @tparam L The layout_type of the array
  201. *
  202. * @param e The expression to initialize the chunked array from
  203. * @param chunk_shape The shape of a chunk
  204. * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
  205. *
  206. * @return returns a ``xt::xchunked_array<xt::xarray<T>>`` from the given expression, with the given chunk
  207. * shape and memory layout.
  208. */
    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E, class S>
    xchunked_array<xarray<xarray<typename E::value_type>>>
    chunked_array(const xexpression<E>& e, S&& chunk_shape, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);

    /**
     * Creates an in-memory chunked array.
     *
     * This function returns a ``xt::xchunked_array<xt::xarray<T>>`` initialized
     * from an expression. The chunk shape is taken from the expression: its own
     * chunk shape when the expression is chunked, its full shape otherwise.
     *
     * @ingroup xt_xchunked_array
     *
     * @tparam L The layout_type of the array
     *
     * @param e The expression to initialize the chunked array from
     * @param chunk_memory_layout The layout of each chunk (default: XTENSOR_DEFAULT_LAYOUT)
     *
     * @return returns a ``xt::xchunked_array<xt::xarray<T>>`` from the given expression, with the
     * expression's chunk shape and the given memory layout.
     */
    template <layout_type L = XTENSOR_DEFAULT_LAYOUT, class E>
    xchunked_array<xarray<xarray<typename E::value_type>>>
    chunked_array(const xexpression<E>& e, layout_type chunk_memory_layout = XTENSOR_DEFAULT_LAYOUT);
  230. /*******************************
  231. * chunk_helper implementation *
  232. *******************************/
    namespace detail
    {
        // Detection alias (workaround for VS2015): well-formed only when E
        // exposes a chunk_shape() member.
        template <class E>
        using try_chunk_shape = decltype(std::declval<E>().chunk_shape());

        // Primary template: E is NOT chunked. Its full shape acts as the
        // "chunk shape", and resizing the chunk container must also resize
        // every individual chunk.
        template <class E, template <class> class OP, class = void>
        struct chunk_helper_impl
        {
            using is_chunked = std::false_type;

            static const auto& chunk_shape(const xexpression<E>& e)
            {
                return e.derived_cast().shape();
            }

            template <class S1, class S2>
            static void
            resize(E& chunks, const S1& container_shape, const S2& chunk_shape, layout_type chunk_memory_layout)
            {
                chunks.resize(container_shape);
                // Each chunk gets the requested shape and memory layout.
                for (auto& c : chunks)
                {
                    c.resize(chunk_shape, chunk_memory_layout);
                }
            }
        };

        // Specialization selected when OP<E> (i.e. E::chunk_shape()) is valid:
        // E is already chunked, so individual chunks manage their own shape
        // and only the grid needs resizing.
        template <class E, template <class> class OP>
        struct chunk_helper_impl<E, OP, void_t<OP<E>>>
        {
            using is_chunked = std::true_type;

            static const auto& chunk_shape(const xexpression<E>& e)
            {
                return e.derived_cast().chunk_shape();
            }

            template <class S1, class S2>
            static void
            resize(E& chunks, const S1& container_shape, const S2& /*chunk_shape*/, layout_type /*chunk_memory_layout*/)
            {
                chunks.resize(container_shape);
            }
        };

        template <class E>
        using chunk_helper = chunk_helper_impl<E, try_chunk_shape>;
    }
  275. template <class E>
  276. constexpr bool is_chunked(const xexpression<E>&)
  277. {
  278. return is_chunked<E>();
  279. }
  280. template <class E>
  281. constexpr bool is_chunked()
  282. {
  283. using return_type = typename detail::chunk_helper<E>::is_chunked;
  284. return return_type::value;
  285. }
    // Builds an uninitialized in-memory chunked array (see declaration above
    // for the full documentation).
    template <class T, layout_type L, class S>
    inline xchunked_array<xarray<xarray<T>>>
    chunked_array(S&& shape, S&& chunk_shape, layout_type chunk_memory_layout)
    {
        // NOTE(review): the declared return type spells xarray<xarray<T>>
        // while the storage below is xarray<xarray<T, L>>; the two only agree
        // when L is XTENSOR_DEFAULT_LAYOUT. Instantiating with another L looks
        // like it would not compile — confirm whether the return type should
        // carry L as well (same spelling appears in the declaration).
        using chunk_storage = xarray<xarray<T, L>>;
        return xchunked_array<chunk_storage>(
            chunk_storage(),
            std::forward<S>(shape),
            std::forward<S>(chunk_shape),
            chunk_memory_layout
        );
    }
  298. template <class T, layout_type L, class S>
  299. xchunked_array<xarray<xarray<T>>>
  300. chunked_array(std::initializer_list<S> shape, std::initializer_list<S> chunk_shape, layout_type chunk_memory_layout)
  301. {
  302. using sh_type = std::vector<std::size_t>;
  303. auto sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(shape);
  304. auto ch_sh = xtl::forward_sequence<sh_type, std::initializer_list<S>>(chunk_shape);
  305. return chunked_array<T, L, sh_type>(std::move(sh), std::move(ch_sh), chunk_memory_layout);
  306. }
  307. template <layout_type L, class E, class S>
  308. inline xchunked_array<xarray<xarray<typename E::value_type>>>
  309. chunked_array(const xexpression<E>& e, S&& chunk_shape, layout_type chunk_memory_layout)
  310. {
  311. using chunk_storage = xarray<xarray<typename E::value_type, L>>;
  312. return xchunked_array<chunk_storage>(e, chunk_storage(), std::forward<S>(chunk_shape), chunk_memory_layout);
  313. }
  314. template <layout_type L, class E>
  315. inline xchunked_array<xarray<xarray<typename E::value_type>>>
  316. chunked_array(const xexpression<E>& e, layout_type chunk_memory_layout)
  317. {
  318. using chunk_storage = xarray<xarray<typename E::value_type, L>>;
  319. return xchunked_array<chunk_storage>(e, chunk_storage(), chunk_memory_layout);
  320. }
  321. /*********************************
  322. * xchunked_array implementation *
  323. *********************************/
    // Constructor from a chunk container plus explicit shapes: takes ownership
    // of the container then sizes the grid and the chunks.
    template <class CS>
    template <class S>
    inline xchunked_array<CS>::xchunked_array(CS&& chunks, S&& shape, S&& chunk_shape, layout_type chunk_memory_layout)
        : m_chunks(std::move(chunks))
    {
        resize(std::forward<S>(shape), std::forward<S>(chunk_shape), chunk_memory_layout);
    }

    // Constructor from an expression without an explicit chunk shape:
    // delegates with the chunk shape deduced by chunk_helper (the expression's
    // own chunk shape when chunked, its full shape otherwise).
    template <class CS>
    template <class E>
    inline xchunked_array<CS>::xchunked_array(const xexpression<E>& e, CS&& chunks, layout_type chunk_memory_layout)
        : xchunked_array(e, std::move(chunks), detail::chunk_helper<E>::chunk_shape(e), chunk_memory_layout)
    {
    }

    // Constructor from an expression with an explicit chunk shape: sizes the
    // grid first, then copies the expression values into the chunks.
    template <class CS>
    template <class E, class S>
    inline xchunked_array<CS>::xchunked_array(
        const xexpression<E>& e,
        CS&& chunks,
        S&& chunk_shape,
        layout_type chunk_memory_layout
    )
        : m_chunks(std::move(chunks))
    {
        resize(e.derived_cast().shape(), std::forward<S>(chunk_shape), chunk_memory_layout);
        semantic_base::assign_xexpression(e);
    }
    // Extended copy semantics: chunk-wise assignment implemented by
    // xchunked_semantic.
    template <class CS>
    template <class E>
    inline auto xchunked_array<CS>::operator=(const xexpression<E>& e) -> self_type&
    {
        return semantic_base::operator=(e);
    }

    // Number of dimensions of the array (not of the grid).
    template <class CS>
    inline auto xchunked_array<CS>::dimension() const noexcept -> size_type
    {
        return m_shape.size();
    }

    // Global shape of the array (not the grid shape).
    template <class CS>
    inline auto xchunked_array<CS>::shape() const noexcept -> const shape_type&
    {
        return m_shape;
    }

    // Always layout_type::dynamic (== static_layout).
    template <class CS>
    inline auto xchunked_array<CS>::layout() const noexcept -> layout_type
    {
        return static_layout;
    }

    // Storage is split across chunks, hence never contiguous.
    template <class CS>
    inline bool xchunked_array<CS>::is_contiguous() const noexcept
    {
        return false;
    }
  376. template <class CS>
  377. template <class... Idxs>
  378. inline auto xchunked_array<CS>::operator()(Idxs... idxs) -> reference
  379. {
  380. auto ii = get_indexes(idxs...);
  381. auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
  382. return chunk.element(ii.second.cbegin(), ii.second.cend());
  383. }
  384. template <class CS>
  385. template <class... Idxs>
  386. inline auto xchunked_array<CS>::operator()(Idxs... idxs) const -> const_reference
  387. {
  388. auto ii = get_indexes(idxs...);
  389. auto& chunk = m_chunks.element(ii.first.cbegin(), ii.first.cend());
  390. return chunk.element(ii.second.cbegin(), ii.second.cend());
  391. }
  392. template <class CS>
  393. template <class It>
  394. inline auto xchunked_array<CS>::element(It first, It last) -> reference
  395. {
  396. auto ii = get_indexes_dynamic(first, last);
  397. auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
  398. return chunk.element(ii.second.begin(), ii.second.end());
  399. }
  400. template <class CS>
  401. template <class It>
  402. inline auto xchunked_array<CS>::element(It first, It last) const -> const_reference
  403. {
  404. auto ii = get_indexes_dynamic(first, last);
  405. auto& chunk = m_chunks.element(ii.first.begin(), ii.first.end());
  406. return chunk.element(ii.second.begin(), ii.second.end());
  407. }
    // Broadcasting support; the reuse_cache flag is ignored.
    template <class CS>
    template <class S>
    inline bool xchunked_array<CS>::broadcast_shape(S& s, bool) const
    {
        return xt::broadcast_shape(shape(), s);
    }

    // Linear (1D) assignment is never possible over non-contiguous chunks.
    template <class CS>
    template <class S>
    inline bool xchunked_array<CS>::has_linear_assign(const S&) const noexcept
    {
        return false;
    }

    // Index-based steppers; the offset accounts for broadcasting to a shape
    // with more dimensions than this array.
    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_begin(const S& shape) noexcept -> stepper
    {
        size_type offset = shape.size() - this->dimension();
        return stepper(this, offset);
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) noexcept -> stepper
    {
        size_type offset = shape.size() - this->dimension();
        return stepper(this, offset, true);  // true => past-the-end stepper
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_begin(const S& shape) const noexcept -> const_stepper
    {
        size_type offset = shape.size() - this->dimension();
        return const_stepper(this, offset);
    }

    template <class CS>
    template <class S>
    inline auto xchunked_array<CS>::stepper_end(const S& shape, layout_type) const noexcept -> const_stepper
    {
        size_type offset = shape.size() - this->dimension();
        return const_stepper(this, offset, true);
    }
    // Shape of the chunks (the shape passed at construction / resize time).
    template <class CS>
    inline auto xchunked_array<CS>::chunk_shape() const noexcept -> const shape_type&
    {
        return m_chunk_shape;
    }

    // Total number of chunks in the grid.
    template <class CS>
    inline auto xchunked_array<CS>::grid_size() const noexcept -> size_type
    {
        return m_chunks.size();
    }

    // Number of chunks along each dimension.
    template <class CS>
    inline auto xchunked_array<CS>::grid_shape() const noexcept -> const grid_shape_type&
    {
        return m_chunks.shape();
    }

    // Direct access to the underlying chunk container.
    template <class CS>
    inline auto xchunked_array<CS>::chunks() -> chunk_storage_type&
    {
        return m_chunks;
    }

    template <class CS>
    inline auto xchunked_array<CS>::chunks() const -> const chunk_storage_type&
    {
        return m_chunks;
    }
  473. template <class CS>
  474. inline auto xchunked_array<CS>::chunk_begin() -> chunk_iterator
  475. {
  476. shape_type chunk_index(m_shape.size(), size_type(0));
  477. return chunk_iterator(*this, std::move(chunk_index), 0u);
  478. }
  479. template <class CS>
  480. inline auto xchunked_array<CS>::chunk_end() -> chunk_iterator
  481. {
  482. shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
  483. return chunk_iterator(*this, std::move(sh), grid_size());
  484. }
  485. template <class CS>
  486. inline auto xchunked_array<CS>::chunk_begin() const -> const_chunk_iterator
  487. {
  488. shape_type chunk_index(m_shape.size(), size_type(0));
  489. return const_chunk_iterator(*this, std::move(chunk_index), 0u);
  490. }
  491. template <class CS>
  492. inline auto xchunked_array<CS>::chunk_end() const -> const_chunk_iterator
  493. {
  494. shape_type sh = xtl::forward_sequence<shape_type, const grid_shape_type>(grid_shape());
  495. return const_chunk_iterator(*this, std::move(sh), grid_size());
  496. }
  497. template <class CS>
  498. inline auto xchunked_array<CS>::chunk_cbegin() const -> const_chunk_iterator
  499. {
  500. return chunk_begin();
  501. }
  502. template <class CS>
  503. inline auto xchunked_array<CS>::chunk_cend() const -> const_chunk_iterator
  504. {
  505. return chunk_end();
  506. }
    // Sizes the chunk grid so that chunks of `chunk_shape` cover `shape`,
    // then records both shapes.
    template <class CS>
    template <class S1, class S2>
    inline void xchunked_array<CS>::resize(S1&& shape, S2&& chunk_shape, layout_type chunk_memory_layout)
    {
        // Number of chunks along each dimension: ceil(s / cs).
        // NOTE(review): assumes every chunk_shape entry is > 0, otherwise the
        // division below is undefined — confirm callers guarantee this.
        std::vector<std::size_t> shape_of_chunks(shape.size());
        std::transform(
            shape.cbegin(),
            shape.cend(),
            chunk_shape.cbegin(),
            shape_of_chunks.begin(),
            [](auto s, auto cs)
            {
                std::size_t cn = s / cs;
                if (s % cs > 0)
                {
                    cn += std::size_t(1); // edge_chunk: partially filled chunk
                }
                return cn;
            }
        );
        // Resize the grid (and, for non-chunked storage, every chunk too).
        detail::chunk_helper<CS>::resize(m_chunks, shape_of_chunks, chunk_shape, chunk_memory_layout);
        // Both shapes are fully read above before being forwarded (and
        // possibly moved from) here — keep this ordering.
        m_shape = xtl::forward_sequence<shape_type, S1>(shape);
        m_chunk_shape = xtl::forward_sequence<shape_type, S2>(chunk_shape);
    }
  532. template <class CS>
  533. template <class... Idxs>
  534. inline auto xchunked_array<CS>::get_indexes(Idxs... idxs) const -> indexes_type<Idxs...>
  535. {
  536. auto chunk_indexes_packed = get_chunk_indexes(std::make_index_sequence<sizeof...(Idxs)>(), idxs...);
  537. return unpack(chunk_indexes_packed);
  538. }
  539. template <class CS>
  540. template <class Idx>
  541. inline std::pair<std::size_t, std::size_t>
  542. xchunked_array<CS>::get_chunk_indexes_in_dimension(std::size_t dim, Idx idx) const
  543. {
  544. std::size_t index_of_chunk = static_cast<size_t>(idx) / m_chunk_shape[dim];
  545. std::size_t index_in_chunk = static_cast<size_t>(idx) - index_of_chunk * m_chunk_shape[dim];
  546. return std::make_pair(index_of_chunk, index_in_chunk);
  547. }
    // Expands (dimension, index) pairs over the parameter packs; the
    // braced-init-list guarantees left-to-right evaluation order.
    template <class CS>
    template <std::size_t... dims, class... Idxs>
    inline auto xchunked_array<CS>::get_chunk_indexes(std::index_sequence<dims...>, Idxs... idxs) const
        -> chunk_indexes_type<Idxs...>
    {
        chunk_indexes_type<Idxs...> chunk_indexes = {{get_chunk_indexes_in_dimension(dims, idxs)...}};
        return chunk_indexes;
    }
  556. template <class CS>
  557. template <class T, std::size_t N>
  558. inline auto xchunked_array<CS>::unpack(const std::array<T, N>& arr) const -> static_indexes_type<N>
  559. {
  560. std::array<std::size_t, N> arr0;
  561. std::array<std::size_t, N> arr1;
  562. for (std::size_t i = 0; i < N; ++i)
  563. {
  564. arr0[i] = std::get<0>(arr[i]);
  565. arr1[i] = std::get<1>(arr[i]);
  566. }
  567. return std::make_pair(arr0, arr1);
  568. }
  569. template <class CS>
  570. template <class It>
  571. inline auto xchunked_array<CS>::get_indexes_dynamic(It first, It last) const -> dynamic_indexes_type
  572. {
  573. auto size = static_cast<std::size_t>(std::distance(first, last));
  574. std::vector<std::size_t> indexes_of_chunk(size);
  575. std::vector<std::size_t> indexes_in_chunk(size);
  576. for (std::size_t dim = 0; dim < size; ++dim)
  577. {
  578. auto chunk_index = get_chunk_indexes_in_dimension(dim, *first++);
  579. indexes_of_chunk[dim] = chunk_index.first;
  580. indexes_in_chunk[dim] = chunk_index.second;
  581. }
  582. return std::make_pair(indexes_of_chunk, indexes_in_chunk);
  583. }
  584. }
  585. #endif