// xblockwise_reducer.hpp
  1. #ifndef XTENSOR_XBLOCKWISE_REDUCER_HPP
  2. #define XTENSOR_XBLOCKWISE_REDUCER_HPP
  3. #include "xblockwise_reducer_functors.hpp"
  4. #include "xmultiindex_iterator.hpp"
  5. #include "xreducer.hpp"
  6. #include "xshape.hpp"
  7. #include "xtl/xclosure.hpp"
  8. #include "xtl/xsequence.hpp"
  9. namespace xt
  10. {
// Lazy blockwise (chunked) reducer: applies the reduction functor F along
// the axes X of the expression CT, one chunk/block at a time, with reducer
// options O (e.g. xt::keep_dims). The result is materialized via assign_to.
template <class CT, class F, class X, class O>
class xblockwise_reducer
{
public:

    using self_type = xblockwise_reducer<CT, F, X, O>;
    using raw_options_type = std::decay_t<O>;
    // std::true_type iff the keep_dims option is part of the options pack
    using keep_dims = xtl::mpl::contains<raw_options_type, xt::keep_dims_type>;
    using xexpression_type = std::decay_t<CT>;
    using shape_type = typename xreducer_shape_type<typename xexpression_type::shape_type, std::decay_t<X>, keep_dims>::type;
    using functor_type = F;
    using value_type = typename functor_type::value_type;
    using input_shape_type = typename xexpression_type::shape_type;
    // multi-dimensional index identifying one chunk of the input grid
    using input_chunk_index_type = filter_fixed_shape_t<input_shape_type>;
    using input_grid_strides = filter_fixed_shape_t<input_shape_type>;
    using axes_type = X;
    using chunk_shape_type = filter_fixed_shape_t<shape_type>;

    template <class E, class BS, class XX, class OO, class FF>
    xblockwise_reducer(E&& e, BS&& block_shape, XX&& axes, OO&& options, FF&& functor);

    const input_shape_type& input_shape() const;
    const axes_type& axes() const;
    std::size_t dimension() const;
    const shape_type& shape() const;
    const chunk_shape_type& chunk_shape() const;

    // Evaluate the reduction and write it into `result`, chunk by chunk.
    template <class R>
    void assign_to(R& result) const;

private:

    // maps a result axis index to the corresponding input axis index
    using mapping_type = filter_fixed_shape_t<shape_type>;
    using input_chunked_view_type = xchunked_view<const std::decay_t<CT>&>;
    using input_const_chunked_iterator_type = typename input_chunked_view_type::const_chunk_iterator;
    // [begin, end) pair of multi-index iterators over input chunk indices
    using input_chunk_range_type = std::array<xmultiindex_iterator<input_chunk_index_type>, 2>;

    // Reduce all contributing input chunks into the result chunk
    // referenced by result_chunk_iter.
    template <class CI>
    void assign_to_chunk(CI& result_chunk_iter) const;

    // Range of input chunk indices needed to compute the given result chunk.
    template <class CI>
    input_chunk_range_type compute_input_chunk_range(CI& result_chunk_iter) const;

    input_const_chunked_iterator_type get_input_chunk_iter(input_chunk_index_type input_chunk_index) const;

    void init_shapes();

    CT m_e;                                                    // input expression (closure)
    xchunked_view<const std::decay_t<CT>&> m_e_chunked_view;   // chunked view over m_e
    axes_type m_axes;                                          // axes to reduce over
    raw_options_type m_options;
    functor_type m_functor;
    shape_type m_result_shape;
    chunk_shape_type m_result_chunk_shape;
    mapping_type m_mapping;                                    // result axis -> input axis
    input_grid_strides m_input_grid_strides;                   // row-major strides of the input chunk grid
};
// Constructor: stores the expression, wraps it in a chunked view with the
// requested block shape, records axes/options/functor, then computes the
// result shapes and the strides of the input chunk grid.
template <class CT, class F, class X, class O>
template <class E, class BS, class XX, class OO, class FF>
xblockwise_reducer<CT, F, X, O>::xblockwise_reducer(E&& e, BS&& block_shape, XX&& axes, OO&& options, FF&& functor)
    : m_e(std::forward<E>(e))
    // m_e_chunked_view references m_e, so it must be initialized after it
    // (declaration order in the class guarantees this)
    , m_e_chunked_view(m_e, std::forward<BS>(block_shape))
    , m_axes(std::forward<XX>(axes))
    , m_options(std::forward<OO>(options))
    , m_functor(std::forward<FF>(functor))
    , m_result_shape()
    , m_result_chunk_shape()
    , m_mapping()
    , m_input_grid_strides()
{
    init_shapes();
    // Build row-major strides over the grid of input chunks, so a
    // multi-dimensional chunk index can be mapped to a linear chunk index.
    resize_container(m_input_grid_strides, m_e.dimension());
    std::size_t stride = 1;
    for (std::size_t i = m_input_grid_strides.size(); i != 0; --i)
    {
        m_input_grid_strides[i - 1] = stride;
        stride *= m_e_chunked_view.grid_shape()[i - 1];
    }
}
// Shape of the (un-reduced) input expression.
template <class CT, class F, class X, class O>
inline auto xblockwise_reducer<CT, F, X, O>::input_shape() const -> const input_shape_type&
{
    return m_e.shape();
}
// Axes along which the reduction is performed.
template <class CT, class F, class X, class O>
inline auto xblockwise_reducer<CT, F, X, O>::axes() const -> const axes_type&
{
    return m_axes;
}
// Number of dimensions of the reduced result.
template <class CT, class F, class X, class O>
inline std::size_t xblockwise_reducer<CT, F, X, O>::dimension() const
{
    return m_result_shape.size();
}
// Shape of the reduced result (computed in init_shapes).
template <class CT, class F, class X, class O>
inline auto xblockwise_reducer<CT, F, X, O>::shape() const -> const shape_type&
{
    return m_result_shape;
}
// Chunk shape of the reduced result (computed in init_shapes).
template <class CT, class F, class X, class O>
inline auto xblockwise_reducer<CT, F, X, O>::chunk_shape() const -> const chunk_shape_type&
{
    return m_result_chunk_shape;
}
  104. template <class CT, class F, class X, class O>
  105. template <class R>
  106. inline void xblockwise_reducer<CT, F, X, O>::assign_to(R& result) const
  107. {
  108. auto result_chunked_view = as_chunked(result, m_result_chunk_shape);
  109. for (auto chunk_iter = result_chunked_view.chunk_begin(); chunk_iter != result_chunked_view.chunk_end();
  110. ++chunk_iter)
  111. {
  112. assign_to_chunk(chunk_iter);
  113. }
  114. }
  115. template <class CT, class F, class X, class O>
  116. auto xblockwise_reducer<CT, F, X, O>::get_input_chunk_iter(input_chunk_index_type input_chunk_index) const
  117. -> input_const_chunked_iterator_type
  118. {
  119. std::size_t chunk_linear_index = 0;
  120. for (std::size_t i = 0; i < m_e_chunked_view.dimension(); ++i)
  121. {
  122. chunk_linear_index += input_chunk_index[i] * m_input_grid_strides[i];
  123. }
  124. return input_const_chunked_iterator_type(m_e_chunked_view, std::move(input_chunk_index), chunk_linear_index);
  125. }
// Compute one output chunk: iterate over every input chunk that contributes
// to it, reduce each block with the functor, merge the per-block results,
// then finalize (e.g. normalization for mean/stddev).
template <class CT, class F, class X, class O>
template <class CI>
void xblockwise_reducer<CT, F, X, O>::assign_to_chunk(CI& result_chunk_iter) const
{
    auto result_chunk_view = *result_chunk_iter;
    // functor-specific accumulator state for this output chunk
    auto reduction_variable = m_functor.reduction_variable(result_chunk_view);
    // get the range of input chunks we need to compute the desired output chunk
    auto range = compute_input_chunk_range(result_chunk_iter);
    // iterate over the input chunk indices
    auto first = true;
    auto iter = std::get<0>(range);
    while (iter != std::get<1>(range))
    {
        const auto& input_chunk_index = *iter;
        // get input chunk iterator from chunk index
        auto chunked_input_iter = this->get_input_chunk_iter(input_chunk_index);
        auto input_chunk_view = *chunked_input_iter;
        // compute the per block result
        auto block_res = m_functor.compute(input_chunk_view, m_axes, m_options);
        // merge into the accumulated result; `first` lets the functor
        // initialize instead of combine on the first block
        m_functor.merge(block_res, first, result_chunk_view, reduction_variable);
        first = false;
        ++iter;
    }
    // finalize (ie smth like normalization)
    m_functor.finalize(reduction_variable, result_chunk_view, *this);
}
// Determine which input chunks contribute to the output chunk referenced by
// result_chunk_iter. Along reduced axes the full extent of the chunk grid is
// kept; along non-reduced axes the range is pinned to the single input chunk
// matching the output chunk index.
template <class CT, class F, class X, class O>
template <class CI>
auto xblockwise_reducer<CT, F, X, O>::compute_input_chunk_range(CI& result_chunk_iter) const
    -> input_chunk_range_type
{
    // start with the whole grid: begin = (0,...,0), end = grid_shape
    auto input_chunks_begin = xtl::make_sequence<input_chunk_index_type>(m_e_chunked_view.dimension(), 0);
    auto input_chunks_end = xtl::make_sequence<input_chunk_index_type>(m_e_chunked_view.dimension());
    XTENSOR_ASSERT(input_chunks_begin.size() == m_e_chunked_view.dimension());
    XTENSOR_ASSERT(input_chunks_end.size() == m_e_chunked_view.dimension());
    std::copy(
        m_e_chunked_view.grid_shape().begin(),
        m_e_chunked_view.grid_shape().end(),
        input_chunks_end.begin()
    );
    const auto& chunk_index = result_chunk_iter.chunk_index();
    for (std::size_t result_ax_index = 0; result_ax_index < m_result_shape.size(); ++result_ax_index)
    {
        // result extent 1 marks a reduced axis under keep_dims; for such axes
        // the full grid range is kept. NOTE(review): a genuinely size-1
        // non-reduced axis is also skipped here, which is harmless since its
        // grid range is [0, 1) anyway — confirm intent.
        if (m_result_shape[result_ax_index] != 1)
        {
            const auto input_ax_index = m_mapping[result_ax_index];
            input_chunks_begin[input_ax_index] = chunk_index[result_ax_index];
            input_chunks_end[input_ax_index] = chunk_index[result_ax_index] + 1;
        }
    }
    return input_chunk_range_type{
        multiindex_iterator_begin<input_chunk_index_type>(input_chunks_begin, input_chunks_end),
        multiindex_iterator_end<input_chunk_index_type>(input_chunks_begin, input_chunks_end)
    };
}
  183. template <class CT, class F, class X, class O>
  184. void xblockwise_reducer<CT, F, X, O>::init_shapes()
  185. {
  186. const auto& shape = m_e.shape();
  187. const auto dimension = m_e.dimension();
  188. const auto& block_shape = m_e_chunked_view.chunk_shape();
  189. if (xtl::mpl::contains<raw_options_type, xt::keep_dims_type>::value)
  190. {
  191. resize_container(m_result_shape, dimension);
  192. resize_container(m_result_chunk_shape, dimension);
  193. resize_container(m_mapping, dimension);
  194. for (std::size_t i = 0; i < dimension; ++i)
  195. {
  196. m_mapping[i] = i;
  197. if (std::find(m_axes.begin(), m_axes.end(), i) == m_axes.end())
  198. {
  199. // i not in m_axes!
  200. m_result_shape[i] = shape[i];
  201. m_result_chunk_shape[i] = block_shape[i];
  202. }
  203. else
  204. {
  205. m_result_shape[i] = 1;
  206. m_result_chunk_shape[i] = 1;
  207. }
  208. }
  209. }
  210. else
  211. {
  212. const auto result_dim = dimension - m_axes.size();
  213. resize_container(m_result_shape, result_dim);
  214. resize_container(m_result_chunk_shape, result_dim);
  215. resize_container(m_mapping, result_dim);
  216. for (std::size_t i = 0, idx = 0; i < dimension; ++i)
  217. {
  218. if (std::find(m_axes.begin(), m_axes.end(), i) == m_axes.end())
  219. {
  220. // i not in axes!
  221. m_result_shape[idx] = shape[i];
  222. m_result_chunk_shape[idx] = block_shape[i];
  223. m_mapping[idx] = i;
  224. ++idx;
  225. }
  226. }
  227. }
  228. }
  229. template <class E, class CS, class A, class O, class FF>
  230. inline auto blockwise_reducer(E&& e, CS&& chunk_shape, A&& axes, O&& raw_options, FF&& functor)
  231. {
  232. using functor_type = std::decay_t<FF>;
  233. using closure_type = xtl::const_closure_type_t<E>;
  234. using axes_type = std::decay_t<A>;
  235. return xblockwise_reducer<closure_type, functor_type, axes_type, O>(
  236. std::forward<E>(e),
  237. std::forward<CS>(chunk_shape),
  238. std::forward<A>(axes),
  239. std::forward<O>(raw_options),
  240. std::forward<FF>(functor)
  241. );
  242. }
namespace blockwise
{
    // Generates the blockwise reducer free functions (sum, prod, ...).
    // Four overloads are produced per reducer:
    //   1. axes given as a container of axes
    //   2. a single integral axis
    //   3. no axes -> reduce over all axes
    //   4. axes given as a braced initializer list (C array reference)
    // The optional leading template parameter T selects the functor's
    // internal computation type.
    // NOTE: comments must stay outside the macro bodies — a `//` comment
    // ending in a backslash would splice the next macro line into itself.
#define XTENSOR_BLOCKWISE_REDUCER_FUNC(FNAME, FUNCTOR) \
    template < \
        class T = void, \
        class E, \
        class BS, \
        class X, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(xtl::negation<is_reducer_options<X>>, xtl::negation<xtl::is_integral<std::decay_t<X>>>)> \
    auto FNAME(E&& e, BS&& block_shape, X&& axes, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type, T>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::forward<X>(axes), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template < \
        class T = void, \
        class E, \
        class BS, \
        class X, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(xtl::is_integral<std::decay_t<X>>)> \
    auto FNAME(E&& e, BS&& block_shape, X axis, O options = O()) \
    { \
        std::array<X, 1> axes{axis}; \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type, T>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            axes, \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template < \
        class T = void, \
        class E, \
        class BS, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(is_reducer_options<O>, xtl::negation<xtl::is_integral<std::decay_t<O>>>)> \
    auto FNAME(E&& e, BS&& block_shape, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using axes_type = filter_fixed_shape_t<typename input_expression_type::shape_type>; \
        axes_type axes = xtl::make_sequence<axes_type>(e.dimension()); \
        XTENSOR_ASSERT(axes.size() == e.dimension()); \
        std::iota(axes.begin(), axes.end(), 0); \
        using functor_type = FUNCTOR<typename input_expression_type::value_type, T>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(axes), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template <class T = void, class E, class BS, class I, std::size_t N, class O = DEFAULT_STRATEGY_REDUCERS> \
    auto FNAME(E&& e, BS&& block_shape, const I(&axes)[N], O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type, T>; \
        using axes_type = std::array<std::size_t, N>; \
        auto ax = xt::forward_normalize<axes_type>(e, axes); \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(ax), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    }

    XTENSOR_BLOCKWISE_REDUCER_FUNC(sum, xt::detail::blockwise::sum_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(prod, xt::detail::blockwise::prod_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(amin, xt::detail::blockwise::amin_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(amax, xt::detail::blockwise::amax_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(mean, xt::detail::blockwise::mean_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(variance, xt::detail::blockwise::variance_functor)
    XTENSOR_BLOCKWISE_REDUCER_FUNC(stddev, xt::detail::blockwise::stddev_functor)
#undef XTENSOR_BLOCKWISE_REDUCER_FUNC

    // norm reducers do *not* allow to pass a template
    // parameter to specify the internal computation type
#define XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(FNAME, FUNCTOR) \
    template < \
        class E, \
        class BS, \
        class X, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(xtl::negation<is_reducer_options<X>>, xtl::negation<xtl::is_integral<std::decay_t<X>>>)> \
    auto FNAME(E&& e, BS&& block_shape, X&& axes, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::forward<X>(axes), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template <class E, class BS, class X, class O = DEFAULT_STRATEGY_REDUCERS, XTL_REQUIRES(xtl::is_integral<std::decay_t<X>>)> \
    auto FNAME(E&& e, BS&& block_shape, X axis, O options = O()) \
    { \
        std::array<X, 1> axes{axis}; \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            axes, \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template < \
        class E, \
        class BS, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(is_reducer_options<O>, xtl::negation<xtl::is_integral<std::decay_t<O>>>)> \
    auto FNAME(E&& e, BS&& block_shape, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using axes_type = filter_fixed_shape_t<typename input_expression_type::shape_type>; \
        axes_type axes = xtl::make_sequence<axes_type>(e.dimension()); \
        XTENSOR_ASSERT(axes.size() == e.dimension()); \
        std::iota(axes.begin(), axes.end(), 0); \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(axes), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    } \
    template <class E, class BS, class I, std::size_t N, class O = DEFAULT_STRATEGY_REDUCERS> \
    auto FNAME(E&& e, BS&& block_shape, const I(&axes)[N], O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        using axes_type = std::array<std::size_t, N>; \
        auto ax = xt::forward_normalize<axes_type>(e, axes); \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(ax), \
            std::forward<O>(options), \
            functor_type() \
        ); \
    }

    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_l0, xt::detail::blockwise::norm_l0_functor)
    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_l1, xt::detail::blockwise::norm_l1_functor)
    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_l2, xt::detail::blockwise::norm_l2_functor)
    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_sq, xt::detail::blockwise::norm_sq_functor)
    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_linf, xt::detail::blockwise::norm_linf_functor)
#undef XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC

    // Same as above, but for norms parameterized by an exponent `p`
    // (norm_lp, norm_lp_to_p): each overload takes `double p` after the
    // block shape and forwards it to the functor's constructor.
#define XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(FNAME, FUNCTOR) \
    template < \
        class E, \
        class BS, \
        class X, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(xtl::negation<is_reducer_options<X>>, xtl::negation<xtl::is_integral<std::decay_t<X>>>)> \
    auto FNAME(E&& e, BS&& block_shape, double p, X&& axes, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::forward<X>(axes), \
            std::forward<O>(options), \
            functor_type(p) \
        ); \
    } \
    template <class E, class BS, class X, class O = DEFAULT_STRATEGY_REDUCERS, XTL_REQUIRES(xtl::is_integral<std::decay_t<X>>)> \
    auto FNAME(E&& e, BS&& block_shape, double p, X axis, O options = O()) \
    { \
        std::array<X, 1> axes{axis}; \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            axes, \
            std::forward<O>(options), \
            functor_type(p) \
        ); \
    } \
    template < \
        class E, \
        class BS, \
        class O = DEFAULT_STRATEGY_REDUCERS, \
        XTL_REQUIRES(is_reducer_options<O>, xtl::negation<xtl::is_integral<std::decay_t<O>>>)> \
    auto FNAME(E&& e, BS&& block_shape, double p, O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using axes_type = filter_fixed_shape_t<typename input_expression_type::shape_type>; \
        axes_type axes = xtl::make_sequence<axes_type>(e.dimension()); \
        XTENSOR_ASSERT(axes.size() == e.dimension()); \
        std::iota(axes.begin(), axes.end(), 0); \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(axes), \
            std::forward<O>(options), \
            functor_type(p) \
        ); \
    } \
    template <class E, class BS, class I, std::size_t N, class O = DEFAULT_STRATEGY_REDUCERS> \
    auto FNAME(E&& e, BS&& block_shape, double p, const I(&axes)[N], O options = O()) \
    { \
        using input_expression_type = std::decay_t<E>; \
        using functor_type = FUNCTOR<typename input_expression_type::value_type>; \
        using axes_type = std::array<std::size_t, N>; \
        auto ax = xt::forward_normalize<axes_type>(e, axes); \
        return blockwise_reducer( \
            std::forward<E>(e), \
            std::forward<BS>(block_shape), \
            std::move(ax), \
            std::forward<O>(options), \
            functor_type(p) \
        ); \
    }

    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_lp_to_p, xt::detail::blockwise::norm_lp_to_p_functor);
    XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC(norm_lp, xt::detail::blockwise::norm_lp_functor);
#undef XTENSOR_BLOCKWISE_NORM_REDUCER_FUNC
}
  480. }
  481. #endif