// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
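
  // Illustrative pairing (a sketch only, not part of this header):
  // storage obtained from __valarray_get_storage is raw memory and must
  // eventually go back through __valarray_release_memory.
  //   _Tp* __p = std::__valarray_get_storage<_Tp>(__n);
  //   // ... construct, use and destroy __n elements of _Tp here ...
  //   std::__valarray_release_memory(__p);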

  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }
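
  // For a scalar type this dispatches to the memset() specialization;
  // for a class type each element is placement-new constructed.  A
  // sketch of the intended use (illustrative only, not part of this
  // header):
  //   double* __p = std::__valarray_get_storage<double>(__n);
  //   std::__valarray_default_construct(__p, __p + __n); // zero-fills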

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'. Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }
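
  // A sketch of the intended use (illustrative only, not part of this
  // header); _Tp(42) assumes _Tp is constructible from an int:
  //   _Tp* __p = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_fill_construct(__p, __p + __n, _Tp(42));
  // For a trivial _Tp the elements are assigned in place; otherwise each
  // one is constructed with placement new.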

  //
  // copy-construct raw array [__o, *) from plain array [__b, __e)
  // We can't just say 'memcpy()'
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        if (__b)
          __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp));
      }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }

  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
                              size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }
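
  // For example (illustrative only): with __n == 3 and __s == 2 the loop
  // above copy-constructs __o[0], __o[1], __o[2] from __a[0], __a[2],
  // __a[4].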

  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
                              const size_t* __restrict__ __i,
                              _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }
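
  // For example (illustrative only): with __i = {4, 0, 2} and __n == 3,
  // this gather copy-constructs __o[0], __o[1], __o[2] from __a[4],
  // __a[0], __a[2].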

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      if (!__is_trivial(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }

  // Fill a plain array __a[<__n>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // fill strided array __a[<__n : __s>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // fill indirect array __a[__i[<__n>]] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }
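
  // For example (illustrative only), given double __a[6]:
  //   __valarray_fill(__a, 6, 1.0);          // plain:   all six elements
  //   __valarray_fill(__a, 3, 2, 0.0);       // strided: __a[0], __a[2], __a[4]
  //   const size_t __idx[2] = {1, 5};
  //   __valarray_fill(__a, __idx, 2, 9.0);   // indexed: __a[1], __a[5]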

  // copy plain array __a[<__n>] in __b[<__n>]
  // For non-fundamental types, it is wrong to say 'memcpy()'
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        if (__n != 0)
          __builtin_memcpy(__b, __a, __n * sizeof(_Tp));
      }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>]. Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }
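
  // For example (illustrative only), with __n == 2, __s1 == 3, __s2 == 2
  // the loop above performs
  //   __dst[0] = __src[0];
  //   __dst[2] = __src[3];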

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }
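
  // For example (illustrative only), with __i = {2, 0}, __j = {1, 3} and
  // __n == 2 the loop above performs
  //   __dst[1] = __src[2];
  //   __dst[3] = __src[0];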

  //
  // Compute the sum of elements in range [__f, __l) which must not be empty.
  // This is a naive algorithm. It suffers from cancellation.
  // In the future try to specialize for _Tp = float, double, long double
  // using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = *__f++;
      while (__f != __l)
        __r += *__f++;
      return __r;
    }
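
  // One such more accurate algorithm would be Kahan's compensated
  // summation.  A sketch only (not used here), assuming _Tp is a
  // floating-point type:
  //   _Tp __r = *__f++;
  //   _Tp __c = _Tp();                // running compensation
  //   while (__f != __l)
  //     {
  //       _Tp __y = *__f++ - __c;     // corrected next term
  //       _Tp __t = __r + __y;        // low-order bits of __y may be lost
  //       __c = (__t - __r) - __y;    // recover what was lost
  //       __r = __t;
  //     }
  //   return __r;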

  // Compute the product of all elements in range [__f, __l)
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };
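
  // _Array is a thin, non-owning wrapper around a bare pointer: _M_data
  // is never freed by _Array itself.  A hypothetical lifetime sketch
  // (illustrative only, not part of this header):
  //   _Array<double> __arr(__n);   // allocates and default-constructs
  //   std::__valarray_fill(__arr, __n, 3.14);
  //   std::__valarray_destroy_elements(__arr.begin(), __arr.begin() + __n);
  //   std::__valarray_release_memory(__arr._M_data);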

  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>]. Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __b + __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

  _DEFINE_ARRAY_FUNCTION(+, __plus)
  _DEFINE_ARRAY_FUNCTION(-, __minus)
  _DEFINE_ARRAY_FUNCTION(*, __multiplies)
  _DEFINE_ARRAY_FUNCTION(/, __divides)
  _DEFINE_ARRAY_FUNCTION(%, __modulus)
  _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
  _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
  _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
  _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
  _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION
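
  // For exposition: _DEFINE_ARRAY_FUNCTION(+, __plus) above expands to a
  // family of overloads, the simplest of which is equivalent to
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
  //     {
  //       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
  //         *__p += __t;
  //     }
  // i.e. the augmented assignment 'element += __t' over a plain array.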

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */