// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
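
  // Illustrative sketch (comment only, not part of the library): the two
  // helpers above pair up like a typed malloc/free built on operator new:
  //
  //   double* __restrict__ __p = std::__valarray_get_storage<double>(4);
  //   // ... construct and use the four elements ...
  //   std::__valarray_release_memory(__p);
  //
  // Releasing only frees the storage; elements with non-trivial
  // destructors must be destroyed first (see __valarray_destroy_elements).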

  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required by 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For scalar types, zero-filling with 'memset()' suffices.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }
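
  // Illustrative sketch: the bool parameter selects the specialization,
  // so scalar types are zero-filled with a single memset() while class
  // types are value-initialized one placement-new at a time:
  //
  //   double* __p = std::__valarray_get_storage<double>(3);
  //   std::__valarray_default_construct(__p, __p + 3); // memset() path
  //   // for a class type the same call runs 'new(__b++) _Tp()' instead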

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required by 'valarray<T> v(n, t)'. Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }
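
  // Illustrative sketch: this is the helper behind 'valarray<T> v(n, t)';
  // trivial types are assigned, other types placement-constructed:
  //
  //   int* __p = std::__valarray_get_storage<int>(5);
  //   std::__valarray_fill_construct(__p, __p + 5, 42); // five copies of 42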

  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just use 'memcpy()': non-trivial types must be
  // copy-constructed in place.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe. But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        if (__b)
          __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp));
      }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }
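
  // Illustrative sketch: trivial types take the memcpy() fast path above,
  // other types are placement-copy-constructed element by element:
  //
  //   const double __src[] = { 1.0, 2.0, 3.0 };
  //   double* __dst = std::__valarray_get_storage<double>(3);
  //   std::__valarray_copy_construct(__src, __src + 3, __dst);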

  // Copy-construct raw array [__o, *) from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
                              size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // Copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
                              const size_t* __restrict__ __i,
                              _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      if (!__is_trivial(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }
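
  // Illustrative sketch of the full lifecycle these helpers implement,
  // roughly what valarray's constructor and destructor do for an
  // element type _Tp:
  //
  //   _Tp* __p = std::__valarray_get_storage<_Tp>(__n);  // raw storage
  //   std::__valarray_default_construct(__p, __p + __n); // construct
  //   // ... use the array ...
  //   std::__valarray_destroy_elements(__p, __p + __n);  // destroy
  //   std::__valarray_release_memory(__p);               // free storage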

  // Fill a plain array __a[<__n>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill strided array __a[<__n : __s>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill indirect array __a[__i[<__n>]] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }
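
  // Illustrative sketch: the indirect fill scatters one value through an
  // index array, which is what an indirect_array assignment boils down to:
  //
  //   double __a[6] = { };
  //   const size_t __idx[] = { 0, 2, 4 };
  //   std::__valarray_fill(__a, __idx, 3, 7.0); // sets __a[0], __a[2], __a[4]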

  // Copy plain array __a[<__n>] into __b[<__n>].
  // For non-trivial types, a raw 'memcpy()' would be wrong.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        if (__n != 0)
          __builtin_memcpy(__b, __a, __n * sizeof(_Tp));
      }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>]. Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }
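
  // Illustrative sketch: the two-stride overload gathers at stride __s1
  // and scatters at stride __s2, e.g. extracting a column of a row-major
  // matrix (the __restrict__ qualifiers demand non-overlapping arrays):
  //
  //   double __m[9] = { /* 3x3, row-major */ };
  //   double __col[3];
  //   std::__valarray_copy(__m + 2, 3, 3, __col, 1); // column 2 of __m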

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l).
  // This is a naive algorithm; it suffers from cancellation.
  // In the future, try to specialize it
  // for _Tp = float, double, long double using a more accurate
  // algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
        __r += *__f++;
      return __r;
    }
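
  // Illustrative sketch of the "more accurate algorithm" alluded to above:
  // Kahan (compensated) summation carries a running correction term that
  // recovers low-order bits lost to rounding. Not used by the library;
  // the name __valarray_sum_kahan is hypothetical:
  //
  //   template<typename _Tp>
  //     _Tp
  //     __valarray_sum_kahan(const _Tp* __f, const _Tp* __l)
  //     {
  //       _Tp __s = _Tp(), __c = _Tp(); // running sum and compensation
  //       while (__f != __l)
  //         {
  //           _Tp __y = *__f++ - __c;
  //           _Tp __t = __s + __y;
  //           __c = (__t - __s) - __y;  // error made by '__s + __y'
  //           __s = __t;
  //         }
  //       return __s;
  //     }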

  // Compute the product of all elements in range [__f, __l).
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };
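
  // Illustrative note: in its common use _Array is a non-owning view; it
  // just carries the __restrict__-qualified data pointer of an existing
  // valarray, which is roughly what valarray's members do internally:
  //
  //   std::valarray<double> __v(10);
  //   _Array<double> __w(__v);        // aliases __v's storage
  //   __valarray_fill(__w, 10, 1.0);  // fills __v with 1.0
  //
  // Only _Array(size_t) and _Array(const _Tp*, size_t) allocate; that
  // storage must be released explicitly by the owner.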

  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>]. Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

_DEFINE_ARRAY_FUNCTION(+, __plus)
_DEFINE_ARRAY_FUNCTION(-, __minus)
_DEFINE_ARRAY_FUNCTION(*, __multiplies)
_DEFINE_ARRAY_FUNCTION(/, __divides)
_DEFINE_ARRAY_FUNCTION(%, __modulus)
_DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
_DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
_DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
_DEFINE_ARRAY_FUNCTION(<<, __shift_left)
_DEFINE_ARRAY_FUNCTION(>>, __shift_right)
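
// Illustrative sketch: each invocation above stamps out the whole overload
// set for one compound-assignment operator. For example,
// _DEFINE_ARRAY_FUNCTION(+, __plus) generates, among others:
//
//   template<typename _Tp>
//     inline void
//     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
//     {
//       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
//         *__p += __t; // '_Op##= __t' expands to '+= __t'
//     }
//
// which is the kernel behind 'v += t' on a whole valarray.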

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */