]>
Commit | Line | Data |
---|---|---|
b2dad0e3 BK |
1 | // The template and inlines for the -*- C++ -*- internal _Array helper class. |
2 | ||
3 | // Copyright (C) 1997-2000 Free Software Foundation, Inc. | |
4 | // | |
5 | // This file is part of the GNU ISO C++ Library. This library is free | |
6 | // software; you can redistribute it and/or modify it under the | |
7 | // terms of the GNU General Public License as published by the | |
8 | // Free Software Foundation; either version 2, or (at your option) | |
9 | // any later version. | |
10 | ||
11 | // This library is distributed in the hope that it will be useful, | |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | // GNU General Public License for more details. | |
15 | ||
16 | // You should have received a copy of the GNU General Public License along | |
17 | // with this library; see the file COPYING. If not, write to the Free | |
18 | // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
19 | // USA. | |
20 | ||
21 | // As a special exception, you may use this file as part of a free software | |
22 | // library without restriction. Specifically, if other files instantiate | |
23 | // templates or use macros or inline functions from this file, or you compile | |
24 | // this file and link it with other files to produce an executable, this | |
25 | // file does not by itself cause the resulting executable to be covered by | |
26 | // the GNU General Public License. This exception does not however | |
27 | // invalidate any other reasons why the executable file might be covered by | |
28 | // the GNU General Public License. | |
29 | ||
30 | // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr> | |
31 | ||
32 | #ifndef _CPP_BITS_ARRAY_H | |
33 | #define _CPP_BITS_ARRAY_H 1 | |
34 | ||
35 | #include <bits/c++config.h> | |
36 | #include <bits/cpp_type_traits.h> | |
37 | #include <bits/std_cstdlib.h> | |
38 | #include <bits/std_cstring.h> | |
39 | #include <bits/std_new.h> | |
40 | ||
41 | namespace std | |
42 | { | |
43 | ||
44 | // | |
45 | // Helper functions on raw pointers | |
46 | // | |
47 | ||
48 | // We get memory by the old fashion way | |
49 | inline void* | |
50 | __valarray_get_memory(size_t __n) | |
51 | { return operator new(__n); } | |
52 | ||
d4c4ae6f GDR |
53 | template<typename _Tp> |
54 | inline _Tp*__restrict__ | |
55 | __valarray_get_storage(size_t __n) | |
56 | { | |
57 | return static_cast<_Tp*__restrict__> | |
58 | (__valarray_get_memory(__n * sizeof(_Tp))); | |
59 | } | |
60 | ||
b2dad0e3 BK |
61 | // Return memory to the system |
62 | inline void | |
63 | __valarray_release_memory(void* __p) | |
64 | { operator delete(__p); } | |
65 | ||
66 | // Turn a raw-memory into an array of _Tp filled with _Tp() | |
67 | // This is required in 'valarray<T> v(n);' | |
68 | template<typename _Tp, bool> | |
69 | struct _Array_default_ctor | |
70 | { | |
71 | // Please note that this isn't exception safe. But | |
72 | // valarrays aren't required to be exception safe. | |
73 | inline static void | |
74 | _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e) | |
75 | { while (__b != __e) new(__b++) _Tp(); } | |
76 | }; | |
77 | ||
78 | template<typename _Tp> | |
79 | struct _Array_default_ctor<_Tp, true> | |
80 | { | |
81 | // For fundamental types, it suffices to say 'memset()' | |
82 | inline static void | |
83 | _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e) | |
3ad62e75 | 84 | { memset(__b, 0, (__e - __b)*sizeof(_Tp)); } |
b2dad0e3 BK |
85 | }; |
86 | ||
87 | template<typename _Tp> | |
88 | inline void | |
89 | __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e) | |
90 | { | |
91 | _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: | |
92 | _S_do_it(__b, __e); | |
93 | } | |
94 | ||
95 | // Turn a raw-memory into an array of _Tp filled with __t | |
96 | // This is the required in valarray<T> v(n, t). Also | |
97 | // used in valarray<>::resize(). | |
98 | template<typename _Tp, bool> | |
99 | struct _Array_init_ctor | |
100 | { | |
101 | // Please note that this isn't exception safe. But | |
102 | // valarrays aren't required to be exception safe. | |
103 | inline static void | |
104 | _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t) | |
105 | { while (__b != __e) new(__b++) _Tp(__t); } | |
106 | }; | |
107 | ||
108 | template<typename _Tp> | |
109 | struct _Array_init_ctor<_Tp, true> | |
110 | { | |
111 | inline static void | |
112 | _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t) | |
113 | { while (__b != __e) *__b++ = __t; } | |
114 | }; | |
115 | ||
116 | template<typename _Tp> | |
117 | inline void | |
118 | __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e, | |
119 | const _Tp __t) | |
120 | { | |
121 | _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: | |
122 | _S_do_it(__b, __e, __t); | |
123 | } | |
124 | ||
125 | // | |
126 | // copy-construct raw array [__o, *) from plain array [__b, __e) | |
127 | // We can't just say 'memcpy()' | |
128 | // | |
129 | template<typename _Tp, bool> | |
130 | struct _Array_copy_ctor | |
131 | { | |
132 | // Please note that this isn't exception safe. But | |
133 | // valarrays aren't required to be exception safe. | |
134 | inline static void | |
135 | _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e, | |
136 | _Tp* __restrict__ __o) | |
137 | { while (__b != __e) new(__o++) _Tp(*__b++); } | |
138 | }; | |
139 | ||
140 | template<typename _Tp> | |
141 | struct _Array_copy_ctor<_Tp, true> | |
142 | { | |
143 | inline static void | |
144 | _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e, | |
145 | _Tp* __restrict__ __o) | |
3ad62e75 | 146 | { memcpy(__o, __b, (__e - __b)*sizeof(_Tp)); } |
b2dad0e3 BK |
147 | }; |
148 | ||
149 | template<typename _Tp> | |
150 | inline void | |
151 | __valarray_copy_construct(const _Tp* __restrict__ __b, | |
152 | const _Tp* __restrict__ __e, | |
153 | _Tp* __restrict__ __o) | |
154 | { | |
155 | _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: | |
156 | _S_do_it(__b, __e, __o); | |
157 | } | |
158 | ||
159 | // copy-construct raw array [__o, *) from strided array __a[<__n : __s>] | |
160 | template<typename _Tp> | |
161 | inline void | |
162 | __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n, | |
163 | size_t __s, _Tp* __restrict__ __o) | |
164 | { | |
165 | if (__is_fundamental<_Tp>::_M_type) | |
166 | while (__n--) { *__o++ = *__a; __a += __s; } | |
167 | else | |
168 | while (__n--) { new(__o++) _Tp(*__a); __a += __s; } | |
169 | } | |
170 | ||
171 | // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]] | |
172 | template<typename _Tp> | |
173 | inline void | |
174 | __valarray_copy_construct (const _Tp* __restrict__ __a, | |
175 | const size_t* __restrict__ __i, | |
176 | _Tp* __restrict__ __o, size_t __n) | |
177 | { | |
178 | if (__is_fundamental<_Tp>::_M_type) | |
179 | while (__n--) *__o++ = __a[*__i++]; | |
180 | else | |
181 | while (__n--) new (__o++) _Tp(__a[*__i++]); | |
182 | } | |
183 | ||
184 | // Do the necessary cleanup when we're done with arrays. | |
185 | template<typename _Tp> | |
186 | inline void | |
187 | __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e) | |
188 | { | |
189 | if (!__is_fundamental<_Tp>::_M_type) | |
190 | while (__b != __e) { __b->~_Tp(); ++__b; } | |
191 | } | |
192 | ||
193 | // fill plain array __a[<__n>] with __t | |
194 | template<typename _Tp> | |
195 | void | |
196 | __valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t) | |
197 | { while (__n--) *__a++ = __t; } | |
198 | ||
199 | // fill strided array __a[<__n-1 : __s>] with __t | |
200 | template<typename _Tp> | |
201 | inline void | |
202 | __valarray_fill (_Tp* __restrict__ __a, size_t __n, | |
203 | size_t __s, const _Tp& __t) | |
204 | { for (size_t __i=0; __i<__n; ++__i, __a+=__s) *__a = __t; } | |
205 | ||
206 | // fill indir ect array __a[__i[<__n>]] with __i | |
207 | template<typename _Tp> | |
208 | inline void | |
209 | __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i, | |
210 | size_t __n, const _Tp& __t) | |
211 | { for (size_t __j=0; __j<__n; ++__j, ++__i) __a[*__i] = __t; } | |
212 | ||
213 | // copy plain array __a[<__n>] in __b[<__n>] | |
214 | // For non-fundamental types, it is wrong to say 'memcpy()' | |
215 | template<typename _Tp, bool> | |
216 | struct _Array_copier | |
217 | { | |
218 | inline static void | |
219 | _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) | |
220 | { while (__n--) *__b++ = *__a++; } | |
221 | }; | |
222 | ||
223 | template<typename _Tp> | |
224 | struct _Array_copier<_Tp, true> | |
225 | { | |
226 | inline static void | |
227 | _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) | |
228 | { memcpy (__b, __a, __n * sizeof (_Tp)); } | |
229 | }; | |
230 | ||
231 | template<typename _Tp> | |
232 | inline void | |
233 | __valarray_copy (const _Tp* __restrict__ __a, size_t __n, | |
234 | _Tp* __restrict__ __b) | |
235 | { | |
236 | _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>:: | |
237 | _S_do_it(__a, __n, __b); | |
238 | } | |
239 | ||
240 | // copy strided array __a[<__n : __s>] in plain __b[<__n>] | |
241 | template<typename _Tp> | |
242 | inline void | |
243 | __valarray_copy (const _Tp* __restrict__ __a, size_t __n, size_t __s, | |
244 | _Tp* __restrict__ __b) | |
245 | { for (size_t __i=0; __i<__n; ++__i, ++__b, __a += __s) *__b = *__a; } | |
246 | ||
247 | // copy plain __a[<__n>] in strided __b[<__n : __s>] | |
248 | template<typename _Tp> | |
249 | inline void | |
250 | __valarray_copy (const _Tp* __restrict__ __a, _Tp* __restrict__ __b, | |
251 | size_t __n, size_t __s) | |
252 | { for (size_t __i=0; __i<__n; ++__i, ++__a, __b+=__s) *__b = *__a; } | |
253 | ||
254 | // copy indexed __a[__i[<__n>]] in plain __b[<__n>] | |
255 | template<typename _Tp> | |
256 | inline void | |
257 | __valarray_copy (const _Tp* __restrict__ __a, | |
258 | const size_t* __restrict__ __i, | |
259 | _Tp* __restrict__ __b, size_t __n) | |
260 | { for (size_t __j=0; __j<__n; ++__j, ++__b, ++__i) *__b = __a[*__i]; } | |
261 | ||
262 | // copy plain __a[<__n>] in indexed __b[__i[<__n>]] | |
263 | template<typename _Tp> | |
264 | inline void | |
265 | __valarray_copy (const _Tp* __restrict__ __a, size_t __n, | |
266 | _Tp* __restrict__ __b, const size_t* __restrict__ __i) | |
267 | { for (size_t __j=0; __j<__n; ++__j, ++__a, ++__i) __b[*__i] = *__a; } | |
268 | ||
269 | ||
270 | // | |
271 | // Compute the sum of elements in range [__f, __l) | |
272 | // This is a naive algorithm. It suffers from cancelling. | |
273 | // In the future try to specialize | |
274 | // for _Tp = float, double, long double using a more accurate | |
275 | // algorithm. | |
276 | // | |
277 | template<typename _Tp> | |
278 | inline _Tp | |
279 | __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l) | |
280 | { | |
281 | _Tp __r = _Tp(); | |
282 | while (__f != __l) __r += *__f++; | |
283 | return __r; | |
284 | } | |
285 | ||
286 | // Compute the product of all elements in range [__f, __l) | |
287 | template<typename _Tp> | |
288 | _Tp | |
289 | __valarray_product(const _Tp* __restrict__ __f, | |
290 | const _Tp* __restrict__ __l) | |
291 | { | |
292 | _Tp __r = _Tp(1); | |
293 | while (__f != __l) __r = __r * *__f++; | |
294 | return __r; | |
295 | } | |
296 | ||
297 | ||
298 | // | |
299 | // Helper class _Array, first layer of valarray abstraction. | |
300 | // All operations on valarray should be forwarded to this class | |
301 | // whenever possible. -- gdr | |
302 | // | |
303 | ||
304 | template<typename _Tp> | |
305 | struct _Array | |
306 | { | |
307 | explicit _Array (size_t); | |
308 | explicit _Array (_Tp* const __restrict__); | |
309 | explicit _Array (const valarray<_Tp>&); | |
310 | _Array (const _Tp* __restrict__, size_t); | |
311 | ||
312 | _Tp* begin () const; | |
313 | ||
314 | _Tp* const __restrict__ _M_data; | |
315 | }; | |
316 | ||
317 | template<typename _Tp> | |
318 | inline void | |
319 | __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t) | |
320 | { __valarray_fill (__a._M_data, __n, __t); } | |
321 | ||
322 | template<typename _Tp> | |
323 | inline void | |
324 | __valarray_fill (_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t) | |
325 | { __valarray_fill (__a._M_data, __n, __s, __t); } | |
326 | ||
327 | template<typename _Tp> | |
328 | inline void | |
329 | __valarray_fill (_Array<_Tp> __a, _Array<size_t> __i, | |
330 | size_t __n, const _Tp& __t) | |
331 | { __valarray_fill (__a._M_data, __i._M_data, __n, __t); } | |
332 | ||
333 | template<typename _Tp> | |
334 | inline void | |
335 | __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) | |
336 | { __valarray_copy (__a._M_data, __n, __b._M_data); } | |
337 | ||
338 | template<typename _Tp> | |
339 | inline void | |
340 | __valarray_copy (_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b) | |
341 | { __valarray_copy(__a._M_data, __n, __s, __b._M_data); } | |
342 | ||
343 | template<typename _Tp> | |
344 | inline void | |
345 | __valarray_copy (_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s) | |
346 | { __valarray_copy (__a._M_data, __b._M_data, __n, __s); } | |
347 | ||
348 | template<typename _Tp> | |
349 | inline void | |
350 | __valarray_copy (_Array<_Tp> __a, _Array<size_t> __i, | |
351 | _Array<_Tp> __b, size_t __n) | |
352 | { __valarray_copy (__a._M_data, __i._M_data, __b._M_data, __n); } | |
353 | ||
354 | template<typename _Tp> | |
355 | inline void | |
356 | __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b, | |
357 | _Array<size_t> __i) | |
358 | { __valarray_copy (__a._M_data, __n, __b._M_data, __i._M_data); } | |
359 | ||
360 | template<typename _Tp> | |
361 | inline | |
362 | _Array<_Tp>::_Array (size_t __n) | |
d4c4ae6f | 363 | : _M_data(__valarray_get_storage<_Tp>(__n)) |
b2dad0e3 BK |
364 | { __valarray_default_construct(_M_data, _M_data + __n); } |
365 | ||
366 | template<typename _Tp> | |
367 | inline | |
368 | _Array<_Tp>::_Array (_Tp* const __restrict__ __p) : _M_data (__p) {} | |
369 | ||
370 | template<typename _Tp> | |
371 | inline _Array<_Tp>::_Array (const valarray<_Tp>& __v) | |
372 | : _M_data (__v._M_data) {} | |
373 | ||
374 | template<typename _Tp> | |
375 | inline | |
376 | _Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s) | |
d4c4ae6f | 377 | : _M_data(__valarray_get_storage<_Tp>(__s)) |
b2dad0e3 BK |
378 | { __valarray_copy_construct(__b, __s, _M_data); } |
379 | ||
380 | template<typename _Tp> | |
381 | inline _Tp* | |
382 | _Array<_Tp>::begin () const | |
383 | { return _M_data; } | |
384 | ||
// _DEFINE_ARRAY_FUNCTION(_Op, _Name) expands to the family of helpers
// '_Array_augmented_<_Name>' implementing the compound assignment
// 'lhs _Op= rhs' over every addressing scheme valarray supports:
//   - plain array    _Op= scalar / plain array / expression (_Expr)
//   - strided array  _Op= plain array (both directions) / expression
//   - indexed array  _Op= plain array (both directions) / expression
//   - masked array   _Op= plain array (both directions) / expression
// The masked variants walk the bool mask in __m, skipping false
// entries on the masked side before each store.
// NOTE(review): the masked loops do not bounds-check the mask; they
// appear to assume it holds at least __n true entries -- confirm
// callers guarantee this.  Comments inside the macro body are avoided
// deliberately: a '//' comment before a line-continuation backslash
// would splice the next line into the comment.
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name)                               \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, const _Tp& __t)   \
{                                                                        \
  for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p)                 \
    *__p _Op##= __t;                                                     \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)  \
{                                                                        \
  _Tp* __p = __a._M_data;                                                \
  for (_Tp* __q=__b._M_data; __q<__b._M_data+__n; ++__p, ++__q)          \
    *__p _Op##= *__q;                                                    \
}                                                                        \
                                                                         \
template<typename _Tp, class _Dom>                                       \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a,                               \
                          const _Expr<_Dom,_Tp>& __e, size_t __n)        \
{                                                                        \
  _Tp* __p (__a._M_data);                                                \
  for (size_t __i=0; __i<__n; ++__i, ++__p) *__p _Op##= __e[__i];        \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, size_t __s,       \
                          _Array<_Tp> __b)                               \
{                                                                        \
  _Tp* __q (__b._M_data);                                                \
  for (_Tp* __p=__a._M_data; __p<__a._M_data+__s*__n; __p+=__s, ++__q)   \
    *__p _Op##= *__q;                                                    \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, _Array<_Tp> __b,              \
                          size_t __n, size_t __s)                        \
{                                                                        \
  _Tp* __q (__b._M_data);                                                \
  for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p, __q+=__s)       \
    *__p _Op##= *__q;                                                    \
}                                                                        \
                                                                         \
template<typename _Tp, class _Dom>                                       \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __s,                   \
                          const _Expr<_Dom,_Tp>& __e, size_t __n)        \
{                                                                        \
  _Tp* __p (__a._M_data);                                                \
  for (size_t __i=0; __i<__n; ++__i, __p+=__s) *__p _Op##= __e[__i];     \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, _Array<size_t> __i,           \
                          _Array<_Tp> __b, size_t __n)                   \
{                                                                        \
  _Tp* __q (__b._M_data);                                                \
  for (size_t* __j=__i._M_data; __j<__i._M_data+__n; ++__j, ++__q)       \
    __a._M_data[*__j] _Op##= *__q;                                       \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
inline void                                                              \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n,                   \
                          _Array<_Tp> __b, _Array<size_t> __i)           \
{                                                                        \
  _Tp* __p (__a._M_data);                                                \
  for (size_t* __j=__i._M_data; __j<__i._M_data+__n; ++__j, ++__p)       \
    *__p _Op##= __b._M_data[*__j];                                       \
}                                                                        \
                                                                         \
template<typename _Tp, class _Dom>                                       \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a, _Array<size_t> __i,           \
                          const _Expr<_Dom, _Tp>& __e, size_t __n)       \
{                                                                        \
  size_t* __j (__i._M_data);                                             \
  for (size_t __k=0; __k<__n; ++__k, ++__j)                              \
    __a._M_data[*__j] _Op##= __e[__k];                                   \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a, _Array<bool> __m,             \
                          _Array<_Tp> __b, size_t __n)                   \
{                                                                        \
  bool* ok (__m._M_data);                                                \
  _Tp* __p (__a._M_data);                                                \
  for (_Tp* __q=__b._M_data; __q<__b._M_data+__n; ++__q, ++ok, ++__p) {  \
    while (! *ok) {                                                      \
      ++ok;                                                              \
      ++__p;                                                             \
    }                                                                    \
    *__p _Op##= *__q;                                                    \
  }                                                                      \
}                                                                        \
                                                                         \
template<typename _Tp>                                                   \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n,                   \
                          _Array<_Tp> __b, _Array<bool> __m)             \
{                                                                        \
  bool* ok (__m._M_data);                                                \
  _Tp* __q (__b._M_data);                                                \
  for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p, ++ok, ++__q) {  \
    while (! *ok) {                                                      \
      ++ok;                                                              \
      ++__q;                                                             \
    }                                                                    \
    *__p _Op##= *__q;                                                    \
  }                                                                      \
}                                                                        \
                                                                         \
template<typename _Tp, class _Dom>                                       \
void                                                                     \
_Array_augmented_##_Name (_Array<_Tp> __a, _Array<bool> __m,             \
                          const _Expr<_Dom, _Tp>& __e, size_t __n)       \
{                                                                        \
  bool* ok(__m._M_data);                                                 \
  _Tp* __p (__a._M_data);                                                \
  for (size_t __i=0; __i<__n; ++__i, ++ok, ++__p) {                      \
    while (! *ok) {                                                      \
      ++ok;                                                              \
      ++__p;                                                             \
    }                                                                    \
    *__p _Op##= __e[__i];                                                \
  }                                                                      \
}
518 | ||
519 | _DEFINE_ARRAY_FUNCTION(+, plus) | |
520 | _DEFINE_ARRAY_FUNCTION(-, minus) | |
521 | _DEFINE_ARRAY_FUNCTION(*, multiplies) | |
522 | _DEFINE_ARRAY_FUNCTION(/, divides) | |
523 | _DEFINE_ARRAY_FUNCTION(%, modulus) | |
524 | _DEFINE_ARRAY_FUNCTION(^, xor) | |
525 | _DEFINE_ARRAY_FUNCTION(|, or) | |
526 | _DEFINE_ARRAY_FUNCTION(&, and) | |
527 | _DEFINE_ARRAY_FUNCTION(<<, shift_left) | |
528 | _DEFINE_ARRAY_FUNCTION(>>, shift_right) | |
529 | ||
530 | #undef _DEFINE_VALARRAY_FUNCTION | |
531 | ||
532 | } // std:: | |
533 | ||
534 | #ifdef _GLIBCPP_NO_TEMPLATE_EXPORT | |
535 | # define export | |
536 | # include <bits/valarray_array.tcc> | |
537 | #endif | |
538 | ||
539 | #endif /* _CPP_BITS_ARRAY_H */ | |
540 | ||
541 | // Local Variables: | |
542 | // mode:c++ | |
543 | // End: |