]> gcc.gnu.org Git - gcc.git/blob - libgfortran/intrinsics/pack_generic.c
re PR fortran/30814 (non-conforming array sizes in PACK should raise an error)
[gcc.git] / libgfortran / intrinsics / pack_generic.c
1 /* Generic implementation of the PACK intrinsic
2 Copyright (C) 2002, 2004, 2005, 2006 Free Software Foundation, Inc.
3 Contributed by Paul Brook <paul@nowt.org>
4
5 This file is part of the GNU Fortran 95 runtime library (libgfortran).
6
7 Libgfortran is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
11
12 In addition to the permissions in the GNU General Public License, the
13 Free Software Foundation gives you unlimited permission to link the
14 compiled version of this file into combinations with other programs,
15 and to distribute those combinations without any restriction coming
16 from the use of this file. (The General Public License restrictions
17 do apply in other respects; for example, they cover modification of
18 the file, and distribution when not linked into a combine
19 executable.)
20
21 Libgfortran is distributed in the hope that it will be useful,
22 but WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 GNU General Public License for more details.
25
26 You should have received a copy of the GNU General Public
27 License along with libgfortran; see the file COPYING. If not,
28 write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
29 Boston, MA 02110-1301, USA. */
30
31 #include "config.h"
32 #include <stdlib.h>
33 #include <assert.h>
34 #include <string.h>
35 #include "libgfortran.h"
36
37 /* PACK is specified as follows:
38
39 13.14.80 PACK (ARRAY, MASK, [VECTOR])
40
41 Description: Pack an array into an array of rank one under the
42 control of a mask.
43
44 Class: Transformational function.
45
46 Arguments:
47 ARRAY may be of any type. It shall not be scalar.
48 MASK shall be of type LOGICAL. It shall be conformable with ARRAY.
49 VECTOR (optional) shall be of the same type and type parameters
50 as ARRAY. VECTOR shall have at least as many elements as
51 there are true elements in MASK. If MASK is a scalar
52 with the value true, VECTOR shall have at least as many
53 elements as there are in ARRAY.
54
55 Result Characteristics: The result is an array of rank one with the
56 same type and type parameters as ARRAY. If VECTOR is present, the
57 result size is that of VECTOR; otherwise, the result size is the
58 number /t/ of true elements in MASK unless MASK is scalar with the
59 value true, in which case the result size is the size of ARRAY.
60
61 Result Value: Element /i/ of the result is the element of ARRAY
62 that corresponds to the /i/th true element of MASK, taking elements
63 in array element order, for /i/ = 1, 2, ..., /t/. If VECTOR is
64 present and has size /n/ > /t/, element /i/ of the result has the
65 value VECTOR(/i/), for /i/ = /t/ + 1, ..., /n/.
66
67 Examples: The nonzero elements of an array M with the value
68 | 0 0 0 |
69 | 9 0 0 | may be "gathered" by the function PACK. The result of
70 | 0 0 7 |
71 PACK (M, MASK = M.NE.0) is [9,7] and the result of PACK (M, M.NE.0,
72 VECTOR = (/ 2,4,6,8,10,12 /)) is [9,7,6,8,10,12].
73
74 There are two variants of the PACK intrinsic: one, where MASK is
75 array valued, and the other one where MASK is scalar. */
76
77 static void
78 pack_internal (gfc_array_char *ret, const gfc_array_char *array,
79 const gfc_array_l4 *mask, const gfc_array_char *vector,
80 index_type size)
81 {
82 /* r.* indicates the return array. */
83 index_type rstride0;
84 char *rptr;
85 /* s.* indicates the source array. */
86 index_type sstride[GFC_MAX_DIMENSIONS];
87 index_type sstride0;
88 const char *sptr;
89 /* m.* indicates the mask array. */
90 index_type mstride[GFC_MAX_DIMENSIONS];
91 index_type mstride0;
92 const GFC_LOGICAL_4 *mptr;
93
94 index_type count[GFC_MAX_DIMENSIONS];
95 index_type extent[GFC_MAX_DIMENSIONS];
96 int zero_sized;
97 index_type n;
98 index_type dim;
99 index_type nelem;
100 index_type total;
101
102 dim = GFC_DESCRIPTOR_RANK (array);
103 zero_sized = 0;
104 for (n = 0; n < dim; n++)
105 {
106 count[n] = 0;
107 extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;
108 if (extent[n] <= 0)
109 zero_sized = 1;
110 sstride[n] = array->dim[n].stride * size;
111 mstride[n] = mask->dim[n].stride;
112 }
113 if (sstride[0] == 0)
114 sstride[0] = size;
115 if (mstride[0] == 0)
116 mstride[0] = 1;
117
118 sptr = array->data;
119 mptr = mask->data;
120
121 /* Use the same loop for both logical types. */
122 if (GFC_DESCRIPTOR_SIZE (mask) != 4)
123 {
124 if (GFC_DESCRIPTOR_SIZE (mask) != 8)
125 runtime_error ("Funny sized logical array");
126 for (n = 0; n < dim; n++)
127 mstride[n] <<= 1;
128 mptr = GFOR_POINTER_L8_TO_L4 (mptr);
129 }
130
131 if (ret->data == NULL || compile_options.bounds_check)
132 {
133 /* Count the elements, either for allocating memory or
134 for bounds checking. */
135
136 if (vector != NULL)
137 {
138 /* The return array will have as many
139 elements as there are in VECTOR. */
140 total = vector->dim[0].ubound + 1 - vector->dim[0].lbound;
141 }
142 else
143 {
144 /* We have to count the true elements in MASK. */
145
146 /* TODO: We could speed up pack easily in the case of only
147 few .TRUE. entries in MASK, by keeping track of where we
148 would be in the source array during the initial traversal
149 of MASK, and caching the pointers to those elements. Then,
150 supposed the number of elements is small enough, we would
151 only have to traverse the list, and copy those elements
152 into the result array. In the case of datatypes which fit
153 in one of the integer types we could also cache the
154 value instead of a pointer to it.
155 This approach might be bad from the point of view of
156 cache behavior in the case where our cache is not big
157 enough to hold all elements that have to be copied. */
158
159 const GFC_LOGICAL_4 *m = mptr;
160
161 total = 0;
162 if (zero_sized)
163 m = NULL;
164
165 while (m)
166 {
167 /* Test this element. */
168 if (*m)
169 total++;
170
171 /* Advance to the next element. */
172 m += mstride[0];
173 count[0]++;
174 n = 0;
175 while (count[n] == extent[n])
176 {
177 /* When we get to the end of a dimension, reset it
178 and increment the next dimension. */
179 count[n] = 0;
180 /* We could precalculate this product, but this is a
181 less frequently used path so probably not worth
182 it. */
183 m -= mstride[n] * extent[n];
184 n++;
185 if (n >= dim)
186 {
187 /* Break out of the loop. */
188 m = NULL;
189 break;
190 }
191 else
192 {
193 count[n]++;
194 m += mstride[n];
195 }
196 }
197 }
198 }
199
200 if (ret->data == NULL)
201 {
202 /* Setup the array descriptor. */
203 ret->dim[0].lbound = 0;
204 ret->dim[0].ubound = total - 1;
205 ret->dim[0].stride = 1;
206
207 ret->offset = 0;
208 if (total == 0)
209 {
210 /* In this case, nothing remains to be done. */
211 ret->data = internal_malloc_size (1);
212 return;
213 }
214 else
215 ret->data = internal_malloc_size (size * total);
216 }
217 else
218 {
219 /* We come here because of range checking. */
220 if (total != ret->dim[0].ubound + 1 - ret->dim[0].lbound)
221 runtime_error ("Incorrect extent in return value of"
222 " PACK intrinsic");
223 }
224 }
225
226 rstride0 = ret->dim[0].stride * size;
227 if (rstride0 == 0)
228 rstride0 = size;
229 sstride0 = sstride[0];
230 mstride0 = mstride[0];
231 rptr = ret->data;
232
233 while (sptr && mptr)
234 {
235 /* Test this element. */
236 if (*mptr)
237 {
238 /* Add it. */
239 memcpy (rptr, sptr, size);
240 rptr += rstride0;
241 }
242 /* Advance to the next element. */
243 sptr += sstride0;
244 mptr += mstride0;
245 count[0]++;
246 n = 0;
247 while (count[n] == extent[n])
248 {
249 /* When we get to the end of a dimension, reset it and increment
250 the next dimension. */
251 count[n] = 0;
252 /* We could precalculate these products, but this is a less
253 frequently used path so probably not worth it. */
254 sptr -= sstride[n] * extent[n];
255 mptr -= mstride[n] * extent[n];
256 n++;
257 if (n >= dim)
258 {
259 /* Break out of the loop. */
260 sptr = NULL;
261 break;
262 }
263 else
264 {
265 count[n]++;
266 sptr += sstride[n];
267 mptr += mstride[n];
268 }
269 }
270 }
271
272 /* Add any remaining elements from VECTOR. */
273 if (vector)
274 {
275 n = vector->dim[0].ubound + 1 - vector->dim[0].lbound;
276 nelem = ((rptr - ret->data) / rstride0);
277 if (n > nelem)
278 {
279 sstride0 = vector->dim[0].stride * size;
280 if (sstride0 == 0)
281 sstride0 = size;
282
283 sptr = vector->data + sstride0 * nelem;
284 n -= nelem;
285 while (n--)
286 {
287 memcpy (rptr, sptr, size);
288 rptr += rstride0;
289 sptr += sstride0;
290 }
291 }
292 }
293 }
294
295 extern void pack (gfc_array_char *, const gfc_array_char *,
296 const gfc_array_l4 *, const gfc_array_char *);
297 export_proto(pack);
298
299 void
300 pack (gfc_array_char *ret, const gfc_array_char *array,
301 const gfc_array_l4 *mask, const gfc_array_char *vector)
302 {
303 pack_internal (ret, array, mask, vector, GFC_DESCRIPTOR_SIZE (array));
304 }
305
306 extern void pack_char (gfc_array_char *, GFC_INTEGER_4, const gfc_array_char *,
307 const gfc_array_l4 *, const gfc_array_char *,
308 GFC_INTEGER_4, GFC_INTEGER_4);
309 export_proto(pack_char);
310
311 void
312 pack_char (gfc_array_char *ret,
313 GFC_INTEGER_4 ret_length __attribute__((unused)),
314 const gfc_array_char *array, const gfc_array_l4 *mask,
315 const gfc_array_char *vector, GFC_INTEGER_4 array_length,
316 GFC_INTEGER_4 vector_length __attribute__((unused)))
317 {
318 pack_internal (ret, array, mask, vector, array_length);
319 }
320
321 static void
322 pack_s_internal (gfc_array_char *ret, const gfc_array_char *array,
323 const GFC_LOGICAL_4 *mask, const gfc_array_char *vector,
324 index_type size)
325 {
326 /* r.* indicates the return array. */
327 index_type rstride0;
328 char *rptr;
329 /* s.* indicates the source array. */
330 index_type sstride[GFC_MAX_DIMENSIONS];
331 index_type sstride0;
332 const char *sptr;
333
334 index_type count[GFC_MAX_DIMENSIONS];
335 index_type extent[GFC_MAX_DIMENSIONS];
336 index_type n;
337 index_type dim;
338 index_type ssize;
339 index_type nelem;
340
341 dim = GFC_DESCRIPTOR_RANK (array);
342 ssize = 1;
343 for (n = 0; n < dim; n++)
344 {
345 count[n] = 0;
346 extent[n] = array->dim[n].ubound + 1 - array->dim[n].lbound;
347 sstride[n] = array->dim[n].stride * size;
348 ssize *= extent[n];
349 }
350 if (sstride[0] == 0)
351 sstride[0] = size;
352
353 sstride0 = sstride[0];
354 sptr = array->data;
355
356 if (ret->data == NULL)
357 {
358 /* Allocate the memory for the result. */
359 int total;
360
361 if (vector != NULL)
362 {
363 /* The return array will have as many elements as there are
364 in vector. */
365 total = vector->dim[0].ubound + 1 - vector->dim[0].lbound;
366 }
367 else
368 {
369 if (*mask)
370 {
371 /* The result array will have as many elements as the input
372 array. */
373 total = extent[0];
374 for (n = 1; n < dim; n++)
375 total *= extent[n];
376 }
377 else
378 /* The result array will be empty. */
379 total = 0;
380 }
381
382 /* Setup the array descriptor. */
383 ret->dim[0].lbound = 0;
384 ret->dim[0].ubound = total - 1;
385 ret->dim[0].stride = 1;
386 ret->offset = 0;
387
388 if (total == 0)
389 {
390 ret->data = internal_malloc_size (1);
391 return;
392 }
393 else
394 ret->data = internal_malloc_size (size * total);
395 }
396
397 rstride0 = ret->dim[0].stride * size;
398 if (rstride0 == 0)
399 rstride0 = size;
400 rptr = ret->data;
401
402 /* The remaining possibilities are now:
403 If MASK is .TRUE., we have to copy the source array into the
404 result array. We then have to fill it up with elements from VECTOR.
405 If MASK is .FALSE., we have to copy VECTOR into the result
406 array. If VECTOR were not present we would have already returned. */
407
408 if (*mask && ssize != 0)
409 {
410 while (sptr)
411 {
412 /* Add this element. */
413 memcpy (rptr, sptr, size);
414 rptr += rstride0;
415
416 /* Advance to the next element. */
417 sptr += sstride0;
418 count[0]++;
419 n = 0;
420 while (count[n] == extent[n])
421 {
422 /* When we get to the end of a dimension, reset it and
423 increment the next dimension. */
424 count[n] = 0;
425 /* We could precalculate these products, but this is a
426 less frequently used path so probably not worth it. */
427 sptr -= sstride[n] * extent[n];
428 n++;
429 if (n >= dim)
430 {
431 /* Break out of the loop. */
432 sptr = NULL;
433 break;
434 }
435 else
436 {
437 count[n]++;
438 sptr += sstride[n];
439 }
440 }
441 }
442 }
443
444 /* Add any remaining elements from VECTOR. */
445 if (vector)
446 {
447 n = vector->dim[0].ubound + 1 - vector->dim[0].lbound;
448 nelem = ((rptr - ret->data) / rstride0);
449 if (n > nelem)
450 {
451 sstride0 = vector->dim[0].stride * size;
452 if (sstride0 == 0)
453 sstride0 = size;
454
455 sptr = vector->data + sstride0 * nelem;
456 n -= nelem;
457 while (n--)
458 {
459 memcpy (rptr, sptr, size);
460 rptr += rstride0;
461 sptr += sstride0;
462 }
463 }
464 }
465 }
466
467 extern void pack_s (gfc_array_char *ret, const gfc_array_char *array,
468 const GFC_LOGICAL_4 *, const gfc_array_char *);
469 export_proto(pack_s);
470
471 void
472 pack_s (gfc_array_char *ret, const gfc_array_char *array,
473 const GFC_LOGICAL_4 *mask, const gfc_array_char *vector)
474 {
475 pack_s_internal (ret, array, mask, vector, GFC_DESCRIPTOR_SIZE (array));
476 }
477
478 extern void pack_s_char (gfc_array_char *ret, GFC_INTEGER_4,
479 const gfc_array_char *array, const GFC_LOGICAL_4 *,
480 const gfc_array_char *, GFC_INTEGER_4,
481 GFC_INTEGER_4);
482 export_proto(pack_s_char);
483
484 void
485 pack_s_char (gfc_array_char *ret,
486 GFC_INTEGER_4 ret_length __attribute__((unused)),
487 const gfc_array_char *array, const GFC_LOGICAL_4 *mask,
488 const gfc_array_char *vector, GFC_INTEGER_4 array_length,
489 GFC_INTEGER_4 vector_length __attribute__((unused)))
490 {
491 pack_s_internal (ret, array, mask, vector, array_length);
492 }
This page took 0.065469 seconds and 6 git commands to generate.