libgomp/allocator.c
/* Copyright (C) 2020 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains wrappers for the system allocation routines.  Most
   places in the OpenMP API do not make any provision for failure, so in
   general we cannot allow memory allocation to fail.  */
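
/* Usage sketch (illustrative user code, not defined in this file): the
   routines below back the OpenMP 5.0 allocator API declared in omp.h.  A
   typical caller builds an allocator from traits, allocates through it,
   and then releases both the memory and the allocator:

     #include <omp.h>

     omp_alloctrait_t traits[2] = {
       { omp_atk_alignment, 64 },
       { omp_atk_fallback, omp_atv_null_fb }
     };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 2, traits);
     if (a != omp_null_allocator)
       {
         double *v = (double *) omp_alloc (1024 * sizeof (double), a);
         if (v != NULL)
           omp_free (v, a);
         omp_destroy_allocator (a);
       }  */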

#define _GNU_SOURCE
#include "libgomp.h"
#include <stdlib.h>

#define omp_max_predefined_alloc omp_thread_mem_alloc

struct omp_allocator_data
{
  omp_memspace_handle_t memspace;
  omp_uintptr_t alignment;
  omp_uintptr_t pool_size;
  omp_uintptr_t used_pool_size;
  omp_allocator_handle_t fb_data;
  unsigned int sync_hint : 8;
  unsigned int access : 8;
  unsigned int fallback : 8;
  unsigned int pinned : 1;
  unsigned int partition : 7;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_t lock;
#endif
};

struct omp_mem_header
{
  void *ptr;
  size_t size;
  omp_allocator_handle_t allocator;
  void *pad;
};
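
/* Layout note (derived from omp_alloc/omp_free below): every allocation is
   a single malloc block.  The block starts at the address kept in PTR, the
   caller is handed back RET, and the header occupies the
   sizeof (struct omp_mem_header) bytes immediately below RET, so it can be
   recovered as ((struct omp_mem_header *) RET)[-1]:

     PTR ... [alignment padding] [omp_mem_header] RET [user data] ...

   PTR and the total block size are stored in the header so omp_free can
   pass the block back to free and undo any pool accounting.  */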

omp_allocator_handle_t
omp_init_allocator (omp_memspace_handle_t memspace, int ntraits,
                    const omp_alloctrait_t traits[])
{
  struct omp_allocator_data data
    = { memspace, 1, ~(uintptr_t) 0, 0, 0, omp_atv_contended, omp_atv_all,
        omp_atv_default_mem_fb, omp_atv_false, omp_atv_environment };
  struct omp_allocator_data *ret;
  int i;

  if (memspace > omp_low_lat_mem_space)
    return omp_null_allocator;
  for (i = 0; i < ntraits; i++)
    switch (traits[i].key)
      {
      case omp_atk_sync_hint:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.sync_hint = omp_atv_contended;
            break;
          case omp_atv_contended:
          case omp_atv_uncontended:
          case omp_atv_sequential:
          case omp_atv_private:
            data.sync_hint = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_alignment:
        if (traits[i].value == omp_atv_default)
          {
            data.alignment = 1;
            break;
          }
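        /* An explicit alignment must be a nonzero power of two;
           x & (x - 1) is zero only for powers of two (and for zero,
           which the second test rejects).  */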
        if ((traits[i].value & (traits[i].value - 1)) != 0
            || !traits[i].value)
          return omp_null_allocator;
        data.alignment = traits[i].value;
        break;
      case omp_atk_access:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.access = omp_atv_all;
            break;
          case omp_atv_all:
          case omp_atv_cgroup:
          case omp_atv_pteam:
          case omp_atv_thread:
            data.access = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_pool_size:
        if (traits[i].value == omp_atv_default)
          data.pool_size = ~(uintptr_t) 0;
        else
          data.pool_size = traits[i].value;
        break;
      case omp_atk_fallback:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.fallback = omp_atv_default_mem_fb;
            break;
          case omp_atv_default_mem_fb:
          case omp_atv_null_fb:
          case omp_atv_abort_fb:
          case omp_atv_allocator_fb:
            data.fallback = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_fb_data:
        data.fb_data = traits[i].value;
        break;
      case omp_atk_pinned:
        switch (traits[i].value)
          {
          case omp_atv_default:
          case omp_atv_false:
            data.pinned = omp_atv_false;
            break;
          case omp_atv_true:
            data.pinned = omp_atv_true;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      case omp_atk_partition:
        switch (traits[i].value)
          {
          case omp_atv_default:
            data.partition = omp_atv_environment;
            break;
          case omp_atv_environment:
          case omp_atv_nearest:
          case omp_atv_blocked:
          case omp_atv_interleaved:
            data.partition = traits[i].value;
            break;
          default:
            return omp_null_allocator;
          }
        break;
      default:
        return omp_null_allocator;
      }

  if (data.alignment < sizeof (void *))
    data.alignment = sizeof (void *);

  /* No support for these so far (for hbw will use memkind).  */
  if (data.pinned || data.memspace == omp_high_bw_mem_space)
    return omp_null_allocator;

  ret = gomp_malloc (sizeof (struct omp_allocator_data));
  *ret = data;
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&ret->lock);
#endif
  return (omp_allocator_handle_t) ret;
}
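
/* omp_init_allocator legitimately returns omp_null_allocator, e.g. for an
   unsupported memory space or for a pinned request rejected above, so
   callers are expected to check the result and fall back to a predefined
   allocator.  Illustrative sketch (user code, not defined in this file):

     omp_alloctrait_t pin = { omp_atk_pinned, omp_atv_true };
     omp_allocator_handle_t a
       = omp_init_allocator (omp_default_mem_space, 1, &pin);
     if (a == omp_null_allocator)
       a = omp_default_mem_alloc;  */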

void
omp_destroy_allocator (omp_allocator_handle_t allocator)
{
  if (allocator != omp_null_allocator)
    {
#ifndef HAVE_SYNC_BUILTINS
      gomp_mutex_destroy (&((struct omp_allocator_data *) allocator)->lock);
#endif
      free ((void *) allocator);
    }
}

ialias (omp_init_allocator)
ialias (omp_destroy_allocator)

void *
omp_alloc (size_t size, omp_allocator_handle_t allocator)
{
  struct omp_allocator_data *allocator_data;
  size_t alignment, new_size;
  void *ptr, *ret;

  if (__builtin_expect (size == 0, 0))
    return NULL;

retry:
  if (allocator == omp_null_allocator)
    {
      struct gomp_thread *thr = gomp_thread ();
      if (thr->ts.def_allocator == omp_null_allocator)
        thr->ts.def_allocator = gomp_def_allocator;
      allocator = (omp_allocator_handle_t) thr->ts.def_allocator;
    }

  if (allocator > omp_max_predefined_alloc)
    {
      allocator_data = (struct omp_allocator_data *) allocator;
      alignment = allocator_data->alignment;
    }
  else
    {
      allocator_data = NULL;
      alignment = sizeof (void *);
    }

  new_size = sizeof (struct omp_mem_header);
  if (alignment > sizeof (void *))
    new_size += alignment - sizeof (void *);
  if (__builtin_add_overflow (size, new_size, &new_size))
    goto fail;

  if (__builtin_expect (allocator_data
                        && allocator_data->pool_size < ~(uintptr_t) 0, 0))
    {
      uintptr_t used_pool_size;
      if (new_size > allocator_data->pool_size)
        goto fail;
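      /* Reserve room for this allocation in used_pool_size up front: with a
         compare-and-swap loop when atomic builtins are available, otherwise
         under the allocator's mutex.  The reservation is rolled back below
         if malloc itself still fails.  */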
#ifdef HAVE_SYNC_BUILTINS
      used_pool_size = __atomic_load_n (&allocator_data->used_pool_size,
                                        MEMMODEL_RELAXED);
      do
        {
          uintptr_t new_pool_size;
          if (__builtin_add_overflow (used_pool_size, new_size,
                                      &new_pool_size)
              || new_pool_size > allocator_data->pool_size)
            goto fail;
          if (__atomic_compare_exchange_n (&allocator_data->used_pool_size,
                                           &used_pool_size, new_pool_size,
                                           true, MEMMODEL_RELAXED,
                                           MEMMODEL_RELAXED))
            break;
        }
      while (1);
#else
      gomp_mutex_lock (&allocator_data->lock);
      if (__builtin_add_overflow (allocator_data->used_pool_size, new_size,
                                  &used_pool_size)
          || used_pool_size > allocator_data->pool_size)
        {
          gomp_mutex_unlock (&allocator_data->lock);
          goto fail;
        }
      allocator_data->used_pool_size = used_pool_size;
      gomp_mutex_unlock (&allocator_data->lock);
#endif
      ptr = malloc (new_size);
      if (ptr == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -new_size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= new_size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
          goto fail;
        }
    }
  else
    {
      ptr = malloc (new_size);
      if (ptr == NULL)
        goto fail;
    }

  if (alignment > sizeof (void *))
    ret = (void *) (((uintptr_t) ptr
                     + sizeof (struct omp_mem_header)
                     + alignment - sizeof (void *)) & ~(alignment - 1));
  else
    ret = (char *) ptr + sizeof (struct omp_mem_header);
  ((struct omp_mem_header *) ret)[-1].ptr = ptr;
  ((struct omp_mem_header *) ret)[-1].size = new_size;
  ((struct omp_mem_header *) ret)[-1].allocator = allocator;
  return ret;

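/* Allocation failed (or would not fit in the pool): apply the allocator's
   fallback trait, which can mean retrying with the default allocator,
   retrying with a user-supplied fallback allocator, aborting, or simply
   returning NULL.  */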
fail:
  if (allocator_data)
    {
      switch (allocator_data->fallback)
        {
        case omp_atv_default_mem_fb:
          if (alignment > sizeof (void *)
              || (allocator_data
                  && allocator_data->pool_size < ~(uintptr_t) 0))
            {
              allocator = omp_default_mem_alloc;
              goto retry;
            }
          /* Otherwise, we've already performed default mem allocation
             and if that failed, it won't succeed again (unless it was
             intermittent).  Return NULL then, as that is the fallback.  */
          break;
        case omp_atv_null_fb:
          break;
        default:
        case omp_atv_abort_fb:
          gomp_fatal ("Out of memory allocating %lu bytes",
                      (unsigned long) size);
        case omp_atv_allocator_fb:
          allocator = allocator_data->fb_data;
          goto retry;
        }
    }
  return NULL;
}
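
/* Worked example of the alignment arithmetic above (illustrative only,
   assuming a 64-bit target where struct omp_mem_header is 32 bytes): for a
   64-byte alignment trait, new_size = size + 32 + (64 - 8).  If malloc
   returns ptr = 0x1008, then
     ret = (0x1008 + 32 + 56) & ~63 = 0x1060 & ~63 = 0x1040,
   which is 64-byte aligned, and the header lands at 0x1040 - 32 = 0x1020,
   safely inside the block and directly below ret.  */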

void
omp_free (void *ptr, omp_allocator_handle_t allocator)
{
  struct omp_mem_header *data;

  if (ptr == NULL)
    return;
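  /* The allocator argument is not consulted; the cast below only marks it
     as intentionally unused.  What matters is the allocator recorded in
     the header by omp_alloc, since that is the one whose pool accounting
     has to be undone.  */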
  (void) allocator;
  data = &((struct omp_mem_header *) ptr)[-1];
  if (data->allocator > omp_max_predefined_alloc)
    {
      struct omp_allocator_data *allocator_data
        = (struct omp_allocator_data *) (data->allocator);
      if (allocator_data->pool_size < ~(uintptr_t) 0)
        {
#ifdef HAVE_SYNC_BUILTINS
          __atomic_add_fetch (&allocator_data->used_pool_size, -data->size,
                              MEMMODEL_RELAXED);
#else
          gomp_mutex_lock (&allocator_data->lock);
          allocator_data->used_pool_size -= data->size;
          gomp_mutex_unlock (&allocator_data->lock);
#endif
        }
    }
  free (data->ptr);
}