1 | /* Copyright (C) 2003,2004 Andi Kleen, SuSE Labs. |
2 | |
3 | libnuma is free software; you can redistribute it and/or |
4 | modify it under the terms of the GNU Lesser General Public |
5 | License as published by the Free Software Foundation; version |
6 | 2.1. |
7 | |
8 | libnuma is distributed in the hope that it will be useful, |
9 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
11 | Lesser General Public License for more details. |
12 | |
13 | You should find a copy of v2.1 of the GNU Lesser General Public License |
14 | somewhere on your Linux system; if not, write to the Free Software |
15 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ |
16 | |
17 | #ifndef _NUMA_H |
18 | #define _NUMA_H 1 |
19 | |
20 | /* allow an application to test for the current programming interface: */ |
21 | #define LIBNUMA_API_VERSION 2 |
22 | |
23 | /* Simple NUMA policy library */ |
24 | |
25 | #include <stddef.h> |
26 | #include <string.h> |
27 | #include <sys/types.h> |
28 | #include <stdlib.h> |
29 | |
30 | #if defined(__x86_64__) || defined(__i386__) |
31 | #define NUMA_NUM_NODES 128 |
32 | #else |
33 | #define NUMA_NUM_NODES 2048 |
34 | #endif |
35 | |
36 | #ifdef __cplusplus |
37 | extern "C" { |
38 | #endif |
39 | |
/* Fixed-size node mask used by the version-1 compatibility interface:
   one bit per node, NUMA_NUM_NODES bits total, packed into unsigned longs. */
typedef struct {
	unsigned long n[NUMA_NUM_NODES/(sizeof(unsigned long)*8)];
} nodemask_t;
43 | |
/* Dynamically sized bit mask (of nodes or CPUs). Allocate with
   numa_bitmask_alloc()/numa_allocate_nodemask()/numa_allocate_cpumask()
   and release with numa_bitmask_free(). */
struct bitmask {
	unsigned long size; /* number of bits in the map */
	unsigned long *maskp; /* the bits, packed into unsigned longs */
};
48 | |
/* operations on struct bitmask (see numa(3)) */
/* Return the value of bit n in the mask. */
int numa_bitmask_isbitset(const struct bitmask *, unsigned int);
/* Set all bits; returns the mask. */
struct bitmask *numa_bitmask_setall(struct bitmask *);
/* Clear all bits; returns the mask. */
struct bitmask *numa_bitmask_clearall(struct bitmask *);
/* Set bit n; returns the mask. */
struct bitmask *numa_bitmask_setbit(struct bitmask *, unsigned int);
/* Clear bit n; returns the mask. */
struct bitmask *numa_bitmask_clearbit(struct bitmask *, unsigned int);
/* Size of the mask in bytes. */
unsigned int numa_bitmask_nbytes(struct bitmask *);
/* Number of set bits in the mask. */
unsigned int numa_bitmask_weight(const struct bitmask *);
/* Allocate a zero-filled bitmask of n bits. */
struct bitmask *numa_bitmask_alloc(unsigned int);
/* Free a bitmask allocated by numa_bitmask_alloc(). */
void numa_bitmask_free(struct bitmask *);
/* Return 1 if the two masks are equal, else 0. */
int numa_bitmask_equal(const struct bitmask *, const struct bitmask *);
/* Conversions between the fixed-size nodemask_t and struct bitmask. */
void copy_nodemask_to_bitmask(nodemask_t *, struct bitmask *);
void copy_bitmask_to_nodemask(struct bitmask *, nodemask_t *);
void copy_bitmask_to_bitmask(struct bitmask *, struct bitmask *);
63 | |
/* compatibility for code that used them: */
65 | |
66 | static inline void nodemask_zero(nodemask_t *mask) |
67 | { |
68 | struct bitmask tmp; |
69 | |
70 | tmp.maskp = (unsigned long *)mask; |
71 | tmp.size = sizeof(nodemask_t) * 8; |
72 | numa_bitmask_clearall(&tmp); |
73 | } |
74 | |
75 | static inline void nodemask_zero_compat(nodemask_t *mask) |
76 | { |
77 | struct bitmask tmp; |
78 | |
79 | tmp.maskp = (unsigned long *)mask; |
80 | tmp.size = sizeof(nodemask_t) * 8; |
81 | numa_bitmask_clearall(&tmp); |
82 | } |
83 | |
84 | static inline void nodemask_set_compat(nodemask_t *mask, int node) |
85 | { |
86 | mask->n[node / (8*sizeof(unsigned long))] |= |
87 | (1UL<<(node%(8*sizeof(unsigned long)))); |
88 | } |
89 | |
90 | static inline void nodemask_clr_compat(nodemask_t *mask, int node) |
91 | { |
92 | mask->n[node / (8*sizeof(unsigned long))] &= |
93 | ~(1UL<<(node%(8*sizeof(unsigned long)))); |
94 | } |
95 | |
96 | static inline int nodemask_isset_compat(const nodemask_t *mask, int node) |
97 | { |
98 | if ((unsigned)node >= NUMA_NUM_NODES) |
99 | return 0; |
100 | if (mask->n[node / (8*sizeof(unsigned long))] & |
101 | (1UL<<(node%(8*sizeof(unsigned long))))) |
102 | return 1; |
103 | return 0; |
104 | } |
105 | |
106 | static inline int nodemask_equal(const nodemask_t *a, const nodemask_t *b) |
107 | { |
108 | struct bitmask tmp_a, tmp_b; |
109 | |
110 | tmp_a.maskp = (unsigned long *)a; |
111 | tmp_a.size = sizeof(nodemask_t) * 8; |
112 | |
113 | tmp_b.maskp = (unsigned long *)b; |
114 | tmp_b.size = sizeof(nodemask_t) * 8; |
115 | |
116 | return numa_bitmask_equal(&tmp_a, &tmp_b); |
117 | } |
118 | |
119 | static inline int nodemask_equal_compat(const nodemask_t *a, const nodemask_t *b) |
120 | { |
121 | struct bitmask tmp_a, tmp_b; |
122 | |
123 | tmp_a.maskp = (unsigned long *)a; |
124 | tmp_a.size = sizeof(nodemask_t) * 8; |
125 | |
126 | tmp_b.maskp = (unsigned long *)b; |
127 | tmp_b.size = sizeof(nodemask_t) * 8; |
128 | |
129 | return numa_bitmask_equal(&tmp_a, &tmp_b); |
130 | } |
131 | |
/* Is NUMA support available? If this returns a negative value, all other
   functions in this library are undefined. */
134 | int numa_available(void); |
135 | |
136 | /* Basic NUMA state */ |
137 | |
138 | /* Get max available node */ |
139 | int numa_max_node(void); |
140 | int numa_max_possible_node(void); |
141 | /* Return preferred node */ |
142 | int numa_preferred(void); |
143 | |
144 | /* Return node size and free memory */ |
145 | long long numa_node_size64(int node, long long *freep); |
146 | long numa_node_size(int node, long *freep); |
147 | |
148 | int numa_pagesize(void); |
149 | |
150 | /* Set with all nodes from which the calling process may allocate memory. |
151 | Only valid after numa_available. */ |
152 | extern struct bitmask *numa_all_nodes_ptr; |
153 | |
154 | /* Set with all nodes the kernel has exposed to userspace */ |
155 | extern struct bitmask *numa_nodes_ptr; |
156 | |
157 | /* For source compatibility */ |
158 | extern nodemask_t numa_all_nodes; |
159 | |
160 | /* Set with all cpus. */ |
161 | extern struct bitmask *numa_all_cpus_ptr; |
162 | |
163 | /* Set with no nodes */ |
164 | extern struct bitmask *numa_no_nodes_ptr; |
165 | |
166 | /* Source compatibility */ |
167 | extern nodemask_t numa_no_nodes; |
168 | |
169 | /* Only run and allocate memory from a specific set of nodes. */ |
170 | void numa_bind(struct bitmask *nodes); |
171 | |
172 | /* Set the NUMA node interleaving mask. 0 to turn off interleaving */ |
173 | void numa_set_interleave_mask(struct bitmask *nodemask); |
174 | |
175 | /* Return the current interleaving mask */ |
176 | struct bitmask *numa_get_interleave_mask(void); |
177 | |
178 | /* allocate a bitmask big enough for all nodes */ |
179 | struct bitmask *numa_allocate_nodemask(void); |
180 | |
/* Release a node bitmask obtained from numa_allocate_nodemask(). */
static inline void numa_free_nodemask(struct bitmask *b)
{
	numa_bitmask_free(b);
}
185 | |
186 | /* Some node to preferably allocate memory from for task. */ |
187 | void numa_set_preferred(int node); |
188 | |
189 | /* Set local memory allocation policy for task */ |
190 | void numa_set_localalloc(void); |
191 | |
192 | /* Only allocate memory from the nodes set in mask. 0 to turn off */ |
193 | void numa_set_membind(struct bitmask *nodemask); |
194 | |
195 | /* Return current membind */ |
196 | struct bitmask *numa_get_membind(void); |
197 | |
198 | /* Return allowed memories [nodes] */ |
199 | struct bitmask *numa_get_mems_allowed(void); |
200 | |
201 | int numa_get_interleave_node(void); |
202 | |
203 | /* NUMA memory allocation. These functions always round to page size |
204 | and are relatively slow. */ |
205 | |
206 | /* Alloc memory page interleaved on nodes in mask */ |
207 | void *numa_alloc_interleaved_subset(size_t size, struct bitmask *nodemask); |
208 | /* Alloc memory page interleaved on all nodes. */ |
209 | void *numa_alloc_interleaved(size_t size); |
210 | /* Alloc memory located on node */ |
211 | void *numa_alloc_onnode(size_t size, int node); |
212 | /* Alloc memory on local node */ |
213 | void *numa_alloc_local(size_t size); |
214 | /* Allocation with current policy */ |
215 | void *numa_alloc(size_t size); |
216 | /* Change the size of a memory area preserving the memory policy */ |
217 | void *numa_realloc(void *old_addr, size_t old_size, size_t new_size); |
218 | /* Free memory allocated by the functions above */ |
219 | void numa_free(void *mem, size_t size); |
220 | |
221 | /* Low level functions, primarily for shared memory. All memory |
222 | processed by these must not be touched yet */ |
223 | |
/* Interleave a memory area. */
225 | void numa_interleave_memory(void *mem, size_t size, struct bitmask *mask); |
226 | |
227 | /* Allocate a memory area on a specific node. */ |
228 | void numa_tonode_memory(void *start, size_t size, int node); |
229 | |
230 | /* Allocate memory on a mask of nodes. */ |
231 | void numa_tonodemask_memory(void *mem, size_t size, struct bitmask *mask); |
232 | |
233 | /* Allocate a memory area on the current node. */ |
234 | void numa_setlocal_memory(void *start, size_t size); |
235 | |
236 | /* Allocate memory area with current memory policy */ |
237 | void numa_police_memory(void *start, size_t size); |
238 | |
239 | /* Run current task only on nodes in mask */ |
240 | int numa_run_on_node_mask(struct bitmask *mask); |
241 | /* Run current task on nodes in mask without any cpuset awareness */ |
242 | int numa_run_on_node_mask_all(struct bitmask *mask); |
243 | /* Run current task only on node */ |
244 | int numa_run_on_node(int node); |
245 | /* Return current mask of nodes the task can run on */ |
246 | struct bitmask * numa_get_run_node_mask(void); |
247 | |
/* When strict is non-zero, fail allocations that cannot be satisfied on the target node(s). */
249 | void numa_set_bind_policy(int strict); |
250 | |
251 | /* Fail when existing memory has incompatible policy */ |
252 | void numa_set_strict(int flag); |
253 | |
/* The declarations below took an empty parameter list `()`, which in C is
   an old-style non-prototype declaration (no argument checking); `(void)`
   is the correct prototype form and is link-compatible with the existing
   definitions. */

/* maximum nodes (size of kernel nodemask_t) */
int numa_num_possible_nodes(void);

/* maximum cpus (size of kernel cpumask_t) */
int numa_num_possible_cpus(void);

/* nodes in the system */
int numa_num_configured_nodes(void);

/* maximum cpus */
int numa_num_configured_cpus(void);

/* maximum cpus allowed to current task */
int numa_num_task_cpus(void);
int numa_num_thread_cpus(void); /* backward compatibility */

/* maximum nodes allowed to current task */
int numa_num_task_nodes(void);
int numa_num_thread_nodes(void); /* backward compatibility */

/* allocate a bitmask the size of the kernel cpumask_t */
struct bitmask *numa_allocate_cpumask(void);
276 | |
/* Release a CPU bitmask obtained from numa_allocate_cpumask(). */
static inline void numa_free_cpumask(struct bitmask *b)
{
	numa_bitmask_free(b);
}
281 | |
282 | /* Convert node to CPU mask. -1/errno on failure, otherwise 0. */ |
283 | int numa_node_to_cpus(int, struct bitmask *); |
284 | |
285 | /* report the node of the specified cpu. -1/errno on invalid cpu. */ |
286 | int numa_node_of_cpu(int cpu); |
287 | |
288 | /* Report distance of node1 from node2. 0 on error.*/ |
289 | int numa_distance(int node1, int node2); |
290 | |
291 | /* Error handling. */ |
/* This is an internal function in libnuma that can be overridden by a user
   program. Default is to print an error to stderr and exit if numa_exit_on_error
   is true. */
295 | void numa_error(char *where); |
296 | |
297 | /* When true exit the program when a NUMA system call (except numa_available) |
298 | fails */ |
299 | extern int numa_exit_on_error; |
300 | /* Warning function. Can also be overwritten. Default is to print on stderr |
301 | once. */ |
302 | void numa_warn(int num, char *fmt, ...); |
303 | |
304 | /* When true exit the program on a numa_warn() call */ |
305 | extern int numa_exit_on_warn; |
306 | |
307 | int numa_migrate_pages(int pid, struct bitmask *from, struct bitmask *to); |
308 | |
309 | int numa_move_pages(int pid, unsigned long count, void **pages, |
310 | const int *nodes, int *status, int flags); |
311 | |
312 | int numa_sched_getaffinity(pid_t, struct bitmask *); |
313 | int numa_sched_setaffinity(pid_t, struct bitmask *); |
314 | |
315 | /* Convert an ascii list of nodes to a bitmask */ |
316 | struct bitmask *numa_parse_nodestring(const char *); |
317 | |
318 | /* Convert an ascii list of nodes to a bitmask without current nodeset |
319 | * dependency */ |
320 | struct bitmask *numa_parse_nodestring_all(const char *); |
321 | |
322 | /* Convert an ascii list of cpu to a bitmask */ |
323 | struct bitmask *numa_parse_cpustring(const char *); |
324 | |
325 | /* Convert an ascii list of cpu to a bitmask without current taskset |
326 | * dependency */ |
327 | struct bitmask *numa_parse_cpustring_all(const char *); |
328 | |
329 | /* |
330 | * The following functions are for source code compatibility |
331 | * with releases prior to version 2. |
332 | * Such codes should be compiled with NUMA_VERSION1_COMPATIBILITY defined. |
333 | */ |
334 | |
335 | static inline void numa_set_interleave_mask_compat(nodemask_t *nodemask) |
336 | { |
337 | struct bitmask tmp; |
338 | |
339 | tmp.maskp = (unsigned long *)nodemask; |
340 | tmp.size = sizeof(nodemask_t) * 8; |
341 | numa_set_interleave_mask(&tmp); |
342 | } |
343 | |
344 | static inline nodemask_t numa_get_interleave_mask_compat() |
345 | { |
346 | struct bitmask *tp; |
347 | nodemask_t mask; |
348 | |
349 | tp = numa_get_interleave_mask(); |
350 | copy_bitmask_to_nodemask(tp, &mask); |
351 | numa_bitmask_free(tp); |
352 | return mask; |
353 | } |
354 | |
355 | static inline void numa_bind_compat(nodemask_t *mask) |
356 | { |
357 | struct bitmask *tp; |
358 | |
359 | tp = numa_allocate_nodemask(); |
360 | copy_nodemask_to_bitmask(mask, tp); |
361 | numa_bind(tp); |
362 | numa_bitmask_free(tp); |
363 | } |
364 | |
365 | static inline void numa_set_membind_compat(nodemask_t *mask) |
366 | { |
367 | struct bitmask tmp; |
368 | |
369 | tmp.maskp = (unsigned long *)mask; |
370 | tmp.size = sizeof(nodemask_t) * 8; |
371 | numa_set_membind(&tmp); |
372 | } |
373 | |
374 | static inline nodemask_t numa_get_membind_compat() |
375 | { |
376 | struct bitmask *tp; |
377 | nodemask_t mask; |
378 | |
379 | tp = numa_get_membind(); |
380 | copy_bitmask_to_nodemask(tp, &mask); |
381 | numa_bitmask_free(tp); |
382 | return mask; |
383 | } |
384 | |
385 | static inline void *numa_alloc_interleaved_subset_compat(size_t size, |
386 | const nodemask_t *mask) |
387 | { |
388 | struct bitmask tmp; |
389 | |
390 | tmp.maskp = (unsigned long *)mask; |
391 | tmp.size = sizeof(nodemask_t) * 8; |
392 | return numa_alloc_interleaved_subset(size, &tmp); |
393 | } |
394 | |
395 | static inline int numa_run_on_node_mask_compat(const nodemask_t *mask) |
396 | { |
397 | struct bitmask tmp; |
398 | |
399 | tmp.maskp = (unsigned long *)mask; |
400 | tmp.size = sizeof(nodemask_t) * 8; |
401 | return numa_run_on_node_mask(&tmp); |
402 | } |
403 | |
404 | static inline nodemask_t numa_get_run_node_mask_compat() |
405 | { |
406 | struct bitmask *tp; |
407 | nodemask_t mask; |
408 | |
409 | tp = numa_get_run_node_mask(); |
410 | copy_bitmask_to_nodemask(tp, &mask); |
411 | numa_bitmask_free(tp); |
412 | return mask; |
413 | } |
414 | |
415 | static inline void numa_interleave_memory_compat(void *mem, size_t size, |
416 | const nodemask_t *mask) |
417 | { |
418 | struct bitmask tmp; |
419 | |
420 | tmp.maskp = (unsigned long *)mask; |
421 | tmp.size = sizeof(nodemask_t) * 8; |
422 | numa_interleave_memory(mem, size, &tmp); |
423 | } |
424 | |
425 | static inline void numa_tonodemask_memory_compat(void *mem, size_t size, |
426 | const nodemask_t *mask) |
427 | { |
428 | struct bitmask tmp; |
429 | |
430 | tmp.maskp = (unsigned long *)mask; |
431 | tmp.size = sizeof(nodemask_t) * 8; |
432 | numa_tonodemask_memory(mem, size, &tmp); |
433 | } |
434 | |
435 | static inline int numa_sched_getaffinity_compat(pid_t pid, unsigned len, |
436 | unsigned long *mask) |
437 | { |
438 | struct bitmask tmp; |
439 | |
440 | tmp.maskp = (unsigned long *)mask; |
441 | tmp.size = len * 8; |
442 | return numa_sched_getaffinity(pid, &tmp); |
443 | } |
444 | |
445 | static inline int numa_sched_setaffinity_compat(pid_t pid, unsigned len, |
446 | unsigned long *mask) |
447 | { |
448 | struct bitmask tmp; |
449 | |
450 | tmp.maskp = (unsigned long *)mask; |
451 | tmp.size = len * 8; |
452 | return numa_sched_setaffinity(pid, &tmp); |
453 | } |
454 | |
455 | static inline int numa_node_to_cpus_compat(int node, unsigned long *buffer, |
456 | int buffer_len) |
457 | { |
458 | struct bitmask tmp; |
459 | |
460 | tmp.maskp = (unsigned long *)buffer; |
461 | tmp.size = buffer_len * 8; |
462 | return numa_node_to_cpus(node, &tmp); |
463 | } |
464 | |
465 | /* end of version 1 compatibility functions */ |
466 | |
467 | /* |
468 | * To compile an application that uses libnuma version 1: |
469 | * add -DNUMA_VERSION1_COMPATIBILITY to your Makefile's CFLAGS |
470 | */ |
471 | #ifdef NUMA_VERSION1_COMPATIBILITY |
472 | #include <numacompat1.h> |
473 | #endif |
474 | |
475 | #ifdef __cplusplus |
476 | } |
477 | #endif |
478 | |
479 | #endif |
480 | |