1 /*
2  * kmp_affinity.cpp -- affinity management
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_str.h"
18 #include "kmp_wrapper_getpid.h"
19 #if KMP_USE_HIER_SCHED
20 #include "kmp_dispatch_hier.h"
21 #endif
22 
23 // Store the real or imagined machine hierarchy here
24 static hierarchy_info machine_hierarchy;
25 
26 void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }
27 
28 void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
29  kmp_uint32 depth;
30  // The test below is true if affinity is available, but set to "none". Need to
31  // init on first use of hierarchical barrier.
32  if (TCR_1(machine_hierarchy.uninitialized))
33  machine_hierarchy.init(NULL, nproc);
34 
35  // Adjust the hierarchy in case num threads exceeds original
36  if (nproc > machine_hierarchy.base_num_threads)
37  machine_hierarchy.resize(nproc);
38 
39  depth = machine_hierarchy.depth;
40  KMP_DEBUG_ASSERT(depth > 0);
41 
42  thr_bar->depth = depth;
43  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
44  &(thr_bar->base_leaf_kids));
45  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
46 }
47 
48 #if KMP_AFFINITY_SUPPORTED
49 
50 const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
51  switch (type) {
52  case KMP_HW_SOCKET:
53  return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
54  case KMP_HW_DIE:
55  return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
56  case KMP_HW_MODULE:
57  return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
58  case KMP_HW_TILE:
59  return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
60  case KMP_HW_NUMA:
61  return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
62  case KMP_HW_L3:
63  return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
64  case KMP_HW_L2:
65  return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
66  case KMP_HW_L1:
67  return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
68  case KMP_HW_CORE:
69  return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
70  case KMP_HW_THREAD:
71  return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
72  case KMP_HW_PROC_GROUP:
73  return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
74  }
75  return KMP_I18N_STR(Unknown);
76 }
77 
78 // This function removes topology levels that are radix 1 and offer no
79 // further information about the topology. The most common example is a
80 // machine with one thread context per core: the extra thread-context level
81 // adds no unique labels, so it is removed (see the illustrative sketch
82 // after this function). Return value: the new depth of address2os.
83 static int __kmp_affinity_remove_radix_one_levels(AddrUnsPair *addrP, int nTh,
84  int depth, kmp_hw_t *types) {
85  int preference[KMP_HW_LAST];
86  int top_index1, top_index2;
87  // Set up preference associative array
88  preference[KMP_HW_PROC_GROUP] = 110;
89  preference[KMP_HW_SOCKET] = 100;
90  preference[KMP_HW_CORE] = 95;
91  preference[KMP_HW_THREAD] = 90;
92  preference[KMP_HW_DIE] = 85;
93  preference[KMP_HW_NUMA] = 80;
94  preference[KMP_HW_TILE] = 75;
95  preference[KMP_HW_MODULE] = 73;
96  preference[KMP_HW_L3] = 70;
97  preference[KMP_HW_L2] = 65;
98  preference[KMP_HW_L1] = 60;
99  top_index1 = 0;
100  top_index2 = 1;
101  while (top_index1 < depth - 1 && top_index2 < depth) {
102  KMP_DEBUG_ASSERT(top_index1 >= 0 && top_index1 < depth);
103  KMP_DEBUG_ASSERT(top_index2 >= 0 && top_index2 < depth);
104  kmp_hw_t type1 = types[top_index1];
105  kmp_hw_t type2 = types[top_index2];
106  if (type1 == KMP_HW_SOCKET && type2 == KMP_HW_CORE) {
107  top_index1 = top_index2++;
108  continue;
109  }
110  bool radix1 = true;
111  bool all_same = true;
112  int id1 = addrP[0].first.labels[top_index1];
113  int id2 = addrP[0].first.labels[top_index2];
114  int pref1 = preference[type1];
115  int pref2 = preference[type2];
116  for (int hwidx = 1; hwidx < nTh; ++hwidx) {
117  if (addrP[hwidx].first.labels[top_index1] == id1 &&
118  addrP[hwidx].first.labels[top_index2] != id2) {
119  radix1 = false;
120  break;
121  }
122  if (addrP[hwidx].first.labels[top_index2] != id2)
123  all_same = false;
124  id1 = addrP[hwidx].first.labels[top_index1];
125  id2 = addrP[hwidx].first.labels[top_index2];
126  }
127  if (radix1) {
128  // Select the layer to remove based on preference
129  kmp_hw_t remove_type, keep_type;
130  int remove_layer, remove_layer_ids;
131  if (pref1 > pref2) {
132  remove_type = type2;
133  remove_layer = remove_layer_ids = top_index2;
134  keep_type = type1;
135  } else {
136  remove_type = type1;
137  remove_layer = remove_layer_ids = top_index1;
138  keep_type = type2;
139  }
140  // If all the indexes for the second (deeper) layer are the same
141  // (e.g., all are zero), then make sure to keep the first layer's ids.
142  if (all_same)
143  remove_layer_ids = top_index2;
144  // Remove radix one type by setting the equivalence, removing the id from
145  // the hw threads and removing the layer from types and depth
146  for (int idx = 0; idx < nTh; ++idx) {
147  Address &hw_thread = addrP[idx].first;
148  for (int d = remove_layer_ids; d < depth - 1; ++d)
149  hw_thread.labels[d] = hw_thread.labels[d + 1];
150  hw_thread.depth--;
151  }
152  for (int idx = remove_layer; idx < depth - 1; ++idx)
153  types[idx] = types[idx + 1];
154  depth--;
155  } else {
156  top_index1 = top_index2++;
157  }
158  }
159  KMP_ASSERT(depth > 0);
160  return depth;
161 }
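// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): how the radix-1 removal above collapses a toy topology. Plain
// arrays stand in for the Address/AddrUnsPair types; all names and values
// here are assumptions for exposition only.
#if 0
#include <cstdio>

int main() {
  // labels[hw_thread][level], levels are {SOCKET, CORE, THREAD}; one thread
  // context per core, so the THREAD level is radix 1.
  int labels[2][3] = {{0, 0, 0}, {0, 1, 0}};
  int depth = 3;
  int remove_layer = 2; // the THREAD level offers no unique labels
  for (int t = 0; t < 2; ++t)
    for (int d = remove_layer; d < depth - 1; ++d)
      labels[t][d] = labels[t][d + 1]; // shift deeper labels up (none here)
  depth--; // new depth is 2: {SOCKET, CORE}
  for (int t = 0; t < 2; ++t)
    std::printf("hw thread %d: socket %d core %d\n", t, labels[t][0],
                labels[t][1]);
  return 0;
}
#endif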
162 // Gather the count of each topology layer and the ratio.
163 // ratio[i] is the max number of types[i] objects per types[i-1] object
164 // (ratio[0] is the total); count[i] is the total number of types[i] objects.
165 static void __kmp_affinity_gather_enumeration_information(AddrUnsPair *addrP,
166  int nTh, int depth,
167  kmp_hw_t *types,
168  int *ratio,
169  int *count) {
170  int previous_id[KMP_HW_LAST];
171  int max[KMP_HW_LAST];
172 
173  for (int i = 0; i < depth; ++i) {
174  previous_id[i] = -1;
175  max[i] = 0;
176  count[i] = 0;
177  ratio[i] = 0;
178  }
179  for (int i = 0; i < nTh; ++i) {
180  Address &hw_thread = addrP[i].first;
181  for (int layer = 0; layer < depth; ++layer) {
182  int id = hw_thread.labels[layer];
183  if (id != previous_id[layer]) {
184  // Add an additional increment to each count
185  for (int l = layer; l < depth; ++l)
186  count[l]++;
187  // Keep track of topology layer ratio statistics
188  max[layer]++;
189  for (int l = layer + 1; l < depth; ++l) {
190  if (max[l] > ratio[l])
191  ratio[l] = max[l];
192  max[l] = 1;
193  }
194  break;
195  }
196  }
197  for (int layer = 0; layer < depth; ++layer) {
198  previous_id[layer] = hw_thread.labels[layer];
199  }
200  }
201  for (int layer = 0; layer < depth; ++layer) {
202  if (max[layer] > ratio[layer])
203  ratio[layer] = max[layer];
204  }
205 }
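// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): what count[] and ratio[] hold for a uniform 2-socket x 2-core x
// 2-thread machine with types = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD}.
// Plain arrays stand in for AddrUnsPair; the label values are made up.
#if 0
#include <cstdio>

int main() {
  const int nTh = 8, depth = 3;
  int labels[nTh][depth];
  for (int t = 0; t < nTh; ++t) {
    labels[t][0] = t / 4; // socket id
    labels[t][1] = t / 2; // globally unique core id
    labels[t][2] = t;     // globally unique thread id
  }
  int count[depth] = {0, 0, 0}, ratio[depth] = {0, 0, 0};
  int max[depth] = {0, 0, 0}, prev[depth] = {-1, -1, -1};
  for (int t = 0; t < nTh; ++t) {
    for (int l = 0; l < depth; ++l) {
      if (labels[t][l] != prev[l]) {
        for (int d = l; d < depth; ++d)
          count[d]++; // one more object at this and every deeper level
        max[l]++;
        for (int d = l + 1; d < depth; ++d) {
          if (max[d] > ratio[d])
            ratio[d] = max[d]; // children-per-parent high-water mark
          max[d] = 1;
        }
        break;
      }
    }
    for (int l = 0; l < depth; ++l)
      prev[l] = labels[t][l];
  }
  for (int l = 0; l < depth; ++l)
    if (max[l] > ratio[l])
      ratio[l] = max[l];
  // Expected result: count = {2, 4, 8}, ratio = {2, 2, 2}
  for (int l = 0; l < depth; ++l)
    std::printf("level %d: count=%d ratio=%d\n", l, count[l], ratio[l]);
  return 0;
}
#endif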
206 
207 // Find out if the topology is uniform
208 static bool __kmp_affinity_discover_uniformity(int depth, int *ratio,
209  int *count) {
210  int num = 1;
211  for (int level = 0; level < depth; ++level)
212  num *= ratio[level];
213  return (num == count[depth - 1]);
214 }
215 
216 // calculate the number of X's per Y
217 static inline int __kmp_affinity_calculate_ratio(int *ratio, int deep_level,
218  int shallow_level) {
219  int retval = 1;
220  if (deep_level < 0 || shallow_level < 0)
221  return retval;
222  for (int level = deep_level; level > shallow_level; --level)
223  retval *= ratio[level];
224  return retval;
225 }
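// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): consuming ratio[]/count[] with the two helpers above, reusing the
// made-up 2-socket x 2-core x 2-thread values from the previous sketch.
#if 0
static int __kmp_example_consume_ratios() {
  int ratio[3] = {2, 2, 2}; // sockets, cores per socket, threads per core
  int count[3] = {2, 4, 8}; // total sockets, cores, threads
  int socket_level = 0, thread_level = 2;
  // threads per socket = ratio[2] * ratio[1] = 4
  int threads_per_socket =
      __kmp_affinity_calculate_ratio(ratio, thread_level, socket_level);
  // 2 * 2 * 2 == count[2], so the topology is reported as uniform
  bool uniform = __kmp_affinity_discover_uniformity(3, ratio, count);
  return threads_per_socket + (uniform ? 1 : 0); // 5
}
#endif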
226 
227 static void __kmp_affinity_print_topology(AddrUnsPair *addrP, int len,
228  int depth, kmp_hw_t *types) {
229  int proc;
230  kmp_str_buf_t buf;
231  __kmp_str_buf_init(&buf);
232  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
233  for (proc = 0; proc < len; proc++) {
234  for (int i = 0; i < depth; ++i) {
235  __kmp_str_buf_print(&buf, "%s %d ", __kmp_hw_get_catalog_string(types[i]),
236  addrP[proc].first.labels[i]);
237  }
238  KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", addrP[proc].second, buf.str);
239  __kmp_str_buf_clear(&buf);
240  }
241  __kmp_str_buf_free(&buf);
242 }
243 
244 // Print out the detailed machine topology map, i.e. the physical locations
245 // of each OS proc.
246 static void __kmp_affinity_print_topology(AddrUnsPair *address2os, int len,
247  int depth, int pkgLevel,
248  int coreLevel, int threadLevel) {
249  int proc;
250 
251  KMP_INFORM(OSProcToPhysicalThreadMap, "KMP_AFFINITY");
252  for (proc = 0; proc < len; proc++) {
253  int level;
254  kmp_str_buf_t buf;
255  __kmp_str_buf_init(&buf);
256  for (level = 0; level < depth; level++) {
257  if (level == threadLevel) {
258  __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Thread));
259  } else if (level == coreLevel) {
260  __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Core));
261  } else if (level == pkgLevel) {
262  __kmp_str_buf_print(&buf, "%s ", KMP_I18N_STR(Package));
263  } else if (level > pkgLevel) {
264  __kmp_str_buf_print(&buf, "%s_%d ", KMP_I18N_STR(Node),
265  level - pkgLevel - 1);
266  } else {
267  __kmp_str_buf_print(&buf, "L%d ", level);
268  }
269  __kmp_str_buf_print(&buf, "%d ", address2os[proc].first.labels[level]);
270  }
271  KMP_INFORM(OSProcMapToPack, "KMP_AFFINITY", address2os[proc].second,
272  buf.str);
273  __kmp_str_buf_free(&buf);
274  }
275 }
276 
277 bool KMPAffinity::picked_api = false;
278 
279 void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
280 void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
281 void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
282 void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
283 void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
284 void KMPAffinity::operator delete(void *p) { __kmp_free(p); }
285 
286 void KMPAffinity::pick_api() {
287  KMPAffinity *affinity_dispatch;
288  if (picked_api)
289  return;
290 #if KMP_USE_HWLOC
291  // Only use Hwloc if affinity isn't explicitly disabled and
292  // user requests Hwloc topology method
293  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
294  __kmp_affinity_type != affinity_disabled) {
295  affinity_dispatch = new KMPHwlocAffinity();
296  } else
297 #endif
298  {
299  affinity_dispatch = new KMPNativeAffinity();
300  }
301  __kmp_affinity_dispatch = affinity_dispatch;
302  picked_api = true;
303 }
304 
305 void KMPAffinity::destroy_api() {
306  if (__kmp_affinity_dispatch != NULL) {
307  delete __kmp_affinity_dispatch;
308  __kmp_affinity_dispatch = NULL;
309  picked_api = false;
310  }
311 }
312 
313 #define KMP_ADVANCE_SCAN(scan) \
314  while (*scan != '\0') { \
315  scan++; \
316  }
317 
318 // Print the affinity mask to the character array in a pretty format.
319 // The format is a comma separated list of non-negative integers or integer
320 // ranges: e.g., 1,2,3-5,7,9-15
321 // The format can also be the string "{<empty>}" if no bits are set in mask
322 char *__kmp_affinity_print_mask(char *buf, int buf_len,
323  kmp_affin_mask_t *mask) {
324  int start = 0, finish = 0, previous = 0;
325  bool first_range;
326  KMP_ASSERT(buf);
327  KMP_ASSERT(buf_len >= 40);
328  KMP_ASSERT(mask);
329  char *scan = buf;
330  char *end = buf + buf_len - 1;
331 
332  // Check for empty set.
333  if (mask->begin() == mask->end()) {
334  KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
335  KMP_ADVANCE_SCAN(scan);
336  KMP_ASSERT(scan <= end);
337  return buf;
338  }
339 
340  first_range = true;
341  start = mask->begin();
342  while (1) {
343  // Find next range
344  // [start, previous] is inclusive range of contiguous bits in mask
345  for (finish = mask->next(start), previous = start;
346  finish == previous + 1 && finish != mask->end();
347  finish = mask->next(finish)) {
348  previous = finish;
349  }
350 
351  // The first range does not need a comma printed before it, but the rest
352  // of the ranges do need a comma beforehand
353  if (!first_range) {
354  KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
355  KMP_ADVANCE_SCAN(scan);
356  } else {
357  first_range = false;
358  }
359  // Range with three or more contiguous bits in the affinity mask
360  if (previous - start > 1) {
361  KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
362  } else {
363  // Range with one or two contiguous bits in the affinity mask
364  KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
365  KMP_ADVANCE_SCAN(scan);
366  if (previous - start > 0) {
367  KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
368  }
369  }
370  KMP_ADVANCE_SCAN(scan);
371  // Start over with new start point
372  start = finish;
373  if (start == mask->end())
374  break;
375  // Check for overflow
376  if (end - scan < 2)
377  break;
378  }
379 
380  // Check for overflow
381  KMP_ASSERT(scan <= end);
382  return buf;
383 }
384 #undef KMP_ADVANCE_SCAN
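// A minimal usage sketch (hypothetical, kept out of the build via #if 0):
// printing a mask with bits 0-3 and 6 set yields "0-3,6". The routine asserts
// a buffer of at least 40 bytes; KMP_AFFIN_MASK_PRINT_LEN from kmp.h is
// assumed here to satisfy that.
#if 0
static void __kmp_example_print_mask() {
  char buf[KMP_AFFIN_MASK_PRINT_LEN];
  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC(mask);
  KMP_CPU_ZERO(mask);
  for (int i = 0; i <= 3; ++i)
    KMP_CPU_SET(i, mask);
  KMP_CPU_SET(6, mask);
  __kmp_affinity_print_mask(buf, sizeof(buf), mask); // buf now holds "0-3,6"
  KMP_CPU_FREE(mask);
}
#endif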
385 
386 // Print the affinity mask to the string buffer object in a pretty format
387 // The format is a comma separated list of non-negative integers or integer
388 // ranges: e.g., 1,2,3-5,7,9-15
389 // The format can also be the string "{<empty>}" if no bits are set in mask
390 kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
391  kmp_affin_mask_t *mask) {
392  int start = 0, finish = 0, previous = 0;
393  bool first_range;
394  KMP_ASSERT(buf);
395  KMP_ASSERT(mask);
396 
397  __kmp_str_buf_clear(buf);
398 
399  // Check for empty set.
400  if (mask->begin() == mask->end()) {
401  __kmp_str_buf_print(buf, "%s", "{<empty>}");
402  return buf;
403  }
404 
405  first_range = true;
406  start = mask->begin();
407  while (1) {
408  // Find next range
409  // [start, previous] is inclusive range of contiguous bits in mask
410  for (finish = mask->next(start), previous = start;
411  finish == previous + 1 && finish != mask->end();
412  finish = mask->next(finish)) {
413  previous = finish;
414  }
415 
416  // The first range does not need a comma printed before it, but the rest
417  // of the ranges do need a comma beforehand
418  if (!first_range) {
419  __kmp_str_buf_print(buf, "%s", ",");
420  } else {
421  first_range = false;
422  }
423  // Range with three or more contiguous bits in the affinity mask
424  if (previous - start > 1) {
425  __kmp_str_buf_print(buf, "%u-%u", start, previous);
426  } else {
427  // Range with one or two contiguous bits in the affinity mask
428  __kmp_str_buf_print(buf, "%u", start);
429  if (previous - start > 0) {
430  __kmp_str_buf_print(buf, ",%u", previous);
431  }
432  }
433  // Start over with new start point
434  start = finish;
435  if (start == mask->end())
436  break;
437  }
438  return buf;
439 }
440 
441 void __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) {
442  KMP_CPU_ZERO(mask);
443 
444 #if KMP_GROUP_AFFINITY
445 
446  if (__kmp_num_proc_groups > 1) {
447  int group;
448  KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
449  for (group = 0; group < __kmp_num_proc_groups; group++) {
450  int i;
451  int num = __kmp_GetActiveProcessorCount(group);
452  for (i = 0; i < num; i++) {
453  KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask);
454  }
455  }
456  } else
457 
458 #endif /* KMP_GROUP_AFFINITY */
459 
460  {
461  int proc;
462  for (proc = 0; proc < __kmp_xproc; proc++) {
463  KMP_CPU_SET(proc, mask);
464  }
465  }
466 }
467 
468 // When sorting by labels, __kmp_affinity_assign_child_nums() must first be
469 // called to renumber the labels from [0..n] and place them into the child_num
470 // vector of the address object. This is done in case the labels used for
471 // the children at one node of the hierarchy differ from those used for
472 // another node at the same level. Example: suppose the machine has 2 nodes
473 // with 2 packages each. The first node contains packages 601 and 602, and
474 // the second node contains packages 603 and 604. If we try to sort the table
475 // for "scatter" affinity, the table will still be sorted 601, 602, 603, 604
476 // because we are paying attention to the labels themselves, not the ordinal
477 // child numbers. By using the child numbers in the sort, the result is
478 // {0,0}=601, {0,1}=603, {1,0}=602, {1,1}=604.
479 static void __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
480  int numAddrs) {
481  KMP_DEBUG_ASSERT(numAddrs > 0);
482  int depth = address2os->first.depth;
483  unsigned *counts = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
484  unsigned *lastLabel = (unsigned *)__kmp_allocate(depth * sizeof(unsigned));
485  int labCt;
486  for (labCt = 0; labCt < depth; labCt++) {
487  address2os[0].first.childNums[labCt] = counts[labCt] = 0;
488  lastLabel[labCt] = address2os[0].first.labels[labCt];
489  }
490  int i;
491  for (i = 1; i < numAddrs; i++) {
492  for (labCt = 0; labCt < depth; labCt++) {
493  if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
494  int labCt2;
495  for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
496  counts[labCt2] = 0;
497  lastLabel[labCt2] = address2os[i].first.labels[labCt2];
498  }
499  counts[labCt]++;
500  lastLabel[labCt] = address2os[i].first.labels[labCt];
501  break;
502  }
503  }
504  for (labCt = 0; labCt < depth; labCt++) {
505  address2os[i].first.childNums[labCt] = counts[labCt];
506  }
507  for (; labCt < (int)Address::maxDepth; labCt++) {
508  address2os[i].first.childNums[labCt] = 0;
509  }
510  }
511  __kmp_free(lastLabel);
512  __kmp_free(counts);
513 }
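// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): the renumbering described in the comment above, using the made-up
// package labels 601..604 and assuming the Address(depth) and
// AddrUnsPair(Address, unsigned) constructors from kmp_affinity.h.
#if 0
static void __kmp_example_child_nums() {
  AddrUnsPair table[4] = {
      AddrUnsPair(Address(2), 0), AddrUnsPair(Address(2), 1),
      AddrUnsPair(Address(2), 2), AddrUnsPair(Address(2), 3)};
  unsigned labels[4][2] = {{0, 601}, {0, 602}, {1, 603}, {1, 604}};
  for (int i = 0; i < 4; ++i)
    for (int l = 0; l < 2; ++l)
      table[i].first.labels[l] = labels[i][l];
  __kmp_affinity_assign_child_nums(table, 4);
  // childNums are now ordinal positions rather than the raw labels:
  // table[0] == {0,0}, table[1] == {0,1}, table[2] == {1,0}, table[3] == {1,1}
  // so sorting by child numbers interleaves the two nodes for "scatter".
}
#endif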
514 
515 // All of the __kmp_affinity_create_*_map() routines should set
516 // __kmp_affinity_masks to a vector of affinity mask objects of length
517 // __kmp_affinity_num_masks, if __kmp_affinity_type != affinity_none, and return
518 // the number of levels in the machine topology tree (zero if
519 // __kmp_affinity_type == affinity_none).
520 //
521 // All of the __kmp_affinity_create_*_map() routines should set
522 // *__kmp_affin_fullMask to the affinity mask for the initialization thread.
523 // They need to save and restore the mask, and it could be needed later, so
524 // saving it is just an optimization to avoid calling __kmp_get_system_affinity()
525 // again.
526 kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
527 
528 static int nCoresPerPkg, nPackages;
529 static int __kmp_nThreadsPerCore;
530 #ifndef KMP_DFLT_NTH_CORES
531 static int __kmp_ncores;
532 #endif
533 static int *__kmp_pu_os_idx = NULL;
534 static int nDiesPerPkg = 1;
535 
536 // __kmp_affinity_uniform_topology() doesn't work when called from
537 // places which support arbitrarily many levels in the machine topology
538 // map, i.e. the non-default cases in __kmp_affinity_create_cpuinfo_map()
539 // or __kmp_affinity_create_x2apicid_map().
540 inline static bool __kmp_affinity_uniform_topology() {
541  return __kmp_avail_proc ==
542  (__kmp_nThreadsPerCore * nCoresPerPkg * nDiesPerPkg * nPackages);
543 }
544 
545 #if KMP_USE_HWLOC
546 
547 static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) {
548 #if HWLOC_API_VERSION >= 0x00020000
549  return hwloc_obj_type_is_cache(obj->type);
550 #else
551  return obj->type == HWLOC_OBJ_CACHE;
552 #endif
553 }
554 
555 // Returns KMP_HW_* type derived from HWLOC_* type
556 static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) {
557 
558  if (__kmp_hwloc_is_cache_type(obj)) {
559  if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION)
560  return KMP_HW_UNKNOWN;
561  switch (obj->attr->cache.depth) {
562  case 1:
563  return KMP_HW_L1;
564  case 2:
565 #if KMP_MIC_SUPPORTED
566  if (__kmp_mic_type == mic3) {
567  return KMP_HW_TILE;
568  }
569 #endif
570  return KMP_HW_L2;
571  case 3:
572  return KMP_HW_L3;
573  }
574  return KMP_HW_UNKNOWN;
575  }
576 
577  switch (obj->type) {
578  case HWLOC_OBJ_PACKAGE:
579  return KMP_HW_SOCKET;
580  case HWLOC_OBJ_NUMANODE:
581  return KMP_HW_NUMA;
582  case HWLOC_OBJ_CORE:
583  return KMP_HW_CORE;
584  case HWLOC_OBJ_PU:
585  return KMP_HW_THREAD;
586  }
587  return KMP_HW_UNKNOWN;
588 }
589 
590 // Returns the number of objects of type 'type' below 'obj' within the topology
591 // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is
592 // HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET
593 // object.
594 static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj,
595  hwloc_obj_type_t type) {
596  int retval = 0;
597  hwloc_obj_t first;
598  for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type,
599  obj->logical_index, type, 0);
600  first != NULL &&
601  hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) ==
602  obj;
603  first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type,
604  first)) {
605  ++retval;
606  }
607  return retval;
608 }
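// A minimal usage sketch (hypothetical, kept out of the build via #if 0):
// counting the processing units below the first package object, similar to
// how the fallback path later in this file derives nCoresPerPkg and
// __kmp_nThreadsPerCore when the process is not affinity capable.
#if 0
static int __kmp_example_count_pus_in_first_package() {
  hwloc_obj_t pkg =
      hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_PACKAGE, 0);
  if (pkg == NULL)
    return 0;
  return __kmp_hwloc_get_nobjs_under_obj(pkg, HWLOC_OBJ_PU);
}
#endif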
609 
610 static int __kmp_hwloc_count_children_by_depth(hwloc_topology_t t,
611  hwloc_obj_t o,
612  kmp_hwloc_depth_t depth,
613  hwloc_obj_t *f) {
614  if (o->depth == depth) {
615  if (*f == NULL)
616  *f = o; // output first descendant found
617  return 1;
618  }
619  int sum = 0;
620  for (unsigned i = 0; i < o->arity; i++)
621  sum += __kmp_hwloc_count_children_by_depth(t, o->children[i], depth, f);
622  return sum; // will be 0 if none are found (a PU has arity 0)
623 }
624 
625 static int __kmp_hwloc_count_children_by_type(hwloc_topology_t t, hwloc_obj_t o,
626  hwloc_obj_type_t type,
627  hwloc_obj_t *f) {
628  if (!hwloc_compare_types(o->type, type)) {
629  if (*f == NULL)
630  *f = o; // output first descendant found
631  return 1;
632  }
633  int sum = 0;
634  for (unsigned i = 0; i < o->arity; i++)
635  sum += __kmp_hwloc_count_children_by_type(t, o->children[i], type, f);
636  return sum; // will be 0 if none are found (a PU has arity 0)
637 }
638 
639 // This gets the sub_id for a lower object under a higher object in the
640 // topology tree
641 static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher,
642  hwloc_obj_t lower) {
643  hwloc_obj_t obj;
644  hwloc_obj_type_t ltype = lower->type;
645  int lindex = lower->logical_index - 1;
646  int sub_id = 0;
647  // Get the previous lower object
648  obj = hwloc_get_obj_by_type(t, ltype, lindex);
649  while (obj && lindex >= 0 &&
650  hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) {
651  if (obj->userdata) {
652  sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata));
653  break;
654  }
655  sub_id++;
656  lindex--;
657  obj = hwloc_get_obj_by_type(t, ltype, lindex);
658  }
659  // store sub_id + 1 so that 0 is distinguished from NULL
660  lower->userdata = RCAST(void *, sub_id + 1);
661  return sub_id;
662 }
663 
664 static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
665  kmp_i18n_id_t *const msg_id) {
666  kmp_hw_t type;
667  int hw_thread_index, sub_id, nActiveThreads;
668  int depth;
669  hwloc_obj_t pu, obj, root, prev;
670  int ratio[KMP_HW_LAST];
671  int count[KMP_HW_LAST];
672  kmp_hw_t types[KMP_HW_LAST];
673 
674  hwloc_topology_t tp = __kmp_hwloc_topology;
675  *msg_id = kmp_i18n_null;
676 
677  // Save the affinity mask for the current thread.
678  kmp_affin_mask_t *oldMask;
679  KMP_CPU_ALLOC(oldMask);
680  __kmp_get_system_affinity(oldMask, TRUE);
681 
682  if (!KMP_AFFINITY_CAPABLE()) {
683  // Hack to try and infer the machine topology using only the data
684  // available from cpuid on the current thread, and __kmp_xproc.
685  KMP_ASSERT(__kmp_affinity_type == affinity_none);
686  // hwloc only guarantees existence of PU object, so check PACKAGE and CORE
687  hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0);
688  if (o != NULL)
689  nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE);
690  else
691  nCoresPerPkg = 1; // no PACKAGE found
692  o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0);
693  if (o != NULL)
694  __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU);
695  else
696  __kmp_nThreadsPerCore = 1; // no CORE found
697  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
698  if (nCoresPerPkg == 0)
699  nCoresPerPkg = 1; // to prevent possible division by 0
700  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
701  if (__kmp_affinity_verbose) {
702  KMP_INFORM(AffNotUsingHwloc, "KMP_AFFINITY");
703  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
704  if (__kmp_affinity_uniform_topology()) {
705  KMP_INFORM(Uniform, "KMP_AFFINITY");
706  } else {
707  KMP_INFORM(NonUniform, "KMP_AFFINITY");
708  }
709  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
710  __kmp_nThreadsPerCore, __kmp_ncores);
711  }
712  KMP_CPU_FREE(oldMask);
713  return 0;
714  }
715 
716  root = hwloc_get_root_obj(tp);
717 
718  // Figure out the depth and types in the topology
719  depth = 0;
720  pu = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin());
721  obj = pu;
722  types[depth] = KMP_HW_THREAD;
723  depth++;
724  while (obj != root && obj != NULL) {
725  obj = obj->parent;
726 #if HWLOC_API_VERSION >= 0x00020000
727  if (obj->memory_arity) {
728  hwloc_obj_t memory;
729  for (memory = obj->memory_first_child; memory;
730  memory = hwloc_get_next_child(tp, obj, memory)) {
731  if (memory->type == HWLOC_OBJ_NUMANODE)
732  break;
733  }
734  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
735  types[depth] = KMP_HW_NUMA;
736  depth++;
737  }
738  }
739 #endif
740  type = __kmp_hwloc_type_2_topology_type(obj);
741  if (type != KMP_HW_UNKNOWN) {
742  types[depth] = type;
743  depth++;
744  }
745  }
746  KMP_ASSERT(depth > 0 && depth <= KMP_HW_LAST);
747 
748  // Get the order for the types correct
749  for (int i = 0, j = depth - 1; i < j; ++i, --j) {
750  kmp_hw_t temp = types[i];
751  types[i] = types[j];
752  types[j] = temp;
753  }
754 
755  // Allocate the data structure to be returned.
756  AddrUnsPair *retval =
757  (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
758  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
759  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
760 
761  hw_thread_index = 0;
762  pu = NULL;
763  nActiveThreads = 0;
764  while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) {
765  int index = depth - 1;
766  bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask);
767  Address hw_thread(depth);
768  if (included) {
769  hw_thread.labels[index] = pu->logical_index;
770  __kmp_pu_os_idx[hw_thread_index] = pu->os_index;
771  index--;
772  nActiveThreads++;
773  }
774  obj = pu;
775  prev = obj;
776  while (obj != root && obj != NULL) {
777  obj = obj->parent;
778 #if HWLOC_API_VERSION >= 0x00020000
779  // NUMA Nodes are handled differently since they are not within the
780  // parent/child structure anymore. They are separate children
781  // of obj (memory_first_child points to first memory child)
782  if (obj->memory_arity) {
783  hwloc_obj_t memory;
784  for (memory = obj->memory_first_child; memory;
785  memory = hwloc_get_next_child(tp, obj, memory)) {
786  if (memory->type == HWLOC_OBJ_NUMANODE)
787  break;
788  }
789  if (memory && memory->type == HWLOC_OBJ_NUMANODE) {
790  sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev);
791  if (included) {
792  hw_thread.labels[index] = memory->logical_index;
793  hw_thread.labels[index + 1] = sub_id;
794  index--;
795  }
796  prev = memory;
797  }
798  }
799 #endif
800  type = __kmp_hwloc_type_2_topology_type(obj);
801  if (type != KMP_HW_UNKNOWN) {
802  sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev);
803  if (included) {
804  hw_thread.labels[index] = obj->logical_index;
805  hw_thread.labels[index + 1] = sub_id;
806  index--;
807  }
808  prev = obj;
809  }
810  }
811  if (included) {
812  retval[hw_thread_index] = AddrUnsPair(hw_thread, pu->os_index);
813  hw_thread_index++;
814  }
815  }
816 
817  // If there's only one thread context to bind to, return now.
818  KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
819  KMP_ASSERT(nActiveThreads > 0);
820  if (nActiveThreads == 1) {
821  __kmp_ncores = nPackages = 1;
822  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
823  if (__kmp_affinity_verbose) {
824  KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
825  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
826  KMP_INFORM(Uniform, "KMP_AFFINITY");
827  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
828  __kmp_nThreadsPerCore, __kmp_ncores);
829  }
830 
831  if (__kmp_affinity_type == affinity_none) {
832  __kmp_free(retval);
833  KMP_CPU_FREE(oldMask);
834  return 0;
835  }
836 
837  // Form an Address object which only includes the package level.
838  Address addr(1);
839  addr.labels[0] = retval[0].first.labels[0];
840  retval[0].first = addr;
841 
842  if (__kmp_affinity_gran_levels < 0) {
843  __kmp_affinity_gran_levels = 0;
844  }
845 
846  if (__kmp_affinity_verbose) {
847  __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
848  }
849 
850  *address2os = retval;
851  KMP_CPU_FREE(oldMask);
852  return 1;
853  }
854 
855  // Sort the table by physical Id.
856  qsort(retval, nActiveThreads, sizeof(*retval),
857  __kmp_affinity_cmp_Address_labels);
858 
859  // Find any levels with radix 1, and remove them from the map
860  // (except for the package level).
861  depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
862  types);
863 
864  __kmp_affinity_gather_enumeration_information(retval, nActiveThreads, depth,
865  types, ratio, count);
866 
867  for (int level = 0; level < depth; ++level) {
868  if ((types[level] == KMP_HW_L2 || types[level] == KMP_HW_L3))
869  __kmp_tile_depth = level;
870  }
871 
872  // This routine should set __kmp_ncores, as well as
873  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
874  int thread_level, core_level, tile_level, numa_level, socket_level;
875  thread_level = core_level = tile_level = numa_level = socket_level = -1;
876  for (int level = 0; level < depth; ++level) {
877  if (types[level] == KMP_HW_THREAD)
878  thread_level = level;
879  else if (types[level] == KMP_HW_CORE)
880  core_level = level;
881  else if (types[level] == KMP_HW_SOCKET)
882  socket_level = level;
883  else if (types[level] == KMP_HW_TILE)
884  tile_level = level;
885  else if (types[level] == KMP_HW_NUMA)
886  numa_level = level;
887  }
888  __kmp_nThreadsPerCore =
889  __kmp_affinity_calculate_ratio(ratio, thread_level, core_level);
890  nCoresPerPkg =
891  __kmp_affinity_calculate_ratio(ratio, core_level, socket_level);
892  if (socket_level >= 0)
893  nPackages = count[socket_level];
894  else
895  nPackages = 1;
896  if (core_level >= 0)
897  __kmp_ncores = count[core_level];
898  else
899  __kmp_ncores = 1;
900 
901  unsigned uniform = __kmp_affinity_discover_uniformity(depth, ratio, count);
902 
903  // Print the machine topology summary.
904  if (__kmp_affinity_verbose) {
905  kmp_hw_t numerator_type, denominator_type;
906  kmp_str_buf_t buf;
907  __kmp_str_buf_init(&buf);
908  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
909  if (uniform) {
910  KMP_INFORM(Uniform, "KMP_AFFINITY");
911  } else {
912  KMP_INFORM(NonUniform, "KMP_AFFINITY");
913  }
914 
915  __kmp_str_buf_clear(&buf);
916 
917  if (core_level < 0)
918  core_level = depth - 1;
919  int ncores = count[core_level];
920 
921  denominator_type = KMP_HW_UNKNOWN;
922  for (int level = 0; level < depth; ++level) {
923  int c;
924  bool plural;
925  numerator_type = types[level];
926  c = ratio[level];
927  plural = (c > 1);
928  if (level == 0) {
929  __kmp_str_buf_print(&buf, "%d %s", c, __kmp_hw_get_catalog_string(
930  numerator_type, plural));
931  } else {
932  __kmp_str_buf_print(&buf, " x %d %s/%s", c,
933  __kmp_hw_get_catalog_string(numerator_type, plural),
934  __kmp_hw_get_catalog_string(denominator_type));
935  }
936  denominator_type = numerator_type;
937  }
938  KMP_INFORM(TopologyGeneric, "KMP_AFFINITY", buf.str, ncores);
939  __kmp_str_buf_free(&buf);
940  }
941 
942  if (__kmp_affinity_type == affinity_none) {
943  __kmp_free(retval);
944  KMP_CPU_FREE(oldMask);
945  return 0;
946  }
947 
948  // Set the granularity level based on what levels are modeled
949  // in the machine topology map.
950  if (__kmp_affinity_gran == affinity_gran_node)
951  __kmp_affinity_gran = affinity_gran_numa;
952  KMP_DEBUG_ASSERT(__kmp_affinity_gran != affinity_gran_default);
953  if (__kmp_affinity_gran_levels < 0) {
954  __kmp_affinity_gran_levels = 0; // lowest level (e.g. fine)
955  if ((thread_level >= 0) && (__kmp_affinity_gran > affinity_gran_thread))
956  __kmp_affinity_gran_levels++;
957  if ((core_level >= 0) && (__kmp_affinity_gran > affinity_gran_core))
958  __kmp_affinity_gran_levels++;
959  if ((tile_level >= 0) && (__kmp_affinity_gran > affinity_gran_tile))
960  __kmp_affinity_gran_levels++;
961  if ((numa_level >= 0) && (__kmp_affinity_gran > affinity_gran_numa))
962  __kmp_affinity_gran_levels++;
963  if ((socket_level >= 0) && (__kmp_affinity_gran > affinity_gran_package))
964  __kmp_affinity_gran_levels++;
965  }
966 
967  if (__kmp_affinity_verbose)
968  __kmp_affinity_print_topology(retval, nActiveThreads, depth, types);
969 
970  KMP_CPU_FREE(oldMask);
971  *address2os = retval;
972  return depth;
973 }
974 #endif // KMP_USE_HWLOC
975 
976 // If we don't know how to retrieve the machine's processor topology, or
977 // encounter an error in doing so, this routine is called to form a "flat"
978 // mapping of os thread id's <-> processor id's.
979 static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
980  kmp_i18n_id_t *const msg_id) {
981  *address2os = NULL;
982  *msg_id = kmp_i18n_null;
983 
984  // Even if __kmp_affinity_type == affinity_none, this routine might still be
985  // called to set __kmp_ncores, as well as
986  // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
987  if (!KMP_AFFINITY_CAPABLE()) {
988  KMP_ASSERT(__kmp_affinity_type == affinity_none);
989  __kmp_ncores = nPackages = __kmp_xproc;
990  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
991  if (__kmp_affinity_verbose) {
992  KMP_INFORM(AffFlatTopology, "KMP_AFFINITY");
993  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
994  KMP_INFORM(Uniform, "KMP_AFFINITY");
995  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
996  __kmp_nThreadsPerCore, __kmp_ncores);
997  }
998  return 0;
999  }
1000 
1001  // When affinity is off, this routine will still be called to set
1002  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1003  // Make sure all these vars are set correctly, and return now if affinity is
1004  // not enabled.
1005  __kmp_ncores = nPackages = __kmp_avail_proc;
1006  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1007  if (__kmp_affinity_verbose) {
1008  KMP_INFORM(AffCapableUseFlat, "KMP_AFFINITY");
1009  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1010  KMP_INFORM(Uniform, "KMP_AFFINITY");
1011  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1012  __kmp_nThreadsPerCore, __kmp_ncores);
1013  }
1014  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1015  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1016  if (__kmp_affinity_type == affinity_none) {
1017  int avail_ct = 0;
1018  int i;
1019  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1020  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask))
1021  continue;
1022  __kmp_pu_os_idx[avail_ct++] = i; // suppose indices are flat
1023  }
1024  return 0;
1025  }
1026 
1027  // Construct the data structure to be returned.
1028  *address2os =
1029  (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
1030  int avail_ct = 0;
1031  int i;
1032  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1033  // Skip this proc if it is not included in the machine model.
1034  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1035  continue;
1036  }
1037  __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
1038  Address addr(1);
1039  addr.labels[0] = i;
1040  (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
1041  }
1042  if (__kmp_affinity_verbose) {
1043  KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
1044  }
1045 
1046  if (__kmp_affinity_gran_levels < 0) {
1047  // Only the package level is modeled in the machine topology map,
1048  // so the #levels of granularity is either 0 or 1.
1049  if (__kmp_affinity_gran > affinity_gran_package) {
1050  __kmp_affinity_gran_levels = 1;
1051  } else {
1052  __kmp_affinity_gran_levels = 0;
1053  }
1054  }
1055  return 1;
1056 }
1057 
1058 #if KMP_GROUP_AFFINITY
1059 
1060 // If multiple Windows* OS processor groups exist, we can create a 2-level
1061 // topology map with the groups at level 0 and the individual procs at level 1.
1062 // This facilitates letting the threads float among all procs in a group,
1063 // if granularity=group (the default when there are multiple groups).
1064 static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
1065  kmp_i18n_id_t *const msg_id) {
1066  *address2os = NULL;
1067  *msg_id = kmp_i18n_null;
1068 
1069  // If we aren't affinity capable, then return now.
1070  // The flat mapping will be used.
1071  if (!KMP_AFFINITY_CAPABLE()) {
1072  // FIXME set *msg_id
1073  return -1;
1074  }
1075 
1076  // Construct the data structure to be returned.
1077  *address2os =
1078  (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
1079  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1080  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1081  int avail_ct = 0;
1082  int i;
1083  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1084  // Skip this proc if it is not included in the machine model.
1085  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1086  continue;
1087  }
1088  __kmp_pu_os_idx[avail_ct] = i; // suppose indices are flat
1089  Address addr(2);
1090  addr.labels[0] = i / (CHAR_BIT * sizeof(DWORD_PTR));
1091  addr.labels[1] = i % (CHAR_BIT * sizeof(DWORD_PTR));
1092  (*address2os)[avail_ct++] = AddrUnsPair(addr, i);
1093 
1094  if (__kmp_affinity_verbose) {
1095  KMP_INFORM(AffOSProcToGroup, "KMP_AFFINITY", i, addr.labels[0],
1096  addr.labels[1]);
1097  }
1098  }
1099 
1100  if (__kmp_affinity_gran_levels < 0) {
1101  if (__kmp_affinity_gran == affinity_gran_group) {
1102  __kmp_affinity_gran_levels = 1;
1103  } else if ((__kmp_affinity_gran == affinity_gran_fine) ||
1104  (__kmp_affinity_gran == affinity_gran_thread)) {
1105  __kmp_affinity_gran_levels = 0;
1106  } else {
1107  const char *gran_str = NULL;
1108  if (__kmp_affinity_gran == affinity_gran_core) {
1109  gran_str = "core";
1110  } else if (__kmp_affinity_gran == affinity_gran_package) {
1111  gran_str = "package";
1112  } else if (__kmp_affinity_gran == affinity_gran_node) {
1113  gran_str = "node";
1114  } else {
1115  KMP_ASSERT(0);
1116  }
1117 
1118  // Warning: can't use affinity granularity \"gran\" with group topology
1119  // method, using "thread"
1120  __kmp_affinity_gran_levels = 0;
1121  }
1122  }
1123  return 2;
1124 }
1125 
1126 #endif /* KMP_GROUP_AFFINITY */
1127 
1128 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1129 
1130 /*
1131  * CPUID.B or 1F, Input ECX (sub leaf # aka level number)
1132     Bits            Bits            Bits           Bits
1133    31-16           15-8             7-4            4-0
1134 ---+-----------+--------------+-------------+-----------------+
1135 EAX| reserved  |   reserved   |   reserved  |  Bits to Shift  |
1136 ---+-----------|--------------+-------------+-----------------|
1137 EBX| reserved  | Num logical processors at level (16 bits)    |
1138 ---+-----------|--------------+-------------------------------|
1139 ECX| reserved  |   Level Type |      Level Number (8 bits)    |
1140 ---+-----------+--------------+-------------------------------|
1141 EDX|                    X2APIC ID (32 bits)                   |
1142 ---+----------------------------------------------------------+
1143 */
1144 
1145 enum {
1146  INTEL_LEVEL_TYPE_INVALID = 0, // Package level
1147  INTEL_LEVEL_TYPE_SMT = 1,
1148  INTEL_LEVEL_TYPE_CORE = 2,
1149  INTEL_LEVEL_TYPE_TILE = 3,
1150  INTEL_LEVEL_TYPE_MODULE = 4,
1151  INTEL_LEVEL_TYPE_DIE = 5,
1152  INTEL_LEVEL_TYPE_LAST = 6,
1153 };
1154 
1155 struct cpuid_level_info_t {
1156  unsigned level_type, mask, mask_width, nitems, cache_mask;
1157 };
1158 
1159 template <kmp_uint32 LSB, kmp_uint32 MSB>
1160 static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
1161  const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB;
1162  const kmp_uint32 SHIFT_RIGHT = LSB;
1163  kmp_uint32 retval = v;
1164  retval <<= SHIFT_LEFT;
1165  retval >>= (SHIFT_LEFT + SHIFT_RIGHT);
1166  return retval;
1167 }
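// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): how the bit-field extraction is applied to the CPUID.1F outputs
// described in the table above. The register values are made up.
#if 0
static int __kmp_example_extract_bits() {
  kmp_uint32 ecx = 0x0201;     // level type 0x02 (core), level number 1
  kmp_uint32 eax = 0x00000005; // shift width 5 in bits 4:0
  unsigned level_type = __kmp_extract_bits<8, 15>(ecx); // == 2
  unsigned level_num = __kmp_extract_bits<0, 7>(ecx);   // == 1
  unsigned mask_width = __kmp_extract_bits<0, 4>(eax);  // == 5
  return (int)(level_type + level_num + mask_width);
}
#endif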
1168 
1169 static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) {
1170  switch (intel_type) {
1171  case INTEL_LEVEL_TYPE_INVALID:
1172  return KMP_HW_SOCKET;
1173  case INTEL_LEVEL_TYPE_SMT:
1174  return KMP_HW_THREAD;
1175  case INTEL_LEVEL_TYPE_CORE:
1176  return KMP_HW_CORE;
1177  // TODO: add support for the tile and module
1178  case INTEL_LEVEL_TYPE_TILE:
1179  return KMP_HW_UNKNOWN;
1180  case INTEL_LEVEL_TYPE_MODULE:
1181  return KMP_HW_UNKNOWN;
1182  case INTEL_LEVEL_TYPE_DIE:
1183  return KMP_HW_DIE;
1184  }
1185  return KMP_HW_UNKNOWN;
1186 }
1187 
1188 // This function takes the topology leaf, a levels array to store the levels
1189 // detected and a bitmap of the known levels.
1190 // Returns the number of levels in the topology
1191 static unsigned
1192 __kmp_x2apicid_get_levels(int leaf,
1193  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST],
1194  kmp_uint64 known_levels) {
1195  unsigned level, levels_index;
1196  unsigned level_type, mask_width, nitems;
1197  kmp_cpuid buf;
1198 
1199  // The new algorithm has known topology layers act as highest unknown topology
1200  // layers when unknown topology layers exist.
1201  // e.g., Suppose layers were SMT CORE <Y> <Z> PACKAGE
1202  // Then CORE will take the characteristics (nitems and mask width) of <Z>.
1203  // In developing the id mask for each layer, this eliminates unknown portions
1204  // of the topology while still keeping the correct underlying structure.
1205  level = levels_index = 0;
1206  do {
1207  __kmp_x86_cpuid(leaf, level, &buf);
1208  level_type = __kmp_extract_bits<8, 15>(buf.ecx);
1209  mask_width = __kmp_extract_bits<0, 4>(buf.eax);
1210  nitems = __kmp_extract_bits<0, 15>(buf.ebx);
1211  if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0)
1212  return 0;
1213 
1214  if (known_levels & (1ull << level_type)) {
1215  // Add a new level to the topology
1216  KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST);
1217  levels[levels_index].level_type = level_type;
1218  levels[levels_index].mask_width = mask_width;
1219  levels[levels_index].nitems = nitems;
1220  levels_index++;
1221  } else {
1222  // If it is an unknown level, then logically move the previous layer up
1223  if (levels_index > 0) {
1224  levels[levels_index - 1].mask_width = mask_width;
1225  levels[levels_index - 1].nitems = nitems;
1226  }
1227  }
1228  level++;
1229  } while (level_type != INTEL_LEVEL_TYPE_INVALID);
1230 
1231  // Set the masks to & with apicid
1232  for (unsigned i = 0; i < levels_index; ++i) {
1233  if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
1234  levels[i].mask = ~((-1) << levels[i].mask_width);
1235  levels[i].cache_mask = (-1) << levels[i].mask_width;
1236  for (unsigned j = 0; j < i; ++j)
1237  levels[i].mask ^= levels[j].mask;
1238  } else {
1239  KMP_DEBUG_ASSERT(levels_index > 0);
1240  levels[i].mask = (-1) << levels[i - 1].mask_width;
1241  levels[i].cache_mask = 0;
1242  }
1243  }
1244  return levels_index;
1245 }
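// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): the masks derived above for a made-up part whose SMT sub-leaf
// reports a mask width of 1 and whose CORE sub-leaf reports a mask width of
// 3 (up to 8 logical processors per package), terminated by an invalid
// sub-leaf that becomes the package level.
#if 0
static void __kmp_example_x2apicid_masks() {
  cpuid_level_info_t levels[3] = {};
  levels[0].level_type = INTEL_LEVEL_TYPE_SMT;
  levels[0].mask_width = 1;
  levels[1].level_type = INTEL_LEVEL_TYPE_CORE;
  levels[1].mask_width = 3;
  levels[2].level_type = INTEL_LEVEL_TYPE_INVALID; // package terminator
  // Same mask derivation as the loop above:
  for (unsigned i = 0; i < 3; ++i) {
    if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) {
      levels[i].mask = ~((-1) << levels[i].mask_width);
      for (unsigned j = 0; j < i; ++j)
        levels[i].mask ^= levels[j].mask;
    } else {
      levels[i].mask = (-1) << levels[i - 1].mask_width;
    }
  }
  // Result: levels[0].mask == 0x1 (thread bits), levels[1].mask == 0x6 (core
  // bits, with the SMT bit XOR-ed out), levels[2].mask == ~0x7 (package
  // bits). For apicid 0x2D the ids are 0x2D & 0x1, 0x2D & 0x6, 0x2D & ~0x7.
}
#endif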
1246 
1247 static int __kmp_cpuid_mask_width(int count) {
1248  int r = 0;
1249 
1250  while ((1 << r) < count)
1251  ++r;
1252  return r;
1253 }
1254 
1255 class apicThreadInfo {
1256 public:
1257  unsigned osId; // param to __kmp_affinity_bind_thread
1258  unsigned apicId; // from cpuid after binding
1259  unsigned maxCoresPerPkg; // ""
1260  unsigned maxThreadsPerPkg; // ""
1261  unsigned pkgId; // inferred from above values
1262  unsigned coreId; // ""
1263  unsigned threadId; // ""
1264 };
1265 
1266 static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a,
1267  const void *b) {
1268  const apicThreadInfo *aa = (const apicThreadInfo *)a;
1269  const apicThreadInfo *bb = (const apicThreadInfo *)b;
1270  if (aa->pkgId < bb->pkgId)
1271  return -1;
1272  if (aa->pkgId > bb->pkgId)
1273  return 1;
1274  if (aa->coreId < bb->coreId)
1275  return -1;
1276  if (aa->coreId > bb->coreId)
1277  return 1;
1278  if (aa->threadId < bb->threadId)
1279  return -1;
1280  if (aa->threadId > bb->threadId)
1281  return 1;
1282  return 0;
1283 }
1284 
1285 // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use
1286 // an algorithm which cycles through the available os threads, sets the
1287 // current thread's affinity mask to each of them in turn, and then retrieves
1288 // the Apic Id for that thread context using the cpuid instruction.
1289 static int __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
1290  kmp_i18n_id_t *const msg_id) {
1291  kmp_cpuid buf;
1292  *address2os = NULL;
1293  *msg_id = kmp_i18n_null;
1294 
1295  // Check if cpuid leaf 4 is supported.
1296  __kmp_x86_cpuid(0, 0, &buf);
1297  if (buf.eax < 4) {
1298  *msg_id = kmp_i18n_str_NoLeaf4Support;
1299  return -1;
1300  }
1301 
1302  // The algorithm used starts by setting the affinity to each available thread
1303  // and retrieving info from the cpuid instruction, so if we are not capable of
1304 // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
1305  // need to do something else - use the defaults that we calculated from
1306  // issuing cpuid without binding to each proc.
1307  if (!KMP_AFFINITY_CAPABLE()) {
1308  // Hack to try and infer the machine topology using only the data
1309  // available from cpuid on the current thread, and __kmp_xproc.
1310  KMP_ASSERT(__kmp_affinity_type == affinity_none);
1311 
1312  // Get an upper bound on the number of threads per package using cpuid(1).
1313 // On some OS/chip combinations where HT is supported by the chip but is
1314  // disabled, this value will be 2 on a single core chip. Usually, it will be
1315  // 2 if HT is enabled and 1 if HT is disabled.
1316  __kmp_x86_cpuid(1, 0, &buf);
1317  int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
1318  if (maxThreadsPerPkg == 0) {
1319  maxThreadsPerPkg = 1;
1320  }
1321 
1322  // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded
1323  // value.
1324  //
1325 // The author of cpu_count.cpp treated this as only an upper bound on the
1326  // number of cores, but I haven't seen any cases where it was greater than
1327  // the actual number of cores, so we will treat it as exact in this block of
1328  // code.
1329  //
1330  // First, we need to check if cpuid(4) is supported on this chip. To see if
1331  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or
1332  // greater.
1333  __kmp_x86_cpuid(0, 0, &buf);
1334  if (buf.eax >= 4) {
1335  __kmp_x86_cpuid(4, 0, &buf);
1336  nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
1337  } else {
1338  nCoresPerPkg = 1;
1339  }
1340 
1341  // There is no way to reliably tell if HT is enabled without issuing the
1342 // cpuid instruction from every thread, and correlating the cpuid info, so
1343  // if the machine is not affinity capable, we assume that HT is off. We have
1344  // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine
1345  // does not support HT.
1346  //
1347  // - Older OSes are usually found on machines with older chips, which do not
1348  // support HT.
1349  // - The performance penalty for mistakenly identifying a machine as HT when
1350  // it isn't (which results in blocktime being incorrectly set to 0) is
1351 // greater than the penalty for mistakenly identifying a machine as
1352  // being 1 thread/core when it is really HT enabled (which results in
1353  // blocktime being incorrectly set to a positive value).
1354  __kmp_ncores = __kmp_xproc;
1355  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1356  __kmp_nThreadsPerCore = 1;
1357  if (__kmp_affinity_verbose) {
1358  KMP_INFORM(AffNotCapableUseLocCpuid, "KMP_AFFINITY");
1359  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1360  if (__kmp_affinity_uniform_topology()) {
1361  KMP_INFORM(Uniform, "KMP_AFFINITY");
1362  } else {
1363  KMP_INFORM(NonUniform, "KMP_AFFINITY");
1364  }
1365  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1366  __kmp_nThreadsPerCore, __kmp_ncores);
1367  }
1368  return 0;
1369  }
1370 
1371  // From here on, we can assume that it is safe to call
1372  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1373  // __kmp_affinity_type = affinity_none.
1374 
1375  // Save the affinity mask for the current thread.
1376  kmp_affin_mask_t *oldMask;
1377  KMP_CPU_ALLOC(oldMask);
1378  KMP_ASSERT(oldMask != NULL);
1379  __kmp_get_system_affinity(oldMask, TRUE);
1380 
1381  // Run through each of the available contexts, binding the current thread
1382  // to it, and obtaining the pertinent information using the cpuid instr.
1383  //
1384  // The relevant information is:
1385  // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context
1386 // has a unique Apic Id, which is of the form pkg# : core# : thread#.
1387  // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value
1388  // of this field determines the width of the core# + thread# fields in the
1389  // Apic Id. It is also an upper bound on the number of threads per
1390 // package, but it has been verified that situations happen where it is not
1391  // exact. In particular, on certain OS/chip combinations where Intel(R)
1392  // Hyper-Threading Technology is supported by the chip but has been
1393  // disabled, the value of this field will be 2 (for a single core chip).
1394  // On other OS/chip combinations supporting Intel(R) Hyper-Threading
1395  // Technology, the value of this field will be 1 when Intel(R)
1396  // Hyper-Threading Technology is disabled and 2 when it is enabled.
1397  // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value
1398  // of this field (+1) determines the width of the core# field in the Apic
1399  // Id. The comments in "cpucount.cpp" say that this value is an upper
1400  // bound, but the IA-32 architecture manual says that it is exactly the
1401  // number of cores per package, and I haven't seen any case where it
1402  // wasn't.
1403  //
1404  // From this information, deduce the package Id, core Id, and thread Id,
1405  // and set the corresponding fields in the apicThreadInfo struct.
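// A minimal illustrative sketch (hypothetical, kept out of the build via
// #if 0): decomposing one Apic Id with the width arithmetic used below. The
// values are made up: cpuid reports maxThreadsPerPkg = 16 and
// maxCoresPerPkg = 8 for a thread whose Apic Id is 0x53.
#if 0
static void __kmp_example_apic_decompose() {
  unsigned apicId = 0x53;
  int widthCT = __kmp_cpuid_mask_width(16); // 4 bits for core# + thread#
  int widthC = __kmp_cpuid_mask_width(8);   // 3 bits for core#
  int widthT = widthCT - widthC;            // 1 bit for thread#
  unsigned pkgId = apicId >> widthCT;                         // 5
  unsigned coreId = (apicId >> widthT) & ((1 << widthC) - 1); // 1
  unsigned threadId = apicId & ((1 << widthT) - 1);           // 1
  (void)pkgId;
  (void)coreId;
  (void)threadId;
}
#endif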
1406  unsigned i;
1407  apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
1408  __kmp_avail_proc * sizeof(apicThreadInfo));
1409  unsigned nApics = 0;
1410  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1411  // Skip this proc if it is not included in the machine model.
1412  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1413  continue;
1414  }
1415  KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc);
1416 
1417  __kmp_affinity_dispatch->bind_thread(i);
1418  threadInfo[nApics].osId = i;
1419 
1420  // The apic id and max threads per pkg come from cpuid(1).
1421  __kmp_x86_cpuid(1, 0, &buf);
1422  if (((buf.edx >> 9) & 1) == 0) {
1423  __kmp_set_system_affinity(oldMask, TRUE);
1424  __kmp_free(threadInfo);
1425  KMP_CPU_FREE(oldMask);
1426  *msg_id = kmp_i18n_str_ApicNotPresent;
1427  return -1;
1428  }
1429  threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
1430  threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
1431  if (threadInfo[nApics].maxThreadsPerPkg == 0) {
1432  threadInfo[nApics].maxThreadsPerPkg = 1;
1433  }
1434 
1435  // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded
1436  // value.
1437  //
1438  // First, we need to check if cpuid(4) is supported on this chip. To see if
1439  // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n
1440  // or greater.
1441  __kmp_x86_cpuid(0, 0, &buf);
1442  if (buf.eax >= 4) {
1443  __kmp_x86_cpuid(4, 0, &buf);
1444  threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
1445  } else {
1446  threadInfo[nApics].maxCoresPerPkg = 1;
1447  }
1448 
1449  // Infer the pkgId / coreId / threadId using only the info obtained locally.
1450  int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
1451  threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
1452 
1453  int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
1454  int widthT = widthCT - widthC;
1455  if (widthT < 0) {
1456  // I've never seen this one happen, but I suppose it could, if the cpuid
1457  // instruction on a chip was really screwed up. Make sure to restore the
1458  // affinity mask before the tail call.
1459  __kmp_set_system_affinity(oldMask, TRUE);
1460  __kmp_free(threadInfo);
1461  KMP_CPU_FREE(oldMask);
1462  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1463  return -1;
1464  }
1465 
1466  int maskC = (1 << widthC) - 1;
1467  threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC;
1468 
1469  int maskT = (1 << widthT) - 1;
1470  threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT;
1471 
1472  nApics++;
1473  }
1474 
1475  // We've collected all the info we need.
1476  // Restore the old affinity mask for this thread.
1477  __kmp_set_system_affinity(oldMask, TRUE);
1478 
1479  // If there's only one thread context to bind to, form an Address object
1480  // with depth 1 and return immediately (or, if affinity is off, set
1481  // address2os to NULL and return).
1482  //
1483  // If it is configured to omit the package level when there is only a single
1484  // package, the logic at the end of this routine won't work if there is only
1485  // a single thread - it would try to form an Address object with depth 0.
1486  KMP_ASSERT(nApics > 0);
1487  if (nApics == 1) {
1488  __kmp_ncores = nPackages = 1;
1489  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1490  if (__kmp_affinity_verbose) {
1491  KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
1492  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1493  KMP_INFORM(Uniform, "KMP_AFFINITY");
1494  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1495  __kmp_nThreadsPerCore, __kmp_ncores);
1496  }
1497 
1498  if (__kmp_affinity_type == affinity_none) {
1499  __kmp_free(threadInfo);
1500  KMP_CPU_FREE(oldMask);
1501  return 0;
1502  }
1503 
1504  *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
1505  Address addr(1);
1506  addr.labels[0] = threadInfo[0].pkgId;
1507  (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);
1508 
1509  if (__kmp_affinity_gran_levels < 0) {
1510  __kmp_affinity_gran_levels = 0;
1511  }
1512 
1513  if (__kmp_affinity_verbose) {
1514  __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
1515  }
1516 
1517  __kmp_free(threadInfo);
1518  KMP_CPU_FREE(oldMask);
1519  return 1;
1520  }
1521 
1522  // Sort the threadInfo table by physical Id.
1523  qsort(threadInfo, nApics, sizeof(*threadInfo),
1524  __kmp_affinity_cmp_apicThreadInfo_phys_id);
1525 
1526  // The table is now sorted by pkgId / coreId / threadId, but we really don't
1527  // know the radix of any of the fields. pkgId's may be sparsely assigned among
1528  // the chips on a system. Although coreId's are usually assigned
1529  // [0 .. coresPerPkg-1] and threadId's are usually assigned
1530  // [0..threadsPerCore-1], we don't want to make any such assumptions.
1531  //
1532  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
1533  // total # packages) are at this point - we want to determine that now. We
1534  // only have an upper bound on the first two figures.
1535  //
1536  // We also perform a consistency check at this point: the values returned by
1537  // the cpuid instruction for any thread bound to a given package had better
1538  // return the same info for maxThreadsPerPkg and maxCoresPerPkg.
1539  nPackages = 1;
1540  nCoresPerPkg = 1;
1541  __kmp_nThreadsPerCore = 1;
1542  unsigned nCores = 1;
1543 
1544  unsigned pkgCt = 1; // to determine radii
1545  unsigned lastPkgId = threadInfo[0].pkgId;
1546  unsigned coreCt = 1;
1547  unsigned lastCoreId = threadInfo[0].coreId;
1548  unsigned threadCt = 1;
1549  unsigned lastThreadId = threadInfo[0].threadId;
1550 
1551  // intra-pkg consistency checks
1552  unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
1553  unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
1554 
1555  for (i = 1; i < nApics; i++) {
1556  if (threadInfo[i].pkgId != lastPkgId) {
1557  nCores++;
1558  pkgCt++;
1559  lastPkgId = threadInfo[i].pkgId;
1560  if ((int)coreCt > nCoresPerPkg)
1561  nCoresPerPkg = coreCt;
1562  coreCt = 1;
1563  lastCoreId = threadInfo[i].coreId;
1564  if ((int)threadCt > __kmp_nThreadsPerCore)
1565  __kmp_nThreadsPerCore = threadCt;
1566  threadCt = 1;
1567  lastThreadId = threadInfo[i].threadId;
1568 
1569  // This is a different package, so go on to the next iteration without
1570  // doing any consistency checks. Reset the consistency check vars, though.
1571  prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
1572  prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
1573  continue;
1574  }
1575 
1576  if (threadInfo[i].coreId != lastCoreId) {
1577  nCores++;
1578  coreCt++;
1579  lastCoreId = threadInfo[i].coreId;
1580  if ((int)threadCt > __kmp_nThreadsPerCore)
1581  __kmp_nThreadsPerCore = threadCt;
1582  threadCt = 1;
1583  lastThreadId = threadInfo[i].threadId;
1584  } else if (threadInfo[i].threadId != lastThreadId) {
1585  threadCt++;
1586  lastThreadId = threadInfo[i].threadId;
1587  } else {
1588  __kmp_free(threadInfo);
1589  KMP_CPU_FREE(oldMask);
1590  *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
1591  return -1;
1592  }
1593 
1594  // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
1595  // fields agree among all the threads bound to a given package.
1596  if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) ||
1597  (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
1598  __kmp_free(threadInfo);
1599  KMP_CPU_FREE(oldMask);
1600  *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1601  return -1;
1602  }
1603  }
1604  nPackages = pkgCt;
1605  if ((int)coreCt > nCoresPerPkg)
1606  nCoresPerPkg = coreCt;
1607  if ((int)threadCt > __kmp_nThreadsPerCore)
1608  __kmp_nThreadsPerCore = threadCt;
1609 
1610  // When affinity is off, this routine will still be called to set
1611  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1612  // Make sure all these vars are set correctly, and return now if affinity is
1613  // not enabled.
1614  __kmp_ncores = nCores;
1615  if (__kmp_affinity_verbose) {
1616  KMP_INFORM(AffUseGlobCpuid, "KMP_AFFINITY");
1617  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1618  if (__kmp_affinity_uniform_topology()) {
1619  KMP_INFORM(Uniform, "KMP_AFFINITY");
1620  } else {
1621  KMP_INFORM(NonUniform, "KMP_AFFINITY");
1622  }
1623  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1624  __kmp_nThreadsPerCore, __kmp_ncores);
1625  }
1626  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1627  KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc);
1628  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1629  for (i = 0; i < nApics; ++i) {
1630  __kmp_pu_os_idx[i] = threadInfo[i].osId;
1631  }
1632  if (__kmp_affinity_type == affinity_none) {
1633  __kmp_free(threadInfo);
1634  KMP_CPU_FREE(oldMask);
1635  return 0;
1636  }
1637 
1638  // Now that we've determined the number of packages, the number of cores per
1639  // package, and the number of threads per core, we can construct the data
1640  // structure that is to be returned.
1641  int pkgLevel = 0;
1642  int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
1643  int threadLevel =
1644  (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
1645  unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
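  // Example of the resulting level layout (hypothetical counts): with
  // nCoresPerPkg = 4 and __kmp_nThreadsPerCore = 2, we get pkgLevel = 0,
  // coreLevel = 1, threadLevel = 2 and depth = 3. On a package with a single
  // core and one thread per core, coreLevel and threadLevel are both -1 and
  // depth collapses to 1 (package only).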
1646 
1647  KMP_ASSERT(depth > 0);
1648  *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * nApics);
1649 
1650  for (i = 0; i < nApics; ++i) {
1651  Address addr(depth);
1652  unsigned os = threadInfo[i].osId;
1653  int d = 0;
1654 
1655  if (pkgLevel >= 0) {
1656  addr.labels[d++] = threadInfo[i].pkgId;
1657  }
1658  if (coreLevel >= 0) {
1659  addr.labels[d++] = threadInfo[i].coreId;
1660  }
1661  if (threadLevel >= 0) {
1662  addr.labels[d++] = threadInfo[i].threadId;
1663  }
1664  (*address2os)[i] = AddrUnsPair(addr, os);
1665  }
1666 
1667  if (__kmp_affinity_gran_levels < 0) {
1668  // Set the granularity level based on what levels are modeled in the machine
1669  // topology map.
1670  __kmp_affinity_gran_levels = 0;
1671  if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1672  __kmp_affinity_gran_levels++;
1673  }
1674  if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1675  __kmp_affinity_gran_levels++;
1676  }
1677  if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
1678  __kmp_affinity_gran_levels++;
1679  }
1680  }
1681 
1682  if (__kmp_affinity_verbose) {
1683  __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
1684  coreLevel, threadLevel);
1685  }
1686 
1687  __kmp_free(threadInfo);
1688  KMP_CPU_FREE(oldMask);
1689  return depth;
1690 }
1691 
1692 // Intel(R) microarchitecture code name Nehalem, Dunnington and later
1693 // architectures support a newer interface for specifying the x2APIC Ids,
1694 // based on CPUID.B or CPUID.1F
1695 static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
1696  kmp_i18n_id_t *const msg_id) {
1697 
1698  cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST];
1699  int ratio[KMP_HW_LAST];
1700  int count[KMP_HW_LAST];
1701  kmp_hw_t types[INTEL_LEVEL_TYPE_LAST];
1702  unsigned levels_index;
1703  kmp_cpuid buf;
1704  kmp_uint64 known_levels;
1705  int topology_leaf, highest_leaf, apic_id;
1706  int num_leaves;
1707  static int leaves[] = {0, 0};
1708 
1709  kmp_i18n_id_t leaf_message_id;
1710 
1711  KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST);
1712 
1713  *msg_id = kmp_i18n_null;
1714 
1715  // Figure out the known topology levels
1716  known_levels = 0ull;
1717  for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) {
1718  if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
1719  known_levels |= (1ull << i);
1720  }
1721  }
1722 
1723  // Get the highest cpuid leaf supported
1724  __kmp_x86_cpuid(0, 0, &buf);
1725  highest_leaf = buf.eax;
1726 
1727  // If a specific topology method was requested, only allow that specific leaf
1728  // otherwise, try both leaves 31 and 11 in that order
1729  num_leaves = 0;
1730  if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
1731  num_leaves = 1;
1732  leaves[0] = 11;
1733  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
1734  } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
1735  num_leaves = 1;
1736  leaves[0] = 31;
1737  leaf_message_id = kmp_i18n_str_NoLeaf31Support;
1738  } else {
1739  num_leaves = 2;
1740  leaves[0] = 31;
1741  leaves[1] = 11;
1742  leaf_message_id = kmp_i18n_str_NoLeaf11Support;
1743  }
1744 
1745  // Check to see if cpuid leaf 31 or 11 is supported.
1746  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
1747  topology_leaf = -1;
1748  for (int i = 0; i < num_leaves; ++i) {
1749  int leaf = leaves[i];
1750  if (highest_leaf < leaf)
1751  continue;
1752  __kmp_x86_cpuid(leaf, 0, &buf);
1753  if (buf.ebx == 0)
1754  continue;
1755  topology_leaf = leaf;
1756  levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels);
1757  if (levels_index == 0)
1758  continue;
1759  break;
1760  }
1761  if (topology_leaf == -1 || levels_index == 0) {
1762  *msg_id = leaf_message_id;
1763  return -1;
1764  }
1765  KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST);
1766 
1767  // The algorithm used starts by setting the affinity to each available thread
1768  // and retrieving info from the cpuid instruction, so if we are not capable of
1769  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
1770  // we need to do something else - use the defaults that we calculated from
1771  // issuing cpuid without binding to each proc.
1772  if (!KMP_AFFINITY_CAPABLE()) {
1773  // Hack to try and infer the machine topology using only the data
1774  // available from cpuid on the current thread, and __kmp_xproc.
1775  KMP_ASSERT(__kmp_affinity_type == affinity_none);
1776 
1777  for (unsigned i = 0; i < levels_index; ++i) {
1778  if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) {
1779  __kmp_nThreadsPerCore = levels[i].nitems;
1780  } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) {
1781  nCoresPerPkg = levels[i].nitems;
1782  } else if (levels[i].level_type == INTEL_LEVEL_TYPE_DIE) {
1783  nDiesPerPkg = levels[i].nitems;
1784  }
1785  }
1786  __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1787  nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
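    // Rough illustration (hypothetical numbers): if cpuid reports 1 thread per
    // core and 8 cores per package while __kmp_xproc is 16, the lines above
    // infer __kmp_ncores = 16 / 1 = 16 and nPackages = (16 + 8 - 1) / 8 = 2.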
1788  if (__kmp_affinity_verbose) {
1789  KMP_INFORM(AffNotCapableUseLocCpuidL, "KMP_AFFINITY", topology_leaf);
1790  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1791  if (__kmp_affinity_uniform_topology()) {
1792  KMP_INFORM(Uniform, "KMP_AFFINITY");
1793  } else {
1794  KMP_INFORM(NonUniform, "KMP_AFFINITY");
1795  }
1796  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1797  __kmp_nThreadsPerCore, __kmp_ncores);
1798  }
1799  return 0;
1800  }
1801 
1802  // From here on, we can assume that it is safe to call
1803  // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if
1804  // __kmp_affinity_type = affinity_none.
1805 
1806  // Save the affinity mask for the current thread.
1807  kmp_affin_mask_t *oldMask;
1808  KMP_CPU_ALLOC(oldMask);
1809  __kmp_get_system_affinity(oldMask, TRUE);
1810 
1811  // Allocate the data structure to be returned.
1812  int depth = levels_index;
1813  for (int i = depth - 1, j = 0; i >= 0; --i, ++j)
1814  types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
1815  AddrUnsPair *retval =
1816  (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * __kmp_avail_proc);
1817 
1818  // Run through each of the available contexts, binding the current thread
1819  // to it, and obtaining the pertinent information using the cpuid instr.
1820  unsigned int proc;
1821  int nApics = 0;
1822  KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
1823  cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST];
1824  unsigned my_levels_index;
1825 
1826  // Skip this proc if it is not included in the machine model.
1827  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
1828  continue;
1829  }
1830  KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
1831 
1832  __kmp_affinity_dispatch->bind_thread(proc);
1833 
1834  // New algorithm
1835  __kmp_x86_cpuid(topology_leaf, 0, &buf);
1836  apic_id = buf.edx;
1837  Address addr(depth);
1838  my_levels_index =
1839  __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
1840  if (my_levels_index == 0 || my_levels_index != levels_index) {
1841  KMP_CPU_FREE(oldMask);
1842  *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1843  return -1;
1844  }
1845  // Put in topology information
1846  for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) {
1847  addr.labels[idx] = apic_id & my_levels[j].mask;
1848  if (j > 0)
1849  addr.labels[idx] >>= my_levels[j - 1].mask_width;
1850  }
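    // For instance (hypothetical level masks): with an SMT level of mask 0x1
    // (mask_width 1) and a core level of mask 0xff, an apic_id of 0x35 yields
    // a thread label of 0x35 & 0x1 = 1 and a core label of
    // (0x35 & 0xff) >> 1 = 0x1a, per the shift-by-previous-mask_width step
    // above.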
1851  retval[nApics++] = AddrUnsPair(addr, proc);
1852  }
1853 
1854  // We've collected all the info we need.
1855  // Restore the old affinity mask for this thread.
1856  __kmp_set_system_affinity(oldMask, TRUE);
1857 
1858  // If there's only one thread context to bind to, return now.
1859  KMP_ASSERT(nApics > 0);
1860  if (nApics == 1) {
1861  int pkg_level;
1862  __kmp_ncores = nPackages = 1;
1863  __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1864  if (__kmp_affinity_verbose) {
1865  KMP_INFORM(AffUseGlobCpuidL, "KMP_AFFINITY", topology_leaf);
1866  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1867  KMP_INFORM(Uniform, "KMP_AFFINITY");
1868  KMP_INFORM(Topology, "KMP_AFFINITY", nPackages, nCoresPerPkg,
1869  __kmp_nThreadsPerCore, __kmp_ncores);
1870  }
1871 
1872  if (__kmp_affinity_type == affinity_none) {
1873  __kmp_free(retval);
1874  KMP_CPU_FREE(oldMask);
1875  return 0;
1876  }
1877 
1878  pkg_level = 0;
1879  for (int i = 0; i < depth; ++i)
1880  if (types[i] == KMP_HW_SOCKET) {
1881  pkg_level = i;
1882  break;
1883  }
1884  // Form an Address object which only includes the package level.
1885  Address addr(1);
1886  addr.labels[0] = retval[0].first.labels[pkg_level];
1887  retval[0].first = addr;
1888 
1889  if (__kmp_affinity_gran_levels < 0) {
1890  __kmp_affinity_gran_levels = 0;
1891  }
1892 
1893  if (__kmp_affinity_verbose) {
1894  __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1895  }
1896 
1897  *address2os = retval;
1898  KMP_CPU_FREE(oldMask);
1899  return 1;
1900  }
1901 
1902  // Sort the table by physical Id.
1903  qsort(retval, nApics, sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1904 
1905  __kmp_affinity_gather_enumeration_information(retval, nApics, depth, types,
1906  ratio, count);
1907 
1908  // When affinity is off, this routine will still be called to set
1909  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
1910  // Make sure all these vars are set correctly, and return if affinity is not
1911  // enabled.
1912  int thread_level, core_level, socket_level, die_level;
1913  thread_level = core_level = die_level = socket_level = -1;
1914  for (int level = 0; level < depth; ++level) {
1915  if (types[level] == KMP_HW_THREAD)
1916  thread_level = level;
1917  else if (types[level] == KMP_HW_CORE)
1918  core_level = level;
1919  else if (types[level] == KMP_HW_DIE)
1920  die_level = level;
1921  else if (types[level] == KMP_HW_SOCKET)
1922  socket_level = level;
1923  }
1924  __kmp_nThreadsPerCore =
1925  __kmp_affinity_calculate_ratio(ratio, thread_level, core_level);
1926  if (die_level > 0) {
1927  nDiesPerPkg =
1928  __kmp_affinity_calculate_ratio(ratio, die_level, socket_level);
1929  nCoresPerPkg = __kmp_affinity_calculate_ratio(ratio, core_level, die_level);
1930  } else {
1931  nCoresPerPkg =
1932  __kmp_affinity_calculate_ratio(ratio, core_level, socket_level);
1933  }
1934  if (socket_level >= 0)
1935  nPackages = count[socket_level];
1936  else
1937  nPackages = 1;
1938  if (core_level >= 0)
1939  __kmp_ncores = count[core_level];
1940  else
1941  __kmp_ncores = 1;
1942 
1943  // Check to see if the machine topology is uniform
1944  unsigned uniform = __kmp_affinity_discover_uniformity(depth, ratio, count);
1945 
1946  // Print the machine topology summary.
1947  if (__kmp_affinity_verbose) {
1948  kmp_hw_t numerator_type, denominator_type;
1949  KMP_INFORM(AffUseGlobCpuidL, "KMP_AFFINITY", topology_leaf);
1950  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
1951  if (uniform) {
1952  KMP_INFORM(Uniform, "KMP_AFFINITY");
1953  } else {
1954  KMP_INFORM(NonUniform, "KMP_AFFINITY");
1955  }
1956 
1957  kmp_str_buf_t buf;
1958  __kmp_str_buf_init(&buf);
1959 
1960  if (core_level < 0)
1961  core_level = depth - 1;
1962  int ncores = count[core_level];
1963 
1964  denominator_type = KMP_HW_UNKNOWN;
1965  for (int level = 0; level < depth; ++level) {
1966  int c;
1967  bool plural;
1968  numerator_type = types[level];
1969  c = ratio[level];
1970  plural = (c > 1);
1971  if (level == 0) {
1972  __kmp_str_buf_print(&buf, "%d %s", c, __kmp_hw_get_catalog_string(
1973  numerator_type, plural));
1974  } else {
1975  __kmp_str_buf_print(&buf, " x %d %s/%s", c,
1976  __kmp_hw_get_catalog_string(numerator_type, plural),
1977  __kmp_hw_get_catalog_string(denominator_type));
1978  }
1979  denominator_type = numerator_type;
1980  }
1981  KMP_INFORM(TopologyGeneric, "KMP_AFFINITY", buf.str, ncores);
1982  __kmp_str_buf_free(&buf);
1983  }
1984 
1985  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1986  KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1987  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
1988  for (proc = 0; (int)proc < nApics; ++proc) {
1989  __kmp_pu_os_idx[proc] = retval[proc].second;
1990  }
1991  if (__kmp_affinity_type == affinity_none) {
1992  __kmp_free(retval);
1993  KMP_CPU_FREE(oldMask);
1994  return 0;
1995  }
1996 
1997  // Find any levels with radix 1, and remove them from the map
1998  // (except for the package level).
1999  depth = __kmp_affinity_remove_radix_one_levels(retval, nApics, depth, types);
2000  thread_level = core_level = die_level = socket_level = -1;
2001  for (int level = 0; level < depth; ++level) {
2002  if (types[level] == KMP_HW_THREAD)
2003  thread_level = level;
2004  else if (types[level] == KMP_HW_CORE)
2005  core_level = level;
2006  else if (types[level] == KMP_HW_DIE)
2007  die_level = level;
2008  else if (types[level] == KMP_HW_SOCKET)
2009  socket_level = level;
2010  }
2011 
2012  if (__kmp_affinity_gran_levels < 0) {
2013  // Set the granularity level based on what levels are modeled
2014  // in the machine topology map.
2015  __kmp_affinity_gran_levels = 0;
2016  if ((thread_level >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
2017  __kmp_affinity_gran_levels++;
2018  }
2019  if ((core_level >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
2020  __kmp_affinity_gran_levels++;
2021  }
2022  if ((die_level >= 0) && (__kmp_affinity_gran > affinity_gran_die)) {
2023  __kmp_affinity_gran_levels++;
2024  }
2025  if (__kmp_affinity_gran > affinity_gran_package) {
2026  __kmp_affinity_gran_levels++;
2027  }
2028  }
2029 
2030  if (__kmp_affinity_verbose) {
2031  __kmp_affinity_print_topology(retval, nApics, depth, types);
2032  }
2033 
2034  KMP_CPU_FREE(oldMask);
2035  *address2os = retval;
2036  return depth;
2037 }
2038 
2039 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
2040 
2041 #define osIdIndex 0
2042 #define threadIdIndex 1
2043 #define coreIdIndex 2
2044 #define pkgIdIndex 3
2045 #define nodeIdIndex 4
2046 
2047 typedef unsigned *ProcCpuInfo;
2048 static unsigned maxIndex = pkgIdIndex;
2049 
2050 static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a,
2051  const void *b) {
2052  unsigned i;
2053  const unsigned *aa = *(unsigned *const *)a;
2054  const unsigned *bb = *(unsigned *const *)b;
2055  for (i = maxIndex;; i--) {
2056  if (aa[i] < bb[i])
2057  return -1;
2058  if (aa[i] > bb[i])
2059  return 1;
2060  if (i == osIdIndex)
2061  break;
2062  }
2063  return 0;
2064 }
2065 
2066 #if KMP_USE_HIER_SCHED
2067 // Set the array sizes for the hierarchy layers
2068 static void __kmp_dispatch_set_hierarchy_values() {
2069  // Set the maximum number of L1's to number of cores
2070  // Set the maximum number of L2's to either the number of cores / 2 for
2071  // the Intel(R) Xeon Phi(TM) coprocessor formerly code-named Knights Landing,
2072  // or the number of cores for Intel(R) Xeon(R) processors
2073  // Set the maximum number of NUMA nodes and L3's to number of packages
2074  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] =
2075  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2076  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores;
2077 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2078  KMP_MIC_SUPPORTED
2079  if (__kmp_mic_type >= mic3)
2080  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2;
2081  else
2082 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2083  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores;
2084  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages;
2085  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages;
2086  __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1;
2087  // Set the number of threads per unit
2088  // Number of hardware threads per L1/L2/L3/NUMA/LOOP
2089  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1;
2090  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] =
2091  __kmp_nThreadsPerCore;
2092 #if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && \
2093  KMP_MIC_SUPPORTED
2094  if (__kmp_mic_type >= mic3)
2095  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2096  2 * __kmp_nThreadsPerCore;
2097  else
2098 #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS) && KMP_MIC_SUPPORTED
2099  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] =
2100  __kmp_nThreadsPerCore;
2101  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] =
2102  nCoresPerPkg * __kmp_nThreadsPerCore;
2103  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] =
2104  nCoresPerPkg * __kmp_nThreadsPerCore;
2105  __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] =
2106  nPackages * nCoresPerPkg * __kmp_nThreadsPerCore;
2107 }
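// Illustrative example (hypothetical non-MIC machine): with nPackages = 2,
// nCoresPerPkg = 4, __kmp_nThreadsPerCore = 2 and __kmp_ncores = 8, the code
// above yields 16 hardware threads, 8 L1 units (one per core), 8 L2 units,
// 2 L3/NUMA units, and threads-per-unit of 1 (THREAD), 2 (L1/L2), 8 (L3/NUMA)
// and 16 (LOOP).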
2108 
2109 // Return the index into the hierarchy for this tid and layer type (L1, L2, etc)
2110 // i.e., this thread's L1 or this thread's L2, etc.
2111 int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) {
2112  int index = type + 1;
2113  int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1];
2114  KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST);
2115  if (type == kmp_hier_layer_e::LAYER_THREAD)
2116  return tid;
2117  else if (type == kmp_hier_layer_e::LAYER_LOOP)
2118  return 0;
2119  KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0);
2120  if (tid >= num_hw_threads)
2121  tid = tid % num_hw_threads;
2122  return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index];
2123 }
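// Illustration with the hypothetical machine above (16 hw threads, 2 threads
// per L1): __kmp_dispatch_get_index(5, LAYER_L1) = (5 / 2) % 8 = 2, so tid 5
// shares its L1 (core) with tid 4; a tid of 21 first wraps to 21 % 16 = 5 and
// maps to the same unit.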
2124 
2125 // Return the number of t1's per t2
2126 int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) {
2127  int i1 = t1 + 1;
2128  int i2 = t2 + 1;
2129  KMP_DEBUG_ASSERT(i1 <= i2);
2130  KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST);
2131  KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST);
2132  KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0);
2133  // (nthreads/t2) / (nthreads/t1) = t1 / t2
2134  return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1];
2135 }
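// E.g. (same hypothetical machine): __kmp_dispatch_get_t1_per_t2(LAYER_L1,
// LAYER_L3) = 8 / 2 = 4 L1 units (cores) per L3, since threads-per-L3 is 8
// and threads-per-L1 is 2.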
2136 #endif // KMP_USE_HIER_SCHED
2137 
2138 // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the
2139 // affinity map.
2140 static int __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
2141  int *line,
2142  kmp_i18n_id_t *const msg_id,
2143  FILE *f) {
2144  *address2os = NULL;
2145  *msg_id = kmp_i18n_null;
2146 
2147  // Scan the file, counting the number of "processor" (osId) fields, and
2148  // finding the highest value of <n> for any node_<n> field.
2149  char buf[256];
2150  unsigned num_records = 0;
2151  while (!feof(f)) {
2152  buf[sizeof(buf) - 1] = 1;
2153  if (!fgets(buf, sizeof(buf), f)) {
2154  // Read errors presumably because of EOF
2155  break;
2156  }
2157 
2158  char s1[] = "processor";
2159  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2160  num_records++;
2161  continue;
2162  }
2163 
2164  // FIXME - this will match "node_<n> <garbage>"
2165  unsigned level;
2166  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2167  if (nodeIdIndex + level >= maxIndex) {
2168  maxIndex = nodeIdIndex + level;
2169  }
2170  continue;
2171  }
2172  }
2173 
2174  // Check for empty file / no valid processor records, or too many. The number
2175  // of records can't exceed the number of valid bits in the affinity mask.
2176  if (num_records == 0) {
2177  *line = 0;
2178  *msg_id = kmp_i18n_str_NoProcRecords;
2179  return -1;
2180  }
2181  if (num_records > (unsigned)__kmp_xproc) {
2182  *line = 0;
2183  *msg_id = kmp_i18n_str_TooManyProcRecords;
2184  return -1;
2185  }
2186 
2187  // Set the file pointer back to the beginning, so that we can scan the file
2188  // again, this time performing a full parse of the data. Allocate a vector of
2189  // ProcCpuInfo object, where we will place the data. Adding an extra element
2190  // at the end allows us to remove a lot of extra checks for termination
2191  // conditions.
2192  if (fseek(f, 0, SEEK_SET) != 0) {
2193  *line = 0;
2194  *msg_id = kmp_i18n_str_CantRewindCpuinfo;
2195  return -1;
2196  }
2197 
2198  // Allocate the array of records to store the proc info in. The dummy
2199  // element at the end makes the logic in filling them out easier to code.
2200  unsigned **threadInfo =
2201  (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *));
2202  unsigned i;
2203  for (i = 0; i <= num_records; i++) {
2204  threadInfo[i] =
2205  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2206  }
2207 
2208 #define CLEANUP_THREAD_INFO \
2209  for (i = 0; i <= num_records; i++) { \
2210  __kmp_free(threadInfo[i]); \
2211  } \
2212  __kmp_free(threadInfo);
2213 
2214  // A value of UINT_MAX means that we didn't find the field
2215  unsigned __index;
2216 
2217 #define INIT_PROC_INFO(p) \
2218  for (__index = 0; __index <= maxIndex; __index++) { \
2219  (p)[__index] = UINT_MAX; \
2220  }
2221 
2222  for (i = 0; i <= num_records; i++) {
2223  INIT_PROC_INFO(threadInfo[i]);
2224  }
2225 
2226  unsigned num_avail = 0;
2227  *line = 0;
2228  while (!feof(f)) {
2229  // Create an inner scoping level, so that all the goto targets at the end of
2230  // the loop appear in an outer scoping level. This avoids warnings about
2231  // jumping past an initialization to a target in the same block.
2232  {
2233  buf[sizeof(buf) - 1] = 1;
2234  bool long_line = false;
2235  if (!fgets(buf, sizeof(buf), f)) {
2236  // Read errors presumably because of EOF
2237  // If there is valid data in threadInfo[num_avail], then fake
2238  // a blank line to ensure that the last address gets parsed.
2239  bool valid = false;
2240  for (i = 0; i <= maxIndex; i++) {
2241  if (threadInfo[num_avail][i] != UINT_MAX) {
2242  valid = true;
2243  }
2244  }
2245  if (!valid) {
2246  break;
2247  }
2248  buf[0] = 0;
2249  } else if (!buf[sizeof(buf) - 1]) {
2250  // The line is longer than the buffer. Set a flag and don't
2251  // emit an error if we were going to ignore the line, anyway.
2252  long_line = true;
2253 
2254 #define CHECK_LINE \
2255  if (long_line) { \
2256  CLEANUP_THREAD_INFO; \
2257  *msg_id = kmp_i18n_str_LongLineCpuinfo; \
2258  return -1; \
2259  }
2260  }
2261  (*line)++;
2262 
2263  char s1[] = "processor";
2264  if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
2265  CHECK_LINE;
2266  char *p = strchr(buf + sizeof(s1) - 1, ':');
2267  unsigned val;
2268  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2269  goto no_val;
2270  if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
2271 #if KMP_ARCH_AARCH64
2272  // Handle the old AArch64 /proc/cpuinfo layout differently,
2273  // it contains all of the 'processor' entries listed in a
2274  // single 'Processor' section, therefore the normal looking
2275  // for duplicates in that section will always fail.
2276  num_avail++;
2277 #else
2278  goto dup_field;
2279 #endif
2280  threadInfo[num_avail][osIdIndex] = val;
2281 #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2282  char path[256];
2283  KMP_SNPRINTF(
2284  path, sizeof(path),
2285  "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2286  threadInfo[num_avail][osIdIndex]);
2287  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
2288 
2289  KMP_SNPRINTF(path, sizeof(path),
2290  "/sys/devices/system/cpu/cpu%u/topology/core_id",
2291  threadInfo[num_avail][osIdIndex]);
2292  __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
2293  continue;
2294 #else
2295  }
2296  char s2[] = "physical id";
2297  if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
2298  CHECK_LINE;
2299  char *p = strchr(buf + sizeof(s2) - 1, ':');
2300  unsigned val;
2301  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2302  goto no_val;
2303  if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
2304  goto dup_field;
2305  threadInfo[num_avail][pkgIdIndex] = val;
2306  continue;
2307  }
2308  char s3[] = "core id";
2309  if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
2310  CHECK_LINE;
2311  char *p = strchr(buf + sizeof(s3) - 1, ':');
2312  unsigned val;
2313  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2314  goto no_val;
2315  if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
2316  goto dup_field;
2317  threadInfo[num_avail][coreIdIndex] = val;
2318  continue;
2319 #endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
2320  }
2321  char s4[] = "thread id";
2322  if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
2323  CHECK_LINE;
2324  char *p = strchr(buf + sizeof(s4) - 1, ':');
2325  unsigned val;
2326  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2327  goto no_val;
2328  if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
2329  goto dup_field;
2330  threadInfo[num_avail][threadIdIndex] = val;
2331  continue;
2332  }
2333  unsigned level;
2334  if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
2335  CHECK_LINE;
2336  char *p = strchr(buf + sizeof(s4) - 1, ':');
2337  unsigned val;
2338  if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
2339  goto no_val;
2340  KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2341  if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
2342  goto dup_field;
2343  threadInfo[num_avail][nodeIdIndex + level] = val;
2344  continue;
2345  }
2346 
2347  // We didn't recognize the leading token on the line. There are lots of
2348  // leading tokens that we don't recognize - if the line isn't empty, go on
2349  // to the next line.
2350  if ((*buf != 0) && (*buf != '\n')) {
2351  // If the line is longer than the buffer, read characters
2352  // until we find a newline.
2353  if (long_line) {
2354  int ch;
2355  while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
2356  ;
2357  }
2358  continue;
2359  }
2360 
2361  // A newline has signalled the end of the processor record.
2362  // Check that there aren't too many procs specified.
2363  if ((int)num_avail == __kmp_xproc) {
2364  CLEANUP_THREAD_INFO;
2365  *msg_id = kmp_i18n_str_TooManyEntries;
2366  return -1;
2367  }
2368 
2369  // Check for missing fields. The osId field must be there, and we
2370  // currently require that the physical id field is specified, also.
2371  if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2372  CLEANUP_THREAD_INFO;
2373  *msg_id = kmp_i18n_str_MissingProcField;
2374  return -1;
2375  }
2376  if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2377  CLEANUP_THREAD_INFO;
2378  *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2379  return -1;
2380  }
2381 
2382  // Skip this proc if it is not included in the machine model.
2383  if (!KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex],
2384  __kmp_affin_fullMask)) {
2385  INIT_PROC_INFO(threadInfo[num_avail]);
2386  continue;
2387  }
2388 
2389  // We have a successful parse of this proc's info.
2390  // Increment the counter, and prepare for the next proc.
2391  num_avail++;
2392  KMP_ASSERT(num_avail <= num_records);
2393  INIT_PROC_INFO(threadInfo[num_avail]);
2394  }
2395  continue;
2396 
2397  no_val:
2398  CLEANUP_THREAD_INFO;
2399  *msg_id = kmp_i18n_str_MissingValCpuinfo;
2400  return -1;
2401 
2402  dup_field:
2403  CLEANUP_THREAD_INFO;
2404  *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2405  return -1;
2406  }
2407  *line = 0;
2408 
2409 #if KMP_MIC && REDUCE_TEAM_SIZE
2410  unsigned teamSize = 0;
2411 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2412 
2413  // check for num_records == __kmp_xproc ???
2414 
2415  // If there's only one thread context to bind to, form an Address object with
2416  // depth 1 and return immediately (or, if affinity is off, set address2os to
2417  // NULL and return).
2418  //
2419  // If it is configured to omit the package level when there is only a single
2420  // package, the logic at the end of this routine won't work if there is only a
2421  // single thread - it would try to form an Address object with depth 0.
2422  KMP_ASSERT(num_avail > 0);
2423  KMP_ASSERT(num_avail <= num_records);
2424  if (num_avail == 1) {
2425  __kmp_ncores = 1;
2426  __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2427  if (__kmp_affinity_verbose) {
2428  if (!KMP_AFFINITY_CAPABLE()) {
2429  KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2430  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2431  KMP_INFORM(Uniform, "KMP_AFFINITY");
2432  } else {
2433  KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2434  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2435  KMP_INFORM(Uniform, "KMP_AFFINITY");
2436  }
2437  int index;
2438  kmp_str_buf_t buf;
2439  __kmp_str_buf_init(&buf);
2440  __kmp_str_buf_print(&buf, "1");
2441  for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2442  __kmp_str_buf_print(&buf, " x 1");
2443  }
2444  KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, 1, 1, 1);
2445  __kmp_str_buf_free(&buf);
2446  }
2447 
2448  if (__kmp_affinity_type == affinity_none) {
2449  CLEANUP_THREAD_INFO;
2450  return 0;
2451  }
2452 
2453  *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair));
2454  Address addr(1);
2455  addr.labels[0] = threadInfo[0][pkgIdIndex];
2456  (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2457 
2458  if (__kmp_affinity_gran_levels < 0) {
2459  __kmp_affinity_gran_levels = 0;
2460  }
2461 
2462  if (__kmp_affinity_verbose) {
2463  __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2464  }
2465 
2466  CLEANUP_THREAD_INFO;
2467  return 1;
2468  }
2469 
2470  // Sort the threadInfo table by physical Id.
2471  qsort(threadInfo, num_avail, sizeof(*threadInfo),
2472  __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2473 
2474  // The table is now sorted by pkgId / coreId / threadId, but we really don't
2475  // know the radix of any of the fields. pkgId's may be sparsely assigned among
2476  // the chips on a system. Although coreId's are usually assigned
2477  // [0 .. coresPerPkg-1] and threadId's are usually assigned
2478  // [0..threadsPerCore-1], we don't want to make any such assumptions.
2479  //
2480  // For that matter, we don't know what coresPerPkg and threadsPerCore (or the
2481  // total # packages) are at this point - we want to determine that now. We
2482  // only have an upper bound on the first two figures.
2483  unsigned *counts =
2484  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2485  unsigned *maxCt =
2486  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2487  unsigned *totals =
2488  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2489  unsigned *lastId =
2490  (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned));
2491 
2492  bool assign_thread_ids = false;
2493  unsigned threadIdCt;
2494  unsigned index;
2495 
2496 restart_radix_check:
2497  threadIdCt = 0;
2498 
2499  // Initialize the counter arrays with data from threadInfo[0].
2500  if (assign_thread_ids) {
2501  if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2502  threadInfo[0][threadIdIndex] = threadIdCt++;
2503  } else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2504  threadIdCt = threadInfo[0][threadIdIndex] + 1;
2505  }
2506  }
2507  for (index = 0; index <= maxIndex; index++) {
2508  counts[index] = 1;
2509  maxCt[index] = 1;
2510  totals[index] = 1;
2511  lastId[index] = threadInfo[0][index];
2512 
2513  }
2514 
2515  // Run through the rest of the OS procs.
2516  for (i = 1; i < num_avail; i++) {
2517  // Find the most significant index whose id differs from the id for the
2518  // previous OS proc.
2519  for (index = maxIndex; index >= threadIdIndex; index--) {
2520  if (assign_thread_ids && (index == threadIdIndex)) {
2521  // Auto-assign the thread id field if it wasn't specified.
2522  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2523  threadInfo[i][threadIdIndex] = threadIdCt++;
2524  }
2525  // Apparently the thread id field was specified for some entries and not
2526  // others. Start the thread id counter off at the next higher thread id.
2527  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2528  threadIdCt = threadInfo[i][threadIdIndex] + 1;
2529  }
2530  }
2531  if (threadInfo[i][index] != lastId[index]) {
2532  // Run through all indices which are less significant, and reset the
2533  // counts to 1. At all levels up to and including index, we need to
2534  // increment the totals and record the last id.
2535  unsigned index2;
2536  for (index2 = threadIdIndex; index2 < index; index2++) {
2537  totals[index2]++;
2538  if (counts[index2] > maxCt[index2]) {
2539  maxCt[index2] = counts[index2];
2540  }
2541  counts[index2] = 1;
2542  lastId[index2] = threadInfo[i][index2];
2543  }
2544  counts[index]++;
2545  totals[index]++;
2546  lastId[index] = threadInfo[i][index];
2547 
2548  if (assign_thread_ids && (index > threadIdIndex)) {
2549 
2550 #if KMP_MIC && REDUCE_TEAM_SIZE
2551  // The default team size is the total #threads in the machine
2552  // minus 1 thread for every core that has 3 or more threads.
2553  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2554 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2555 
2556  // Restart the thread counter, as we are on a new core.
2557  threadIdCt = 0;
2558 
2559  // Auto-assign the thread id field if it wasn't specified.
2560  if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2561  threadInfo[i][threadIdIndex] = threadIdCt++;
2562  }
2563 
2564  // Apparently the thread id field was specified for some entries and
2565  // not others. Start the thread id counter off at the next higher
2566  // thread id.
2567  else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2568  threadIdCt = threadInfo[i][threadIdIndex] + 1;
2569  }
2570  }
2571  break;
2572  }
2573  }
2574  if (index < threadIdIndex) {
2575  // If thread ids were specified, it is an error if they are not unique.
2576  // Also, check that we haven't already restarted the loop (to be safe -
2577  // shouldn't need to).
2578  if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) {
2579  __kmp_free(lastId);
2580  __kmp_free(totals);
2581  __kmp_free(maxCt);
2582  __kmp_free(counts);
2583  CLEANUP_THREAD_INFO;
2584  *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2585  return -1;
2586  }
2587 
2588  // If the thread ids were not specified and we see entries that
2589  // are duplicates, start the loop over and assign the thread ids manually.
2590  assign_thread_ids = true;
2591  goto restart_radix_check;
2592  }
2593  }
2594 
2595 #if KMP_MIC && REDUCE_TEAM_SIZE
2596  // The default team size is the total #threads in the machine
2597  // minus 1 thread for every core that has 3 or more threads.
2598  teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1);
2599 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2600 
2601  for (index = threadIdIndex; index <= maxIndex; index++) {
2602  if (counts[index] > maxCt[index]) {
2603  maxCt[index] = counts[index];
2604  }
2605  }
2606 
2607  __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2608  nCoresPerPkg = maxCt[coreIdIndex];
2609  nPackages = totals[pkgIdIndex];
2610 
2611  // Check to see if the machine topology is uniform
2612  unsigned prod = totals[maxIndex];
2613  for (index = threadIdIndex; index < maxIndex; index++) {
2614  prod *= maxCt[index];
2615  }
2616  bool uniform = (prod == totals[threadIdIndex]);
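  // Worked example (hypothetical counts, no node_<n> levels): on 2 packages x
  // 4 cores x 2 threads with no disabled contexts, totals[threadIdIndex] = 16
  // and prod = 2 (packages) * 2 (max threads/core) * 4 (max cores/pkg) = 16,
  // so the topology is uniform; a package with a disabled core would leave
  // prod greater than the total and the topology would be reported as
  // non-uniform.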
2617 
2618  // When affinity is off, this routine will still be called to set
2619  // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages.
2620  // Make sure all these vars are set correctly, and return now if affinity is
2621  // not enabled.
2622  __kmp_ncores = totals[coreIdIndex];
2623 
2624  if (__kmp_affinity_verbose) {
2625  if (!KMP_AFFINITY_CAPABLE()) {
2626  KMP_INFORM(AffNotCapableUseCpuinfo, "KMP_AFFINITY");
2627  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2628  if (uniform) {
2629  KMP_INFORM(Uniform, "KMP_AFFINITY");
2630  } else {
2631  KMP_INFORM(NonUniform, "KMP_AFFINITY");
2632  }
2633  } else {
2634  KMP_INFORM(AffCapableUseCpuinfo, "KMP_AFFINITY");
2635  KMP_INFORM(AvailableOSProc, "KMP_AFFINITY", __kmp_avail_proc);
2636  if (uniform) {
2637  KMP_INFORM(Uniform, "KMP_AFFINITY");
2638  } else {
2639  KMP_INFORM(NonUniform, "KMP_AFFINITY");
2640  }
2641  }
2642  kmp_str_buf_t buf;
2643  __kmp_str_buf_init(&buf);
2644 
2645  __kmp_str_buf_print(&buf, "%d", totals[maxIndex]);
2646  for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2647  __kmp_str_buf_print(&buf, " x %d", maxCt[index]);
2648  }
2649  KMP_INFORM(TopologyExtra, "KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2650  maxCt[threadIdIndex], __kmp_ncores);
2651 
2652  __kmp_str_buf_free(&buf);
2653  }
2654 
2655 #if KMP_MIC && REDUCE_TEAM_SIZE
2656  // Set the default team size.
2657  if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2658  __kmp_dflt_team_nth = teamSize;
2659  KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting "
2660  "__kmp_dflt_team_nth = %d\n",
2661  __kmp_dflt_team_nth));
2662  }
2663 #endif // KMP_MIC && REDUCE_TEAM_SIZE
2664 
2665  KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
2666  KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc);
2667  __kmp_pu_os_idx = (int *)__kmp_allocate(sizeof(int) * __kmp_avail_proc);
2668  for (i = 0; i < num_avail; ++i) { // fill the os indices
2669  __kmp_pu_os_idx[i] = threadInfo[i][osIdIndex];
2670  }
2671 
2672  if (__kmp_affinity_type == affinity_none) {
2673  __kmp_free(lastId);
2674  __kmp_free(totals);
2675  __kmp_free(maxCt);
2676  __kmp_free(counts);
2677  CLEANUP_THREAD_INFO;
2678  return 0;
2679  }
2680 
2681  // Count the number of levels which have more nodes at that level than at the
2682  // parent's level (with an implicit root node above the top level).
2683  // This is equivalent to saying that there is at least one node at this level
2684  // which has a sibling. These levels are in the map, and the package level is
2685  // always in the map.
2686  bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool));
2687  for (index = threadIdIndex; index < maxIndex; index++) {
2688  KMP_ASSERT(totals[index] >= totals[index + 1]);
2689  inMap[index] = (totals[index] > totals[index + 1]);
2690  }
2691  inMap[maxIndex] = (totals[maxIndex] > 1);
2692  inMap[pkgIdIndex] = true;
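  // Example (hypothetical cpuinfo): with 2 packages, 4 cores per package and
  // 1 thread per core, totals[threadIdIndex] == totals[coreIdIndex] == 8, so
  // the thread level is radix 1 and is left out of the map, while the core
  // level (8 > 2) and the package level stay in; depth below then comes out
  // as 2.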
2693 
2694  int depth = 0;
2695  for (index = threadIdIndex; index <= maxIndex; index++) {
2696  if (inMap[index]) {
2697  depth++;
2698  }
2699  }
2700  KMP_ASSERT(depth > 0);
2701 
2702  // Construct the data structure that is to be returned.
2703  *address2os = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) * num_avail);
2704  int pkgLevel = -1;
2705  int coreLevel = -1;
2706  int threadLevel = -1;
2707 
2708  for (i = 0; i < num_avail; ++i) {
2709  Address addr(depth);
2710  unsigned os = threadInfo[i][osIdIndex];
2711  int src_index;
2712  int dst_index = 0;
2713 
2714  for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2715  if (!inMap[src_index]) {
2716  continue;
2717  }
2718  addr.labels[dst_index] = threadInfo[i][src_index];
2719  if (src_index == pkgIdIndex) {
2720  pkgLevel = dst_index;
2721  } else if (src_index == coreIdIndex) {
2722  coreLevel = dst_index;
2723  } else if (src_index == threadIdIndex) {
2724  threadLevel = dst_index;
2725  }
2726  dst_index++;
2727  }
2728  (*address2os)[i] = AddrUnsPair(addr, os);
2729  }
2730 
2731  if (__kmp_affinity_gran_levels < 0) {
2732  // Set the granularity level based on what levels are modeled
2733  // in the machine topology map.
2734  unsigned src_index;
2735  __kmp_affinity_gran_levels = 0;
2736  for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2737  if (!inMap[src_index]) {
2738  continue;
2739  }
2740  switch (src_index) {
2741  case threadIdIndex:
2742  if (__kmp_affinity_gran > affinity_gran_thread) {
2743  __kmp_affinity_gran_levels++;
2744  }
2745 
2746  break;
2747  case coreIdIndex:
2748  if (__kmp_affinity_gran > affinity_gran_core) {
2749  __kmp_affinity_gran_levels++;
2750  }
2751  break;
2752 
2753  case pkgIdIndex:
2754  if (__kmp_affinity_gran > affinity_gran_package) {
2755  __kmp_affinity_gran_levels++;
2756  }
2757  break;
2758  }
2759  }
2760  }
2761 
2762  if (__kmp_affinity_verbose) {
2763  __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2764  coreLevel, threadLevel);
2765  }
2766 
2767  __kmp_free(inMap);
2768  __kmp_free(lastId);
2769  __kmp_free(totals);
2770  __kmp_free(maxCt);
2771  __kmp_free(counts);
2772  CLEANUP_THREAD_INFO;
2773  return depth;
2774 }
2775 
2776 // Create and return a table of affinity masks, indexed by OS thread ID.
2777 // This routine handles OR'ing together all the affinity masks of threads
2778 // that are sufficiently close, if granularity > fine.
2779 static kmp_affin_mask_t *__kmp_create_masks(unsigned *maxIndex,
2780  unsigned *numUnique,
2781  AddrUnsPair *address2os,
2782  unsigned numAddrs) {
2783  // First form a table of affinity masks in order of OS thread id.
2784  unsigned depth;
2785  unsigned maxOsId;
2786  unsigned i;
2787 
2788  KMP_ASSERT(numAddrs > 0);
2789  depth = address2os[0].first.depth;
2790 
2791  maxOsId = 0;
2792  for (i = numAddrs - 1;; --i) {
2793  unsigned osId = address2os[i].second;
2794  if (osId > maxOsId) {
2795  maxOsId = osId;
2796  }
2797  if (i == 0)
2798  break;
2799  }
2800  kmp_affin_mask_t *osId2Mask;
2801  KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId + 1));
2802 
2803  // Sort the address2os table according to physical order. Doing so will put
2804  // all threads on the same core/package/node in consecutive locations.
2805  qsort(address2os, numAddrs, sizeof(*address2os),
2806  __kmp_affinity_cmp_Address_labels);
2807 
2808  KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2809  if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2810  KMP_INFORM(ThreadsMigrate, "KMP_AFFINITY", __kmp_affinity_gran_levels);
2811  }
2812  if (__kmp_affinity_gran_levels >= (int)depth) {
2813  if (__kmp_affinity_verbose ||
2814  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
2815  KMP_WARNING(AffThreadsMayMigrate);
2816  }
2817  }
2818 
2819  // Run through the table, forming the masks for all threads on each core.
2820  // Threads on the same core will have identical "Address" objects, not
2821  // considering the last level, which must be the thread id. All threads on a
2822  // core will appear consecutively.
2823  unsigned unique = 0;
2824  unsigned j = 0; // index of 1st thread on core
2825  unsigned leader = 0;
2826  Address *leaderAddr = &(address2os[0].first);
2827  kmp_affin_mask_t *sum;
2828  KMP_CPU_ALLOC_ON_STACK(sum);
2829  KMP_CPU_ZERO(sum);
2830  KMP_CPU_SET(address2os[0].second, sum);
2831  for (i = 1; i < numAddrs; i++) {
2832  // If this thread is sufficiently close to the leader (within the
2833  // granularity setting), then set the bit for this os thread in the
2834  // affinity mask for this group, and go on to the next thread.
2835  if (leaderAddr->isClose(address2os[i].first, __kmp_affinity_gran_levels)) {
2836  KMP_CPU_SET(address2os[i].second, sum);
2837  continue;
2838  }
2839 
2840  // For every thread in this group, copy the mask to the thread's entry in
2841  // the osId2Mask table. Mark the first address as a leader.
2842  for (; j < i; j++) {
2843  unsigned osId = address2os[j].second;
2844  KMP_DEBUG_ASSERT(osId <= maxOsId);
2845  kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2846  KMP_CPU_COPY(mask, sum);
2847  address2os[j].first.leader = (j == leader);
2848  }
2849  unique++;
2850 
2851  // Start a new mask.
2852  leader = i;
2853  leaderAddr = &(address2os[i].first);
2854  KMP_CPU_ZERO(sum);
2855  KMP_CPU_SET(address2os[i].second, sum);
2856  }
2857 
2858  // For every thread in last group, copy the mask to the thread's
2859  // entry in the osId2Mask table.
2860  for (; j < i; j++) {
2861  unsigned osId = address2os[j].second;
2862  KMP_DEBUG_ASSERT(osId <= maxOsId);
2863  kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2864  KMP_CPU_COPY(mask, sum);
2865  address2os[j].first.leader = (j == leader);
2866  }
2867  unique++;
2868  KMP_CPU_FREE_FROM_STACK(sum);
2869 
2870  *maxIndex = maxOsId;
2871  *numUnique = unique;
2872  return osId2Mask;
2873 }
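// For example (hypothetical, core granularity): two hw threads whose Address
// labels differ only in the last (thread) level are "close" when
// __kmp_affinity_gran_levels is 1, so both of their osId2Mask entries end up
// holding the same two-bit mask and numUnique counts them as a single place.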
2874 
2875 // Stuff for the affinity proclist parsers. It's easier to declare these vars
2876 // as file-static than to try to pass them through the calling sequence of
2877 // the recursive-descent OMP_PLACES parser.
2878 static kmp_affin_mask_t *newMasks;
2879 static int numNewMasks;
2880 static int nextNewMask;
2881 
2882 #define ADD_MASK(_mask) \
2883  { \
2884  if (nextNewMask >= numNewMasks) { \
2885  int i; \
2886  numNewMasks *= 2; \
2887  kmp_affin_mask_t *temp; \
2888  KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \
2889  for (i = 0; i < numNewMasks / 2; i++) { \
2890  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \
2891  kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \
2892  KMP_CPU_COPY(dest, src); \
2893  } \
2894  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \
2895  newMasks = temp; \
2896  } \
2897  KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \
2898  nextNewMask++; \
2899  }
2900 
2901 #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \
2902  { \
2903  if (((_osId) > _maxOsId) || \
2904  (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \
2905  if (__kmp_affinity_verbose || \
2906  (__kmp_affinity_warnings && \
2907  (__kmp_affinity_type != affinity_none))) { \
2908  KMP_WARNING(AffIgnoreInvalidProcID, _osId); \
2909  } \
2910  } else { \
2911  ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \
2912  } \
2913  }
2914 
2915 // Re-parse the proclist (for the explicit affinity type), and form the list
2916 // of affinity newMasks indexed by gtid.
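// As an illustration (hypothetical proc ids, all assumed valid), a proclist
// such as "0,2-6:2,{9,10,11}" would produce one single-proc mask for 0,
// single-proc masks for 2, 4 and 6 from the stride range, and one combined
// mask covering procs 9-11 from the braced set, in that order.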
2917 static void __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2918  unsigned int *out_numMasks,
2919  const char *proclist,
2920  kmp_affin_mask_t *osId2Mask,
2921  int maxOsId) {
2922  int i;
2923  const char *scan = proclist;
2924  const char *next = proclist;
2925 
2926  // Allocate a temporary mask vector via KMP_CPU_INTERNAL_ALLOC_ARRAY; the
2927  // ADD_MASK macro doubles its size whenever it fills up.
2928  numNewMasks = 2;
2929  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
2930  nextNewMask = 0;
2931  kmp_affin_mask_t *sumMask;
2932  KMP_CPU_ALLOC(sumMask);
2933  int setSize = 0;
2934 
2935  for (;;) {
2936  int start, end, stride;
2937 
2938  SKIP_WS(scan);
2939  next = scan;
2940  if (*next == '\0') {
2941  break;
2942  }
2943 
2944  if (*next == '{') {
2945  int num;
2946  setSize = 0;
2947  next++; // skip '{'
2948  SKIP_WS(next);
2949  scan = next;
2950 
2951  // Read the first integer in the set.
2952  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
2953  SKIP_DIGITS(next);
2954  num = __kmp_str_to_int(scan, *next);
2955  KMP_ASSERT2(num >= 0, "bad explicit proc list");
2956 
2957  // Copy the mask for that osId to the sum (union) mask.
2958  if ((num > maxOsId) ||
2959  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2960  if (__kmp_affinity_verbose ||
2961  (__kmp_affinity_warnings &&
2962  (__kmp_affinity_type != affinity_none))) {
2963  KMP_WARNING(AffIgnoreInvalidProcID, num);
2964  }
2965  KMP_CPU_ZERO(sumMask);
2966  } else {
2967  KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2968  setSize = 1;
2969  }
2970 
2971  for (;;) {
2972  // Check for end of set.
2973  SKIP_WS(next);
2974  if (*next == '}') {
2975  next++; // skip '}'
2976  break;
2977  }
2978 
2979  // Skip optional comma.
2980  if (*next == ',') {
2981  next++;
2982  }
2983  SKIP_WS(next);
2984 
2985  // Read the next integer in the set.
2986  scan = next;
2987  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
2988 
2989  SKIP_DIGITS(next);
2990  num = __kmp_str_to_int(scan, *next);
2991  KMP_ASSERT2(num >= 0, "bad explicit proc list");
2992 
2993  // Add the mask for that osId to the sum mask.
2994  if ((num > maxOsId) ||
2995  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2996  if (__kmp_affinity_verbose ||
2997  (__kmp_affinity_warnings &&
2998  (__kmp_affinity_type != affinity_none))) {
2999  KMP_WARNING(AffIgnoreInvalidProcID, num);
3000  }
3001  } else {
3002  KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
3003  setSize++;
3004  }
3005  }
3006  if (setSize > 0) {
3007  ADD_MASK(sumMask);
3008  }
3009 
3010  SKIP_WS(next);
3011  if (*next == ',') {
3012  next++;
3013  }
3014  scan = next;
3015  continue;
3016  }
3017 
3018  // Read the first integer.
3019  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3020  SKIP_DIGITS(next);
3021  start = __kmp_str_to_int(scan, *next);
3022  KMP_ASSERT2(start >= 0, "bad explicit proc list");
3023  SKIP_WS(next);
3024 
3025  // If this isn't a range, then add a mask to the list and go on.
3026  if (*next != '-') {
3027  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3028 
3029  // Skip optional comma.
3030  if (*next == ',') {
3031  next++;
3032  }
3033  scan = next;
3034  continue;
3035  }
3036 
3037  // This is a range. Skip over the '-' and read in the 2nd int.
3038  next++; // skip '-'
3039  SKIP_WS(next);
3040  scan = next;
3041  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3042  SKIP_DIGITS(next);
3043  end = __kmp_str_to_int(scan, *next);
3044  KMP_ASSERT2(end >= 0, "bad explicit proc list");
3045 
3046  // Check for a stride parameter
3047  stride = 1;
3048  SKIP_WS(next);
3049  if (*next == ':') {
3050  // A stride is specified. Skip over the ':' and read the 3rd int.
3051  int sign = +1;
3052  next++; // skip ':'
3053  SKIP_WS(next);
3054  scan = next;
3055  if (*next == '-') {
3056  sign = -1;
3057  next++;
3058  SKIP_WS(next);
3059  scan = next;
3060  }
3061  KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
3062  SKIP_DIGITS(next);
3063  stride = __kmp_str_to_int(scan, *next);
3064  KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3065  stride *= sign;
3066  }
3067 
3068  // Do some range checks.
3069  KMP_ASSERT2(stride != 0, "bad explicit proc list");
3070  if (stride > 0) {
3071  KMP_ASSERT2(start <= end, "bad explicit proc list");
3072  } else {
3073  KMP_ASSERT2(start >= end, "bad explicit proc list");
3074  }
3075  KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3076 
3077  // Add the mask for each OS proc # to the list.
3078  if (stride > 0) {
3079  do {
3080  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3081  start += stride;
3082  } while (start <= end);
3083  } else {
3084  do {
3085  ADD_MASK_OSID(start, osId2Mask, maxOsId);
3086  start += stride;
3087  } while (start >= end);
3088  }
3089 
3090  // Skip optional comma.
3091  SKIP_WS(next);
3092  if (*next == ',') {
3093  next++;
3094  }
3095  scan = next;
3096  }
3097 
3098  *out_numMasks = nextNewMask;
3099  if (nextNewMask == 0) {
3100  *out_masks = NULL;
3101  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3102  return;
3103  }
3104  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3105  for (i = 0; i < nextNewMask; i++) {
3106  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3107  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3108  KMP_CPU_COPY(dest, src);
3109  }
3110  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3111  KMP_CPU_FREE(sumMask);
3112 }
3113 
3114 /*-----------------------------------------------------------------------------
3115 Re-parse the OMP_PLACES proc id list, forming the newMasks for the different
3116 places. Again, here is the grammar:
3117 
3118 place_list := place
3119 place_list := place , place_list
3120 place := num
3121 place := place : num
3122 place := place : num : signed
3123 place := { subplacelist }
3124 place := ! place // (lowest priority)
3125 subplace_list := subplace
3126 subplace_list := subplace , subplace_list
3127 subplace := num
3128 subplace := num : num
3129 subplace := num : num : signed
3130 signed := num
3131 signed := + signed
3132 signed := - signed
3133 -----------------------------------------------------------------------------*/
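// As an illustration of the grammar above, an OMP_PLACES proc-id list such as
// "{0,1},{2,3},{4:4}" (hypothetical proc ids) parses as three places: the
// first two are explicit two-proc subplace lists, and the third expands the
// subplace "4:4" to procs 4,5,6,7 (start 4, count 4, implicit stride 1).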
3134 static void __kmp_process_subplace_list(const char **scan,
3135  kmp_affin_mask_t *osId2Mask,
3136  int maxOsId, kmp_affin_mask_t *tempMask,
3137  int *setSize) {
3138  const char *next;
3139 
3140  for (;;) {
3141  int start, count, stride, i;
3142 
3143  // Read in the starting proc id
3144  SKIP_WS(*scan);
3145  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3146  next = *scan;
3147  SKIP_DIGITS(next);
3148  start = __kmp_str_to_int(*scan, *next);
3149  KMP_ASSERT(start >= 0);
3150  *scan = next;
3151 
3152  // valid follow sets are ',' ':' and '}'
3153  SKIP_WS(*scan);
3154  if (**scan == '}' || **scan == ',') {
3155  if ((start > maxOsId) ||
3156  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3157  if (__kmp_affinity_verbose ||
3158  (__kmp_affinity_warnings &&
3159  (__kmp_affinity_type != affinity_none))) {
3160  KMP_WARNING(AffIgnoreInvalidProcID, start);
3161  }
3162  } else {
3163  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3164  (*setSize)++;
3165  }
3166  if (**scan == '}') {
3167  break;
3168  }
3169  (*scan)++; // skip ','
3170  continue;
3171  }
3172  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3173  (*scan)++; // skip ':'
3174 
3175  // Read count parameter
3176  SKIP_WS(*scan);
3177  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3178  next = *scan;
3179  SKIP_DIGITS(next);
3180  count = __kmp_str_to_int(*scan, *next);
3181  KMP_ASSERT(count >= 0);
3182  *scan = next;
3183 
3184  // valid follow sets are ',' ':' and '}'
3185  SKIP_WS(*scan);
3186  if (**scan == '}' || **scan == ',') {
3187  for (i = 0; i < count; i++) {
3188  if ((start > maxOsId) ||
3189  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3190  if (__kmp_affinity_verbose ||
3191  (__kmp_affinity_warnings &&
3192  (__kmp_affinity_type != affinity_none))) {
3193  KMP_WARNING(AffIgnoreInvalidProcID, start);
3194  }
3195  break; // don't proliferate warnings for large count
3196  } else {
3197  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3198  start++;
3199  (*setSize)++;
3200  }
3201  }
3202  if (**scan == '}') {
3203  break;
3204  }
3205  (*scan)++; // skip ','
3206  continue;
3207  }
3208  KMP_ASSERT2(**scan == ':', "bad explicit places list");
3209  (*scan)++; // skip ':'
3210 
3211  // Read stride parameter
3212  int sign = +1;
3213  for (;;) {
3214  SKIP_WS(*scan);
3215  if (**scan == '+') {
3216  (*scan)++; // skip '+'
3217  continue;
3218  }
3219  if (**scan == '-') {
3220  sign *= -1;
3221  (*scan)++; // skip '-'
3222  continue;
3223  }
3224  break;
3225  }
3226  SKIP_WS(*scan);
3227  KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3228  next = *scan;
3229  SKIP_DIGITS(next);
3230  stride = __kmp_str_to_int(*scan, *next);
3231  KMP_ASSERT(stride >= 0);
3232  *scan = next;
3233  stride *= sign;
3234 
3235  // valid follow sets are ',' and '}'
3236  SKIP_WS(*scan);
3237  if (**scan == '}' || **scan == ',') {
3238  for (i = 0; i < count; i++) {
3239  if ((start > maxOsId) ||
3240  (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3241  if (__kmp_affinity_verbose ||
3242  (__kmp_affinity_warnings &&
3243  (__kmp_affinity_type != affinity_none))) {
3244  KMP_WARNING(AffIgnoreInvalidProcID, start);
3245  }
3246  break; // don't proliferate warnings for large count
3247  } else {
3248  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3249  start += stride;
3250  (*setSize)++;
3251  }
3252  }
3253  if (**scan == '}') {
3254  break;
3255  }
3256  (*scan)++; // skip ','
3257  continue;
3258  }
3259 
3260  KMP_ASSERT2(0, "bad explicit places list");
3261  }
3262 }
3263 
3264 static void __kmp_process_place(const char **scan, kmp_affin_mask_t *osId2Mask,
3265  int maxOsId, kmp_affin_mask_t *tempMask,
3266  int *setSize) {
3267  const char *next;
3268 
3269  // valid follow sets are '{' '!' and num
3270  SKIP_WS(*scan);
3271  if (**scan == '{') {
3272  (*scan)++; // skip '{'
3273  __kmp_process_subplace_list(scan, osId2Mask, maxOsId, tempMask, setSize);
3274  KMP_ASSERT2(**scan == '}', "bad explicit places list");
3275  (*scan)++; // skip '}'
3276  } else if (**scan == '!') {
3277  (*scan)++; // skip '!'
3278  __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3279  KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3280  } else if ((**scan >= '0') && (**scan <= '9')) {
3281  next = *scan;
3282  SKIP_DIGITS(next);
3283  int num = __kmp_str_to_int(*scan, *next);
3284  KMP_ASSERT(num >= 0);
3285  if ((num > maxOsId) ||
3286  (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3287  if (__kmp_affinity_verbose ||
3288  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
3289  KMP_WARNING(AffIgnoreInvalidProcID, num);
3290  }
3291  } else {
3292  KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3293  (*setSize)++;
3294  }
3295  *scan = next; // skip num
3296  } else {
3297  KMP_ASSERT2(0, "bad explicit places list");
3298  }
3299 }
3300 
3301 // static void
3302 void __kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
3303  unsigned int *out_numMasks,
3304  const char *placelist,
3305  kmp_affin_mask_t *osId2Mask,
3306  int maxOsId) {
3307  int i, j, count, stride, sign;
3308  const char *scan = placelist;
3309  const char *next = placelist;
3310 
3311  numNewMasks = 2;
3312  KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
3313  nextNewMask = 0;
3314 
3315  // tempMask is modified based on the previous or initial
3316  // place to form the current place
3317  // previousMask contains the previous place
3318  kmp_affin_mask_t *tempMask;
3319  kmp_affin_mask_t *previousMask;
3320  KMP_CPU_ALLOC(tempMask);
3321  KMP_CPU_ZERO(tempMask);
3322  KMP_CPU_ALLOC(previousMask);
3323  KMP_CPU_ZERO(previousMask);
3324  int setSize = 0;
3325 
3326  for (;;) {
3327  __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);
3328 
3329  // valid follow sets are ',' ':' and EOL
3330  SKIP_WS(scan);
3331  if (*scan == '\0' || *scan == ',') {
3332  if (setSize > 0) {
3333  ADD_MASK(tempMask);
3334  }
3335  KMP_CPU_ZERO(tempMask);
3336  setSize = 0;
3337  if (*scan == '\0') {
3338  break;
3339  }
3340  scan++; // skip ','
3341  continue;
3342  }
3343 
3344  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3345  scan++; // skip ':'
3346 
3347  // Read count parameter
3348  SKIP_WS(scan);
3349  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3350  next = scan;
3351  SKIP_DIGITS(next);
3352  count = __kmp_str_to_int(scan, *next);
3353  KMP_ASSERT(count >= 0);
3354  scan = next;
3355 
3356  // valid follow sets are ',' ':' and EOL
3357  SKIP_WS(scan);
3358  if (*scan == '\0' || *scan == ',') {
3359  stride = +1;
3360  } else {
3361  KMP_ASSERT2(*scan == ':', "bad explicit places list");
3362  scan++; // skip ':'
3363 
3364  // Read stride parameter
3365  sign = +1;
3366  for (;;) {
3367  SKIP_WS(scan);
3368  if (*scan == '+') {
3369  scan++; // skip '+'
3370  continue;
3371  }
3372  if (*scan == '-') {
3373  sign *= -1;
3374  scan++; // skip '-'
3375  continue;
3376  }
3377  break;
3378  }
3379  SKIP_WS(scan);
3380  KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
3381  next = scan;
3382  SKIP_DIGITS(next);
3383  stride = __kmp_str_to_int(scan, *next);
3384  KMP_DEBUG_ASSERT(stride >= 0);
3385  scan = next;
3386  stride *= sign;
3387  }
3388 
3389  // Add places determined by initial_place : count : stride
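 // For example (illustrative): with an initial place of {0,1}, count 3 and
 // stride 2, the loop below emits the places {0,1}, {2,3} and {4,5},
 // provided those proc ids are present in the full mask.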
3390  for (i = 0; i < count; i++) {
3391  if (setSize == 0) {
3392  break;
3393  }
3394  // Add the current place, then build the next place (tempMask) from that
3395  KMP_CPU_COPY(previousMask, tempMask);
3396  ADD_MASK(previousMask);
3397  KMP_CPU_ZERO(tempMask);
3398  setSize = 0;
3399  KMP_CPU_SET_ITERATE(j, previousMask) {
3400  if (!KMP_CPU_ISSET(j, previousMask)) {
3401  continue;
3402  }
3403  if ((j + stride > maxOsId) || (j + stride < 0) ||
3404  (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
3405  (!KMP_CPU_ISSET(j + stride,
3406  KMP_CPU_INDEX(osId2Mask, j + stride)))) {
3407  if ((__kmp_affinity_verbose ||
3408  (__kmp_affinity_warnings &&
3409  (__kmp_affinity_type != affinity_none))) &&
3410  i < count - 1) {
3411  KMP_WARNING(AffIgnoreInvalidProcID, j + stride);
3412  }
3413  continue;
3414  }
3415  KMP_CPU_SET(j + stride, tempMask);
3416  setSize++;
3417  }
3418  }
3419  KMP_CPU_ZERO(tempMask);
3420  setSize = 0;
3421 
3422  // valid follow sets are ',' and EOL
3423  SKIP_WS(scan);
3424  if (*scan == '\0') {
3425  break;
3426  }
3427  if (*scan == ',') {
3428  scan++; // skip ','
3429  continue;
3430  }
3431 
3432  KMP_ASSERT2(0, "bad explicit places list");
3433  }
3434 
3435  *out_numMasks = nextNewMask;
3436  if (nextNewMask == 0) {
3437  *out_masks = NULL;
3438  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3439  return;
3440  }
3441  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3442  KMP_CPU_FREE(tempMask);
3443  KMP_CPU_FREE(previousMask);
3444  for (i = 0; i < nextNewMask; i++) {
3445  kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
3446  kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
3447  KMP_CPU_COPY(dest, src);
3448  }
3449  KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3450 }
3451 
3452 #undef ADD_MASK
3453 #undef ADD_MASK_OSID
3454 
3455 #if KMP_USE_HWLOC
3456 static int __kmp_hwloc_skip_PUs_obj(hwloc_topology_t t, hwloc_obj_t o) {
3457  // skip PU descendants of the object o
3458  int skipped = 0;
3459  hwloc_obj_t hT = NULL;
3460  int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3461  for (int i = 0; i < N; ++i) {
3462  KMP_DEBUG_ASSERT(hT);
3463  unsigned idx = hT->os_index;
3464  if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3465  KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3466  KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3467  ++skipped;
3468  }
3469  hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3470  }
3471  return skipped; // return the number of skipped units
3472 }
3473 
3474 static int __kmp_hwloc_obj_has_PUs(hwloc_topology_t t, hwloc_obj_t o) {
3475  // check if obj has PUs present in fullMask
3476  hwloc_obj_t hT = NULL;
3477  int N = __kmp_hwloc_count_children_by_type(t, o, HWLOC_OBJ_PU, &hT);
3478  for (int i = 0; i < N; ++i) {
3479  KMP_DEBUG_ASSERT(hT);
3480  unsigned idx = hT->os_index;
3481  if (KMP_CPU_ISSET(idx, __kmp_affin_fullMask))
3482  return 1; // found PU
3483  hT = hwloc_get_next_obj_by_type(t, HWLOC_OBJ_PU, hT);
3484  }
3485  return 0; // no PUs found
3486 }
3487 #endif // KMP_USE_HWLOC
3488 
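// Note (added for clarity, not in the original source): the __kmp_hws_*
// values consumed below are derived from the KMP_HW_SUBSET environment
// variable, e.g. a setting such as "2s,4c,2t" requests 2 sockets, 4 cores
// per socket and 2 threads per core (syntax shown here is illustrative).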
3489 static void __kmp_apply_thread_places(AddrUnsPair **pAddr, int depth) {
3490  AddrUnsPair *newAddr;
3491  if (__kmp_hws_requested == 0)
3492  goto _exit; // no topology limiting actions requested, exit
3493 #if KMP_USE_HWLOC
3494  if (__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
3495  // The number of subobjects is calculated dynamically; this works fine for
3496  // any non-uniform topology.
3497  // L2 cache objects are determined by depth, other objects by type.
3498  hwloc_topology_t tp = __kmp_hwloc_topology;
3499  int nS = 0, nN = 0, nL = 0, nC = 0,
3500  nT = 0; // logical index including skipped
3501  int nCr = 0, nTr = 0; // number of requested units
3502  int nPkg = 0, nCo = 0, n_new = 0, n_old = 0, nCpP = 0, nTpC = 0; // counters
3503  hwloc_obj_t hT, hC, hL, hN, hS; // hwloc objects (pointers to)
3504  int L2depth, idx;
3505 
3506  // check support of extensions ----------------------------------
3507  int numa_support = 0, tile_support = 0;
3508  if (__kmp_pu_os_idx)
3509  hT = hwloc_get_pu_obj_by_os_index(tp,
3510  __kmp_pu_os_idx[__kmp_avail_proc - 1]);
3511  else
3512  hT = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PU, __kmp_avail_proc - 1);
3513  if (hT == NULL) { // something's gone wrong
3514  KMP_WARNING(AffHWSubsetUnsupported);
3515  goto _exit;
3516  }
3517  // check NUMA node
3518  hN = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hT);
3519  hS = hwloc_get_ancestor_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hT);
3520  if (hN != NULL && hN->depth > hS->depth) {
3521  numa_support = 1; // 1 in case socket includes node(s)
3522  } else if (__kmp_hws_node.num > 0) {
3523  // don't support sockets inside NUMA node (no such HW found for testing)
3524  KMP_WARNING(AffHWSubsetUnsupported);
3525  goto _exit;
3526  }
3527  // check L2 cache, get object by depth because of multiple caches
3528  L2depth = hwloc_get_cache_type_depth(tp, 2, HWLOC_OBJ_CACHE_UNIFIED);
3529  hL = hwloc_get_ancestor_obj_by_depth(tp, L2depth, hT);
3530  if (hL != NULL &&
3531  __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC) > 1) {
3532  tile_support = 1; // no sense counting L2 if it includes a single core
3533  } else if (__kmp_hws_tile.num > 0) {
3534  if (__kmp_hws_core.num == 0) {
3535  __kmp_hws_core = __kmp_hws_tile; // replace L2 with core
3536  __kmp_hws_tile.num = 0;
3537  } else {
3538  // L2 and core are both requested, but represent the same object
3539  KMP_WARNING(AffHWSubsetInvalid);
3540  goto _exit;
3541  }
3542  }
3543  // end of check of extensions -----------------------------------
3544 
3545  // fill in unset items, validate settings -----------------------
3546  if (__kmp_hws_socket.num == 0)
3547  __kmp_hws_socket.num = nPackages; // use all available sockets
3548  if (__kmp_hws_socket.offset >= nPackages) {
3549  KMP_WARNING(AffHWSubsetManySockets);
3550  goto _exit;
3551  }
3552  if (numa_support) {
3553  hN = NULL;
3554  int NN = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE,
3555  &hN); // num nodes in socket
3556  if (__kmp_hws_node.num == 0)
3557  __kmp_hws_node.num = NN; // use all available nodes
3558  if (__kmp_hws_node.offset >= NN) {
3559  KMP_WARNING(AffHWSubsetManyNodes);
3560  goto _exit;
3561  }
3562  if (tile_support) {
3563  // get num tiles in node
3564  int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3565  if (__kmp_hws_tile.num == 0) {
3566  __kmp_hws_tile.num = NL + 1;
3567  } // use all available tiles; some nodes may have more tiles, thus +1
3568  if (__kmp_hws_tile.offset >= NL) {
3569  KMP_WARNING(AffHWSubsetManyTiles);
3570  goto _exit;
3571  }
3572  int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3573  &hC); // num cores in tile
3574  if (__kmp_hws_core.num == 0)
3575  __kmp_hws_core.num = NC; // use all available cores
3576  if (__kmp_hws_core.offset >= NC) {
3577  KMP_WARNING(AffHWSubsetManyCores);
3578  goto _exit;
3579  }
3580  } else { // tile_support
3581  int NC = __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE,
3582  &hC); // num cores in node
3583  if (__kmp_hws_core.num == 0)
3584  __kmp_hws_core.num = NC; // use all available cores
3585  if (__kmp_hws_core.offset >= NC) {
3586  KMP_WARNING(AffHWSubsetManyCores);
3587  goto _exit;
3588  }
3589  } // tile_support
3590  } else { // numa_support
3591  if (tile_support) {
3592  // get num tiles in socket
3593  int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3594  if (__kmp_hws_tile.num == 0)
3595  __kmp_hws_tile.num = NL; // use all available tiles
3596  if (__kmp_hws_tile.offset >= NL) {
3597  KMP_WARNING(AffHWSubsetManyTiles);
3598  goto _exit;
3599  }
3600  int NC = __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE,
3601  &hC); // num cores in tile
3602  if (__kmp_hws_core.num == 0)
3603  __kmp_hws_core.num = NC; // use all available cores
3604  if (__kmp_hws_core.offset >= NC) {
3605  KMP_WARNING(AffHWSubsetManyCores);
3606  goto _exit;
3607  }
3608  } else { // tile_support
3609  int NC = __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE,
3610  &hC); // num cores in socket
3611  if (__kmp_hws_core.num == 0)
3612  __kmp_hws_core.num = NC; // use all available cores
3613  if (__kmp_hws_core.offset >= NC) {
3614  KMP_WARNING(AffHWSubsetManyCores);
3615  goto _exit;
3616  }
3617  } // tile_support
3618  }
3619  if (__kmp_hws_proc.num == 0)
3620  __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all available procs
3621  if (__kmp_hws_proc.offset >= __kmp_nThreadsPerCore) {
3622  KMP_WARNING(AffHWSubsetManyProcs);
3623  goto _exit;
3624  }
3625  // end of validation --------------------------------------------
3626 
3627  if (pAddr) // pAddr is NULL in case of affinity_none
3628  newAddr = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) *
3629  __kmp_avail_proc); // max size
3630  // main loop to form HW subset ----------------------------------
3631  hS = NULL;
3632  int NP = hwloc_get_nbobjs_by_type(tp, HWLOC_OBJ_PACKAGE);
3633  for (int s = 0; s < NP; ++s) {
3634  // Check Socket -----------------------------------------------
3635  hS = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PACKAGE, hS);
3636  if (!__kmp_hwloc_obj_has_PUs(tp, hS))
3637  continue; // skip socket if all PUs are out of fullMask
3638  ++nS; // only count objects that have PUs in the affinity mask
3639  if (nS <= __kmp_hws_socket.offset ||
3640  nS > __kmp_hws_socket.num + __kmp_hws_socket.offset) {
3641  n_old += __kmp_hwloc_skip_PUs_obj(tp, hS); // skip socket
3642  continue; // move to next socket
3643  }
3644  nCr = 0; // count number of cores per socket
3645  // socket requested, go down the topology tree
3646  // check 4 cases: (+NUMA+Tile), (+NUMA-Tile), (-NUMA+Tile), (-NUMA-Tile)
3647  if (numa_support) {
3648  nN = 0;
3649  hN = NULL;
3650  // num nodes in current socket
3651  int NN =
3652  __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_NUMANODE, &hN);
3653  for (int n = 0; n < NN; ++n) {
3654  // Check NUMA Node ----------------------------------------
3655  if (!__kmp_hwloc_obj_has_PUs(tp, hN)) {
3656  hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3657  continue; // skip node if all PUs are out of fullMask
3658  }
3659  ++nN;
3660  if (nN <= __kmp_hws_node.offset ||
3661  nN > __kmp_hws_node.num + __kmp_hws_node.offset) {
3662  // skip node as not requested
3663  n_old += __kmp_hwloc_skip_PUs_obj(tp, hN); // skip node
3664  hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3665  continue; // move to next node
3666  }
3667  // node requested, go down the topology tree
3668  if (tile_support) {
3669  nL = 0;
3670  hL = NULL;
3671  int NL = __kmp_hwloc_count_children_by_depth(tp, hN, L2depth, &hL);
3672  for (int l = 0; l < NL; ++l) {
3673  // Check L2 (tile) ------------------------------------
3674  if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3675  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3676  continue; // skip tile if all PUs are out of fullMask
3677  }
3678  ++nL;
3679  if (nL <= __kmp_hws_tile.offset ||
3680  nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3681  // skip tile as not requested
3682  n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3683  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3684  continue; // move to next tile
3685  }
3686  // tile requested, go down the topology tree
3687  nC = 0;
3688  hC = NULL;
3689  // num cores in current tile
3690  int NC = __kmp_hwloc_count_children_by_type(tp, hL,
3691  HWLOC_OBJ_CORE, &hC);
3692  for (int c = 0; c < NC; ++c) {
3693  // Check Core ---------------------------------------
3694  if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3695  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3696  continue; // skip core if all PUs are out of fullMask
3697  }
3698  ++nC;
3699  if (nC <= __kmp_hws_core.offset ||
3700  nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3701  // skip core as not requested
3702  n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3703  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3704  continue; // move to next core
3705  }
3706  // core requested, go down to PUs
3707  nT = 0;
3708  nTr = 0;
3709  hT = NULL;
3710  // num procs in current core
3711  int NT = __kmp_hwloc_count_children_by_type(tp, hC,
3712  HWLOC_OBJ_PU, &hT);
3713  for (int t = 0; t < NT; ++t) {
3714  // Check PU ---------------------------------------
3715  idx = hT->os_index;
3716  if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3717  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3718  continue; // skip PU if not in fullMask
3719  }
3720  ++nT;
3721  if (nT <= __kmp_hws_proc.offset ||
3722  nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3723  // skip PU
3724  KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3725  ++n_old;
3726  KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3727  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3728  continue; // move to next PU
3729  }
3730  ++nTr;
3731  if (pAddr) // collect requested thread's data
3732  newAddr[n_new] = (*pAddr)[n_old];
3733  ++n_new;
3734  ++n_old;
3735  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3736  } // threads loop
3737  if (nTr > 0) {
3738  ++nCr; // num cores per socket
3739  ++nCo; // total num cores
3740  if (nTr > nTpC)
3741  nTpC = nTr; // calc max threads per core
3742  }
3743  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3744  } // cores loop
3745  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3746  } // tiles loop
3747  } else { // tile_support
3748  // no tiles, check cores
3749  nC = 0;
3750  hC = NULL;
3751  // num cores in current node
3752  int NC =
3753  __kmp_hwloc_count_children_by_type(tp, hN, HWLOC_OBJ_CORE, &hC);
3754  for (int c = 0; c < NC; ++c) {
3755  // Check Core ---------------------------------------
3756  if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3757  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3758  continue; // skip core if all PUs are out of fullMask
3759  }
3760  ++nC;
3761  if (nC <= __kmp_hws_core.offset ||
3762  nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3763  // skip core as not requested
3764  n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3765  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3766  continue; // move to next core
3767  }
3768  // core requested, go down to PUs
3769  nT = 0;
3770  nTr = 0;
3771  hT = NULL;
3772  int NT =
3773  __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3774  for (int t = 0; t < NT; ++t) {
3775  // Check PU ---------------------------------------
3776  idx = hT->os_index;
3777  if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3778  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3779  continue; // skip PU if not in fullMask
3780  }
3781  ++nT;
3782  if (nT <= __kmp_hws_proc.offset ||
3783  nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3784  // skip PU
3785  KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3786  ++n_old;
3787  KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3788  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3789  continue; // move to next PU
3790  }
3791  ++nTr;
3792  if (pAddr) // collect requested thread's data
3793  newAddr[n_new] = (*pAddr)[n_old];
3794  ++n_new;
3795  ++n_old;
3796  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3797  } // threads loop
3798  if (nTr > 0) {
3799  ++nCr; // num cores per socket
3800  ++nCo; // total num cores
3801  if (nTr > nTpC)
3802  nTpC = nTr; // calc max threads per core
3803  }
3804  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3805  } // cores loop
3806  } // tiles support
3807  hN = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_NUMANODE, hN);
3808  } // nodes loop
3809  } else { // numa_support
3810  // no NUMA support
3811  if (tile_support) {
3812  nL = 0;
3813  hL = NULL;
3814  // num tiles in current socket
3815  int NL = __kmp_hwloc_count_children_by_depth(tp, hS, L2depth, &hL);
3816  for (int l = 0; l < NL; ++l) {
3817  // Check L2 (tile) ------------------------------------
3818  if (!__kmp_hwloc_obj_has_PUs(tp, hL)) {
3819  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3820  continue; // skip tile if all PUs are out of fullMask
3821  }
3822  ++nL;
3823  if (nL <= __kmp_hws_tile.offset ||
3824  nL > __kmp_hws_tile.num + __kmp_hws_tile.offset) {
3825  // skip tile as not requested
3826  n_old += __kmp_hwloc_skip_PUs_obj(tp, hL); // skip tile
3827  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3828  continue; // move to next tile
3829  }
3830  // tile requested, go down the topology tree
3831  nC = 0;
3832  hC = NULL;
3833  // num cores per tile
3834  int NC =
3835  __kmp_hwloc_count_children_by_type(tp, hL, HWLOC_OBJ_CORE, &hC);
3836  for (int c = 0; c < NC; ++c) {
3837  // Check Core ---------------------------------------
3838  if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3839  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3840  continue; // skip core if all PUs are out of fullMask
3841  }
3842  ++nC;
3843  if (nC <= __kmp_hws_core.offset ||
3844  nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3845  // skip core as not requested
3846  n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3847  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3848  continue; // move to next core
3849  }
3850  // core requested, go down to PUs
3851  nT = 0;
3852  nTr = 0;
3853  hT = NULL;
3854  // num procs per core
3855  int NT =
3856  __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3857  for (int t = 0; t < NT; ++t) {
3858  // Check PU ---------------------------------------
3859  idx = hT->os_index;
3860  if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3861  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3862  continue; // skip PU if not in fullMask
3863  }
3864  ++nT;
3865  if (nT <= __kmp_hws_proc.offset ||
3866  nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3867  // skip PU
3868  KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3869  ++n_old;
3870  KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3871  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3872  continue; // move to next PU
3873  }
3874  ++nTr;
3875  if (pAddr) // collect requested thread's data
3876  newAddr[n_new] = (*pAddr)[n_old];
3877  ++n_new;
3878  ++n_old;
3879  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3880  } // threads loop
3881  if (nTr > 0) {
3882  ++nCr; // num cores per socket
3883  ++nCo; // total num cores
3884  if (nTr > nTpC)
3885  nTpC = nTr; // calc max threads per core
3886  }
3887  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3888  } // cores loop
3889  hL = hwloc_get_next_obj_by_depth(tp, L2depth, hL);
3890  } // tiles loop
3891  } else { // tile_support
3892  // no tiles, check cores
3893  nC = 0;
3894  hC = NULL;
3895  // num cores in socket
3896  int NC =
3897  __kmp_hwloc_count_children_by_type(tp, hS, HWLOC_OBJ_CORE, &hC);
3898  for (int c = 0; c < NC; ++c) {
3899  // Check Core -------------------------------------------
3900  if (!__kmp_hwloc_obj_has_PUs(tp, hC)) {
3901  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3902  continue; // skip core if all PUs are out of fullMask
3903  }
3904  ++nC;
3905  if (nC <= __kmp_hws_core.offset ||
3906  nC > __kmp_hws_core.num + __kmp_hws_core.offset) {
3907  // skip core as not requested
3908  n_old += __kmp_hwloc_skip_PUs_obj(tp, hC); // skip core
3909  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3910  continue; // move to next core
3911  }
3912  // core requested, go down to PUs
3913  nT = 0;
3914  nTr = 0;
3915  hT = NULL;
3916  // num procs per core
3917  int NT =
3918  __kmp_hwloc_count_children_by_type(tp, hC, HWLOC_OBJ_PU, &hT);
3919  for (int t = 0; t < NT; ++t) {
3920  // Check PU ---------------------------------------
3921  idx = hT->os_index;
3922  if (!KMP_CPU_ISSET(idx, __kmp_affin_fullMask)) {
3923  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3924  continue; // skip PU if not in fullMask
3925  }
3926  ++nT;
3927  if (nT <= __kmp_hws_proc.offset ||
3928  nT > __kmp_hws_proc.num + __kmp_hws_proc.offset) {
3929  // skip PU
3930  KMP_CPU_CLR(idx, __kmp_affin_fullMask);
3931  ++n_old;
3932  KC_TRACE(200, ("KMP_HW_SUBSET: skipped proc %d\n", idx));
3933  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3934  continue; // move to next PU
3935  }
3936  ++nTr;
3937  if (pAddr) // collect requested thread's data
3938  newAddr[n_new] = (*pAddr)[n_old];
3939  ++n_new;
3940  ++n_old;
3941  hT = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, hT);
3942  } // threads loop
3943  if (nTr > 0) {
3944  ++nCr; // num cores per socket
3945  ++nCo; // total num cores
3946  if (nTr > nTpC)
3947  nTpC = nTr; // calc max threads per core
3948  }
3949  hC = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_CORE, hC);
3950  } // cores loop
3951  } // tiles support
3952  } // numa_support
3953  if (nCr > 0) { // found cores?
3954  ++nPkg; // num sockets
3955  if (nCr > nCpP)
3956  nCpP = nCr; // calc max cores per socket
3957  }
3958  } // sockets loop
3959 
3960  // check the subset is valid
3961  KMP_DEBUG_ASSERT(n_old == __kmp_avail_proc);
3962  KMP_DEBUG_ASSERT(nPkg > 0);
3963  KMP_DEBUG_ASSERT(nCpP > 0);
3964  KMP_DEBUG_ASSERT(nTpC > 0);
3965  KMP_DEBUG_ASSERT(nCo > 0);
3966  KMP_DEBUG_ASSERT(nPkg <= nPackages);
3967  KMP_DEBUG_ASSERT(nCpP <= nCoresPerPkg);
3968  KMP_DEBUG_ASSERT(nTpC <= __kmp_nThreadsPerCore);
3969  KMP_DEBUG_ASSERT(nCo <= __kmp_ncores);
3970 
3971  nPackages = nPkg; // correct num sockets
3972  nCoresPerPkg = nCpP; // correct num cores per socket
3973  __kmp_nThreadsPerCore = nTpC; // correct num threads per core
3974  __kmp_avail_proc = n_new; // correct num procs
3975  __kmp_ncores = nCo; // correct num cores
3976  // hwloc topology method end
3977  } else
3978 #endif // KMP_USE_HWLOC
3979  {
3980  int n_old = 0, n_new = 0, proc_num = 0;
3981  if (__kmp_hws_node.num > 0 || __kmp_hws_tile.num > 0) {
3982  KMP_WARNING(AffHWSubsetNoHWLOC);
3983  goto _exit;
3984  }
3985  if (__kmp_hws_socket.num == 0)
3986  __kmp_hws_socket.num = nPackages; // use all available sockets
3987  if (__kmp_hws_die.num == 0)
3988  __kmp_hws_die.num = nDiesPerPkg; // use all available dies
3989  if (__kmp_hws_core.num == 0)
3990  __kmp_hws_core.num = nCoresPerPkg; // use all available cores
3991  if (__kmp_hws_proc.num == 0 || __kmp_hws_proc.num > __kmp_nThreadsPerCore)
3992  __kmp_hws_proc.num = __kmp_nThreadsPerCore; // use all HW contexts
3993  if (!__kmp_affinity_uniform_topology()) {
3994  KMP_WARNING(AffHWSubsetNonUniform);
3995  goto _exit; // don't support non-uniform topology
3996  }
3997  if (depth > 4) {
3998  KMP_WARNING(AffHWSubsetNonThreeLevel);
3999  goto _exit; // don't support topologies with more than 4 levels
4000  }
4001  if (__kmp_hws_socket.offset + __kmp_hws_socket.num > nPackages) {
4002  KMP_WARNING(AffHWSubsetManySockets);
4003  goto _exit;
4004  }
4005  if (depth == 4 && __kmp_hws_die.offset + __kmp_hws_die.num > nDiesPerPkg) {
4006  KMP_WARNING(AffHWSubsetManyDies);
4007  goto _exit;
4008  }
4009  if (__kmp_hws_core.offset + __kmp_hws_core.num > nCoresPerPkg) {
4010  KMP_WARNING(AffHWSubsetManyCores);
4011  goto _exit;
4012  }
4013  // Form the requested subset
4014  if (pAddr) // pAddr is NULL in case of affinity_none
4015  newAddr = (AddrUnsPair *)__kmp_allocate(
4016  sizeof(AddrUnsPair) * __kmp_hws_socket.num * __kmp_hws_die.num *
4017  __kmp_hws_core.num * __kmp_hws_proc.num);
4018  for (int i = 0; i < nPackages; ++i) {
4019  if (i < __kmp_hws_socket.offset ||
4020  i >= __kmp_hws_socket.offset + __kmp_hws_socket.num) {
4021  // skip not-requested socket
4022  n_old += nDiesPerPkg * nCoresPerPkg * __kmp_nThreadsPerCore;
4023  if (__kmp_pu_os_idx != NULL) {
4024  // walk through skipped socket
4025  for (int l = 0; l < nDiesPerPkg; ++l) {
4026  for (int j = 0; j < nCoresPerPkg; ++j) {
4027  for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4028  KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
4029  ++proc_num;
4030  }
4031  }
4032  }
4033  }
4034  } else {
4035  // walk through requested socket
4036  for (int l = 0; l < nDiesPerPkg; ++l) {
4037  // skip unwanted die
4038  if (l < __kmp_hws_die.offset ||
4039  l >= __kmp_hws_die.offset + __kmp_hws_die.num) {
4040  n_old += nCoresPerPkg;
4041  if (__kmp_pu_os_idx != NULL) {
4042  for (int k = 0; k < nCoresPerPkg; ++k) {
4043  KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
4044  ++proc_num;
4045  }
4046  }
4047  } else {
4048  for (int j = 0; j < nCoresPerPkg; ++j) {
4049  if (j < __kmp_hws_core.offset ||
4050  j >= __kmp_hws_core.offset +
4051  __kmp_hws_core.num) { // skip not-requested core
4052  n_old += __kmp_nThreadsPerCore;
4053  if (__kmp_pu_os_idx != NULL) {
4054  for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4055  KMP_CPU_CLR(__kmp_pu_os_idx[proc_num],
4056  __kmp_affin_fullMask);
4057  ++proc_num;
4058  }
4059  }
4060  } else {
4061  // walk through requested core
4062  for (int k = 0; k < __kmp_nThreadsPerCore; ++k) {
4063  if (k < __kmp_hws_proc.num) {
4064  if (pAddr) // collect requested thread's data
4065  newAddr[n_new] = (*pAddr)[n_old];
4066  n_new++;
4067  } else {
4068  if (__kmp_pu_os_idx != NULL)
4069  KMP_CPU_CLR(__kmp_pu_os_idx[proc_num],
4070  __kmp_affin_fullMask);
4071  }
4072  n_old++;
4073  ++proc_num;
4074  }
4075  }
4076  }
4077  }
4078  }
4079  }
4080  }
4081  KMP_DEBUG_ASSERT(n_old ==
4082  nPackages * nDiesPerPkg * nCoresPerPkg *
4083  __kmp_nThreadsPerCore);
4084  KMP_DEBUG_ASSERT(n_new ==
4085  __kmp_hws_socket.num * __kmp_hws_die.num *
4086  __kmp_hws_core.num * __kmp_hws_proc.num);
4087  nPackages = __kmp_hws_socket.num; // correct nPackages
4088  nCoresPerPkg = __kmp_hws_core.num; // correct nCoresPerPkg
4089  nDiesPerPkg = __kmp_hws_die.num; // correct nDiesPerPkg
4090  __kmp_nThreadsPerCore = __kmp_hws_proc.num; // correct __kmp_nThreadsPerCore
4091  __kmp_avail_proc = n_new; // correct avail_proc
4092  __kmp_ncores =
4093  nPackages * nDiesPerPkg * __kmp_hws_core.num; // correct ncores
4094  } // non-hwloc topology method
4095  if (pAddr) {
4096  __kmp_free(*pAddr);
4097  *pAddr = newAddr; // replace old topology with new one
4098  }
4099  if (__kmp_affinity_verbose) {
4100  KMP_INFORM(AvailableOSProc, "KMP_HW_SUBSET", __kmp_avail_proc);
4101  kmp_str_buf_t buf;
4102  __kmp_str_buf_init(&buf);
4103  __kmp_str_buf_print(&buf, "%d", nPackages);
4104  KMP_INFORM(TopologyExtra, "KMP_HW_SUBSET", buf.str, nCoresPerPkg,
4105  __kmp_nThreadsPerCore, __kmp_ncores);
4106  __kmp_str_buf_free(&buf);
4107  }
4108 _exit:
4109  if (__kmp_pu_os_idx != NULL) {
4110  __kmp_free(__kmp_pu_os_idx);
4111  __kmp_pu_os_idx = NULL;
4112  }
4113 }
4114 
4115 // This function figures out the deepest level at which there is at least one
4116 // cluster/core with more than one processing unit bound to it.
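// For example (illustrative): with a socket/core/thread topology where each
// core carries 2 hardware threads, bottom_level == 2 and the thread labels at
// level 2 take the value 1 for the second thread of each core, so the
// function returns 1, the core level.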
4117 static int __kmp_affinity_find_core_level(const AddrUnsPair *address2os,
4118  int nprocs, int bottom_level) {
4119  int core_level = 0;
4120 
4121  for (int i = 0; i < nprocs; i++) {
4122  for (int j = bottom_level; j > 0; j--) {
4123  if (address2os[i].first.labels[j] > 0) {
4124  if (core_level < (j - 1)) {
4125  core_level = j - 1;
4126  }
4127  }
4128  }
4129  }
4130  return core_level;
4131 }
4132 
4133 // This function counts the number of clusters/cores at a given level.
4134 static int __kmp_affinity_compute_ncores(const AddrUnsPair *address2os,
4135  int nprocs, int bottom_level,
4136  int core_level) {
4137  int ncores = 0;
4138  int i, j;
4139 
4140  j = bottom_level;
4141  for (i = 0; i < nprocs; i++) {
4142  for (j = bottom_level; j > core_level; j--) {
4143  if ((i + 1) < nprocs) {
4144  if (address2os[i + 1].first.labels[j] > 0) {
4145  break;
4146  }
4147  }
4148  }
4149  if (j == core_level) {
4150  ncores++;
4151  }
4152  }
4153  if (j > core_level) {
4154  // In case of ( nprocs < __kmp_avail_proc ) we may end up too deep and miss one
4155  // core. This may occur when called from __kmp_affinity_find_core().
4156  ncores++;
4157  }
4158  return ncores;
4159 }
4160 
4161 // This function finds to which cluster/core a given processing unit is bound.
4162 static int __kmp_affinity_find_core(const AddrUnsPair *address2os, int proc,
4163  int bottom_level, int core_level) {
4164  return __kmp_affinity_compute_ncores(address2os, proc + 1, bottom_level,
4165  core_level) -
4166  1;
4167 }
4168 
4169 // This function finds the maximal number of processing units bound to a
4170 // cluster/core at a given level.
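// For example (illustrative): with core_level == 1 and bottom_level == 2,
// labels[2] is the zero-based thread index within a core, so the result is
// the largest such index plus one, i.e. the maximum number of hardware
// threads found on any single core.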
4171 static int __kmp_affinity_max_proc_per_core(const AddrUnsPair *address2os,
4172  int nprocs, int bottom_level,
4173  int core_level) {
4174  int maxprocpercore = 0;
4175 
4176  if (core_level < bottom_level) {
4177  for (int i = 0; i < nprocs; i++) {
4178  int percore = address2os[i].first.labels[core_level + 1] + 1;
4179 
4180  if (percore > maxprocpercore) {
4181  maxprocpercore = percore;
4182  }
4183  }
4184  } else {
4185  maxprocpercore = 1;
4186  }
4187  return maxprocpercore;
4188 }
4189 
4190 static AddrUnsPair *address2os = NULL;
4191 static int *procarr = NULL;
4192 static int __kmp_aff_depth = 0;
4193 
4194 #if KMP_USE_HIER_SCHED
4195 #define KMP_EXIT_AFF_NONE \
4196  KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4197  KMP_ASSERT(address2os == NULL); \
4198  __kmp_apply_thread_places(NULL, 0); \
4199  __kmp_create_affinity_none_places(); \
4200  __kmp_dispatch_set_hierarchy_values(); \
4201  return;
4202 #else
4203 #define KMP_EXIT_AFF_NONE \
4204  KMP_ASSERT(__kmp_affinity_type == affinity_none); \
4205  KMP_ASSERT(address2os == NULL); \
4206  __kmp_apply_thread_places(NULL, 0); \
4207  __kmp_create_affinity_none_places(); \
4208  return;
4209 #endif
4210 
4211 // Create a one-element mask array (set of places) which contains only the
4212 // initial process's affinity mask.
4213 static void __kmp_create_affinity_none_places() {
4214  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4215  KMP_ASSERT(__kmp_affinity_type == affinity_none);
4216  __kmp_affinity_num_masks = 1;
4217  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4218  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, 0);
4219  KMP_CPU_COPY(dest, __kmp_affin_fullMask);
4220 }
4221 
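// Comparator used to sort address2os by a permutation of the topology levels
// (added comment; behavior inferred from the code below): the innermost
// __kmp_affinity_compact levels are compared first, from the deepest level
// outward, and the remaining outer levels are then compared from the top of
// the tree downward.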
4222 static int __kmp_affinity_cmp_Address_child_num(const void *a, const void *b) {
4223  const Address *aa = &(((const AddrUnsPair *)a)->first);
4224  const Address *bb = &(((const AddrUnsPair *)b)->first);
4225  unsigned depth = aa->depth;
4226  unsigned i;
4227  KMP_DEBUG_ASSERT(depth == bb->depth);
4228  KMP_DEBUG_ASSERT((unsigned)__kmp_affinity_compact <= depth);
4229  KMP_DEBUG_ASSERT(__kmp_affinity_compact >= 0);
4230  for (i = 0; i < (unsigned)__kmp_affinity_compact; i++) {
4231  int j = depth - i - 1;
4232  if (aa->childNums[j] < bb->childNums[j])
4233  return -1;
4234  if (aa->childNums[j] > bb->childNums[j])
4235  return 1;
4236  }
4237  for (; i < depth; i++) {
4238  int j = i - __kmp_affinity_compact;
4239  if (aa->childNums[j] < bb->childNums[j])
4240  return -1;
4241  if (aa->childNums[j] > bb->childNums[j])
4242  return 1;
4243  }
4244  return 0;
4245 }
4246 
4247 static void __kmp_aux_affinity_initialize(void) {
4248  if (__kmp_affinity_masks != NULL) {
4249  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4250  return;
4251  }
4252 
4253  // Create the "full" mask - this defines all of the processors that we
4254  // consider to be in the machine model. If respect is set, then it is the
4255  // initialization thread's affinity mask. Otherwise, it is all processors that
4256  // we know about on the machine.
4257  if (__kmp_affin_fullMask == NULL) {
4258  KMP_CPU_ALLOC(__kmp_affin_fullMask);
4259  }
4260  if (KMP_AFFINITY_CAPABLE()) {
4261  __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);
4262  if (__kmp_affinity_respect_mask) {
4263  // Count the number of available processors.
4264  unsigned i;
4265  __kmp_avail_proc = 0;
4266  KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
4267  if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
4268  continue;
4269  }
4270  __kmp_avail_proc++;
4271  }
4272  if (__kmp_avail_proc > __kmp_xproc) {
4273  if (__kmp_affinity_verbose ||
4274  (__kmp_affinity_warnings &&
4275  (__kmp_affinity_type != affinity_none))) {
4276  KMP_WARNING(ErrorInitializeAffinity);
4277  }
4278  __kmp_affinity_type = affinity_none;
4279  KMP_AFFINITY_DISABLE();
4280  return;
4281  }
4282 
4283  if (__kmp_affinity_verbose) {
4284  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4285  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4286  __kmp_affin_fullMask);
4287  KMP_INFORM(InitOSProcSetRespect, "KMP_AFFINITY", buf);
4288  }
4289  } else {
4290  if (__kmp_affinity_verbose) {
4291  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4292  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4293  __kmp_affin_fullMask);
4294  KMP_INFORM(InitOSProcSetNotRespect, "KMP_AFFINITY", buf);
4295  }
4296  __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4297  __kmp_avail_proc = __kmp_xproc;
4298 #if KMP_OS_WINDOWS
4299  // Set the process affinity mask since threads' affinity
4300  // masks must be a subset of the process mask in Windows* OS
4301  __kmp_affin_fullMask->set_process_affinity(true);
4302 #endif
4303  }
4304  }
4305 
4306  if (__kmp_affinity_gran == affinity_gran_tile &&
4307  // check if user's request is valid
4308  __kmp_affinity_dispatch->get_api_type() == KMPAffinity::NATIVE_OS) {
4309  KMP_WARNING(AffTilesNoHWLOC, "KMP_AFFINITY");
4310  __kmp_affinity_gran = affinity_gran_package;
4311  }
4312 
4313  int depth = -1;
4314  kmp_i18n_id_t msg_id = kmp_i18n_null;
4315 
4316  // For backward compatibility, setting KMP_CPUINFO_FILE =>
4317  // KMP_TOPOLOGY_METHOD=cpuinfo
4318  if ((__kmp_cpuinfo_file != NULL) &&
4319  (__kmp_affinity_top_method == affinity_top_method_all)) {
4320  __kmp_affinity_top_method = affinity_top_method_cpuinfo;
4321  }
4322 
4323  if (__kmp_affinity_top_method == affinity_top_method_all) {
4324  // In the default code path, errors are not fatal - we just try using
4325  // another method. We only emit a warning message if affinity is on, or the
4326  // verbose flag is set, and the nowarnings flag was not set.
4327  const char *file_name = NULL;
4328  int line = 0;
4329 #if KMP_USE_HWLOC
4330  if (depth < 0 &&
4331  __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) {
4332  if (__kmp_affinity_verbose) {
4333  KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4334  }
4335  if (!__kmp_hwloc_error) {
4336  depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4337  if (depth == 0) {
4338  KMP_EXIT_AFF_NONE;
4339  } else if (depth < 0 && __kmp_affinity_verbose) {
4340  KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4341  }
4342  } else if (__kmp_affinity_verbose) {
4343  KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
4344  }
4345  }
4346 #endif
4347 
4348 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4349 
4350  if (depth < 0) {
4351  if (__kmp_affinity_verbose) {
4352  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4353  }
4354 
4355  file_name = NULL;
4356  depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4357  if (depth == 0) {
4358  KMP_EXIT_AFF_NONE;
4359  }
4360 
4361  if (depth < 0) {
4362  if (__kmp_affinity_verbose) {
4363  if (msg_id != kmp_i18n_null) {
4364  KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY",
4365  __kmp_i18n_catgets(msg_id),
4366  KMP_I18N_STR(DecodingLegacyAPIC));
4367  } else {
4368  KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
4369  KMP_I18N_STR(DecodingLegacyAPIC));
4370  }
4371  }
4372 
4373  file_name = NULL;
4374  depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4375  if (depth == 0) {
4376  KMP_EXIT_AFF_NONE;
4377  }
4378  }
4379  }
4380 
4381 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4382 
4383 #if KMP_OS_LINUX
4384 
4385  if (depth < 0) {
4386  if (__kmp_affinity_verbose) {
4387  if (msg_id != kmp_i18n_null) {
4388  KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
4389  __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
4390  } else {
4391  KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
4392  }
4393  }
4394 
4395  kmp_safe_raii_file_t f("/proc/cpuinfo", "r");
4396  depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4397  if (depth == 0) {
4398  KMP_EXIT_AFF_NONE;
4399  }
4400  }
4401 
4402 #endif /* KMP_OS_LINUX */
4403 
4404 #if KMP_GROUP_AFFINITY
4405 
4406  if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
4407  if (__kmp_affinity_verbose) {
4408  KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4409  }
4410 
4411  depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4412  KMP_ASSERT(depth != 0);
4413  }
4414 
4415 #endif /* KMP_GROUP_AFFINITY */
4416 
4417  if (depth < 0) {
4418  if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
4419  if (file_name == NULL) {
4420  KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
4421  } else if (line == 0) {
4422  KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
4423  } else {
4424  KMP_INFORM(UsingFlatOSFileLine, file_name, line,
4425  __kmp_i18n_catgets(msg_id));
4426  }
4427  }
4428  // FIXME - print msg if msg_id = kmp_i18n_null ???
4429 
4430  file_name = "";
4431  depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4432  if (depth == 0) {
4433  KMP_EXIT_AFF_NONE;
4434  }
4435  KMP_ASSERT(depth > 0);
4436  KMP_ASSERT(address2os != NULL);
4437  }
4438  }
4439 
4440 #if KMP_USE_HWLOC
4441  else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
4442  KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC);
4443  if (__kmp_affinity_verbose) {
4444  KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
4445  }
4446  depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
4447  if (depth == 0) {
4448  KMP_EXIT_AFF_NONE;
4449  }
4450  }
4451 #endif // KMP_USE_HWLOC
4452 
4453 // If the user has specified that a particular topology discovery method is to be
4454 // used, then we abort if that method fails. The exception is group affinity,
4455 // which might have been implicitly set.
4456 
4457 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4458 
4459  else if (__kmp_affinity_top_method == affinity_top_method_x2apicid ||
4460  __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) {
4461  if (__kmp_affinity_verbose) {
4462  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
4463  }
4464 
4465  depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
4466  if (depth == 0) {
4467  KMP_EXIT_AFF_NONE;
4468  }
4469  if (depth < 0) {
4470  KMP_ASSERT(msg_id != kmp_i18n_null);
4471  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4472  }
4473  } else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
4474  if (__kmp_affinity_verbose) {
4475  KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
4476  }
4477 
4478  depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
4479  if (depth == 0) {
4480  KMP_EXIT_AFF_NONE;
4481  }
4482  if (depth < 0) {
4483  KMP_ASSERT(msg_id != kmp_i18n_null);
4484  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4485  }
4486  }
4487 
4488 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4489 
4490  else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
4491  const char *filename;
4492  const char *env_var = nullptr;
4493  if (__kmp_cpuinfo_file != NULL) {
4494  filename = __kmp_cpuinfo_file;
4495  env_var = "KMP_CPUINFO_FILE";
4496  } else {
4497  filename = "/proc/cpuinfo";
4498  }
4499 
4500  if (__kmp_affinity_verbose) {
4501  KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
4502  }
4503 
4504  kmp_safe_raii_file_t f(filename, "r", env_var);
4505  int line = 0;
4506  depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
4507  if (depth < 0) {
4508  KMP_ASSERT(msg_id != kmp_i18n_null);
4509  if (line > 0) {
4510  KMP_FATAL(FileLineMsgExiting, filename, line,
4511  __kmp_i18n_catgets(msg_id));
4512  } else {
4513  KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
4514  }
4515  }
4516  if (__kmp_affinity_type == affinity_none) {
4517  KMP_ASSERT(depth == 0);
4518  KMP_EXIT_AFF_NONE;
4519  }
4520  }
4521 
4522 #if KMP_GROUP_AFFINITY
4523 
4524  else if (__kmp_affinity_top_method == affinity_top_method_group) {
4525  if (__kmp_affinity_verbose) {
4526  KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
4527  }
4528 
4529  depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
4530  KMP_ASSERT(depth != 0);
4531  if (depth < 0) {
4532  KMP_ASSERT(msg_id != kmp_i18n_null);
4533  KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
4534  }
4535  }
4536 
4537 #endif /* KMP_GROUP_AFFINITY */
4538 
4539  else if (__kmp_affinity_top_method == affinity_top_method_flat) {
4540  if (__kmp_affinity_verbose) {
4541  KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
4542  }
4543 
4544  depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
4545  if (depth == 0) {
4546  KMP_EXIT_AFF_NONE;
4547  }
4548  // should not fail
4549  KMP_ASSERT(depth > 0);
4550  KMP_ASSERT(address2os != NULL);
4551  }
4552 
4553 #if KMP_USE_HIER_SCHED
4554  __kmp_dispatch_set_hierarchy_values();
4555 #endif
4556 
4557  if (address2os == NULL) {
4558  if (KMP_AFFINITY_CAPABLE() &&
4559  (__kmp_affinity_verbose ||
4560  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none)))) {
4561  KMP_WARNING(ErrorInitializeAffinity);
4562  }
4563  __kmp_affinity_type = affinity_none;
4564  __kmp_create_affinity_none_places();
4565  KMP_AFFINITY_DISABLE();
4566  return;
4567  }
4568 
4569  if (__kmp_affinity_gran == affinity_gran_tile
4570 #if KMP_USE_HWLOC
4571  && __kmp_tile_depth == 0
4572 #endif
4573  ) {
4574  // tiles requested but not detected; warn the user about this
4575  KMP_WARNING(AffTilesNoTiles, "KMP_AFFINITY");
4576  }
4577 
4578  __kmp_apply_thread_places(&address2os, depth);
4579 
4580  // Create the table of masks, indexed by thread Id.
4581  unsigned maxIndex;
4582  unsigned numUnique;
4583  kmp_affin_mask_t *osId2Mask =
4584  __kmp_create_masks(&maxIndex, &numUnique, address2os, __kmp_avail_proc);
4585  if (__kmp_affinity_gran_levels == 0) {
4586  KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
4587  }
4588 
4589  // Set the childNums vector in all Address objects. This must be done before
4590  // we can sort using __kmp_affinity_cmp_Address_child_num(), which takes into
4591  // account the setting of __kmp_affinity_compact.
4592  __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);
4593 
4594  switch (__kmp_affinity_type) {
4595 
4596  case affinity_explicit:
4597  KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
4598  if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
4599  __kmp_affinity_process_proclist(
4600  &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4601  __kmp_affinity_proclist, osId2Mask, maxIndex);
4602  } else {
4603  __kmp_affinity_process_placelist(
4604  &__kmp_affinity_masks, &__kmp_affinity_num_masks,
4605  __kmp_affinity_proclist, osId2Mask, maxIndex);
4606  }
4607  if (__kmp_affinity_num_masks == 0) {
4608  if (__kmp_affinity_verbose ||
4609  (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none))) {
4610  KMP_WARNING(AffNoValidProcID);
4611  }
4612  __kmp_affinity_type = affinity_none;
4613  __kmp_create_affinity_none_places();
4614  return;
4615  }
4616  break;
4617 
4618  // The other affinity types rely on sorting the Addresses according to some
4619  // permutation of the machine topology tree. Set __kmp_affinity_compact and
4620  // __kmp_affinity_offset appropriately, then jump to a common code fragment
4621  // to do the sort and create the array of affinity masks.
4622 
4623  case affinity_logical:
4624  __kmp_affinity_compact = 0;
4625  if (__kmp_affinity_offset) {
4626  __kmp_affinity_offset =
4627  __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4628  }
4629  goto sortAddresses;
4630 
4631  case affinity_physical:
4632  if (__kmp_nThreadsPerCore > 1) {
4633  __kmp_affinity_compact = 1;
4634  if (__kmp_affinity_compact >= depth) {
4635  __kmp_affinity_compact = 0;
4636  }
4637  } else {
4638  __kmp_affinity_compact = 0;
4639  }
4640  if (__kmp_affinity_offset) {
4641  __kmp_affinity_offset =
4642  __kmp_nThreadsPerCore * __kmp_affinity_offset % __kmp_avail_proc;
4643  }
4644  goto sortAddresses;
4645 
4646  case affinity_scatter:
4647  if (__kmp_affinity_compact >= depth) {
4648  __kmp_affinity_compact = 0;
4649  } else {
4650  __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
4651  }
4652  goto sortAddresses;
4653 
4654  case affinity_compact:
4655  if (__kmp_affinity_compact >= depth) {
4656  __kmp_affinity_compact = depth - 1;
4657  }
4658  goto sortAddresses;
4659 
4660  case affinity_balanced:
4661  if (depth <= 1) {
4662  if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4663  KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4664  }
4665  __kmp_affinity_type = affinity_none;
4666  __kmp_create_affinity_none_places();
4667  return;
4668  } else if (!__kmp_affinity_uniform_topology()) {
4669  // Save the depth for further usage
4670  __kmp_aff_depth = depth;
4671 
4672  int core_level = __kmp_affinity_find_core_level(
4673  address2os, __kmp_avail_proc, depth - 1);
4674  int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
4675  depth - 1, core_level);
4676  int maxprocpercore = __kmp_affinity_max_proc_per_core(
4677  address2os, __kmp_avail_proc, depth - 1, core_level);
4678 
4679  int nproc = ncores * maxprocpercore;
4680  if ((nproc < 2) || (nproc < __kmp_avail_proc)) {
4681  if (__kmp_affinity_verbose || __kmp_affinity_warnings) {
4682  KMP_WARNING(AffBalancedNotAvail, "KMP_AFFINITY");
4683  }
4684  __kmp_affinity_type = affinity_none;
4685  return;
4686  }
4687 
4688  procarr = (int *)__kmp_allocate(sizeof(int) * nproc);
4689  for (int i = 0; i < nproc; i++) {
4690  procarr[i] = -1;
4691  }
4692 
4693  int lastcore = -1;
4694  int inlastcore = 0;
4695  for (int i = 0; i < __kmp_avail_proc; i++) {
4696  int proc = address2os[i].second;
4697  int core =
4698  __kmp_affinity_find_core(address2os, i, depth - 1, core_level);
4699 
4700  if (core == lastcore) {
4701  inlastcore++;
4702  } else {
4703  inlastcore = 0;
4704  }
4705  lastcore = core;
4706 
4707  procarr[core * maxprocpercore + inlastcore] = proc;
4708  }
4709  }
4710  if (__kmp_affinity_compact >= depth) {
4711  __kmp_affinity_compact = depth - 1;
4712  }
4713 
4714  sortAddresses:
4715  // Allocate the gtid->affinity mask table.
4716  if (__kmp_affinity_dups) {
4717  __kmp_affinity_num_masks = __kmp_avail_proc;
4718  } else {
4719  __kmp_affinity_num_masks = numUnique;
4720  }
4721 
4722  if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) &&
4723  (__kmp_affinity_num_places > 0) &&
4724  ((unsigned)__kmp_affinity_num_places < __kmp_affinity_num_masks)) {
4725  __kmp_affinity_num_masks = __kmp_affinity_num_places;
4726  }
4727 
4728  KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4729 
4730  // Sort the address2os table according to the current setting of
4731  // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
4732  qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
4733  __kmp_affinity_cmp_Address_child_num);
4734  {
4735  int i;
4736  unsigned j;
4737  for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
4738  if ((!__kmp_affinity_dups) && (!address2os[i].first.leader)) {
4739  continue;
4740  }
4741  unsigned osId = address2os[i].second;
4742  kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
4743  kmp_affin_mask_t *dest = KMP_CPU_INDEX(__kmp_affinity_masks, j);
4744  KMP_ASSERT(KMP_CPU_ISSET(osId, src));
4745  KMP_CPU_COPY(dest, src);
4746  if (++j >= __kmp_affinity_num_masks) {
4747  break;
4748  }
4749  }
4750  KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
4751  }
4752  break;
4753 
4754  default:
4755  KMP_ASSERT2(0, "Unexpected affinity setting");
4756  }
4757 
4758  KMP_CPU_FREE_ARRAY(osId2Mask, maxIndex + 1);
4759  machine_hierarchy.init(address2os, __kmp_avail_proc);
4760 }
4761 #undef KMP_EXIT_AFF_NONE
4762 
4763 void __kmp_affinity_initialize(void) {
4764  // Much of the code above was written assuming that if a machine was not
4765  // affinity capable, then __kmp_affinity_type == affinity_none. We now
4766  // explicitly represent this as __kmp_affinity_type == affinity_disabled.
4767  // There are too many checks for __kmp_affinity_type == affinity_none
4768  // in this code. Instead of trying to change them all, check if
4769  // __kmp_affinity_type == affinity_disabled, and if so, slam it with
4770  // affinity_none, call the real initialization routine, then restore
4771  // __kmp_affinity_type to affinity_disabled.
4772  int disabled = (__kmp_affinity_type == affinity_disabled);
4773  if (!KMP_AFFINITY_CAPABLE()) {
4774  KMP_ASSERT(disabled);
4775  }
4776  if (disabled) {
4777  __kmp_affinity_type = affinity_none;
4778  }
4779  __kmp_aux_affinity_initialize();
4780  if (disabled) {
4781  __kmp_affinity_type = affinity_disabled;
4782  }
4783 }
4784 
4785 void __kmp_affinity_uninitialize(void) {
4786  if (__kmp_affinity_masks != NULL) {
4787  KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
4788  __kmp_affinity_masks = NULL;
4789  }
4790  if (__kmp_affin_fullMask != NULL) {
4791  KMP_CPU_FREE(__kmp_affin_fullMask);
4792  __kmp_affin_fullMask = NULL;
4793  }
4794  __kmp_affinity_num_masks = 0;
4795  __kmp_affinity_type = affinity_default;
4796  __kmp_affinity_num_places = 0;
4797  if (__kmp_affinity_proclist != NULL) {
4798  __kmp_free(__kmp_affinity_proclist);
4799  __kmp_affinity_proclist = NULL;
4800  }
4801  if (address2os != NULL) {
4802  __kmp_free(address2os);
4803  address2os = NULL;
4804  }
4805  if (procarr != NULL) {
4806  __kmp_free(procarr);
4807  procarr = NULL;
4808  }
4809 #if KMP_USE_HWLOC
4810  if (__kmp_hwloc_topology != NULL) {
4811  hwloc_topology_destroy(__kmp_hwloc_topology);
4812  __kmp_hwloc_topology = NULL;
4813  }
4814 #endif
4815  KMPAffinity::destroy_api();
4816 }
4817 
4818 void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
4819  if (!KMP_AFFINITY_CAPABLE()) {
4820  return;
4821  }
4822 
4823  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4824  if (th->th.th_affin_mask == NULL) {
4825  KMP_CPU_ALLOC(th->th.th_affin_mask);
4826  } else {
4827  KMP_CPU_ZERO(th->th.th_affin_mask);
4828  }
4829 
4830  // Copy the thread mask to the kmp_info_t structure. If
4831  // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
4832  // has all of the OS proc ids set; if __kmp_affinity_respect_mask is set,
4833  // the full mask is the same as the mask of the initialization thread.
4834  kmp_affin_mask_t *mask;
4835  int i;
4836 
4837  if (KMP_AFFINITY_NON_PROC_BIND) {
4838  if ((__kmp_affinity_type == affinity_none) ||
4839  (__kmp_affinity_type == affinity_balanced)) {
4840 #if KMP_GROUP_AFFINITY
4841  if (__kmp_num_proc_groups > 1) {
4842  return;
4843  }
4844 #endif
4845  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4846  i = 0;
4847  mask = __kmp_affin_fullMask;
4848  } else {
4849  KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4850  i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4851  mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4852  }
4853  } else {
4854  if ((!isa_root) ||
4855  (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
4856 #if KMP_GROUP_AFFINITY
4857  if (__kmp_num_proc_groups > 1) {
4858  return;
4859  }
4860 #endif
4861  KMP_ASSERT(__kmp_affin_fullMask != NULL);
4862  i = KMP_PLACE_ALL;
4863  mask = __kmp_affin_fullMask;
4864  } else {
4865  // int i = some hash function or just a counter that doesn't
4866  // always start at 0. Use gtid for now.
4867  KMP_DEBUG_ASSERT(__kmp_affinity_num_masks > 0);
4868  i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
4869  mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
4870  }
4871  }
4872 
4873  th->th.th_current_place = i;
4874  if (isa_root) {
4875  th->th.th_new_place = i;
4876  th->th.th_first_place = 0;
4877  th->th.th_last_place = __kmp_affinity_num_masks - 1;
4878  } else if (KMP_AFFINITY_NON_PROC_BIND) {
4879  // When using a Non-OMP_PROC_BIND affinity method,
4880  // set all threads' place-partition-var to the entire place list
4881  th->th.th_first_place = 0;
4882  th->th.th_last_place = __kmp_affinity_num_masks - 1;
4883  }
4884 
4885  if (i == KMP_PLACE_ALL) {
4886  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n",
4887  gtid));
4888  } else {
4889  KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n",
4890  gtid, i));
4891  }
4892 
4893  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4894 
4895  if (__kmp_affinity_verbose
4896  /* to avoid duplicate printing (will be correctly printed on barrier) */
4897  && (__kmp_affinity_type == affinity_none ||
4898  (i != KMP_PLACE_ALL && __kmp_affinity_type != affinity_balanced))) {
4899  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4900  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4901  th->th.th_affin_mask);
4902  KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
4903  __kmp_gettid(), gtid, buf);
4904  }
4905 
4906 #if KMP_OS_WINDOWS
4907  // On Windows* OS, the process affinity mask might have changed. If the user
4908  // didn't request affinity and this call fails, just continue silently.
4909  // See CQ171393.
4910  if (__kmp_affinity_type == affinity_none) {
4911  __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
4912  } else
4913 #endif
4914  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4915 }
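
// Editorial note (not library code): the place index chosen above is
// i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks. For example,
// with __kmp_affinity_num_masks == 4 and __kmp_affinity_offset == 1, gtid 0..3
// are assigned places 1, 2, 3, 0; the offset rotates the round-robin start.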
4916 
4917 void __kmp_affinity_set_place(int gtid) {
4918  if (!KMP_AFFINITY_CAPABLE()) {
4919  return;
4920  }
4921 
4922  kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
4923 
4924  KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current "
4925  "place = %d)\n",
4926  gtid, th->th.th_new_place, th->th.th_current_place));
4927 
4928  // Check that the new place is within this thread's partition.
4929  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
4930  KMP_ASSERT(th->th.th_new_place >= 0);
4931  KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
4932  if (th->th.th_first_place <= th->th.th_last_place) {
4933  KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) &&
4934  (th->th.th_new_place <= th->th.th_last_place));
4935  } else {
4936  KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) ||
4937  (th->th.th_new_place >= th->th.th_last_place));
4938  }
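  // Editorial note: th_first_place > th_last_place denotes a place partition
  // that wraps past the last place; e.g., with 8 places, first_place = 6 and
  // last_place = 1 describes the place set {6, 7, 0, 1}.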
4939 
4940  // Copy the thread mask to the kmp_info_t structure,
4941  // and set this thread's affinity.
4942  kmp_affin_mask_t *mask =
4943  KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
4944  KMP_CPU_COPY(th->th.th_affin_mask, mask);
4945  th->th.th_current_place = th->th.th_new_place;
4946 
4947  if (__kmp_affinity_verbose) {
4948  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4949  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4950  th->th.th_affin_mask);
4951  KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
4952  __kmp_gettid(), gtid, buf);
4953  }
4954  __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
4955 }
4956 
4957 int __kmp_aux_set_affinity(void **mask) {
4958  int gtid;
4959  kmp_info_t *th;
4960  int retval;
4961 
4962  if (!KMP_AFFINITY_CAPABLE()) {
4963  return -1;
4964  }
4965 
4966  gtid = __kmp_entry_gtid();
4967  KA_TRACE(1000, (""); {
4968  char buf[KMP_AFFIN_MASK_PRINT_LEN];
4969  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
4970  (kmp_affin_mask_t *)(*mask));
4971  __kmp_debug_printf(
4972  "kmp_set_affinity: setting affinity mask for thread %d = %s\n", gtid,
4973  buf);
4974  });
4975 
4976  if (__kmp_env_consistency_check) {
4977  if ((mask == NULL) || (*mask == NULL)) {
4978  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4979  } else {
4980  unsigned proc;
4981  int num_procs = 0;
4982 
4983  KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) {
4984  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
4985  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4986  }
4987  if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
4988  continue;
4989  }
4990  num_procs++;
4991  }
4992  if (num_procs == 0) {
4993  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4994  }
4995 
4996 #if KMP_GROUP_AFFINITY
4997  if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
4998  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
4999  }
5000 #endif /* KMP_GROUP_AFFINITY */
5001  }
5002  }
5003 
5004  th = __kmp_threads[gtid];
5005  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
5006  retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
5007  if (retval == 0) {
5008  KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
5009  }
5010 
5011  th->th.th_current_place = KMP_PLACE_UNDEFINED;
5012  th->th.th_new_place = KMP_PLACE_UNDEFINED;
5013  th->th.th_first_place = 0;
5014  th->th.th_last_place = __kmp_affinity_num_masks - 1;
5015 
5016  // Turn off OpenMP 4.0 affinity for the current thread at this parallel level.
5017  th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;
5018 
5019  return retval;
5020 }
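
// A minimal usage sketch of the user-level path into __kmp_aux_set_affinity()
// (editorial illustration, not part of the library). It assumes the kmp_*
// affinity extensions declared in omp.h, that OS proc 0 is available to the
// process, and that affinity is enabled (the entry points return -1 when the
// runtime is not affinity capable).
//
//   #include <omp.h>
//   #include <stdio.h>
//
//   int main(void) {
//     kmp_affinity_mask_t mask;
//     kmp_create_affinity_mask(&mask);
//     if (kmp_set_affinity_mask_proc(0, &mask) != 0)
//       return 1; // OS proc 0 is not in the process' full mask
//     if (kmp_set_affinity(&mask) != 0)
//       printf("kmp_set_affinity failed\n");
//     kmp_destroy_affinity_mask(&mask);
//     return 0;
//   }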
5021 
5022 int __kmp_aux_get_affinity(void **mask) {
5023  int gtid;
5024  int retval;
5025  kmp_info_t *th;
5026 
5027  if (!KMP_AFFINITY_CAPABLE()) {
5028  return -1;
5029  }
5030 
5031  gtid = __kmp_entry_gtid();
5032  th = __kmp_threads[gtid];
5033  KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
5034 
5035  KA_TRACE(1000, (""); {
5036  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5037  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5038  th->th.th_affin_mask);
5039  __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n",
5040  gtid, buf);
5041  });
5042 
5043  if (__kmp_env_consistency_check) {
5044  if ((mask == NULL) || (*mask == NULL)) {
5045  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
5046  }
5047  }
5048 
5049 #if !KMP_OS_WINDOWS
5050 
5051  retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
5052  KA_TRACE(1000, (""); {
5053  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5054  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5055  (kmp_affin_mask_t *)(*mask));
5056  __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n",
5057  gtid, buf);
5058  });
5059  return retval;
5060 
5061 #else
5062 
5063  KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
5064  return 0;
5065 
5066 #endif /* KMP_OS_WINDOWS */
5067 }
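
// Companion sketch (editorial) for reading the mask back: kmp_get_affinity()
// fills a user mask, kmp_get_affinity_max_proc() bounds the proc ids, and
// kmp_get_affinity_mask_proc() tests individual bits. Assumes <omp.h> and
// <stdio.h> as in the previous sketch.
//
//   kmp_affinity_mask_t mask;
//   kmp_create_affinity_mask(&mask);
//   if (kmp_get_affinity(&mask) == 0) {
//     for (int p = 0; p < kmp_get_affinity_max_proc(); ++p)
//       if (kmp_get_affinity_mask_proc(p, &mask) > 0)
//         printf("thread is allowed on OS proc %d\n", p);
//   }
//   kmp_destroy_affinity_mask(&mask);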
5068 
5069 int __kmp_aux_get_affinity_max_proc() {
5070  if (!KMP_AFFINITY_CAPABLE()) {
5071  return 0;
5072  }
5073 #if KMP_GROUP_AFFINITY
5074  if (__kmp_num_proc_groups > 1) {
5075  return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT);
5076  }
5077 #endif
5078  return __kmp_xproc;
5079 }
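// Editorial note: with Windows processor groups, e.g. 2 groups and an 8-byte
// DWORD_PTR, the value above is 2 * 8 * 8 = 128; otherwise it is simply the
// number of OS procs recorded in __kmp_xproc.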
5080 
5081 int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) {
5082  if (!KMP_AFFINITY_CAPABLE()) {
5083  return -1;
5084  }
5085 
5086  KA_TRACE(1000, (""); {
5087  int gtid = __kmp_entry_gtid();
5088  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5089  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5090  (kmp_affin_mask_t *)(*mask));
5091  __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in "
5092  "affinity mask for thread %d = %s\n",
5093  proc, gtid, buf);
5094  });
5095 
5096  if (__kmp_env_consistency_check) {
5097  if ((mask == NULL) || (*mask == NULL)) {
5098  KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
5099  }
5100  }
5101 
5102  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5103  return -1;
5104  }
5105  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5106  return -2;
5107  }
5108 
5109  KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
5110  return 0;
5111 }
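// Editorial note on the return convention above (shared with the unset helper
// below): 0 on success, -1 if the proc id is out of range or affinity is not
// capable, -2 if the proc is not in the process' full mask. A hypothetical
// caller-side check:
//
//   int rc = kmp_set_affinity_mask_proc(proc, &mask);
//   if (rc == -1)      { /* proc outside 0..kmp_get_affinity_max_proc()-1 */ }
//   else if (rc == -2) { /* proc not available to this process */ }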
5112 
5113 int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) {
5114  if (!KMP_AFFINITY_CAPABLE()) {
5115  return -1;
5116  }
5117 
5118  KA_TRACE(1000, (""); {
5119  int gtid = __kmp_entry_gtid();
5120  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5121  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5122  (kmp_affin_mask_t *)(*mask));
5123  __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in "
5124  "affinity mask for thread %d = %s\n",
5125  proc, gtid, buf);
5126  });
5127 
5128  if (__kmp_env_consistency_check) {
5129  if ((mask == NULL) || (*mask == NULL)) {
5130  KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5131  }
5132  }
5133 
5134  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5135  return -1;
5136  }
5137  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5138  return -2;
5139  }
5140 
5141  KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
5142  return 0;
5143 }
5144 
5145 int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) {
5146  if (!KMP_AFFINITY_CAPABLE()) {
5147  return -1;
5148  }
5149 
5150  KA_TRACE(1000, (""); {
5151  int gtid = __kmp_entry_gtid();
5152  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5153  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
5154  (kmp_affin_mask_t *)(*mask));
5155  __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in "
5156  "affinity mask for thread %d = %s\n",
5157  proc, gtid, buf);
5158  });
5159 
5160  if (__kmp_env_consistency_check) {
5161  if ((mask == NULL) || (*mask == NULL)) {
5162  KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5163  }
5164  }
5165 
5166  if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) {
5167  return -1;
5168  }
5169  if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
5170  return 0;
5171  }
5172 
5173  return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
5174 }
5175 
5176 // Dynamic affinity settings - Affinity balanced
5177 void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) {
5178  KMP_DEBUG_ASSERT(th);
5179  bool fine_gran = true;
5180  int tid = th->th.th_info.ds.ds_tid;
5181 
5182  switch (__kmp_affinity_gran) {
5183  case affinity_gran_fine:
5184  case affinity_gran_thread:
5185  break;
5186  case affinity_gran_core:
5187  if (__kmp_nThreadsPerCore > 1) {
5188  fine_gran = false;
5189  }
5190  break;
5191  case affinity_gran_package:
5192  if (nCoresPerPkg > 1) {
5193  fine_gran = false;
5194  }
5195  break;
5196  default:
5197  fine_gran = false;
5198  }
5199 
5200  if (__kmp_affinity_uniform_topology()) {
5201  int coreID;
5202  int threadID;
5203  // Number of hardware threads per core on an HT machine
5204  int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
5205  // Number of cores
5206  int ncores = __kmp_ncores;
5207  if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) {
5208  __kmp_nth_per_core = __kmp_avail_proc / nPackages;
5209  ncores = nPackages;
5210  }
5211  // How many threads will be bound to each core
5212  int chunk = nthreads / ncores;
5213  // How many cores will have an additional thread bound to them - the "big cores"
5214  int big_cores = nthreads % ncores;
5215  // Number of threads on the big cores
5216  int big_nth = (chunk + 1) * big_cores;
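    // Worked example (editorial): nthreads = 10 and ncores = 4 give chunk = 2,
    // big_cores = 2, big_nth = 6; tids 0-5 land on the two "big" cores (3
    // threads each) and tids 6-9 on the remaining two cores (2 threads each).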
5217  if (tid < big_nth) {
5218  coreID = tid / (chunk + 1);
5219  threadID = (tid % (chunk + 1)) % __kmp_nth_per_core;
5220  } else { // tid >= big_nth
5221  coreID = (tid - big_cores) / chunk;
5222  threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core;
5223  }
5224 
5225  KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
5226  "Illegal set affinity operation when not capable");
5227 
5228  kmp_affin_mask_t *mask = th->th.th_affin_mask;
5229  KMP_CPU_ZERO(mask);
5230 
5231  if (fine_gran) {
5232  int osID = address2os[coreID * __kmp_nth_per_core + threadID].second;
5233  KMP_CPU_SET(osID, mask);
5234  } else {
5235  for (int i = 0; i < __kmp_nth_per_core; i++) {
5236  int osID;
5237  osID = address2os[coreID * __kmp_nth_per_core + i].second;
5238  KMP_CPU_SET(osID, mask);
5239  }
5240  }
5241  if (__kmp_affinity_verbose) {
5242  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5243  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5244  KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5245  __kmp_gettid(), tid, buf);
5246  }
5247  __kmp_set_system_affinity(mask, TRUE);
5248  } else { // Non-uniform topology
5249 
5250  kmp_affin_mask_t *mask = th->th.th_affin_mask;
5251  KMP_CPU_ZERO(mask);
5252 
5253  int core_level = __kmp_affinity_find_core_level(
5254  address2os, __kmp_avail_proc, __kmp_aff_depth - 1);
5255  int ncores = __kmp_affinity_compute_ncores(address2os, __kmp_avail_proc,
5256  __kmp_aff_depth - 1, core_level);
5257  int nth_per_core = __kmp_affinity_max_proc_per_core(
5258  address2os, __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5259 
5260  // For a performance gain, handle the special case nthreads ==
5261  // __kmp_avail_proc
5262  if (nthreads == __kmp_avail_proc) {
5263  if (fine_gran) {
5264  int osID = address2os[tid].second;
5265  KMP_CPU_SET(osID, mask);
5266  } else {
5267  int core = __kmp_affinity_find_core(address2os, tid,
5268  __kmp_aff_depth - 1, core_level);
5269  for (int i = 0; i < __kmp_avail_proc; i++) {
5270  int osID = address2os[i].second;
5271  if (__kmp_affinity_find_core(address2os, i, __kmp_aff_depth - 1,
5272  core_level) == core) {
5273  KMP_CPU_SET(osID, mask);
5274  }
5275  }
5276  }
5277  } else if (nthreads <= ncores) {
5278 
5279  int core = 0;
5280  for (int i = 0; i < ncores; i++) {
5281  // Check if this core from procarr[] is in the mask
5282  int in_mask = 0;
5283  for (int j = 0; j < nth_per_core; j++) {
5284  if (procarr[i * nth_per_core + j] != -1) {
5285  in_mask = 1;
5286  break;
5287  }
5288  }
5289  if (in_mask) {
5290  if (tid == core) {
5291  for (int j = 0; j < nth_per_core; j++) {
5292  int osID = procarr[i * nth_per_core + j];
5293  if (osID != -1) {
5294  KMP_CPU_SET(osID, mask);
5295  // For fine granularity it is enough to set the first available
5296  // osID for this core
5297  if (fine_gran) {
5298  break;
5299  }
5300  }
5301  }
5302  break;
5303  } else {
5304  core++;
5305  }
5306  }
5307  }
5308  } else { // nthreads > ncores
5309  // Array to save the number of processors at each core
5310  int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores);
5311  // Array to save the number of cores with "x" available processors
5312  int *ncores_with_x_procs =
5313  (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5314  // Array to save the number of cores with at least "x" available processors
5315  int *ncores_with_x_to_max_procs =
5316  (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1));
5317 
5318  for (int i = 0; i <= nth_per_core; i++) {
5319  ncores_with_x_procs[i] = 0;
5320  ncores_with_x_to_max_procs[i] = 0;
5321  }
5322 
5323  for (int i = 0; i < ncores; i++) {
5324  int cnt = 0;
5325  for (int j = 0; j < nth_per_core; j++) {
5326  if (procarr[i * nth_per_core + j] != -1) {
5327  cnt++;
5328  }
5329  }
5330  nproc_at_core[i] = cnt;
5331  ncores_with_x_procs[cnt]++;
5332  }
5333 
5334  for (int i = 0; i <= nth_per_core; i++) {
5335  for (int j = i; j <= nth_per_core; j++) {
5336  ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j];
5337  }
5338  }
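      // Worked example (editorial): ncores = 3, nth_per_core = 2 and per-core
      // available-proc counts {2, 1, 2} give nproc_at_core = {2, 1, 2},
      // ncores_with_x_procs = {0, 1, 2} and ncores_with_x_to_max_procs = {3, 3, 2}.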
5339 
5340  // Max number of processors
5341  int nproc = nth_per_core * ncores;
5342  // Array to keep the number of threads assigned to each hardware context
5343  int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc);
5344  for (int i = 0; i < nproc; i++) {
5345  newarr[i] = 0;
5346  }
5347 
5348  int nth = nthreads;
5349  int flag = 0;
5350  while (nth > 0) {
5351  for (int j = 1; j <= nth_per_core; j++) {
5352  int cnt = ncores_with_x_to_max_procs[j];
5353  for (int i = 0; i < ncores; i++) {
5354  // Skip cores with 0 available processors
5355  if (nproc_at_core[i] == 0) {
5356  continue;
5357  }
5358  for (int k = 0; k < nth_per_core; k++) {
5359  if (procarr[i * nth_per_core + k] != -1) {
5360  if (newarr[i * nth_per_core + k] == 0) {
5361  newarr[i * nth_per_core + k] = 1;
5362  cnt--;
5363  nth--;
5364  break;
5365  } else {
5366  if (flag != 0) {
5367  newarr[i * nth_per_core + k]++;
5368  cnt--;
5369  nth--;
5370  break;
5371  }
5372  }
5373  }
5374  }
5375  if (cnt == 0 || nth == 0) {
5376  break;
5377  }
5378  }
5379  if (nth == 0) {
5380  break;
5381  }
5382  }
5383  flag = 1;
5384  }
5385  int sum = 0;
5386  for (int i = 0; i < nproc; i++) {
5387  sum += newarr[i];
5388  if (sum > tid) {
5389  if (fine_gran) {
5390  int osID = procarr[i];
5391  KMP_CPU_SET(osID, mask);
5392  } else {
5393  int coreID = i / nth_per_core;
5394  for (int ii = 0; ii < nth_per_core; ii++) {
5395  int osID = procarr[coreID * nth_per_core + ii];
5396  if (osID != -1) {
5397  KMP_CPU_SET(osID, mask);
5398  }
5399  }
5400  }
5401  break;
5402  }
5403  }
5404  __kmp_free(newarr);
5405  }
5406 
5407  if (__kmp_affinity_verbose) {
5408  char buf[KMP_AFFIN_MASK_PRINT_LEN];
5409  __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
5410  KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
5411  __kmp_gettid(), tid, buf);
5412  }
5413  __kmp_set_system_affinity(mask, TRUE);
5414  }
5415 }
5416 
5417 #if KMP_OS_LINUX || KMP_OS_FREEBSD
5418 // We don't need this entry point on Windows because
5419 // the GetProcessAffinityMask() API is available there
5420 //
5421 // The intended usage is indicated by these steps:
5422 // 1) The user gets the current affinity mask
5423 // 2) Then sets the affinity by calling this function
5424 // 3) Error check the return value
5425 // 4) Use non-OpenMP parallelization
5426 // 5) Reset the affinity to what was stored in step 1)
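// (An illustrative usage sketch of these steps follows the function below.)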
5427 #ifdef __cplusplus
5428 extern "C"
5429 #endif
5430  int
5431  kmp_set_thread_affinity_mask_initial()
5432 // The function returns 0 on success,
5433 // -1 if we cannot bind the thread,
5434 // >0 (errno) if an error happened during binding.
5435 {
5436  int gtid = __kmp_get_gtid();
5437  if (gtid < 0) {
5438  // Do not touch non-omp threads
5439  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5440  "non-omp thread, returning\n"));
5441  return -1;
5442  }
5443  if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
5444  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5445  "affinity not initialized, returning\n"));
5446  return -1;
5447  }
5448  KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
5449  "set full mask for thread %d\n",
5450  gtid));
5451  KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
5452  return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
5453 }
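
// A usage sketch of steps 1)-5) above (editorial illustration; assumes Linux
// with _GNU_SOURCE for sched_getaffinity()/sched_setaffinity(), and a
// hypothetical run_non_openmp_parallel_work() helper). Declare the entry point
// yourself if your omp.h does not expose it.
//
//   #define _GNU_SOURCE
//   #include <sched.h>
//
//   int kmp_set_thread_affinity_mask_initial(void);
//
//   void widen_then_restore(void) {
//     cpu_set_t saved;
//     CPU_ZERO(&saved);
//     sched_getaffinity(0, sizeof(saved), &saved);       // 1) save current mask
//     if (kmp_set_thread_affinity_mask_initial() == 0) { // 2)+3) widen to full mask
//       run_non_openmp_parallel_work();                  // 4) non-OpenMP work
//     }
//     sched_setaffinity(0, sizeof(saved), &saved);       // 5) restore saved mask
//   }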
5454 #endif
5455 
5456 #endif // KMP_AFFINITY_SUPPORTED