#ifndef KMP_DISPATCH_H
#define KMP_DISPATCH_H
#include "kmp_error.h"
#include "kmp_stats.h"
#if KMP_OS_WINDOWS && KMP_ARCH_X86
#include <float.h>
#endif

#if OMPT_SUPPORT
#include "ompt-internal.h"
#include "ompt-specific.h"
#endif
#if KMP_USE_HIER_SCHED
// Forward declarations of hierarchical scheduling data structures
template <typename T> struct kmp_hier_t;
template <typename T> struct kmp_hier_top_unit_t;
#endif
template <typename T> struct dispatch_shared_info_template;
template <typename T> struct dispatch_private_info_template;
template <typename T>
extern void __kmp_dispatch_init_algorithm(ident_t *loc, int gtid,
                                          dispatch_private_info_template<T> *pr,
                                          enum sched_type schedule, T lb, T ub,
                                          typename traits_t<T>::signed_t st,
#if USE_ITT_BUILD
                                          kmp_uint64 *cur_chunk,
#endif
                                          typename traits_t<T>::signed_t chunk,
                                          T nproc, T unit_id);
template <typename T>
extern int __kmp_dispatch_next_algorithm(
    int gtid, dispatch_private_info_template<T> *pr,
    dispatch_shared_info_template<T> volatile *sh, kmp_int32 *p_last, T *p_lb,
    T *p_ub, typename traits_t<T>::signed_t *p_st, T nproc, T unit_id);
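// Illustrative sketch (not part of the runtime sources): a dispatcher built on
// the two templates above typically initializes the private descriptor once,
// then keeps asking for chunks until no work remains. Variable names (pr, sh,
// last, lb, ub, st, tid) are placeholders for this sketch only, and it assumes
// USE_ITT_BUILD is off (otherwise a cur_chunk pointer is also passed):
//
//   dispatch_private_info_template<kmp_int32> pr;
//   __kmp_dispatch_init_algorithm<kmp_int32>(loc, gtid, &pr, schedule, lb, ub,
//                                            st, chunk, nproc, tid);
//   while (__kmp_dispatch_next_algorithm<kmp_int32>(gtid, &pr, sh, &last, &lb,
//                                                   &ub, &st, nproc, tid)) {
//     for (kmp_int32 i = lb; i <= ub; i += st)
//       body(i); // execute one chunk of iterations (bounds are inclusive)
//   }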
void __kmp_dispatch_dxo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
void __kmp_dispatch_deo_error(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
#if KMP_STATIC_STEAL_ENABLED
template <typename T> struct dispatch_private_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  UT count; // current chunk number
  T ub, lb; // loop bounds
  ST st; // stride
  UT tc; // trip count
  kmp_lock_t *steal_lock; // lock used for chunk stealing
  UT ordered_lower, ordered_upper; // bounds used by the ordered machinery below
  // KMP_ALIGN(32) keeps parm1-4 of the scheduling algorithms in one cache line
  struct KMP_ALIGN(32) {
    T parm1, parm2, parm3, parm4;
  };
};
#else /* KMP_STATIC_STEAL_ENABLED */
template <typename T> struct dispatch_private_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  T lb, ub; // loop bounds
  ST st; // stride
  UT tc; // trip count
  UT ordered_lower, ordered_upper;
};
#endif /* KMP_STATIC_STEAL_ENABLED */
template <typename T> struct KMP_ALIGN_CACHE dispatch_private_info_template {
  // duplicate alignment here, otherwise size of structure is not correct in
  // our compiler
  union KMP_ALIGN_CACHE private_info_tmpl {
    dispatch_private_infoXX_template<T> p;
    dispatch_private_info64_t p64;
  } u;
  enum sched_type schedule; /* scheduling algorithm */
  kmp_sched_flags_t flags; /* flags (e.g. ordered, nomerge, etc.) */
  std::atomic<kmp_uint32> steal_flag; // static_steal only, state of a buffer
  kmp_uint32 ordered_bumped;
  dispatch_private_info *next; /* stack of buffers for nest of serial regions */
  kmp_uint32 type_size;
#if KMP_USE_HIER_SCHED
  kmp_int32 hier_id;
  kmp_hier_top_unit_t<T> *hier_parent;
  // member functions
  kmp_int32 get_hier_id() const { return hier_id; }
  kmp_hier_top_unit_t<T> *get_parent() { return hier_parent; }
#endif
  enum cons_type pushed_ws;
};
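// Note: the rest of this header reads the typed view of the descriptor through
// the union member, e.g. pr->u.p.ordered_lower in __kmp_dispatch_deo() below;
// type_size is assumed to record whether the 4- or 8-byte layout is in use so
// untyped code can pick the right interpretation.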
// Replaces dispatch_shared_info{32,64} structures and types.
template <typename T> struct dispatch_shared_infoXX_template {
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  /* chunk index under dynamic, number of idle threads under static-steal;
     iteration index otherwise */
  volatile UT iteration;
  volatile ST num_done;
  volatile UT ordered_iteration;
  // to retain the structure size making ordered_iteration scalar
  UT ordered_dummy[KMP_MAX_ORDERED - 3];
};
// Replaces the dispatch_shared_info structure and dispatch_shared_info_t type.
template <typename T> struct dispatch_shared_info_template {
  typedef typename traits_t<T>::unsigned_t UT;
  // we need union here to keep the structure size
  union shared_info_tmpl {
    dispatch_shared_infoXX_template<UT> s;
    dispatch_shared_info64_t s64;
  } u;
  volatile kmp_uint32 buffer_index;
  volatile kmp_int32 doacross_buf_idx; // teamwise index
  kmp_uint32 *doacross_flags; // array of iteration flags (0/1)
  kmp_int32 doacross_num_done; // count finished threads
#if KMP_USE_HIER_SCHED
  kmp_hier_t<T> *hier;
#endif
};
#undef USE_TEST_LOCKS
// test_then_add template (general template should NOT be used)
template <typename T> static __forceinline T test_then_add(volatile T *p, T d);

template <>
__forceinline kmp_int32 test_then_add<kmp_int32>(volatile kmp_int32 *p,
                                                 kmp_int32 d) {
  kmp_int32 r = KMP_TEST_THEN_ADD32(p, d);
  return r;
}

template <>
__forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
                                                 kmp_int64 d) {
  kmp_int64 r = KMP_TEST_THEN_ADD64(p, d);
  return r;
}
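// Usage sketch (illustrative only): dynamic-style schedules claim a chunk by
// atomically advancing the shared iteration counter and treating the returned
// old value as the start of their chunk:
//
//   kmp_int32 start = test_then_add<kmp_int32>(
//       (volatile kmp_int32 *)&sh->u.s.iteration, (kmp_int32)chunk_size);
//   // iterations [start, start + chunk_size) now belong to this thread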
// test_then_inc_acq template (general template should NOT be used)
template <typename T> static __forceinline T test_then_inc_acq(volatile T *p);

template <>
__forceinline kmp_int32 test_then_inc_acq<kmp_int32>(volatile kmp_int32 *p) {
  kmp_int32 r = KMP_TEST_THEN_INC_ACQ32(p);
  return r;
}

template <>
__forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
  kmp_int64 r = KMP_TEST_THEN_INC_ACQ64(p);
  return r;
}
// test_then_inc template (general template should NOT be used)
template <typename T> static __forceinline T test_then_inc(volatile T *p);

template <>
__forceinline kmp_int32 test_then_inc<kmp_int32>(volatile kmp_int32 *p) {
  kmp_int32 r = KMP_TEST_THEN_INC32(p);
  return r;
}

template <>
__forceinline kmp_int64 test_then_inc<kmp_int64>(volatile kmp_int64 *p) {
  kmp_int64 r = KMP_TEST_THEN_INC64(p);
  return r;
}
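// Usage sketch (illustrative only): __kmp_dispatch_dxo() below releases the
// next iteration of an ordered loop by bumping the shared counter:
//
//   test_then_inc<ST>((volatile ST *)&sh->u.s.ordered_iteration);
//
// while chunk-based schedules elsewhere in the runtime use test_then_inc_acq()
// to claim the next chunk index with acquire semantics.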
// compare_and_swap template (general template should NOT be used)
template <typename T>
static __forceinline kmp_int32 compare_and_swap(volatile T *p, T c, T s);

template <>
__forceinline kmp_int32 compare_and_swap<kmp_int32>(volatile kmp_int32 *p,
                                                    kmp_int32 c, kmp_int32 s) {
  return KMP_COMPARE_AND_STORE_REL32(p, c, s);
}

template <>
__forceinline kmp_int32 compare_and_swap<kmp_int64>(volatile kmp_int64 *p,
                                                    kmp_int64 c, kmp_int64 s) {
  return KMP_COMPARE_AND_STORE_REL64(p, c, s);
}
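// Usage sketch (illustrative only, names are placeholders): lock-free updates
// of shared dispatch state follow the usual read-compute-CAS retry pattern:
//
//   kmp_int32 old_iter, new_iter;
//   do {
//     old_iter = *(volatile kmp_int32 *)&sh->u.s.iteration;
//     new_iter = old_iter + claimed;
//   } while (!compare_and_swap<kmp_int32>(
//       (volatile kmp_int32 *)&sh->u.s.iteration, old_iter, new_iter));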
template <typename T> kmp_uint32 __kmp_ge(T value, T checker) {
  return value >= checker;
}
template <typename T> kmp_uint32 __kmp_eq(T value, T checker) {
  return value == checker;
}
/* Spin wait loop that pauses between checks; returns once pred(*spinner,
   checker) is non-zero. Does NOT put the waiting thread to sleep. */
template <typename UT>
static UT __kmp_wait(volatile UT *spinner, UT checker,
                     kmp_uint32 (*pred)(UT, UT) USE_ITT_BUILD_ARG(void *obj)) {
  // note: we may not belong to a team at this point
  volatile UT *spin = spinner;
  UT check = checker;
  kmp_uint32 spins;
  kmp_uint32 (*f)(UT, UT) = pred;
  UT r;

  KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
  KMP_INIT_YIELD(spins);
  // main wait spin loop
  while (!f(r = *spin, check)) {
    KMP_FSYNC_SPIN_PREPARE(obj);
    // if oversubscribed, or have waited a bit, then yield
    KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
  }
  KMP_FSYNC_SPIN_ACQUIRED(obj);
  return r;
}
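// Usage sketch (illustrative only): __kmp_dispatch_deo() below blocks until
// the shared ordered counter reaches this thread's lower bound, passing
// __kmp_ge as the predicate:
//
//   __kmp_wait<UT>(&sh->u.s.ordered_iteration, lower,
//                  __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));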
template <typename UT>
void __kmp_dispatch_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  dispatch_private_info_template<UT> *pr;

  int gtid = *gtid_ref;
  kmp_info_t *th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_dispatch);

  KD_TRACE(100, ("__kmp_dispatch_deo: T#%d called\n", gtid));
  if (__kmp_env_consistency_check) {
    pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
        th->th.th_dispatch->th_dispatch_pr_current);
    if (pr->pushed_ws != ct_none) {
#if KMP_USE_DYNAMIC_LOCK
      __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL, 0);
#else
      __kmp_push_sync(gtid, ct_ordered_in_pdo, loc_ref, NULL);
#endif
    }
  }

  if (!th->th.th_team->t.t_serialized) {
    dispatch_shared_info_template<UT> *sh =
        reinterpret_cast<dispatch_shared_info_template<UT> *>(
            th->th.th_dispatch->th_dispatch_sh_current);
    UT lower;

    if (!__kmp_env_consistency_check) {
      pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
          th->th.th_dispatch->th_dispatch_pr_current);
    }
    lower = pr->u.p.ordered_lower;

#if !defined(KMP_GOMP_COMPAT)
    if (__kmp_env_consistency_check) {
      if (pr->ordered_bumped) {
        struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
        __kmp_error_construct2(kmp_i18n_msg_CnsMultipleNesting,
                               ct_ordered_in_pdo, loc_ref,
                               &p->stack_data[p->w_top]);
      }
    }
#endif /* !defined(KMP_GOMP_COMPAT) */

    KMP_MB();
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmp_dispatch_deo: T#%%d before wait: "
                              "ordered_iter:%%%s lower:%%%s\n",
                              traits_t<UT>::spec, traits_t<UT>::spec);
      KD_TRACE(1000, (buff, gtid, sh->u.s.ordered_iteration, lower));
      __kmp_str_free(&buff);
    }
#endif
    __kmp_wait<UT>(&sh->u.s.ordered_iteration, lower,
                   __kmp_ge<UT> USE_ITT_BUILD_ARG(NULL));
    KMP_MB();
#ifdef KMP_DEBUG
    {
      char *buff;
      buff = __kmp_str_format("__kmp_dispatch_deo: T#%%d after wait: "
                              "ordered_iter:%%%s lower:%%%s\n",
                              traits_t<UT>::spec, traits_t<UT>::spec);
      KD_TRACE(1000, (buff, gtid, sh->u.s.ordered_iteration, lower));
      __kmp_str_free(&buff);
    }
#endif
  }
  KD_TRACE(100, ("__kmp_dispatch_deo: T#%d returned\n", gtid));
}
template <typename UT>
void __kmp_dispatch_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
  typedef typename traits_t<UT>::signed_t ST;
  dispatch_private_info_template<UT> *pr;

  int gtid = *gtid_ref;
  kmp_info_t *th = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(th->th.th_dispatch);

  KD_TRACE(100, ("__kmp_dispatch_dxo: T#%d called\n", gtid));
  if (__kmp_env_consistency_check) {
    pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
        th->th.th_dispatch->th_dispatch_pr_current);
    if (pr->pushed_ws != ct_none) {
      __kmp_pop_sync(gtid, ct_ordered_in_pdo, loc_ref);
    }
  }

  if (!th->th.th_team->t.t_serialized) {
    dispatch_shared_info_template<UT> *sh =
        reinterpret_cast<dispatch_shared_info_template<UT> *>(
            th->th.th_dispatch->th_dispatch_sh_current);

    if (!__kmp_env_consistency_check) {
      pr = reinterpret_cast<dispatch_private_info_template<UT> *>(
          th->th.th_dispatch->th_dispatch_pr_current);
    }

    KMP_FSYNC_RELEASING(CCAST(UT *, &sh->u.s.ordered_iteration));
#if !defined(KMP_GOMP_COMPAT)
    if (__kmp_env_consistency_check) {
      if (pr->ordered_bumped != 0) {
        struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
        __kmp_error_construct2(kmp_i18n_msg_CnsMultipleNesting,
                               ct_ordered_in_pdo, loc_ref,
                               &p->stack_data[p->w_top]);
      }
    }
#endif /* !defined(KMP_GOMP_COMPAT) */

    KMP_MB(); /* Flush all pending memory write invalidates. */

    pr->ordered_bumped += 1;

    KD_TRACE(1000,
             ("__kmp_dispatch_dxo: T#%d bumping ordered ordered_bumped=%d\n",
              gtid, pr->ordered_bumped));

    KMP_MB(); /* Flush all pending memory write invalidates. */

    /* TODO use general release procedure? */
    test_then_inc<ST>((volatile ST *)&sh->u.s.ordered_iteration);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }
  KD_TRACE(100, ("__kmp_dispatch_dxo: T#%d returned\n", gtid));
}
// Computes and returns x to the power of y, for non-negative integer y.
template <typename UT>
static __forceinline long double __kmp_pow(long double x, UT y) {
  long double s = 1.0L;
  KMP_DEBUG_ASSERT(x > 0.0 && x < 1.0);
  // exponentiation by squaring: O(log2(y)) multiplications
  while (y) {
    if (y & 1)
      s *= x;
    x *= x;
    y >>= 1;
  }
  return s;
}
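// Worked example (illustrative): __kmp_pow<kmp_uint32>(0.75L, 3) multiplies s
// by x whenever the low bit of y is set and squares x each round:
//   y=3: s = 0.75,              x = 0.5625
//   y=1: s = 0.75 * 0.5625 = 0.421875
// and returns 0.421875 == 0.75^3 after about log2(y) iterations.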
/* Computes and returns the number of unassigned iterations after idx chunks
   have been assigned (the total number of unassigned iterations in chunks with
   index greater than or equal to idx). __forceinline appears to miscompile
   this function, so __inline is used instead. */
template <typename T>
static __inline typename traits_t<T>::unsigned_t
__kmp_dispatch_guided_remaining(T tc, typename traits_t<T>::floating_t base,
                                typename traits_t<T>::unsigned_t idx) {
  typedef typename traits_t<T>::unsigned_t UT;
  // remaining iterations ~= tc * base^idx, rounded up to a whole iteration
  long double x = tc * __kmp_pow<UT>(base, idx);
  UT r = (UT)x;
  if (x == r)
    return r;
  return r + 1;
}
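// Worked example (illustrative values): with a trip count tc = 100, base = 0.5
// and idx = 3 chunks already handed out, x = 100 * 0.5^3 = 12.5, so the
// function returns 13 (the estimate is rounded up to a whole iteration).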
// Parameters of the guided-iterative scheduling algorithm
static const int guided_int_param = 2;
static const double guided_flt_param = 0.5; // = 1.0 / guided_int_param