ReactOS 0.4.16-dev-2122-g1628f5e
concurrency.c
1/*
2 * Concurrency namespace implementation
3 *
4 * Copyright 2017 Piotr Caban
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19 */
20
21#include <stdarg.h>
22#include <stdbool.h>
23
24#include "windef.h"
25#include "winternl.h"
26#include "wine/debug.h"
27#include "wine/exception.h"
28#include "wine/list.h"
29#include "msvcrt.h"
30#include "cppexcept.h"
31
32#if _MSVCR_VER >= 100
33
34WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);
35
36typedef exception cexception;
38DEFINE_CXX_TYPE_INFO(cexception)
39
40static LONG context_id = -1;
41static LONG scheduler_id = -1;
42
43typedef enum {
44 SchedulerKind,
45 MaxConcurrency,
46 MinConcurrency,
47 TargetOversubscriptionFactor,
48 LocalContextCacheSize,
49 ContextStackSize,
50 ContextPriority,
51 SchedulingProtocol,
52 DynamicProgressFeedback,
53 WinRTInitialization,
54 last_policy_id
55} PolicyElementKey;
56
57typedef struct {
58 struct _policy_container {
59 unsigned int policies[last_policy_id];
60 } *policy_container;
61} SchedulerPolicy;
62
63typedef struct {
64 const vtable_ptr *vtable;
65} Context;
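/* Note: the call_Context_* macros below invoke the C++ virtual methods of a
 * native-layout Context object from C via CALL_VTBL_FUNC, selecting the
 * virtual slot from its offset (0, 4, 8, ... advancing by 4 per method);
 * the offsets must match the slot order of msvcrt's Context class exactly. */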
66#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
67 unsigned int, (const Context*), (this))
68#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
69 unsigned int, (const Context*), (this))
70#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
71 unsigned int, (const Context*), (this))
72#define call_Context_Unblock(this) CALL_VTBL_FUNC(this, 12, \
73 void, (Context*), (this))
74#define call_Context_IsSynchronouslyBlocked(this) CALL_VTBL_FUNC(this, 16, \
75 bool, (const Context*), (this))
76#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
77 Context*, (Context*, unsigned int), (this, flags))
78#define call_Context_Block(this) CALL_VTBL_FUNC(this, 24, \
79 void, (Context*), (this))
80
81typedef struct {
82 Context *context;
83} _Context;
84
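/* A cache entry is viewed two ways: on a context's per-bucket free list the
 * "free" view links cached blocks together, while a block handed out to the
 * user carries the "alloc" view, which records the bucket index immediately
 * before the usable memory. */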
85union allocator_cache_entry {
86 struct _free {
87 int depth;
88 union allocator_cache_entry *next;
89 } free;
90 struct _alloc {
91 int bucket;
92 char mem[1];
93 } alloc;
94};
95
96struct scheduler_list {
97 struct Scheduler *scheduler;
98 struct scheduler_list *next;
99};
100
101struct beacon {
102 LONG cancelling;
103 struct list entry;
104 struct _StructuredTaskCollection *task_collection;
105};
106
107typedef struct {
108 Context context;
109 struct scheduler_list scheduler;
110 unsigned int id;
111 union allocator_cache_entry *allocator_cache[8];
112 LONG blocked;
113 struct _StructuredTaskCollection *task_collection;
114 CRITICAL_SECTION beacons_cs;
115 struct list beacons;
116} ExternalContextBase;
117extern const vtable_ptr ExternalContextBase_vtable;
118static void ExternalContextBase_ctor(ExternalContextBase*);
119
120typedef struct Scheduler {
121 const vtable_ptr *vtable;
122} Scheduler;
123#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
124#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
125#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
126 SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
127#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
128#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
129#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
130#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
131#if _MSVCR_VER > 100
132#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
133 /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
134#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
135#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
136 void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
137#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
138 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
139#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
140 bool, (Scheduler*,const /*location*/void*), (this,placement))
141#else
142#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
143#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
144 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
145#endif
146
147typedef struct {
148 Scheduler scheduler;
149 LONG ref;
150 unsigned int id;
151 unsigned int virt_proc_no;
152 SchedulerPolicy policy;
153 int shutdown_count;
154 int shutdown_size;
155 HANDLE *shutdown_events;
156 CRITICAL_SECTION cs;
157 struct list scheduled_chores;
158} ThreadScheduler;
159extern const vtable_ptr ThreadScheduler_vtable;
160
161typedef struct {
162 Scheduler *scheduler;
163} _Scheduler;
164
165typedef struct {
166 char empty;
167} _CurrentScheduler;
168
169typedef enum
170{
171 SPINWAIT_INIT,
172 SPINWAIT_SPIN,
173 SPINWAIT_YIELD,
174 SPINWAIT_DONE
175} SpinWait_state;
176
177typedef void (__cdecl *yield_func)(void);
178
179typedef struct
180{
181 ULONG spin;
182 DWORD unknown;
183 SpinWait_state state;
184 yield_func yield_func;
185} SpinWait;
186
187#define FINISHED_INITIAL 0x80000000
188typedef struct _StructuredTaskCollection
189{
190 void *unk1;
191 unsigned int unk2;
192 void *unk3;
193 Context *context;
194 volatile LONG count;
195 volatile LONG finished;
196 void *exception;
197 Context *event;
198} _StructuredTaskCollection;
199
200bool __thiscall _StructuredTaskCollection__IsCanceling(_StructuredTaskCollection*);
201
202typedef enum
203{
204 TASK_COLLECTION_SUCCESS = 1,
205 TASK_COLLECTION_CANCELLED
206} _TaskCollectionStatus;
207
208typedef enum
209{
210 STRUCTURED_TASK_COLLECTION_CANCELLED = 0x2,
211 STRUCTURED_TASK_COLLECTION_STATUS_MASK = 0x7
212} _StructuredTaskCollectionStatusBits;
213
214typedef struct _UnrealizedChore
215{
216 const vtable_ptr *vtable;
217 void (__cdecl *chore_proc)(struct _UnrealizedChore*);
218 _StructuredTaskCollection *task_collection;
219 void (__cdecl *chore_wrapper)(struct _UnrealizedChore*);
220 void *unk[6];
221} _UnrealizedChore;
222
223struct scheduled_chore {
224 struct list entry;
225 _UnrealizedChore *chore;
226};
227
228/* keep in sync with msvcp90/msvcp90.h */
229typedef struct cs_queue
230{
231 Context *ctx;
232 struct cs_queue *next;
233#if _MSVCR_VER >= 110
234 LONG free;
235 int unknown;
236#endif
237} cs_queue;
238
239typedef struct
240{
241 cs_queue unk_active;
242#if _MSVCR_VER >= 110
243 void *unknown[2];
244#else
245 void *unknown[1];
246#endif
247 cs_queue *head;
248 void *tail;
249} critical_section;
250
251typedef struct
252{
253 critical_section *cs;
254 union {
255 cs_queue q;
256 struct {
257 void *unknown[4];
258 int unknown2[2];
259 } unknown;
260 } lock;
261} critical_section_scoped_lock;
262
263typedef struct
264{
265 critical_section cs;
266} _NonReentrantPPLLock;
267
268typedef struct
269{
270 _NonReentrantPPLLock *lock;
271 union {
272 cs_queue q;
273 struct {
274 void *unknown[4];
275 int unknown2[2];
276 } unknown;
277 } wait;
278} _NonReentrantPPLLock__Scoped_lock;
279
280typedef struct
281{
282 critical_section cs;
283 LONG count;
284 LONG owner;
285} _ReentrantPPLLock;
286
287typedef struct
288{
289 _ReentrantPPLLock *lock;
290 union {
291 cs_queue q;
292 struct {
293 void *unknown[4];
294 int unknown2[2];
295 } unknown;
296 } wait;
297} _ReentrantPPLLock__Scoped_lock;
298
299typedef struct
300{
301 LONG state;
302 LONG count;
303} _ReaderWriterLock;
304
305#define EVT_RUNNING (void*)1
306#define EVT_WAITING NULL
307
308struct thread_wait;
309typedef struct thread_wait_entry
310{
311 struct thread_wait *wait;
312 struct thread_wait_entry *next;
313 struct thread_wait_entry *prev;
314} thread_wait_entry;
315
316typedef struct thread_wait
317{
318 Context *ctx;
319 void *signaled;
320 LONG pending_waits;
321 thread_wait_entry entries[1];
322} thread_wait;
323
324typedef struct
325{
326 thread_wait_entry *waiters;
327 INT_PTR signaled;
328 critical_section cs;
329} event;
330
331#if _MSVCR_VER >= 110
332#define CV_WAKE (void*)1
333typedef struct cv_queue {
334 Context *ctx;
335 struct cv_queue *next;
336 LONG expired;
337} cv_queue;
338
339typedef struct {
340 struct beacon *beacon;
341} _Cancellation_beacon;
342
343typedef struct {
344 /* cv_queue structure is not binary compatible */
345 cv_queue *queue;
346 critical_section lock;
347} _Condition_variable;
348#endif
349
350typedef struct rwl_queue
351{
352 struct rwl_queue *next;
353 Context *ctx;
354} rwl_queue;
355
356#define WRITER_WAITING 0x80000000
357/* FIXME: reader_writer_lock structure is not binary compatible
358 * it can't exceed 28/56 bytes */
359typedef struct
360{
361 LONG count;
362 LONG thread_id;
363 rwl_queue active;
364 rwl_queue *writer_head;
365 rwl_queue *writer_tail;
366 rwl_queue *reader_head;
367} reader_writer_lock;
368
369typedef struct {
370 reader_writer_lock *lock;
371} reader_writer_lock_scoped_lock;
372
373typedef struct {
374 CRITICAL_SECTION cs;
375} _ReentrantBlockingLock;
376
377#define TICKSPERMSEC 10000
378typedef struct {
379 const vtable_ptr *vtable;
380 TP_TIMER *timer;
381 unsigned int elapse;
382 bool repeat;
383} _Timer;
384extern const vtable_ptr _Timer_vtable;
385#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))
386
387typedef exception improper_lock;
388extern const vtable_ptr improper_lock_vtable;
389
390typedef exception improper_scheduler_attach;
391extern const vtable_ptr improper_scheduler_attach_vtable;
392
393typedef exception improper_scheduler_detach;
394extern const vtable_ptr improper_scheduler_detach_vtable;
395
396typedef exception invalid_multiple_scheduling;
397extern const vtable_ptr invalid_multiple_scheduling_vtable;
398
399typedef exception invalid_scheduler_policy_key;
400extern const vtable_ptr invalid_scheduler_policy_key_vtable;
401
402typedef exception invalid_scheduler_policy_thread_specification;
403extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;
404
405typedef exception invalid_scheduler_policy_value;
406extern const vtable_ptr invalid_scheduler_policy_value_vtable;
407
408typedef exception missing_wait;
409extern const vtable_ptr missing_wait_vtable;
410
411typedef struct {
412 exception e;
413 HRESULT hr;
414} scheduler_resource_allocation_error;
415extern const vtable_ptr scheduler_resource_allocation_error_vtable;
416
417enum ConcRT_EventType
418{
419 CONCRT_EVENT_GENERIC,
420 CONCRT_EVENT_START,
421 CONCRT_EVENT_END,
422 CONCRT_EVENT_BLOCK,
423 CONCRT_EVENT_UNBLOCK,
424 CONCRT_EVENT_YIELD,
425 CONCRT_EVENT_ATTACH,
426 CONCRT_EVENT_DETACH
427};
428
429static DWORD context_tls_index = TLS_OUT_OF_INDEXES;
430
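/* default_scheduler_cs is a statically initialized critical section (its
 * debug record names it for critical-section diagnostics), so it is usable
 * before any scheduler or context has been created. */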
431static CRITICAL_SECTION default_scheduler_cs;
432static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
433{
434 0, 0, &default_scheduler_cs,
435 { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
436 0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
437};
438static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
439static SchedulerPolicy default_scheduler_policy;
440static ThreadScheduler *default_scheduler;
441
442static void create_default_scheduler(void);
443
444/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
445/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
446DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
447improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
448{
449 TRACE("(%p %s)\n", this, str);
450 return __exception_ctor(this, str, &improper_lock_vtable);
451}
452
453/* ??0improper_lock@Concurrency@@QAE@XZ */
454/* ??0improper_lock@Concurrency@@QEAA@XZ */
455DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
456improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
457{
458 return improper_lock_ctor_str(this, NULL);
459}
460
461DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
462improper_lock * __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
463{
464 TRACE("(%p %p)\n", this, rhs);
465 return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
466}
467
468/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
469/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
470DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
471improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
472 improper_scheduler_attach *this, const char *str)
473{
474 TRACE("(%p %s)\n", this, str);
475 return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
476}
477
478/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
479/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
480DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
481improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
482 improper_scheduler_attach *this)
483{
484 return improper_scheduler_attach_ctor_str(this, NULL);
485}
486
487DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
488improper_scheduler_attach * __thiscall improper_scheduler_attach_copy_ctor(
489 improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
490{
491 TRACE("(%p %p)\n", _this, rhs);
492 return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
493}
494
495/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
496/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
497DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
498improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
499 improper_scheduler_detach *this, const char *str)
500{
501 TRACE("(%p %s)\n", this, str);
502 return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
503}
504
505/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
506/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
507DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
508improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
509 improper_scheduler_detach *this)
510{
511 return improper_scheduler_detach_ctor_str(this, NULL);
512}
513
514DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
515improper_scheduler_detach * __thiscall improper_scheduler_detach_copy_ctor(
516 improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
517{
518 TRACE("(%p %p)\n", _this, rhs);
519 return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
520}
521
522/* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
523/* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
524/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
525DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
526invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
527 invalid_multiple_scheduling *this, const char *str)
528{
529 TRACE("(%p %s)\n", this, str);
530 return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
531}
532
533/* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
534/* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
535/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
536DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
537invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
538 invalid_multiple_scheduling *this)
539{
540 return invalid_multiple_scheduling_ctor_str(this, NULL);
541}
542
543DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
544invalid_multiple_scheduling * __thiscall invalid_multiple_scheduling_copy_ctor(
545 invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
546{
547 TRACE("(%p %p)\n", _this, rhs);
548 return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
549}
550
551/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
552/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
553DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
554invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
555 invalid_scheduler_policy_key *this, const char *str)
556{
557 TRACE("(%p %s)\n", this, str);
558 return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
559}
560
561/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
562/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
563DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
564invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
565 invalid_scheduler_policy_key *this)
566{
567 return invalid_scheduler_policy_key_ctor_str(this, NULL);
568}
569
570DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
571invalid_scheduler_policy_key * __thiscall invalid_scheduler_policy_key_copy_ctor(
572 invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
573{
574 TRACE("(%p %p)\n", _this, rhs);
575 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
576}
577
578/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
579/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
580DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
581invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
582 invalid_scheduler_policy_thread_specification *this, const char *str)
583{
584 TRACE("(%p %s)\n", this, str);
585 return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
586}
587
588/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
589/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
590DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
591invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
592 invalid_scheduler_policy_thread_specification *this)
593{
594 return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
595}
596
597DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
598invalid_scheduler_policy_thread_specification * __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
599 invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
600{
601 TRACE("(%p %p)\n", _this, rhs);
602 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
603}
604
605/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
606/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
607DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
608invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
609 invalid_scheduler_policy_value *this, const char *str)
610{
611 TRACE("(%p %s)\n", this, str);
612 return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
613}
614
615/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
616/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
617DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
618invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
619 invalid_scheduler_policy_value *this)
620{
621 return invalid_scheduler_policy_value_ctor_str(this, NULL);
622}
623
624DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
625invalid_scheduler_policy_value * __thiscall invalid_scheduler_policy_value_copy_ctor(
626 invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
627{
628 TRACE("(%p %p)\n", _this, rhs);
629 return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
630}
631
632/* ??0missing_wait@Concurrency@@QAA@PBD@Z */
633/* ??0missing_wait@Concurrency@@QAE@PBD@Z */
634/* ??0missing_wait@Concurrency@@QEAA@PEBD@Z */
635DEFINE_THISCALL_WRAPPER(missing_wait_ctor_str, 8)
636missing_wait* __thiscall missing_wait_ctor_str(
637 missing_wait *this, const char *str)
638{
639 TRACE("(%p %p)\n", this, str);
640 return __exception_ctor(this, str, &missing_wait_vtable);
641}
642
643/* ??0missing_wait@Concurrency@@QAA@XZ */
644/* ??0missing_wait@Concurrency@@QAE@XZ */
645/* ??0missing_wait@Concurrency@@QEAA@XZ */
646DEFINE_THISCALL_WRAPPER(missing_wait_ctor, 4)
647missing_wait* __thiscall missing_wait_ctor(missing_wait *this)
648{
649 return missing_wait_ctor_str(this, NULL);
650}
651
652DEFINE_THISCALL_WRAPPER(missing_wait_copy_ctor,8)
653missing_wait * __thiscall missing_wait_copy_ctor(
654 missing_wait * _this, const missing_wait * rhs)
655{
656 TRACE("(%p %p)\n", _this, rhs);
657 return __exception_copy_ctor(_this, rhs, &missing_wait_vtable);
658}
659
660/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
661/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
662DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
663scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
664 scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
665{
666 TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
667 __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
668 this->hr = hr;
669 return this;
670}
671
672/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
673/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
674DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
675scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
676 scheduler_resource_allocation_error *this, HRESULT hr)
677{
678 return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
679}
680
681DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
682scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
683 scheduler_resource_allocation_error *this,
684 const scheduler_resource_allocation_error *rhs)
685{
686 TRACE("(%p,%p)\n", this, rhs);
687
688 if (!rhs->e.do_free)
689 memcpy(this, rhs, sizeof(*this));
690 else
691 scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
692 return this;
693}
694
695/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
696/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
697DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
698HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
699 const scheduler_resource_allocation_error *this)
700{
701 TRACE("(%p)\n", this);
702 return this->hr;
703}
704
705DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
706 ".?AVimproper_lock@Concurrency@@")
707DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
708 ".?AVimproper_scheduler_attach@Concurrency@@")
709DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
710 ".?AVimproper_scheduler_detach@Concurrency@@")
711DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
712 ".?AVinvalid_multiple_scheduling@Concurrency@@")
713DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
714 ".?AVinvalid_scheduler_policy_key@Concurrency@@")
715DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
716 ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
717DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
718 ".?AVinvalid_scheduler_policy_value@Concurrency@@")
719DEFINE_RTTI_DATA1(missing_wait, 0, &cexception_rtti_base_descriptor,
720 ".?AVmissing_wait@Concurrency@@")
721DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
722 ".?AVscheduler_resource_allocation_error@Concurrency@@")
723
724DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
725DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
726DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
727DEFINE_CXX_DATA1(invalid_multiple_scheduling, &cexception_cxx_type_info, cexception_dtor)
728DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
729DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
730DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
731#if _MSVCR_VER >= 120
732DEFINE_CXX_DATA1(missing_wait, &cexception_cxx_type_info, cexception_dtor)
733#endif
734DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)
735
736__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
737 __ASM_VTABLE(improper_lock,
738 VTABLE_ADD_FUNC(cexception_vector_dtor)
739 VTABLE_ADD_FUNC(cexception_what));
740 __ASM_VTABLE(improper_scheduler_attach,
741 VTABLE_ADD_FUNC(cexception_vector_dtor)
742 VTABLE_ADD_FUNC(cexception_what));
743 __ASM_VTABLE(improper_scheduler_detach,
744 VTABLE_ADD_FUNC(cexception_vector_dtor)
745 VTABLE_ADD_FUNC(cexception_what));
746 __ASM_VTABLE(invalid_multiple_scheduling,
747 VTABLE_ADD_FUNC(cexception_vector_dtor)
748 VTABLE_ADD_FUNC(cexception_what));
749 __ASM_VTABLE(invalid_scheduler_policy_key,
750 VTABLE_ADD_FUNC(cexception_vector_dtor)
751 VTABLE_ADD_FUNC(cexception_what));
752 __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
753 VTABLE_ADD_FUNC(cexception_vector_dtor)
754 VTABLE_ADD_FUNC(cexception_what));
755 __ASM_VTABLE(invalid_scheduler_policy_value,
756 VTABLE_ADD_FUNC(cexception_vector_dtor)
757 VTABLE_ADD_FUNC(cexception_what));
758 __ASM_VTABLE(missing_wait,
759 VTABLE_ADD_FUNC(cexception_vector_dtor)
760 VTABLE_ADD_FUNC(cexception_what));
761 __ASM_VTABLE(scheduler_resource_allocation_error,
762 VTABLE_ADD_FUNC(cexception_vector_dtor)
763 VTABLE_ADD_FUNC(cexception_what));
764__ASM_BLOCK_END
765
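/* Contexts are created lazily, one per thread: get_current_context()
 * allocates an ExternalContextBase on first use, stores it in TLS and
 * references the process-wide default scheduler, while
 * try_get_current_context() is the non-allocating variant for paths that
 * must not allocate or throw. */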
766static Context* try_get_current_context(void)
767{
768 if (context_tls_index == TLS_OUT_OF_INDEXES)
769 return NULL;
770 return TlsGetValue(context_tls_index);
771}
772
773static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
774{
775 context_tls_index = TlsAlloc();
776 return context_tls_index != TLS_OUT_OF_INDEXES;
777}
778
779static Context* get_current_context(void)
780{
781 static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
782 Context *ret;
783
784 if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
785 {
786 scheduler_resource_allocation_error e;
787 scheduler_resource_allocation_error_ctor_name(&e, NULL,
788 HRESULT_FROM_WIN32(GetLastError()));
789 _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
790 }
791
792 ret = TlsGetValue(context_tls_index);
793 if (!ret) {
794 ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
795 ExternalContextBase_ctor(context);
796 TlsSetValue(context_tls_index, context);
797 ret = &context->context;
798 }
799 return ret;
800}
801
802static Scheduler* get_scheduler_from_context(Context *ctx)
803{
804 ExternalContextBase *context = (ExternalContextBase*)ctx;
805
806 if (context->context.vtable != &ExternalContextBase_vtable)
807 return NULL;
808 return context->scheduler.scheduler;
809}
810
811static Scheduler* try_get_current_scheduler(void)
812{
813 Context *context = try_get_current_context();
814 Scheduler *ret;
815
816 if (!context)
817 return NULL;
818
819 ret = get_scheduler_from_context(context);
820 if (!ret)
821 ERR("unknown context set\n");
822 return ret;
823}
824
825static Scheduler* get_current_scheduler(void)
826{
827 Context *context = get_current_context();
828 Scheduler *ret;
829
830 ret = get_scheduler_from_context(context);
831 if (!ret)
832 ERR("unknown context set\n");
833 return ret;
834}
835
836/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
837/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
838Context* __cdecl Context_CurrentContext(void)
839{
840 TRACE("()\n");
841 return get_current_context();
842}
843
844/* ?Id@Context@Concurrency@@SAIXZ */
845unsigned int __cdecl Context_Id(void)
846{
847 Context *ctx = try_get_current_context();
848 TRACE("()\n");
849 return ctx ? call_Context_GetId(ctx) : -1;
850}
851
852/* ?Block@Context@Concurrency@@SAXXZ */
853void __cdecl Context_Block(void)
854{
855 Context *ctx = get_current_context();
856 TRACE("()\n");
857 call_Context_Block(ctx);
858}
859
860/* ?Yield@Context@Concurrency@@SAXXZ */
861/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
862void __cdecl Context_Yield(void)
863{
864 static unsigned int once;
865
866 if (!once++)
867 FIXME("()\n");
868}
869
870/* ?_SpinYield@Context@Concurrency@@SAXXZ */
871void __cdecl Context__SpinYield(void)
872{
873 FIXME("()\n");
874}
875
876/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
877bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
878{
879 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
880
881 TRACE("()\n");
882
883 if (ctx && ctx->context.vtable != &ExternalContextBase_vtable) {
884 ERR("unknown context set\n");
885 return FALSE;
886 }
887
888 if (ctx && ctx->task_collection)
889 return _StructuredTaskCollection__IsCanceling(ctx->task_collection);
890 return FALSE;
891}
892
893/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
894void __cdecl Context_Oversubscribe(bool begin)
895{
896 FIXME("(%x)\n", begin);
897}
898
899/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
900unsigned int __cdecl Context_ScheduleGroupId(void)
901{
902 Context *ctx = try_get_current_context();
903 TRACE("()\n");
904 return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
905}
906
907/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
908unsigned int __cdecl Context_VirtualProcessorId(void)
909{
910 Context *ctx = try_get_current_context();
911 TRACE("()\n");
912 return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
913}
914
915#if _MSVCR_VER > 100
916/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
917_Context *__cdecl _Context__CurrentContext(_Context *ret)
918{
919 TRACE("(%p)\n", ret);
920 ret->context = Context_CurrentContext();
921 return ret;
922}
923
924DEFINE_THISCALL_WRAPPER(_Context_IsSynchronouslyBlocked, 4)
925BOOL __thiscall _Context_IsSynchronouslyBlocked(const _Context *this)
926{
927 TRACE("(%p)\n", this);
928 return call_Context_IsSynchronouslyBlocked(this->context);
929}
930#endif
931
932DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
933unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
934{
935 TRACE("(%p)->()\n", this);
936 return this->id;
937}
938
939DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
940unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
941{
942 FIXME("(%p)->() stub\n", this);
943 return -1;
944}
945
946DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
947unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
948{
949 FIXME("(%p)->() stub\n", this);
950 return -1;
951}
952
953DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
954void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
955{
956 TRACE("(%p)->()\n", this);
957
958 /* TODO: throw context_unblock_unbalanced if this->blocked goes below -1 */
959 if (!InterlockedDecrement(&this->blocked))
960 RtlWakeAddressSingle(&this->blocked);
961}
962
963DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
964bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
965{
966 TRACE("(%p)->()\n", this);
967 return this->blocked >= 1;
968}
969
970DEFINE_THISCALL_WRAPPER(ExternalContextBase_Block, 4)
971void __thiscall ExternalContextBase_Block(ExternalContextBase *this)
972{
973 LONG blocked;
974
975 TRACE("(%p)->()\n", this);
976
977 blocked = InterlockedIncrement(&this->blocked);
978 while (blocked >= 1)
979 {
980 RtlWaitOnAddress(&this->blocked, &blocked, sizeof(LONG), NULL);
981 blocked = this->blocked;
982 }
983}
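/* Note: Block()/Unblock() form a futex-style handshake: Block() raises the
 * counter and sleeps in RtlWaitOnAddress() while it stays >= 1, Unblock()
 * lowers it and wakes one sleeper via RtlWakeAddressSingle(). An Unblock()
 * that arrives first leaves the counter at -1, so the following Block()
 * increments it back to 0 and returns without sleeping. */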
984
985DEFINE_THISCALL_WRAPPER(ExternalContextBase_Yield, 4)
986void __thiscall ExternalContextBase_Yield(ExternalContextBase *this)
987{
988 FIXME("(%p)->() stub\n", this);
989}
990
991DEFINE_THISCALL_WRAPPER(ExternalContextBase_SpinYield, 4)
992void __thiscall ExternalContextBase_SpinYield(ExternalContextBase *this)
993{
994 FIXME("(%p)->() stub\n", this);
995}
996
997DEFINE_THISCALL_WRAPPER(ExternalContextBase_Oversubscribe, 8)
998void __thiscall ExternalContextBase_Oversubscribe(
999 ExternalContextBase *this, bool oversubscribe)
1000{
1001 FIXME("(%p)->(%x) stub\n", this, oversubscribe);
1002}
1003
1004DEFINE_THISCALL_WRAPPER(ExternalContextBase_Alloc, 8)
1005void* __thiscall ExternalContextBase_Alloc(ExternalContextBase *this, size_t size)
1006{
1007 FIXME("(%p)->(%Iu) stub\n", this, size);
1008 return NULL;
1009}
1010
1011DEFINE_THISCALL_WRAPPER(ExternalContextBase_Free, 8)
1012void __thiscall ExternalContextBase_Free(ExternalContextBase *this, void *addr)
1013{
1014 FIXME("(%p)->(%p) stub\n", this, addr);
1015}
1016
1017DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterCriticalRegionHelper, 4)
1018int __thiscall ExternalContextBase_EnterCriticalRegionHelper(ExternalContextBase *this)
1019{
1020 FIXME("(%p)->() stub\n", this);
1021 return 0;
1022}
1023
1024DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterHyperCriticalRegionHelper, 4)
1025int __thiscall ExternalContextBase_EnterHyperCriticalRegionHelper(ExternalContextBase *this)
1026{
1027 FIXME("(%p)->() stub\n", this);
1028 return 0;
1029}
1030
1031DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitCriticalRegionHelper, 4)
1032int __thiscall ExternalContextBase_ExitCriticalRegionHelper(ExternalContextBase *this)
1033{
1034 FIXME("(%p)->() stub\n", this);
1035 return 0;
1036}
1037
1038DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitHyperCriticalRegionHelper, 4)
1039int __thiscall ExternalContextBase_ExitHyperCriticalRegionHelper(ExternalContextBase *this)
1040{
1041 FIXME("(%p)->() stub\n", this);
1042 return 0;
1043}
1044
1045DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetCriticalRegionType, 4)
1046int __thiscall ExternalContextBase_GetCriticalRegionType(const ExternalContextBase *this)
1047{
1048 FIXME("(%p)->() stub\n", this);
1049 return 0;
1050}
1051
1052DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetContextKind, 4)
1053int __thiscall ExternalContextBase_GetContextKind(const ExternalContextBase *this)
1054{
1055 FIXME("(%p)->() stub\n", this);
1056 return 0;
1057}
1058
1059static void remove_scheduled_chores(Scheduler *scheduler, const ExternalContextBase *context)
1060{
1061 ThreadScheduler *tscheduler = (ThreadScheduler*)scheduler;
1062 struct scheduled_chore *sc, *next;
1063
1064 if (tscheduler->scheduler.vtable != &ThreadScheduler_vtable)
1065 return;
1066
1067 EnterCriticalSection(&tscheduler->cs);
1068 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &tscheduler->scheduled_chores,
1069 struct scheduled_chore, entry) {
1070 if (sc->chore->task_collection->context == &context->context) {
1071 list_remove(&sc->entry);
1072 operator_delete(sc);
1073 }
1074 }
1075 LeaveCriticalSection(&tscheduler->cs);
1076}
1077
1078static void ExternalContextBase_dtor(ExternalContextBase *this)
1079{
1080 struct scheduler_list *scheduler_cur, *scheduler_next;
1081 union allocator_cache_entry *next, *cur;
1082 int i;
1083
1084 /* TODO: move the allocator cache to scheduler so it can be reused */
1085 for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
1086 for(cur = this->allocator_cache[i]; cur; cur=next) {
1087 next = cur->free.next;
1088 operator_delete(cur);
1089 }
1090 }
1091
1092 if (this->scheduler.scheduler) {
1093 remove_scheduled_chores(this->scheduler.scheduler, this);
1094 call_Scheduler_Release(this->scheduler.scheduler);
1095
1096 for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
1097 scheduler_next = scheduler_cur->next;
1098 remove_scheduled_chores(scheduler_cur->scheduler, this);
1099 call_Scheduler_Release(scheduler_cur->scheduler);
1100 operator_delete(scheduler_cur);
1101 }
1102 }
1103
1104 DeleteCriticalSection(&this->beacons_cs);
1105 if (!list_empty(&this->beacons))
1106 ERR("beacons list is not empty - expect crash\n");
1107}
1108
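/* Note: "flags" follows the MSVC vector-deleting-destructor convention:
 * bit 0 set means free the memory after running destructors, bit 1 set means
 * "this" points at an array whose element count is stored just before it. */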
1109DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
1110Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
1111{
1112 TRACE("(%p %x)\n", this, flags);
1113 if(flags & 2) {
1114 /* we have an array, with the number of elements stored before the first object */
1115 INT_PTR i, *ptr = (INT_PTR *)this-1;
1116
1117 for(i=*ptr-1; i>=0; i--)
1118 ExternalContextBase_dtor(this+i);
1119 operator_delete(ptr);
1120 } else {
1121 ExternalContextBase_dtor(this);
1122 if(flags & 1)
1123 operator_delete(this);
1124 }
1125
1126 return &this->context;
1127}
1128
1129static void ExternalContextBase_ctor(ExternalContextBase *this)
1130{
1131 TRACE("(%p)->()\n", this);
1132
1133 memset(this, 0, sizeof(*this));
1134 this->context.vtable = &ExternalContextBase_vtable;
1135 this->id = InterlockedIncrement(&context_id);
1136 InitializeCriticalSection(&this->beacons_cs);
1137 list_init(&this->beacons);
1138
1139 create_default_scheduler();
1140 this->scheduler.scheduler = &default_scheduler->scheduler;
1141 call_Scheduler_Reference(&default_scheduler->scheduler);
1142}
1143
1144/* ?Alloc@Concurrency@@YAPAXI@Z */
1145/* ?Alloc@Concurrency@@YAPEAX_K@Z */
1146void * CDECL Concurrency_Alloc(size_t size)
1147{
1148 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
1149 union allocator_cache_entry *p;
1150
1151 size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
1152 if (size < sizeof(*p))
1153 size = sizeof(*p);
1154
1155 if (context->context.vtable != &ExternalContextBase_vtable) {
1156 p = operator_new(size);
1157 p->alloc.bucket = -1;
1158 }else {
1159 int i;
1160
1161 C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
1162 for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
1163 if (1 << (i+4) >= size) break;
1164
1165 if(i==ARRAY_SIZE(context->allocator_cache)) {
1166 p = operator_new(size);
1167 p->alloc.bucket = -1;
1168 }else if (context->allocator_cache[i]) {
1169 p = context->allocator_cache[i];
1170 context->allocator_cache[i] = p->free.next;
1171 p->alloc.bucket = i;
1172 }else {
1173 p = operator_new(1 << (i+4));
1174 p->alloc.bucket = i;
1175 }
1176 }
1177
1178 TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
1179 return p->alloc.mem;
1180}
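/* Worked example (32-bit layout, for illustration): Concurrency_Alloc(100)
 * adds the 4-byte bucket header, giving 104 bytes; bucket i holds blocks of
 * 1 << (i+4) bytes (16, 32, 64, 128, ...), so the request lands in bucket 3
 * and a 128-byte block is reused from allocator_cache[3] or newly allocated.
 * Oversized requests fall through with bucket == -1 and bypass the cache. */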
1181
1182/* ?Free@Concurrency@@YAXPAX@Z */
1183/* ?Free@Concurrency@@YAXPEAX@Z */
1184void CDECL Concurrency_Free(void* mem)
1185{
1186 union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
1187 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
1188 int bucket = p->alloc.bucket;
1189
1190 TRACE("(%p)\n", mem);
1191
1192 if (context->context.vtable != &ExternalContextBase_vtable) {
1193 operator_delete(p);
1194 }else {
1195 if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
1196 (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
1197 p->free.next = context->allocator_cache[bucket];
1198 p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
1199 context->allocator_cache[bucket] = p;
1200 }else {
1201 operator_delete(p);
1202 }
1203 }
1204}
1205
1206/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
1207/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
1208DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
1209unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
1210 PolicyElementKey policy, unsigned int val)
1211{
1212 unsigned int ret;
1213
1214 TRACE("(%p %d %d)\n", this, policy, val);
1215
1216 if (policy == MinConcurrency) {
1217 invalid_scheduler_policy_key e;
1218 invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
1219 _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
1220 }
1221 if (policy == MaxConcurrency) {
1222 invalid_scheduler_policy_key e;
1223 invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
1224 _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
1225 }
1226 if (policy >= last_policy_id) {
1227 invalid_scheduler_policy_key e;
1228 invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
1229 _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
1230 }
1231
1232 switch(policy) {
1233 case SchedulerKind:
1234 if (val) {
1235 invalid_scheduler_policy_value e;
1236 invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
1237 _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
1238 }
1239 break;
1240 case TargetOversubscriptionFactor:
1241 if (!val) {
1242 invalid_scheduler_policy_value e;
1243 invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
1244 _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
1245 }
1246 break;
1247 case ContextPriority:
1248 if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
1249 || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
1250 && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
1251 && val != INHERIT_THREAD_PRIORITY) {
1252 invalid_scheduler_policy_value e;
1253 invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
1254 _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
1255 }
1256 break;
1257 case SchedulingProtocol:
1258 case DynamicProgressFeedback:
1259 case WinRTInitialization:
1260 if (val != 0 && val != 1) {
1261 invalid_scheduler_policy_value e;
1262 invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
1263 _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
1264 }
1265 break;
1266 default:
1267 break;
1268 }
1269
1270 ret = this->policy_container->policies[policy];
1271 this->policy_container->policies[policy] = val;
1272 return ret;
1273}
1274
1275/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
1276/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
1277DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
1278void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
1279 unsigned int min_concurrency, unsigned int max_concurrency)
1280{
1281 TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);
1282
1283 if (min_concurrency > max_concurrency) {
1284 invalid_scheduler_policy_thread_specification e;
1285 invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
1286 _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
1287 }
1288 if (!max_concurrency) {
1289 invalid_scheduler_policy_value e;
1290 invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
1291 _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
1292 }
1293
1294 this->policy_container->policies[MinConcurrency] = min_concurrency;
1295 this->policy_container->policies[MaxConcurrency] = max_concurrency;
1296}
1297
1298/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
1299/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
1300DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
1301unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
1302 const SchedulerPolicy *this, PolicyElementKey policy)
1303{
1304 TRACE("(%p %d)\n", this, policy);
1305
1306 if (policy >= last_policy_id) {
1307 invalid_scheduler_policy_key e;
1308 invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
1309 _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
1310 }
1311 return this->policy_container->policies[policy];
1312}
1313
1314/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
1315/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
1316DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
1317SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
1318{
1319 TRACE("(%p)\n", this);
1320
1321 this->policy_container = operator_new(sizeof(*this->policy_container));
1322 /* TODO: default values can probably be affected by CurrentScheduler */
1323 this->policy_container->policies[SchedulerKind] = 0;
1324 this->policy_container->policies[MaxConcurrency] = -1;
1325 this->policy_container->policies[MinConcurrency] = 1;
1326 this->policy_container->policies[TargetOversubscriptionFactor] = 1;
1327 this->policy_container->policies[LocalContextCacheSize] = 8;
1328 this->policy_container->policies[ContextStackSize] = 0;
1329 this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
1330 this->policy_container->policies[SchedulingProtocol] = 0;
1331 this->policy_container->policies[DynamicProgressFeedback] = 1;
1332 return this;
1333}
1334
1335/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
1336/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
1337/* TODO: don't leak policy_container on exception */
1338SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
1339 SchedulerPolicy *this, size_t n, ...)
1340{
1341 unsigned int min_concurrency, max_concurrency;
1342 va_list valist;
1343 size_t i;
1344
1345 TRACE("(%p %Iu)\n", this, n);
1346
1347 SchedulerPolicy_ctor(this);
1348 min_concurrency = this->policy_container->policies[MinConcurrency];
1349 max_concurrency = this->policy_container->policies[MaxConcurrency];
1350
1351 va_start(valist, n);
1352 for(i=0; i<n; i++) {
1353 PolicyElementKey policy = va_arg(valist, PolicyElementKey);
1354 unsigned int val = va_arg(valist, unsigned int);
1355
1356 if(policy == MinConcurrency)
1357 min_concurrency = val;
1358 else if(policy == MaxConcurrency)
1359 max_concurrency = val;
1360 else
1361 SchedulerPolicy_SetPolicyValue(this, policy, val);
1362 }
1363 va_end(valist);
1364
1365 SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
1366 return this;
1367}
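/* Illustrative call (names as in this file): the variadic constructor takes
 * "n" key/value pairs, e.g.
 *   SchedulerPolicy policy;
 *   SchedulerPolicy_ctor_policies(&policy, 2, MinConcurrency, 1, MaxConcurrency, 4);
 * MinConcurrency/MaxConcurrency are collected and applied together through
 * SetConcurrencyLimits(), so an inconsistent pair throws
 * invalid_scheduler_policy_thread_specification instead of being set. */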
1368
1369/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
1370/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
1371DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
1372SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
1373 SchedulerPolicy *this, const SchedulerPolicy *rhs)
1374{
1375 TRACE("(%p %p)\n", this, rhs);
1376 memcpy(this->policy_container->policies, rhs->policy_container->policies,
1377 sizeof(this->policy_container->policies));
1378 return this;
1379}
1380
1381/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
1382/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
1383DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
1384SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
1385 SchedulerPolicy *this, const SchedulerPolicy *rhs)
1386{
1387 TRACE("(%p %p)\n", this, rhs);
1388 SchedulerPolicy_ctor(this);
1389 return SchedulerPolicy_op_assign(this, rhs);
1390}
1391
1392/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
1393/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
1394DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
1395void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
1396{
1397 TRACE("(%p)\n", this);
1398 operator_delete(this->policy_container);
1399}
1400
1401static void ThreadScheduler_dtor(ThreadScheduler *this)
1402{
1403 int i;
1404 struct scheduled_chore *sc, *next;
1405
1406 if(this->ref != 0) WARN("ref = %ld\n", this->ref);
1407 SchedulerPolicy_dtor(&this->policy);
1408
1409 for(i=0; i<this->shutdown_count; i++)
1410 SetEvent(this->shutdown_events[i]);
1411 operator_delete(this->shutdown_events);
1412
1413 this->cs.DebugInfo->Spare[0] = 0;
1414 DeleteCriticalSection(&this->cs);
1415
1416 if (!list_empty(&this->scheduled_chores))
1417 ERR("scheduled chore list is not empty\n");
1418 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &this->scheduled_chores,
1419 struct scheduled_chore, entry)
1420 operator_delete(sc);
1421}
1422
1423DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
1424unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
1425{
1426 TRACE("(%p)\n", this);
1427 return this->id;
1428}
1429
1430DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
1431unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
1432{
1433 TRACE("(%p)\n", this);
1434 return this->virt_proc_no;
1435}
1436
1437DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
1438SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
1439 const ThreadScheduler *this, SchedulerPolicy *ret)
1440{
1441 TRACE("(%p %p)\n", this, ret);
1442 return SchedulerPolicy_copy_ctor(ret, &this->policy);
1443}
1444
1445DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
1446unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
1447{
1448 TRACE("(%p)\n", this);
1449 return InterlockedIncrement(&this->ref);
1450}
1451
1452DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
1453unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
1454{
1455 unsigned int ret = InterlockedDecrement(&this->ref);
1456
1457 TRACE("(%p)\n", this);
1458
1459 if(!ret) {
1460 ThreadScheduler_dtor(this);
1461 operator_delete(this);
1462 }
1463 return ret;
1464}
1465
1466DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
1467void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
1468{
1469 HANDLE *shutdown_events;
1470 int size;
1471
1472 TRACE("(%p %p)\n", this, event);
1473
1474 EnterCriticalSection(&this->cs);
1475
1476 size = this->shutdown_size ? this->shutdown_size * 2 : 1;
1477 shutdown_events = operator_new(size * sizeof(*shutdown_events));
1478 memcpy(shutdown_events, this->shutdown_events,
1479 this->shutdown_count * sizeof(*shutdown_events));
1480 operator_delete(this->shutdown_events);
1481 this->shutdown_size = size;
1482 this->shutdown_events = shutdown_events;
1483 this->shutdown_events[this->shutdown_count++] = event;
1484
1485 LeaveCriticalSection(&this->cs);
1486}
1487
1488DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
1489void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
1490{
1491 ExternalContextBase *context = (ExternalContextBase*)get_current_context();
1492
1493 TRACE("(%p)\n", this);
1494
1495 if(context->context.vtable != &ExternalContextBase_vtable) {
1496 ERR("unknown context set\n");
1497 return;
1498 }
1499
1500 if(context->scheduler.scheduler == &this->scheduler) {
1501 improper_scheduler_attach e;
1502 improper_scheduler_attach_ctor_str(&e, NULL);
1503 _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
1504 }
1505
1506 if(context->scheduler.scheduler) {
1507 struct scheduler_list *l = operator_new(sizeof(*l));
1508 *l = context->scheduler;
1509 context->scheduler.next = l;
1510 }
1511 context->scheduler.scheduler = &this->scheduler;
1512 ThreadScheduler_Reference(this);
1513}
1514
1515DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
1516/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
1517 ThreadScheduler *this, /*location*/void *placement)
1518{
1519 FIXME("(%p %p) stub\n", this, placement);
1520 return NULL;
1521}
1522
1523DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
1524/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
1525{
1526 FIXME("(%p) stub\n", this);
1527 return NULL;
1528}
1529
1530typedef struct
1531{
1532 void (__cdecl *proc)(void*);
1533 void *data;
1534 ThreadScheduler *scheduler;
1535} schedule_task_arg;
1536
1537void __cdecl CurrentScheduler_Detach(void);
1538
1539static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
1540{
1541 schedule_task_arg arg;
1542 BOOL detach = FALSE;
1543
1544 arg = *(schedule_task_arg*)context;
1545 operator_delete(context);
1546
1547 if(&arg.scheduler->scheduler != get_current_scheduler()) {
1548 ThreadScheduler_Attach(arg.scheduler);
1549 detach = TRUE;
1550 }
1551 ThreadScheduler_Release(arg.scheduler);
1552
1553 arg.proc(arg.data);
1554
1555 if(detach)
1556 CurrentScheduler_Detach();
1557}
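/* Note: the threadpool thread running schedule_task_proc() need not belong
 * to the target scheduler, so the proc attaches that scheduler first (making
 * it current for the duration of the task) and detaches again afterwards;
 * the reference taken in ThreadScheduler_ScheduleTask_loc() is dropped here. */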
1558
1559DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
1560void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
1561 void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
1562{
1563 static unsigned int once;
1564 schedule_task_arg *arg;
1565 TP_WORK *work;
1566
1567 if(!once++)
1568 FIXME("(%p %p %p %p) semi-stub\n", this, proc, data, placement);
1569 else
1570 TRACE("(%p %p %p %p) semi-stub\n", this, proc, data, placement);
1571
1572 arg = operator_new(sizeof(*arg));
1573 arg->proc = proc;
1574 arg->data = data;
1575 arg->scheduler = this;
1576 ThreadScheduler_Reference(this);
1577
1578 work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
1579 if(!work) {
1580 scheduler_resource_allocation_error e;
1581
1582 ThreadScheduler_Release(this);
1583 operator_delete(arg);
1584 scheduler_resource_allocation_error_ctor_name(&e, NULL,
1585 HRESULT_FROM_WIN32(GetLastError()));
1586 _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
1587 }
1588 SubmitThreadpoolWork(work);
1589 CloseThreadpoolWork(work);
1590}
1591
1592DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
1593void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
1594 void (__cdecl *proc)(void*), void* data)
1595{
1596 TRACE("(%p %p %p)\n", this, proc, data);
1597 ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
1598}
1599
1600DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
1601bool __thiscall ThreadScheduler_IsAvailableLocation(
1602 const ThreadScheduler *this, const /*location*/void *placement)
1603{
1604 FIXME("(%p %p) stub\n", this, placement);
1605 return FALSE;
1606}
1607
1608DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
1609Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
1610{
1611 TRACE("(%p %x)\n", this, flags);
1612 if(flags & 2) {
1613 /* we have an array, with the number of elements stored before the first object */
1614 INT_PTR i, *ptr = (INT_PTR *)this-1;
1615
1616 for(i=*ptr-1; i>=0; i--)
1617 ThreadScheduler_dtor(this+i);
1618 operator_delete(ptr);
1619 } else {
1620 ThreadScheduler_dtor(this);
1621 if(flags & 1)
1622 operator_delete(this);
1623 }
1624
1625 return &this->scheduler;
1626}
1627
1628static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
1629 const SchedulerPolicy *policy)
1630{
1631 SYSTEM_INFO si;
1632
1633 TRACE("(%p)->()\n", this);
1634
1635 this->scheduler.vtable = &ThreadScheduler_vtable;
1636 this->ref = 1;
1637 this->id = InterlockedIncrement(&scheduler_id);
1638 SchedulerPolicy_copy_ctor(&this->policy, policy);
1639
1640 GetSystemInfo(&si);
1641 this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
1642 if(this->virt_proc_no > si.dwNumberOfProcessors)
1643 this->virt_proc_no = si.dwNumberOfProcessors;
1644
1645 this->shutdown_count = this->shutdown_size = 0;
1646 this->shutdown_events = NULL;
1647
1648 InitializeCriticalSection(&this->cs);
1649 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");
1650
1651 list_init(&this->scheduled_chores);
1652 return this;
1653}
1654
1655/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
1656/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
1657Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
1658{
1659 ThreadScheduler *ret;
1660
1661 TRACE("(%p)\n", policy);
1662
1663 ret = operator_new(sizeof(*ret));
1664 return &ThreadScheduler_ctor(ret, policy)->scheduler;
1665}
1666
1667/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
1668void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
1669{
1670 TRACE("()\n");
1671
1672 EnterCriticalSection(&default_scheduler_cs);
1673 if(default_scheduler_policy.policy_container)
1674 SchedulerPolicy_dtor(&default_scheduler_policy);
1675 SchedulerPolicy_ctor(&default_scheduler_policy);
1676 LeaveCriticalSection(&default_scheduler_cs);
1677}
1678
1679/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
1680/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
1681void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
1682{
1683 TRACE("(%p)\n", policy);
1684
1685 EnterCriticalSection(&default_scheduler_cs);
1686 if(!default_scheduler_policy.policy_container)
1687 SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
1688 else
1689 SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
1690 LeaveCriticalSection(&default_scheduler_cs);
1691}
1692
1693/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
1694/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
1695void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
1696{
1697 Scheduler *scheduler;
1698
1699 TRACE("(%p)\n", policy);
1700
1701 scheduler = Scheduler_Create(policy);
1702 call_Scheduler_Attach(scheduler);
1703}
1704
1705/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
1706void __cdecl CurrentScheduler_Detach(void)
1707{
1708 ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();
1709
1710 TRACE("()\n");
1711
1712 if(!context) {
1713 improper_scheduler_detach e;
1714 improper_scheduler_detach_ctor_str(&e, NULL);
1715 _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
1716 }
1717
1718 if(context->context.vtable != &ExternalContextBase_vtable) {
1719 ERR("unknown context set\n");
1720 return;
1721 }
1722
1723 if(!context->scheduler.next) {
1724 improper_scheduler_detach e;
1725 improper_scheduler_detach_ctor_str(&e, NULL);
1726 _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
1727 }
1728
1729 call_Scheduler_Release(context->scheduler.scheduler);
1730 if(!context->scheduler.next) {
1731 context->scheduler.scheduler = NULL;
1732 }else {
1733 struct scheduler_list *entry = context->scheduler.next;
1734 context->scheduler.scheduler = entry->scheduler;
1735 context->scheduler.next = entry->next;
1736 operator_delete(entry);
1737 }
1738}
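/* Attach and Detach maintain a per-context stack of schedulers (struct
 * scheduler_list): Detach pops the head and releases the reference taken by
 * Attach, throwing improper_scheduler_detach when nothing was attached. A hedged
 * sketch of the intended lifecycle, using entry points from this file:
 *
 *     SchedulerPolicy policy;
 *     SchedulerPolicy_ctor(&policy);
 *     CurrentScheduler_Create(&policy);   // Scheduler_Create + call_Scheduler_Attach
 *     // ... schedule work on the current scheduler ...
 *     CurrentScheduler_Detach();          // pops and releases the attached scheduler
 *     SchedulerPolicy_dtor(&policy);
 */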
1739
1740static void create_default_scheduler(void)
1741{
1742 if(default_scheduler)
1743 return;
1744
1745 EnterCriticalSection(&default_scheduler_cs);
1746 if(!default_scheduler) {
1747 ThreadScheduler *scheduler;
1748
1749 if(!default_scheduler_policy.policy_container)
1750 SchedulerPolicy_ctor(&default_scheduler_policy);
1751
1752 scheduler = operator_new(sizeof(*scheduler));
1753 ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
1754 default_scheduler = scheduler;
1755 }
1756 LeaveCriticalSection(&default_scheduler_cs);
1757}
1758
1759/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
1760/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
1761Scheduler* __cdecl CurrentScheduler_Get(void)
1762{
1763 TRACE("()\n");
1764 return get_current_scheduler();
1765}
1766
1767#if _MSVCR_VER > 100
1768/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
1769/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
1770/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
1771{
1772 TRACE("(%p)\n", placement);
1773 return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
1774}
1775#endif
1776
1777/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
1778/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
1779/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
1780{
1781 TRACE("()\n");
1782 return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
1783}
1784
1785/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
1786unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
1787{
1788 Scheduler *scheduler = try_get_current_scheduler();
1789
1790 TRACE("()\n");
1791
1792 if(!scheduler)
1793 return -1;
1794 return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
1795}
1796
1797/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
1798SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
1799{
1800 TRACE("(%p)\n", policy);
1801 return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
1802}
1803
1804/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
1805unsigned int __cdecl CurrentScheduler_Id(void)
1806{
1807 Scheduler *scheduler = try_get_current_scheduler();
1808
1809 TRACE("()\n");
1810
1811 if(!scheduler)
1812 return -1;
1813 return call_Scheduler_Id(scheduler);
1814}
1815
1816#if _MSVCR_VER > 100
1817/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
1818/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
1819bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
1820{
1821 Scheduler *scheduler = try_get_current_scheduler();
1822
1823 TRACE("(%p)\n", placement);
1824
1825 if(!scheduler)
1826 return FALSE;
1827 return call_Scheduler_IsAvailableLocation(scheduler, placement);
1828}
1829#endif
1830
1831/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
1832/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
1833void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
1834{
1835 TRACE("(%p)\n", event);
1836 call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
1837}
1838
1839#if _MSVCR_VER > 100
1840/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
1841/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
1842void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
1843 void *data, /*location*/void *placement)
1844{
1845 TRACE("(%p %p %p)\n", proc, data, placement);
1846 call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
1847}
1848#endif
1849
1850/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
1851/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
1852void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
1853{
1854 TRACE("(%p %p)\n", proc, data);
1855 call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
1856}
1857
1858/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
1859/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
1860DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
1861_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
1862{
1863 TRACE("(%p %p)\n", this, scheduler);
1864
1865 this->scheduler = scheduler;
1866 return this;
1867}
1868
1869/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
1870/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
1871DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
1872_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
1873{
1874 return _Scheduler_ctor_sched(this, NULL);
1875}
1876
1877/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
1878/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
1879DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
1880Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
1881{
1882 TRACE("(%p)\n", this);
1883 return this->scheduler;
1884}
1885
1886/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
1887/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
1888DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
1889unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
1890{
1891 TRACE("(%p)\n", this);
1892 return call_Scheduler_Reference(this->scheduler);
1893}
1894
1895/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
1896/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
1897DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
1898unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
1899{
1900 TRACE("(%p)\n", this);
1901 return call_Scheduler_Release(this->scheduler);
1902}
1903
1904/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
1905_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
1906{
1907 TRACE("()\n");
1908 return _Scheduler_ctor_sched(ret, get_current_scheduler());
1909}
1910
1911/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
1912unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
1913{
1914 TRACE("()\n");
1915 get_current_scheduler();
1916 return CurrentScheduler_GetNumberOfVirtualProcessors();
1917}
1918
1919/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
1920unsigned int __cdecl _CurrentScheduler__Id(void)
1921{
1922 TRACE("()\n");
1923 get_current_scheduler();
1924 return CurrentScheduler_Id();
1925}
1926
1927/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
1928/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
1929void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
1930{
1931 TRACE("(%p %p)\n", proc, data);
1932 CurrentScheduler_ScheduleTask(proc, data);
1933}
1934
1935/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
1936unsigned int __cdecl SpinCount__Value(void)
1937{
1938 static unsigned int val = -1;
1939
1940 TRACE("()\n");
1941
1942 if(val == -1) {
1943 SYSTEM_INFO si;
1944
1945 GetSystemInfo(&si);
1946 val = si.dwNumberOfProcessors>1 ? 4000 : 0;
1947 }
1948
1949 return val;
1950}
1951
1952/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
1953/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
1954DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
1955SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
1956{
1957 TRACE("(%p %p)\n", this, yf);
1958
1959 this->state = SPINWAIT_INIT;
1960 this->unknown = 1;
1961 this->yield_func = yf;
1962 return this;
1963}
1964
1965/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
1966/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
1967DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
1968SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
1969{
1970 TRACE("(%p %p)\n", this, yf);
1971
1972 this->state = SPINWAIT_INIT;
1973 this->unknown = 0;
1974 this->yield_func = yf;
1975 return this;
1976}
1977
1978/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
1979/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
1980/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
1981/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
1982DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
1983void __thiscall SpinWait_dtor(SpinWait *this)
1984{
1985 TRACE("(%p)\n", this);
1986}
1987
1988/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
1989/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
1990/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
1991/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
1992DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
1993void __thiscall SpinWait__DoYield(SpinWait *this)
1994{
1995 TRACE("(%p)\n", this);
1996
1997 if(this->unknown)
1998 this->yield_func();
1999}
2000
2001/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
2002/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
2003/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
2004/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
2005DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
2006ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
2007{
2008 TRACE("(%p)\n", this);
2009 return 1;
2010}
2011
2012/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
2013/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
2014/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
2015/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
2016DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
2017void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
2018{
2019 TRACE("(%p %d)\n", this, spin);
2020
2021 this->spin = spin;
2022 this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
2023}
2024
2025/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
2026/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
2027/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
2028/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
2029DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
2030void __thiscall SpinWait__Reset(SpinWait *this)
2031{
2032 SpinWait__SetSpinCount(this, SpinCount__Value());
2033}
2034
2035/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
2036/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
2037/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
2038/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
2039DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
2040bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
2041{
2042 TRACE("(%p)\n", this);
2043
2044 this->spin--;
2045 return this->spin > 0;
2046}
2047
2048/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
2049/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
2050/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
2051/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
2052DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
2053bool __thiscall SpinWait__SpinOnce(SpinWait *this)
2054{
2055 switch(this->state) {
2056 case SPINWAIT_INIT:
2057 SpinWait__Reset(this);
2058 /* fall through */
2059 case SPINWAIT_SPIN:
2060 InterlockedDecrement((LONG*)&this->spin);
2061 if(!this->spin)
2062 this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
2063 return TRUE;
2064 case SPINWAIT_YIELD:
2065 this->state = SPINWAIT_DONE;
2066 this->yield_func();
2067 return TRUE;
2068 default:
2069 SpinWait__Reset(this);
2070 return FALSE;
2071 }
2072}
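/* Callers drive a SpinWait as a small state machine: _SpinOnce returns TRUE while
 * another iteration is useful (spin phase, then one yield) and FALSE once the
 * wait is exhausted. The helpers spin_wait_for_next_cs/_rwl below use exactly
 * this shape; a minimal sketch (the condition is illustrative):
 *
 *     SpinWait sw;
 *     SpinWait_ctor(&sw, &spin_wait_yield);
 *     SpinWait__Reset(&sw);
 *     while(!condition)
 *         SpinWait__SpinOnce(&sw);
 *     SpinWait_dtor(&sw);
 */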
2073
2074#if _MSVCR_VER >= 110
2075
2076/* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
2077/* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
2078DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
2079_StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
2080 _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
2081{
2082 TRACE("(%p)\n", this);
2083
2084 if (token)
2085 FIXME("_StructuredTaskCollection with cancellation token not implemented!\n");
2086
2087 memset(this, 0, sizeof(*this));
2088 this->finished = FINISHED_INITIAL;
2089 return this;
2090}
2091
2092#endif /* _MSVCR_VER >= 110 */
2093
2094#if _MSVCR_VER >= 120
2095
2096/* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
2097/* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
2098/* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
2099DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
2100void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
2101{
2102 TRACE("(%p)\n", this);
2103
2104 if (this->count && !__uncaught_exception()) {
2105 missing_wait e;
2106 missing_wait_ctor_str(&e, "Missing call to _RunAndWait");
2107 _CxxThrowException(&e, &missing_wait_exception_type);
2108 }
2109}
2110
2111#endif /* _MSVCR_VER >= 120 */
2112
2113static ThreadScheduler *get_thread_scheduler_from_context(Context *context)
2114{
2115 Scheduler *scheduler = get_scheduler_from_context(context);
2116 if (scheduler && scheduler->vtable == &ThreadScheduler_vtable)
2117 return (ThreadScheduler*)scheduler;
2118 return NULL;
2119}
2120
2121struct execute_chore_data {
2122 _UnrealizedChore *chore;
2123 _StructuredTaskCollection *task_collection;
2124};
2125
2126/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
2127/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
2128/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
2129DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
2130void __thiscall _StructuredTaskCollection__Cancel(
2131 _StructuredTaskCollection *this)
2132{
2133 ThreadScheduler *scheduler;
2134 void *prev_exception, *new_exception;
2135 struct scheduled_chore *sc, *next;
2136 LONG removed = 0, finished = 1;
2137 struct beacon *beacon;
2138
2139 TRACE("(%p)\n", this);
2140
2141 if (!this->context)
2142 this->context = get_current_context();
2143 scheduler = get_thread_scheduler_from_context(this->context);
2144 if (!scheduler)
2145 return;
2146
2147 new_exception = this->exception;
2148 do {
2149 prev_exception = new_exception;
2150 if ((ULONG_PTR)prev_exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2151 return;
2152 new_exception = (void*)((ULONG_PTR)prev_exception |
2153 STRUCTURED_TASK_COLLECTION_CANCELLED);
2154 } while ((new_exception = InterlockedCompareExchangePointer(
2155 &this->exception, new_exception, prev_exception))
2156 != prev_exception);
2157
2158 EnterCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2159 LIST_FOR_EACH_ENTRY(beacon, &((ExternalContextBase*)this->context)->beacons, struct beacon, entry) {
2160 if (beacon->task_collection == this)
2161 InterlockedIncrement(&beacon->cancelling);
2162 }
2163 LeaveCriticalSection(&((ExternalContextBase*)this->context)->beacons_cs);
2164
2165 EnterCriticalSection(&scheduler->cs);
2166 LIST_FOR_EACH_ENTRY_SAFE(sc, next, &scheduler->scheduled_chores,
2167 struct scheduled_chore, entry) {
2168 if (sc->chore->task_collection != this)
2169 continue;
2170 sc->chore->task_collection = NULL;
2171 list_remove(&sc->entry);
2172 removed++;
2173 operator_delete(sc);
2174 }
2175 LeaveCriticalSection(&scheduler->cs);
2176 if (!removed)
2177 return;
2178
2179 if (InterlockedCompareExchange(&this->finished, removed, FINISHED_INITIAL) != FINISHED_INITIAL)
2180 finished = InterlockedAdd(&this->finished, removed);
2181 if (!finished)
2182 call_Context_Unblock(this->event);
2183}
2184
2185static LONG CALLBACK execute_chore_except(EXCEPTION_POINTERS *pexc, void *_data)
2186{
2187 struct execute_chore_data *data = _data;
2188 void *prev_exception, *new_exception;
2189 exception_ptr *ptr;
2190
2191 TRACE("detect exception in task collection: %x\n",
2192 pexc->ExceptionRecord->ExceptionCode);
2193
2194 _StructuredTaskCollection__Cancel(data->task_collection);
2195
2196 ptr = operator_new(sizeof(*ptr));
2197 __ExceptionPtrCreate(ptr);
2198 exception_ptr_from_record(ptr, pexc->ExceptionRecord);
2199
2200 new_exception = data->task_collection->exception;
2201 do {
2202 if ((ULONG_PTR)new_exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) {
2203 __ExceptionPtrDestroy(ptr);
2204 operator_delete(ptr);
2205 break;
2206 }
2207 prev_exception = new_exception;
2208 new_exception = (void*)((ULONG_PTR)new_exception | (ULONG_PTR)ptr);
2209 } while ((new_exception = InterlockedCompareExchangePointer(
2210 &data->task_collection->exception, new_exception,
2211 prev_exception)) != prev_exception);
2212 data->task_collection->event = 0;
2213 return EXCEPTION_EXECUTE_HANDLER;
2214}
2215
2216static void CALLBACK execute_chore_finally(BOOL normal, void *data)
2217{
2218 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2219 _StructuredTaskCollection *old_collection = data;
2220
2221 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2222 ctx->task_collection = old_collection;
2223}
2224
2225static void execute_chore(_UnrealizedChore *chore,
2226 _StructuredTaskCollection *task_collection)
2227{
2228 ExternalContextBase *ctx = (ExternalContextBase*)try_get_current_context();
2229 struct execute_chore_data data = { chore, task_collection };
2230 _StructuredTaskCollection *old_collection;
2231
2232 TRACE("(%p %p)\n", chore, task_collection);
2233
2234 if (ctx && ctx->context.vtable == &ExternalContextBase_vtable)
2235 {
2236 old_collection = ctx->task_collection;
2237 ctx->task_collection = task_collection;
2238 }
2239
2240 __TRY
2241 {
2242 __TRY
2243 {
2244 if (!((ULONG_PTR)task_collection->exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) &&
2245 chore->chore_proc)
2246 chore->chore_proc(chore);
2247 }
2248 __EXCEPT_CTX(execute_chore_except, &data)
2249 {
2250 }
2251 __ENDTRY
2252 }
2253 __FINALLY_CTX(execute_chore_finally, old_collection)
2254}
2255
2256static void CALLBACK chore_wrapper_finally(BOOL normal, void *data)
2257{
2258 _UnrealizedChore *chore = data;
2259 struct _StructuredTaskCollection *task_collection = chore->task_collection;
2260 LONG finished = 1;
2261
2262 TRACE("(%u %p)\n", normal, data);
2263
2264 if (!task_collection)
2265 return;
2266 chore->task_collection = NULL;
2267
2268 if (InterlockedCompareExchange(&task_collection->finished, 1, FINISHED_INITIAL) != FINISHED_INITIAL)
2269 finished = InterlockedIncrement(&task_collection->finished);
2270 if (!finished)
2271 call_Context_Unblock(task_collection->event);
2272}
2273
2274static void __cdecl chore_wrapper(_UnrealizedChore *chore)
2275{
2276 __TRY
2277 {
2278 execute_chore(chore, chore->task_collection);
2279 }
2280 __FINALLY_CTX(chore_wrapper_finally, chore)
2281}
2282
2283static BOOL pick_and_execute_chore(ThreadScheduler *scheduler)
2284{
2285 struct list *entry;
2286 struct scheduled_chore *sc;
2287 _UnrealizedChore *chore;
2288
2289 TRACE("(%p)\n", scheduler);
2290
2291 if (scheduler->scheduler.vtable != &ThreadScheduler_vtable)
2292 {
2293 ERR("unknown scheduler set\n");
2294 return FALSE;
2295 }
2296
2297 EnterCriticalSection(&scheduler->cs);
2298 entry = list_head(&scheduler->scheduled_chores);
2299 if (entry)
2300 list_remove(entry);
2301 LeaveCriticalSection(&scheduler->cs);
2302 if (!entry)
2303 return FALSE;
2304
2305 sc = LIST_ENTRY(entry, struct scheduled_chore, entry);
2306 chore = sc->chore;
2307 operator_delete(sc);
2308
2309 chore->chore_wrapper(chore);
2310 return TRUE;
2311}
2312
2313static void __cdecl _StructuredTaskCollection_scheduler_cb(void *data)
2314{
2315 pick_and_execute_chore((ThreadScheduler*)get_current_scheduler());
2316}
2317
2318static bool schedule_chore(_StructuredTaskCollection *this,
2319 _UnrealizedChore *chore, Scheduler **pscheduler)
2320{
2321 struct scheduled_chore *sc;
2322 ThreadScheduler *scheduler;
2323
2324 if (chore->task_collection) {
2325 invalid_multiple_scheduling e;
2326 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2327 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2328 return FALSE;
2329 }
2330
2331 if (!this->context)
2332 this->context = get_current_context();
2333 scheduler = get_thread_scheduler_from_context(this->context);
2334 if (!scheduler) {
2335 ERR("unknown context or scheduler set\n");
2336 return FALSE;
2337 }
2338
2339 sc = operator_new(sizeof(*sc));
2340 sc->chore = chore;
2341
2342 chore->task_collection = this;
2343 chore->chore_wrapper = chore_wrapper;
2344 InterlockedIncrement(&this->count);
2345
2346 EnterCriticalSection(&scheduler->cs);
2347 list_add_head(&scheduler->scheduled_chores, &sc->entry);
2348 LeaveCriticalSection(&scheduler->cs);
2349 *pscheduler = &scheduler->scheduler;
2350 return TRUE;
2351}
2352
2353#if _MSVCR_VER >= 110
2354
2355/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2356/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
2357/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
2358DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
2359void __thiscall _StructuredTaskCollection__Schedule_loc(
2360 _StructuredTaskCollection *this, _UnrealizedChore *chore,
2361 /*location*/void *placement)
2362{
2363 Scheduler *scheduler;
2364
2365 TRACE("(%p %p %p)\n", this, chore, placement);
2366
2367 if (schedule_chore(this, chore, &scheduler))
2368 {
2369 call_Scheduler_ScheduleTask_loc(scheduler,
2370 _StructuredTaskCollection_scheduler_cb, NULL, placement);
2371 }
2372}
2373
2374#endif /* _MSVCR_VER >= 110 */
2375
2376/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
2377/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
2378/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
2379DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
2380void __thiscall _StructuredTaskCollection__Schedule(
2381 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2382{
2383 Scheduler *scheduler;
2384
2385 TRACE("(%p %p)\n", this, chore);
2386
2387 if (schedule_chore(this, chore, &scheduler))
2388 {
2389 call_Scheduler_ScheduleTask(scheduler,
2390 _StructuredTaskCollection_scheduler_cb, NULL);
2391 }
2392}
2393
2394static void CALLBACK exception_ptr_rethrow_finally(BOOL normal, void *data)
2395{
2396 exception_ptr *ep = data;
2397
2398 TRACE("(%u %p)\n", normal, data);
2399
2400 __ExceptionPtrDestroy(ep);
2401 operator_delete(ep);
2402}
2403
2404/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2405/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
2406/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
2407_TaskCollectionStatus __stdcall _StructuredTaskCollection__RunAndWait(
2408 _StructuredTaskCollection *this, _UnrealizedChore *chore)
2409{
2410 ULONG_PTR exception;
2411 exception_ptr *ep;
2412 LONG count;
2413
2414 TRACE("(%p %p)\n", this, chore);
2415
2416 if (chore) {
2417 if (chore->task_collection) {
2418 invalid_multiple_scheduling e;
2419 invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
2420 _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
2421 }
2422 execute_chore(chore, this);
2423 }
2424
2425 if (this->context) {
2426 ThreadScheduler *scheduler = get_thread_scheduler_from_context(this->context);
2427 if (scheduler) {
2428 while (pick_and_execute_chore(scheduler)) ;
2429 }
2430 }
2431
2432 this->event = get_current_context();
2433 InterlockedCompareExchange(&this->finished, 0, FINISHED_INITIAL);
2434
2435 while (this->count != 0) {
2436 count = this->count;
2437 InterlockedAdd(&this->count, -count);
2438 count = InterlockedAdd(&this->finished, -count);
2439
2440 if (count < 0)
2441 call_Context_Block(this->event);
2442 }
2443
2444 exception = (ULONG_PTR)this->exception;
2445 ep = (exception_ptr*)(exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
2446 if (ep) {
2447 this->exception = 0;
2448 __TRY
2449 {
2450 __ExceptionPtrRethrow(ep);
2451 }
2452 __FINALLY_CTX(exception_ptr_rethrow_finally, ep)
2453 }
2454 if (exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
2455 return TASK_COLLECTION_CANCELLED;
2456 return TASK_COLLECTION_SUCCESS;
2457}
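/* _RunAndWait runs the optional chore inline, drains chores still queued on the
 * owning scheduler, then blocks until every chore scheduled on this collection
 * has reported completion through chore_wrapper_finally; a stored exception_ptr
 * is rethrown, and a cancelled collection reports TASK_COLLECTION_CANCELLED. A
 * hedged usage sketch (chore initialization is elided; not the real
 * _UnrealizedChore ctor):
 *
 *     _StructuredTaskCollection tc;
 *     _StructuredTaskCollection_ctor(&tc, NULL);
 *     _StructuredTaskCollection__Schedule(&tc, &chore1);  // runs asynchronously
 *     _StructuredTaskCollection__Schedule(&tc, &chore2);
 *     status = _StructuredTaskCollection__RunAndWait(&tc, &chore3);  // chore3 inline
 *     // status: TASK_COLLECTION_SUCCESS or TASK_COLLECTION_CANCELLED
 */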
2458
2459/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
2460/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
2461/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
2462DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
2463bool __thiscall _StructuredTaskCollection__IsCanceling(
2464 _StructuredTaskCollection *this)
2465{
2466 TRACE("(%p)\n", this);
2467 return !!((ULONG_PTR)this->exception & STRUCTURED_TASK_COLLECTION_CANCELLED);
2468}
2469
2470/* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ */
2471/* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IEAAXXZ */
2472DEFINE_THISCALL_WRAPPER(_UnrealizedChore__CheckTaskCollection, 4)
2473void __thiscall _UnrealizedChore__CheckTaskCollection(_UnrealizedChore *this)
2474{
2475 FIXME("() stub\n");
2476}
2477
2478/* ??0critical_section@Concurrency@@QAE@XZ */
2479/* ??0critical_section@Concurrency@@QEAA@XZ */
2480DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
2481critical_section* __thiscall critical_section_ctor(critical_section *this)
2482{
2483 TRACE("(%p)\n", this);
2484
2485 this->unk_active.ctx = NULL;
2486 this->head = this->tail = NULL;
2487 return this;
2488}
2489
2490/* ??1critical_section@Concurrency@@QAE@XZ */
2491/* ??1critical_section@Concurrency@@QEAA@XZ */
2492DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
2493void __thiscall critical_section_dtor(critical_section *this)
2494{
2495 TRACE("(%p)\n", this);
2496}
2497
2498static void __cdecl spin_wait_yield(void)
2499{
2500 Sleep(0);
2501}
2502
2503static inline void spin_wait_for_next_cs(cs_queue *q)
2504{
2505 SpinWait sw;
2506
2507 if(q->next) return;
2508
2509 SpinWait_ctor(&sw, &spin_wait_yield);
2510 SpinWait__Reset(&sw);
2511 while(!q->next)
2512 SpinWait__SpinOnce(&sw);
2513 SpinWait_dtor(&sw);
2514}
2515
2516static inline void cs_set_head(critical_section *cs, cs_queue *q)
2517{
2518 cs->unk_active.ctx = get_current_context();
2519 cs->unk_active.next = q->next;
2520 cs->head = &cs->unk_active;
2521}
2522
2523static inline void cs_lock(critical_section *cs, cs_queue *q)
2524{
2525 cs_queue *last;
2526
2527 if(cs->unk_active.ctx == get_current_context()) {
2528 improper_lock e;
2529 improper_lock_ctor_str(&e, "Already locked");
2530 _CxxThrowException(&e, &improper_lock_exception_type);
2531 }
2532
2533 memset(q, 0, sizeof(*q));
2534 q->ctx = get_current_context();
2535 last = InterlockedExchangePointer(&cs->tail, q);
2536 if(last) {
2537 last->next = q;
2538 call_Context_Block(q->ctx);
2539 }
2540
2541 cs_set_head(cs, q);
2542 if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
2543 spin_wait_for_next_cs(q);
2544 cs->unk_active.next = q->next;
2545 }
2546}
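/* cs_lock is an MCS-style queue lock: each waiter publishes a cs_queue node with
 * an atomic exchange on 'tail', blocks on its own context, and on wakeup installs
 * the embedded unk_active node as the new head. spin_wait_for_next_cs covers the
 * window where a successor has swapped the tail but not yet linked 'next'. A
 * hedged sketch of the raw API (scoped_lock below is the usual wrapper):
 *
 *     critical_section cs;
 *     critical_section_ctor(&cs);
 *     critical_section_lock(&cs);
 *     // ... critical region ...
 *     critical_section_unlock(&cs);
 *     critical_section_dtor(&cs);
 */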
2547
2548/* ?lock@critical_section@Concurrency@@QAEXXZ */
2549/* ?lock@critical_section@Concurrency@@QEAAXXZ */
2550DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
2551void __thiscall critical_section_lock(critical_section *this)
2552{
2553 cs_queue q;
2554
2555 TRACE("(%p)\n", this);
2556 cs_lock(this, &q);
2557}
2558
2559/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
2560/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
2561DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
2562bool __thiscall critical_section_try_lock(critical_section *this)
2563{
2564 cs_queue q;
2565
2566 TRACE("(%p)\n", this);
2567
2568 if(this->unk_active.ctx == get_current_context())
2569 return FALSE;
2570
2571 memset(&q, 0, sizeof(q));
2572 if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
2573 cs_set_head(this, &q);
2574 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
2575 spin_wait_for_next_cs(&q);
2576 this->unk_active.next = q.next;
2577 }
2578 return TRUE;
2579 }
2580 return FALSE;
2581}
2582
2583/* ?unlock@critical_section@Concurrency@@QAEXXZ */
2584/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
2585DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
2586void __thiscall critical_section_unlock(critical_section *this)
2587{
2588 TRACE("(%p)\n", this);
2589
2590 this->unk_active.ctx = NULL;
2591 this->head = NULL;
2592 if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
2593 == &this->unk_active) return;
2594 spin_wait_for_next_cs(&this->unk_active);
2595
2596#if _MSVCR_VER >= 110
2597 while(1) {
2598 cs_queue *next;
2599
2600 if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
2601 break;
2602
2603 next = this->unk_active.next;
2604 if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
2605 HeapFree(GetProcessHeap(), 0, next);
2606 return;
2607 }
2608 spin_wait_for_next_cs(next);
2609
2610 this->unk_active.next = next->next;
2611 HeapFree(GetProcessHeap(), 0, next);
2612 }
2613#endif
2614
2615 call_Context_Unblock(this->unk_active.next->ctx);
2616}
2617
2618/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
2619/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
2620DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
2621critical_section* __thiscall critical_section_native_handle(critical_section *this)
2622{
2623 TRACE("(%p)\n", this);
2624 return this;
2625}
2626
2627static void set_timeout(FILETIME *ft, unsigned int timeout)
2628{
2629 LARGE_INTEGER to;
2630
2631 GetSystemTimeAsFileTime(ft);
2632 to.QuadPart = ((LONGLONG)ft->dwHighDateTime << 32) +
2633 ft->dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
2634 ft->dwHighDateTime = to.QuadPart >> 32;
2635 ft->dwLowDateTime = to.QuadPart;
2636}
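/* FILETIME counts 100ns ticks, so with TICKSPERMSEC == 10000 a relative timeout
 * in milliseconds converts to ticks with a single multiply. Worked example:
 *
 *     FILETIME ft;
 *     set_timeout(&ft, 500);   // ft = now + 500 * 10000 ticks = now + 0.5s
 */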
2637
2638struct timeout_unlock
2639{
2640 Context *ctx;
2641 BOOL timed_out;
2642};
2643
2644static void WINAPI timeout_unlock(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
2645{
2646 struct timeout_unlock *tu = ctx;
2647 tu->timed_out = TRUE;
2648 call_Context_Unblock(tu->ctx);
2649}
2650
2651/* returns TRUE if wait has timed out */
2652static BOOL block_context_for(Context *ctx, unsigned int timeout)
2653{
2654 struct timeout_unlock tu = { ctx };
2655 TP_TIMER *tp_timer;
2656 FILETIME ft;
2657
2658 if(timeout == COOPERATIVE_TIMEOUT_INFINITE) {
2659 call_Context_Block(ctx);
2660 return FALSE;
2661 }
2662
2663 tp_timer = CreateThreadpoolTimer(timeout_unlock, &tu, NULL);
2664 if(!tp_timer) {
2665 FIXME("throw exception?\n");
2666 return TRUE;
2667 }
2668 set_timeout(&ft, timeout);
2669 SetThreadpoolTimer(tp_timer, &ft, 0, 0);
2670
2671 call_Context_Block(ctx);
2672
2673 SetThreadpoolTimer(tp_timer, NULL, 0, 0);
2674 WaitForThreadpoolTimerCallbacks(tp_timer, TRUE);
2675 CloseThreadpoolTimer(tp_timer);
2676 return tu.timed_out;
2677}
2678
2679#if _MSVCR_VER >= 110
2680/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
2681/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
2682DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
2683bool __thiscall critical_section_try_lock_for(
2684 critical_section *this, unsigned int timeout)
2685{
2686 Context *ctx = get_current_context();
2687 cs_queue *q, *last;
2688
2689 TRACE("(%p %d)\n", this, timeout);
2690
2691 if(this->unk_active.ctx == ctx) {
2692 improper_lock e;
2693 improper_lock_ctor_str(&e, "Already locked");
2694 _CxxThrowException(&e, &improper_lock_exception_type);
2695 }
2696
2697 if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
2698 return critical_section_try_lock(this);
2699 q->ctx = ctx;
2700
2701 last = InterlockedExchangePointer(&this->tail, q);
2702 if(last) {
2703 last->next = q;
2704
2705 if(block_context_for(q->ctx, timeout))
2706 {
2707 if(!InterlockedExchange(&q->free, TRUE))
2708 return FALSE;
2709 /* Context was unblocked because of timeout and unlock operation */
2710 call_Context_Block(ctx);
2711 }
2712 }
2713
2714 cs_set_head(this, q);
2715 if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
2716 spin_wait_for_next_cs(q);
2717 this->unk_active.next = q->next;
2718 }
2719
2720 HeapFree(GetProcessHeap(), 0, q);
2721 return TRUE;
2722}
2723#endif
2724
2725/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
2726/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
2727DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
2728critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
2729 critical_section_scoped_lock *this, critical_section *cs)
2730{
2731 TRACE("(%p %p)\n", this, cs);
2732 this->cs = cs;
2733 cs_lock(this->cs, &this->lock.q);
2734 return this;
2735}
2736
2737/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
2738/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
2739DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
2740void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
2741{
2742 TRACE("(%p)\n", this);
2743 critical_section_unlock(this->cs);
2744}
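/* scoped_lock is the RAII face of critical_section in the C++ API; in this C
 * implementation the compiler-generated ctor/dtor calls are explicit. A hedged
 * sketch of the equivalent call sequence:
 *
 *     critical_section_scoped_lock sl;
 *     critical_section_scoped_lock_ctor(&sl, &cs);  // locks cs
 *     // ... critical region ...
 *     critical_section_scoped_lock_dtor(&sl);       // unlocks cs
 */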
2745
2746/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2747/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2748DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
2749_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
2750{
2751 TRACE("(%p)\n", this);
2752
2753 critical_section_ctor(&this->cs);
2754 return this;
2755}
2756
2757/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2758/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2759DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
2760void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
2761{
2762 TRACE("(%p %p)\n", this, q);
2763 cs_lock(&this->cs, q);
2764}
2765
2766/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
2767/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2768DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
2769void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
2770{
2771 TRACE("(%p)\n", this);
2772 critical_section_unlock(&this->cs);
2773}
2774
2775/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2776/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2777DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
2778_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
2779 _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
2780{
2781 TRACE("(%p %p)\n", this, lock);
2782
2783 this->lock = lock;
2784 _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2785 return this;
2786}
2787
2788/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
2789/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2790DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
2791void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
2792{
2793 TRACE("(%p)\n", this);
2794
2795 _NonReentrantPPLLock__Release(this->lock);
2796}
2797
2798/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2799/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2800DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
2801_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
2802{
2803 TRACE("(%p)\n", this);
2804
2805 critical_section_ctor(&this->cs);
2806 this->count = 0;
2807 this->owner = -1;
2808 return this;
2809}
2810
2811/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
2812/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
2813DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
2814void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
2815{
2816 TRACE("(%p %p)\n", this, q);
2817
2818 if(this->owner == GetCurrentThreadId()) {
2819 this->count++;
2820 return;
2821 }
2822
2823 cs_lock(&this->cs, q);
2824 this->count++;
2825 this->owner = GetCurrentThreadId();
2826}
2827
2828/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
2829/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
2830DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
2831void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
2832{
2833 TRACE("(%p)\n", this);
2834
2835 this->count--;
2836 if(this->count)
2837 return;
2838
2839 this->owner = -1;
2840 critical_section_unlock(&this->cs);
2841}
2842
2843/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
2844/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
2845DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
2846_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
2847 _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
2848{
2849 TRACE("(%p %p)\n", this, lock);
2850
2851 this->lock = lock;
2852 _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
2853 return this;
2854}
2855
2856/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
2857/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
2858DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
2859void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
2860{
2861 TRACE("(%p)\n", this);
2862
2863 _ReentrantPPLLock__Release(this->lock);
2864}
2865
2866/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
2867unsigned int __cdecl _GetConcurrency(void)
2868{
2869 static unsigned int val = -1;
2870
2871 TRACE("()\n");
2872
2873 if(val == -1) {
2874 SYSTEM_INFO si;
2875
2876 GetSystemInfo(&si);
2877 val = si.dwNumberOfProcessors;
2878 }
2879
2880 return val;
2881}
2882
2883static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
2884{
2885 entry->next = *head;
2886 entry->prev = NULL;
2887 if(*head) (*head)->prev = entry;
2888 *head = entry;
2889}
2890
2891static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
2892{
2893 if(entry == *head)
2894 *head = entry->next;
2895 else if(entry->prev)
2896 entry->prev->next = entry->next;
2897 if(entry->next) entry->next->prev = entry->prev;
2898}
2899
2900static size_t evt_end_wait(thread_wait *wait, event **events, int count)
2901{
2902 size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;
2903
2904 for(i = 0; i < count; i++) {
2905 critical_section_lock(&events[i]->cs);
2906 if(events[i] == wait->signaled) ret = i;
2907 evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
2908 critical_section_unlock(&events[i]->cs);
2909 }
2910
2911 return ret;
2912}
2913
2914static inline int evt_transition(void **state, void *from, void *to)
2915{
2916 return InterlockedCompareExchangePointer(state, to, from) == from;
2917}
2918
2919static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
2920{
2921 int i;
2922
2923 wait->signaled = EVT_RUNNING;
2924 wait->pending_waits = wait_all ? count : 1;
2925 for(i = 0; i < count; i++) {
2926 wait->entries[i].wait = wait;
2927
2928 critical_section_lock(&events[i]->cs);
2929 evt_add_queue(&events[i]->waiters, &wait->entries[i]);
2930 if(events[i]->signaled) {
2931 if(!InterlockedDecrement(&wait->pending_waits)) {
2932 wait->signaled = events[i];
2933 critical_section_unlock(&events[i]->cs);
2934
2935 return evt_end_wait(wait, events, i+1);
2936 }
2937 }
2938 critical_section_unlock(&events[i]->cs);
2939 }
2940
2941 if(!timeout)
2942 return evt_end_wait(wait, events, count);
2943
2944 if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
2945 return evt_end_wait(wait, events, count);
2946
2947 if(block_context_for(wait->ctx, timeout) &&
2948 !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
2949 call_Context_Block(wait->ctx);
2950
2951 return evt_end_wait(wait, events, count);
2952}
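/* evt_wait encodes wait-any vs wait-all in pending_waits: it starts at 1 for
 * wait-any and at 'count' for wait-all, and each signaled event decrements it;
 * the waiter is released only when it reaches zero. The EVT_RUNNING/EVT_WAITING
 * transitions arbitrate between the timeout path in block_context_for and a
 * concurrent event_set, so a late set never unblocks a context twice. */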
2953
2954/* ??0event@Concurrency@@QAE@XZ */
2955/* ??0event@Concurrency@@QEAA@XZ */
2956DEFINE_THISCALL_WRAPPER(event_ctor, 4)
2957event* __thiscall event_ctor(event *this)
2958{
2959 TRACE("(%p)\n", this);
2960
2961 this->waiters = NULL;
2962 this->signaled = FALSE;
2963 critical_section_ctor(&this->cs);
2964
2965 return this;
2966}
2967
2968/* ??1event@Concurrency@@QAE@XZ */
2969/* ??1event@Concurrency@@QEAA@XZ */
2970DEFINE_THISCALL_WRAPPER(event_dtor, 4)
2971void __thiscall event_dtor(event *this)
2972{
2973 TRACE("(%p)\n", this);
2974 critical_section_dtor(&this->cs);
2975 if(this->waiters)
2976 ERR("there's a wait on destroyed event\n");
2977}
2978
2979/* ?reset@event@Concurrency@@QAEXXZ */
2980/* ?reset@event@Concurrency@@QEAAXXZ */
2981DEFINE_THISCALL_WRAPPER(event_reset, 4)
2982void __thiscall event_reset(event *this)
2983{
2984 thread_wait_entry *entry;
2985
2986 TRACE("(%p)\n", this);
2987
2988 critical_section_lock(&this->cs);
2989 if(this->signaled) {
2990 this->signaled = FALSE;
2991 for(entry=this->waiters; entry; entry = entry->next)
2992 InterlockedIncrement(&entry->wait->pending_waits);
2993 }
2994 critical_section_unlock(&this->cs);
2995}
2996
2997/* ?set@event@Concurrency@@QAEXXZ */
2998/* ?set@event@Concurrency@@QEAAXXZ */
2999DEFINE_THISCALL_WRAPPER(event_set, 4)
3000void __thiscall event_set(event *this)
3001{
3002 thread_wait_entry *wakeup = NULL;
3003 thread_wait_entry *entry, *next;
3004
3005 TRACE("(%p)\n", this);
3006
3007 critical_section_lock(&this->cs);
3008 if(!this->signaled) {
3009 this->signaled = TRUE;
3010 for(entry=this->waiters; entry; entry=next) {
3011 next = entry->next;
3012 if(!InterlockedDecrement(&entry->wait->pending_waits)) {
3013 if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
3014 evt_remove_queue(&this->waiters, entry);
3015 evt_add_queue(&wakeup, entry);
3016 }
3017 }
3018 }
3019 }
3020 critical_section_unlock(&this->cs);
3021
3022 for(entry=wakeup; entry; entry=next) {
3023 next = entry->next;
3024 entry->next = entry->prev = NULL;
3025 call_Context_Unblock(entry->wait->ctx);
3026 }
3027}
3028
3029/* ?wait@event@Concurrency@@QAEII@Z */
3030/* ?wait@event@Concurrency@@QEAA_KI@Z */
3031DEFINE_THISCALL_WRAPPER(event_wait, 8)
3032size_t __thiscall event_wait(event *this, unsigned int timeout)
3033{
3034 thread_wait wait;
3035 size_t signaled;
3036
3037 TRACE("(%p %u)\n", this, timeout);
3038
3039 critical_section_lock(&this->cs);
3040 signaled = this->signaled;
3041 critical_section_unlock(&this->cs);
3042
3043 if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
3044 wait.ctx = get_current_context();
3045 return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
3046}
3047
3048/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
3049/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
3050int __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
3051{
3052 thread_wait *wait;
3053 size_t ret;
3054
3055 TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);
3056
3057 if(count == 0)
3058 return 0;
3059
3060 wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
3061 wait->ctx = get_current_context();
3062 ret = evt_wait(wait, events, count, wait_all, timeout);
3063 operator_delete(wait);
3064
3065 return ret;
3066}
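/* event_wait_for_multiple allocates one thread_wait carrying 'count' entries and
 * returns the index of the event that released the waiter, or
 * COOPERATIVE_WAIT_TIMEOUT. A hedged usage sketch:
 *
 *     event e1, e2;
 *     event *evts[2] = { &e1, &e2 };
 *     event_ctor(&e1);
 *     event_ctor(&e2);
 *     // block until both are set, or give up after 1000ms
 *     int r = event_wait_for_multiple(evts, 2, TRUE, 1000);
 *     if(r == COOPERATIVE_WAIT_TIMEOUT)
 *         ;  // timed out
 */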
3067
3068#if _MSVCR_VER >= 110
3069
3070/* ??0_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3071/* ??0_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3072DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_ctor, 4)
3073_Cancellation_beacon* __thiscall _Cancellation_beacon_ctor(_Cancellation_beacon *this)
3074{
3075 ExternalContextBase *ctx = (ExternalContextBase*)get_current_context();
3076 _StructuredTaskCollection *task_collection = NULL;
3077 struct beacon *beacon;
3078
3079 TRACE("(%p)\n", this);
3080
3081 if (ctx->context.vtable == &ExternalContextBase_vtable) {
3082 task_collection = ctx->task_collection;
3083 if (task_collection)
3084 ctx = (ExternalContextBase*)task_collection->context;
3085 }
3086
3087 if (ctx->context.vtable != &ExternalContextBase_vtable) {
3088 ERR("unknown context\n");
3089 return NULL;
3090 }
3091
3092 beacon = malloc(sizeof(*beacon));
3093 beacon->cancelling = Context_IsCurrentTaskCollectionCanceling();
3094 beacon->task_collection = task_collection;
3095
3096 if (task_collection) {
3097 EnterCriticalSection(&ctx->beacons_cs);
3098 list_add_head(&ctx->beacons, &beacon->entry);
3099 LeaveCriticalSection(&ctx->beacons_cs);
3100 }
3101
3102 this->beacon = beacon;
3103 return this;
3104}
3105
3106/* ??1_Cancellation_beacon@details@Concurrency@@QAE@XZ */
3107/* ??1_Cancellation_beacon@details@Concurrency@@QEAA@XZ */
3108DEFINE_THISCALL_WRAPPER(_Cancellation_beacon_dtor, 4)
3109void __thiscall _Cancellation_beacon_dtor(_Cancellation_beacon *this)
3110{
3111 TRACE("(%p)\n", this);
3112
3113 if (this->beacon->task_collection) {
3114 ExternalContextBase *ctx = (ExternalContextBase*)this->beacon->task_collection->context;
3115
3116 EnterCriticalSection(&ctx->beacons_cs);
3117 list_remove(&this->beacon->entry);
3118 LeaveCriticalSection(&ctx->beacons_cs);
3119 }
3120
3121 free(this->beacon);
3122}
3123
3124/* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QAA_NXZ */
3125/* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QAE_NXZ */
3126/* ?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QEAA_NXZ */
3127DEFINE_THISCALL_WRAPPER(_Cancellation_beacon__Confirm_cancel, 4)
3128bool __thiscall _Cancellation_beacon__Confirm_cancel(_Cancellation_beacon *this)
3129{
3130 bool ret;
3131
3132 TRACE("(%p)\n", this);
3133
3134 ret = Context_IsCurrentTaskCollectionCanceling();
3135 if (!ret)
3136 InterlockedDecrement(&this->beacon->cancelling);
3137 return ret;
3138}
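/* A cancellation beacon caches cancellation state so hot loops can poll a plain
 * LONG instead of walking the context/task-collection chain; _Confirm_cancel
 * re-checks and undoes a stale positive. A hedged usage sketch (the loop helpers
 * are illustrative):
 *
 *     _Cancellation_beacon b;
 *     _Cancellation_beacon_ctor(&b);
 *     while(more_work()) {
 *         if(b.beacon->cancelling && _Cancellation_beacon__Confirm_cancel(&b))
 *             break;  // cancellation confirmed
 *         do_step();
 *     }
 *     _Cancellation_beacon_dtor(&b);
 */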
3139
3140/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
3141/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
3142DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
3143_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
3144{
3145 TRACE("(%p)\n", this);
3146
3147 this->queue = NULL;
3148 critical_section_ctor(&this->lock);
3149 return this;
3150}
3151
3152/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
3153/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
3154DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
3155void __thiscall _Condition_variable_dtor(_Condition_variable *this)
3156{
3157 TRACE("(%p)\n", this);
3158
3159 while(this->queue) {
3160 cv_queue *next = this->queue->next;
3161 if(!this->queue->expired)
3162 ERR("there's an active wait\n");
3163 operator_delete(this->queue);
3164 this->queue = next;
3165 }
3166 critical_section_dtor(&this->lock);
3167}
3168
3169/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
3170/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
3171DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
3172void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
3173{
3174 cv_queue q;
3175
3176 TRACE("(%p, %p)\n", this, cs);
3177
3178 q.ctx = get_current_context();
3179 q.expired = FALSE;
3180 critical_section_lock(&this->lock);
3181 q.next = this->queue;
3182 this->queue = &q;
3183 critical_section_unlock(&this->lock);
3184
3185 critical_section_unlock(cs);
3186 call_Context_Block(q.ctx);
3187 critical_section_lock(cs);
3188}
3189
3190/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
3191/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
3192DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
3193bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
3194 critical_section *cs, unsigned int timeout)
3195{
3196 cv_queue *q;
3197
3198 TRACE("(%p %p %d)\n", this, cs, timeout);
3199
3200 q = operator_new(sizeof(cv_queue));
3201 q->ctx = get_current_context();
3202 q->expired = FALSE;
3203 critical_section_lock(&this->lock);
3204 q->next = this->queue;
3205 this->queue = q;
3206 critical_section_unlock(&this->lock);
3207
3208 critical_section_unlock(cs);
3209
3210 if(block_context_for(q->ctx, timeout)) {
3211 if(!InterlockedExchange(&q->expired, TRUE)) {
3212 critical_section_lock(cs);
3213 return FALSE;
3214 }
3215 call_Context_Block(q->ctx);
3216 }
3217
3218 operator_delete(q);
3219 critical_section_lock(cs);
3220 return TRUE;
3221}
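/* As with any condition variable, a waiter can be released without its condition
 * holding (notify races with the timeout path above), so callers re-check a
 * predicate in a loop. A hedged usage sketch ('ready' is the caller's predicate):
 *
 *     critical_section_lock(&cs);
 *     while(!ready)
 *         _Condition_variable_wait(&cv, &cs);
 *     // ... consume state guarded by cs ...
 *     critical_section_unlock(&cs);
 */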
3222
3223/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
3224/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
3225DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
3226void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
3227{
3228 cv_queue *node;
3229
3230 TRACE("(%p)\n", this);
3231
3232 if(!this->queue)
3233 return;
3234
3235 while(1) {
3236 critical_section_lock(&this->lock);
3237 node = this->queue;
3238 if(!node) {
3239 critical_section_unlock(&this->lock);
3240 return;
3241 }
3242 this->queue = node->next;
3243 critical_section_unlock(&this->lock);
3244
3245 node->next = CV_WAKE;
3246 if(!InterlockedExchange(&node->expired, TRUE)) {
3247 call_Context_Unblock(node->ctx);
3248 return;
3249 } else {
3250 operator_delete(node);
3251 }
3252 }
3253}
3254
3255/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
3256/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
3257DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
3258void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
3259{
3260 cv_queue *ptr;
3261
3262 TRACE("(%p)\n", this);
3263
3264 if(!this->queue)
3265 return;
3266
3267 critical_section_lock(&this->lock);
3268 ptr = this->queue;
3269 this->queue = NULL;
3270 critical_section_unlock(&this->lock);
3271
3272 while(ptr) {
3273 cv_queue *next = ptr->next;
3274
3275 ptr->next = CV_WAKE;
3276 if(!InterlockedExchange(&ptr->expired, TRUE))
3277 call_Context_Unblock(ptr->ctx);
3278 else
3279 operator_delete(ptr);
3280 ptr = next;
3281 }
3282}
3283#endif
3284
3285/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
3286/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
3287DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
3288reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
3289{
3290 TRACE("(%p)\n", this);
3291
3292 memset(this, 0, sizeof(*this));
3293 return this;
3294}
3295
3296/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
3297/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
3298DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
3299void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
3300{
3301 TRACE("(%p)\n", this);
3302
3303 if (this->thread_id != 0 || this->count)
3304 WARN("destroying locked reader_writer_lock\n");
3305}
3306
3307static inline void spin_wait_for_next_rwl(rwl_queue *q)
3308{
3309 SpinWait sw;
3310
3311 if(q->next) return;
3312
3313 SpinWait_ctor(&sw, &spin_wait_yield);
3314 SpinWait__Reset(&sw);
3315 while(!q->next)
3316 SpinWait__SpinOnce(&sw);
3317 SpinWait_dtor(&sw);
3318}
3319
3320/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
3321/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
3322DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
3323void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
3324{
3325 rwl_queue q = { NULL, get_current_context() }, *last;
3326
3327 TRACE("(%p)\n", this);
3328
3329 if (this->thread_id == GetCurrentThreadId()) {
3330 improper_lock e;
3331 improper_lock_ctor_str(&e, "Already locked");
3332 _CxxThrowException(&e, &improper_lock_exception_type);
3333 }
3334
3335 last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
3336 if (last) {
3337 last->next = &q;
3338 call_Context_Block(q.ctx);
3339 } else {
3340 this->writer_head = &q;
3341 if (InterlockedOr(&this->count, WRITER_WAITING))
3342 call_Context_Block(q.ctx);
3343 }
3344
3345 this->thread_id = GetCurrentThreadId();
3346 this->writer_head = &this->active;
3347 this->active.next = NULL;
3348 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3349 spin_wait_for_next_rwl(&q);
3350 this->active.next = q.next;
3351 }
3352}
3353
3354/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
3355/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
3356DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
3357void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
3358{
3359 rwl_queue q = { NULL, get_current_context() };
3360
3361 TRACE("(%p)\n", this);
3362
3363 if (this->thread_id == GetCurrentThreadId()) {
3364 improper_lock e;
3365 improper_lock_ctor_str(&e, "Already locked as writer");
3366 _CxxThrowException(&e, &improper_lock_exception_type);
3367 }
3368
3369 do {
3370 q.next = this->reader_head;
3371 } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);
3372
3373 if (!q.next) {
3374 rwl_queue *head;
3375 LONG count;
3376
3377 while (!((count = this->count) & WRITER_WAITING))
3378 if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;
3379
3380 if (count & WRITER_WAITING)
3381 call_Context_Block(q.ctx);
3382
3383 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3384 while(head && head != &q) {
3385 rwl_queue *next = head->next;
3386 InterlockedIncrement(&this->count);
3387 call_Context_Unblock(head->ctx);
3388 head = next;
3389 }
3390 } else {
3391 call_Context_Block(q.ctx);
3392 }
3393}
3394
3395/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
3396/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
3397DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
3398bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
3399{
3400 rwl_queue q = { NULL };
3401
3402 TRACE("(%p)\n", this);
3403
3404 if (this->thread_id == GetCurrentThreadId())
3405 return FALSE;
3406
3407 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
3408 return FALSE;
3409 this->writer_head = &q;
3410 if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
3411 this->thread_id = GetCurrentThreadId();
3412 this->writer_head = &this->active;
3413 this->active.next = NULL;
3414 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
3415 spin_wait_for_next_rwl(&q);
3416 this->active.next = q.next;
3417 }
3418 return TRUE;
3419 }
3420
3421 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
3422 return FALSE;
3423 spin_wait_for_next_rwl(&q);
3424 this->writer_head = q.next;
3425 if (!InterlockedOr(&this->count, WRITER_WAITING)) {
3426 this->thread_id = GetCurrentThreadId();
3427 this->writer_head = &this->active;
3428 this->active.next = q.next;
3429 return TRUE;
3430 }
3431 return FALSE;
3432}
3433
3434/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
3435/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
3436DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
3437bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
3438{
3439 LONG count;
3440
3441 TRACE("(%p)\n", this);
3442
3443 while (!((count = this->count) & WRITER_WAITING))
3444 if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
3445 return FALSE;
3446}
3447
3448/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
3449/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
3450DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
3451void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
3452{
3453 LONG count;
3454 rwl_queue *head, *next;
3455
3456 TRACE("(%p)\n", this);
3457
3458 if ((count = this->count) & ~WRITER_WAITING) {
3459 count = InterlockedDecrement(&this->count);
3460 if (count != WRITER_WAITING)
3461 return;
3462 call_Context_Unblock(this->writer_head->ctx);
3463 return;
3464 }
3465
3466 this->thread_id = 0;
3467 next = this->writer_head->next;
3468 if (next) {
3469 call_Context_Unblock(next->ctx);
3470 return;
3471 }
3472 InterlockedAnd(&this->count, ~WRITER_WAITING);
3473 head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
3474 while (head) {
3475 next = head->next;
3476 InterlockedIncrement(&this->count);
3477 call_Context_Unblock(head->ctx);
3478 head = next;
3479 }
3480
3481 if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
3482 return;
3483 InterlockedOr(&this->count, WRITER_WAITING);
3484}
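/* Editorial note on unlock() above: a non-zero reader count means a shared
 * lock is being released; the last reader out (count dropping to exactly
 * WRITER_WAITING) wakes the queued writer. For an exclusive release, the
 * lock is handed directly to the next queued writer if there is one;
 * otherwise WRITER_WAITING is cleared and every queued reader is woken. The
 * final CAS on writer_tail detects a writer that queued during the reader
 * wake-up and restores the WRITER_WAITING bit on its behalf. */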
3485
3486/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3487/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3488DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
3489reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
3490 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3491{
3492 TRACE("(%p %p)\n", this, lock);
3493
3494 this->lock = lock;
3495 reader_writer_lock_lock(lock);
3496 return this;
3497}
3498
3499/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
3500/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
3501DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
3502void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
3503{
3504 TRACE("(%p)\n", this);
3505 reader_writer_lock_unlock(this->lock);
3506}
3507
3508/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
3509/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
3510DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
3511reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
3512 reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
3513{
3514 TRACE("(%p %p)\n", this, lock);
3515
3516 this->lock = lock;
3517 reader_writer_lock_lock_read(lock);
3518 return this;
3519}
3520
3521/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
3522/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
3523DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
3524void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
3525{
3526 TRACE("(%p)\n", this);
3527 reader_writer_lock_unlock(this->lock);
3528}
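/* The scoped_lock/scoped_lock_read pairs above back the RAII guards of the
 * native ConcRT API. A minimal sketch of the client-side C++ usage these
 * exports service (hypothetical caller code, not part of this file):
 *
 *   Concurrency::reader_writer_lock lock;
 *   {
 *       Concurrency::reader_writer_lock::scoped_lock writer(lock);
 *       // exclusive section; unlock() runs when 'writer' is destroyed
 *   }
 *   {
 *       Concurrency::reader_writer_lock::scoped_lock_read reader(lock);
 *       // shared section
 *   }
 */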
3529
3530/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
3531/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
3532DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
3533_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
3534{
3535 TRACE("(%p)\n", this);
3536
3537 InitializeCriticalSectionEx(&this->cs, 0, RTL_CRITICAL_SECTION_FLAG_FORCE_DEBUG_INFO);
3538 this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
3539 return this;
3540}
3541
3542/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
3543/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
3544DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
3545void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
3546{
3547 TRACE("(%p)\n", this);
3548
3549 this->cs.DebugInfo->Spare[0] = 0;
3550 DeleteCriticalSection(&this->cs);
3551}
3552
3553/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
3554/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
3555DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
3556void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
3557{
3558 TRACE("(%p)\n", this);
3559 EnterCriticalSection(&this->cs);
3560}
3561
3562/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
3563/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
3564DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
3565void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
3566{
3567 TRACE("(%p)\n", this);
3568 LeaveCriticalSection(&this->cs);
3569}
3570
3571/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
3572/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
3573DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
3574bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
3575{
3576 TRACE("(%p)\n", this);
3577 return TryEnterCriticalSection(&this->cs);
3578}
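/* Editorial note: _ReentrantBlockingLock is a thin wrapper over a Win32
 * CRITICAL_SECTION (_TryAcquire maps to TryEnterCriticalSection), so the
 * owning thread may re-acquire it recursively, unlike reader_writer_lock
 * above, which refuses re-entry by the owning writer. */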
3579
3580/* ??0_ReaderWriterLock@details@Concurrency@@QAA@XZ */
3581/* ??0_ReaderWriterLock@details@Concurrency@@QAE@XZ */
3582/* ??0_ReaderWriterLock@details@Concurrency@@QEAA@XZ */
3583DEFINE_THISCALL_WRAPPER(_ReaderWriterLock_ctor, 4)
3584_ReaderWriterLock* __thiscall _ReaderWriterLock_ctor(_ReaderWriterLock *this)
3585{
3586 TRACE("(%p)\n", this);
3587
3588 this->state = 0;
3589 this->count = 0;
3590 return this;
3591}
3592
3593/* ?wait@Concurrency@@YAXI@Z */
3594void __cdecl Concurrency_wait(unsigned int time)
3595{
3596 TRACE("(%d)\n", time);
3597 block_context_for(get_current_context(), time);
3598}
3599
3600#if _MSVCR_VER>=110
3601/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
3602void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
3603{
3604 FIXME("(%d %#I64x)\n", type, id);
3605}
3606#endif
3607
3608/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
3609/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
3610void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
3611{
3612 FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
3613}
3614
3615/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
3616/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
3617DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
3618_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
3619{
3620 TRACE("(%p %u %x)\n", this, elapse, repeat);
3621
3622 this->vtable = &_Timer_vtable;
3623 this->timer = NULL;
3624 this->elapse = elapse;
3625 this->repeat = repeat;
3626 return this;
3627}
3628
3629static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
3630{
3631 _Timer *this = ctx;
3632 TRACE("calling _Timer(%p) callback\n", this);
3633 call__Timer_callback(this);
3634}
3635
3636/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
3637/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
3638DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
3639void __thiscall _Timer__Start(_Timer *this)
3640{
3641 LONGLONG ll;
3642 FILETIME ft;
3643
3644 TRACE("(%p)\n", this);
3645
3646 this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
3647 if (!this->timer)
3648 {
3649 FIXME("throw exception?\n");
3650 return;
3651 }
3652
3653 ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
3654 ft.dwLowDateTime = ll & 0xffffffff;
3655 ft.dwHighDateTime = ll >> 32;
3656 SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
3657}
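/* Editorial note: SetThreadpoolTimer interprets a negative FILETIME as a due
 * time relative to now, so _Start() computes -(elapse in ms) * TICKSPERMSEC,
 * i.e. the interval converted to 100-nanosecond ticks; msPeriod is non-zero
 * only for repeating timers. */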
3658
3659/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
3660/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
3661DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
3662void __thiscall _Timer__Stop(_Timer *this)
3663{
3664 TRACE("(%p)\n", this);
3665
3666 SetThreadpoolTimer(this->timer, NULL, 0, 0);
3667 WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
3668 CloseThreadpoolTimer(this->timer);
3669 this->timer = NULL;
3670}
3671
3672/* ??1_Timer@details@Concurrency@@MAE@XZ */
3673/* ??1_Timer@details@Concurrency@@MEAA@XZ */
3674DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
3675void __thiscall _Timer_dtor(_Timer *this)
3676{
3677 TRACE("(%p)\n", this);
3678
3679 if (this->timer)
3680 _Timer__Stop(this);
3681}
3682
3683DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
3684_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
3685{
3686 TRACE("(%p %x)\n", this, flags);
3687 if (flags & 2) {
3688 /* we have an array, with the number of elements stored before the first object */
3689 INT_PTR i, *ptr = (INT_PTR *)this-1;
3690
3691 for (i=*ptr-1; i>=0; i--)
3692 _Timer_dtor(this+i);
3693 operator_delete(ptr);
3694 } else {
3695 _Timer_dtor(this);
3696 if (flags & 1)
3697 operator_delete(this);
3698 }
3699
3700 return this;
3701}
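/* Editorial note: this follows the usual msvcrt vector-destructor protocol.
 * Flag 2 selects array destruction: the element count is stored in the
 * INT_PTR immediately before the first object, each element is destroyed in
 * reverse order, and the whole allocation (starting at the stored count) is
 * freed. Otherwise a single object is destroyed and flag 1 decides whether
 * its storage is released with operator_delete. */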
3702
3703#ifdef __ASM_USE_THISCALL_WRAPPER
3704
3705#define DEFINE_VTBL_WRAPPER(off) \
3706 __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
3707 "popl %eax\n\t" \
3708 "popl %ecx\n\t" \
3709 "pushl %eax\n\t" \
3710 "movl 0(%ecx), %eax\n\t" \
3711 "jmp *" #off "(%eax)\n\t")
3712
3713DEFINE_VTBL_WRAPPER(0);
3714DEFINE_VTBL_WRAPPER(4);
3715DEFINE_VTBL_WRAPPER(8);
3716DEFINE_VTBL_WRAPPER(12);
3717DEFINE_VTBL_WRAPPER(16);
3718DEFINE_VTBL_WRAPPER(20);
3719DEFINE_VTBL_WRAPPER(24);
3720DEFINE_VTBL_WRAPPER(28);
3721DEFINE_VTBL_WRAPPER(32);
3722DEFINE_VTBL_WRAPPER(36);
3723DEFINE_VTBL_WRAPPER(40);
3724DEFINE_VTBL_WRAPPER(44);
3725DEFINE_VTBL_WRAPPER(48);
3726
3727#endif
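/* Editorial note: the thunks above service CALL_VTBL_FUNC on i386 builds.
 * Each one pops the return address and the 'this' pointer pushed by the
 * cdecl caller, pushes the return address back, loads the vtable from
 * (%ecx), and tail-jumps to the slot at the given byte offset, producing a
 * proper __thiscall invocation. */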
3728
3729DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
3730DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
3731DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
3732 &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
3733DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
3734DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
3735DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
3736 &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
3737DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");
3738
3739__ASM_BLOCK_BEGIN(concurrency_vtables)
3740 __ASM_VTABLE(ExternalContextBase,
3741 VTABLE_ADD_FUNC(ExternalContextBase_GetId)
3742 VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
3743 VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
3744 VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
3745 VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
3746 VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor)
3747 VTABLE_ADD_FUNC(ExternalContextBase_Block)
3748 VTABLE_ADD_FUNC(ExternalContextBase_Yield)
3749 VTABLE_ADD_FUNC(ExternalContextBase_SpinYield)
3750 VTABLE_ADD_FUNC(ExternalContextBase_Oversubscribe)
3751 VTABLE_ADD_FUNC(ExternalContextBase_Alloc)
3752 VTABLE_ADD_FUNC(ExternalContextBase_Free)
3753 VTABLE_ADD_FUNC(ExternalContextBase_EnterCriticalRegionHelper)
3754 VTABLE_ADD_FUNC(ExternalContextBase_EnterHyperCriticalRegionHelper)
3755 VTABLE_ADD_FUNC(ExternalContextBase_ExitCriticalRegionHelper)
3756 VTABLE_ADD_FUNC(ExternalContextBase_ExitHyperCriticalRegionHelper)
3757 VTABLE_ADD_FUNC(ExternalContextBase_GetCriticalRegionType)
3758 VTABLE_ADD_FUNC(ExternalContextBase_GetContextKind));
3759 __ASM_VTABLE(ThreadScheduler,
3760 VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
3761 VTABLE_ADD_FUNC(ThreadScheduler_Id)
3762 VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
3763 VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
3764 VTABLE_ADD_FUNC(ThreadScheduler_Reference)
3765 VTABLE_ADD_FUNC(ThreadScheduler_Release)
3766 VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
3767 VTABLE_ADD_FUNC(ThreadScheduler_Attach)
3768#if _MSVCR_VER > 100
3769 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
3770#endif
3771 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
3772#if _MSVCR_VER > 100
3773 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
3774#endif
3775 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
3776#if _MSVCR_VER > 100
3777 VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
3778#endif
3779 );
3780 __ASM_VTABLE(_Timer,
3781 VTABLE_ADD_FUNC(_Timer_vector_dtor));
3782__ASM_BLOCK_END
3783
3784void msvcrt_init_concurrency(void *base)
3785{
3786#ifdef RTTI_USE_RVA
3787 init_cexception_rtti(base);
3788 init_improper_lock_rtti(base);
3789 init_improper_scheduler_attach_rtti(base);
3790 init_improper_scheduler_detach_rtti(base);
3791 init_invalid_multiple_scheduling_rtti(base);
3792 init_invalid_scheduler_policy_key_rtti(base);
3793 init_invalid_scheduler_policy_thread_specification_rtti(base);
3794 init_invalid_scheduler_policy_value_rtti(base);
3795 init_missing_wait_rtti(base);
3796 init_scheduler_resource_allocation_error_rtti(base);
3797 init_Context_rtti(base);
3798 init_ContextBase_rtti(base);
3799 init_ExternalContextBase_rtti(base);
3800 init_Scheduler_rtti(base);
3801 init_SchedulerBase_rtti(base);
3802 init_ThreadScheduler_rtti(base);
3803 init__Timer_rtti(base);
3804
3805 init_cexception_cxx_type_info(base);
3806 init_improper_lock_cxx(base);
3807 init_improper_scheduler_attach_cxx(base);
3808 init_improper_scheduler_detach_cxx(base);
3809 init_invalid_multiple_scheduling_cxx(base);
3810 init_invalid_scheduler_policy_key_cxx(base);
3811 init_invalid_scheduler_policy_thread_specification_cxx(base);
3812 init_invalid_scheduler_policy_value_cxx(base);
3813#if _MSVCR_VER >= 120
3814 init_missing_wait_cxx(base);
3815#endif
3816 init_scheduler_resource_allocation_error_cxx(base);
3817#endif
3818}
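/* Editorial note (assuming the usual Wine/ReactOS RTTI scheme): on
 * RTTI_USE_RVA targets the RTTI and C++ exception descriptors store
 * image-relative offsets, so the init_*_rtti/init_*_cxx calls above fix them
 * up against the module base at load time; builds without RTTI_USE_RVA use
 * absolute pointers and need no such initialization. */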
3819
3820void msvcrt_free_concurrency(void)
3821{
3822 if (context_tls_index != TLS_OUT_OF_INDEXES)
3823 TlsFree(context_tls_index);
3824 if(default_scheduler_policy.policy_container)
3825 SchedulerPolicy_dtor(&default_scheduler_policy);
3826 if(default_scheduler) {
3827 ThreadScheduler_dtor(default_scheduler);
3828 operator_delete(default_scheduler);
3829 }
3830}
3831
3832void msvcrt_free_scheduler_thread(void)
3833{
3834 Context *context = try_get_current_context();
3835 if (!context) return;
3836 call_Context_dtor(context, 1);
3837}
3838
3839#endif /* _MSVCR_VER >= 100 */