From 851c040519e8b758ef5b8b1c4e4eb698c3fbb605 Mon Sep 17 00:00:00 2001 From: David Grove Date: Fri, 19 May 2017 12:30:13 -0400 Subject: [PATCH 01/46] update README and INSTALL for libpwq removal Signed-off-by: Daniel A. Steffen --- INSTALL.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 01e78128c..9940c2cf7 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -148,16 +148,7 @@ too old, see http://apt.llvm.org/ to install a newer version. On older Ubuntu releases, you may need to install binutils-gold to get the gold linker. -2. Initialize git submodules. - We are using git submodules to incorporate specific revisions of the - upstream pthread_workqueue and libkqueue projects into the build. - - ``` - git submodule init - git submodule update - ``` - -3. Build (as in the general instructions above) +2. Build (as in the general instructions above) ``` sh autogen.sh From 72c267b743462d8ba64f54257238c92192dcdf31 Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Fri, 19 May 2017 10:31:34 -0700 Subject: [PATCH 02/46] Merge pull request #249 from dgrove-oss/update-docs update README and INSTALL for libpwq removal Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 963c8b113..8a0d1346b 100644 --- a/PATCHES +++ b/PATCHES @@ -332,3 +332,4 @@ github commits starting with 29bdc2f from [5e8789e] APPLIED rdar://32283666 [3fba60a] APPLIED rdar://32283666 [d6eb245] APPLIED rdar://32283666 +[0b6c22e] APPLIED rdar://33531111 From bdcd75337d28964cfed72f42eb6585580f111b74 Mon Sep 17 00:00:00 2001 From: David Grove Date: Fri, 19 May 2017 11:22:54 -0400 Subject: [PATCH 03/46] clarify config of pthread_workqueue vs. internal_workqueue As suggested in review of PR-206, DISPATCH_USE_INTERNAL_WORKQUEUES should not enable HAVE_PTHREAD_WORKQUEUES. Instead, a new DISPATCH_USE_WORKQUEUES is defined in queue.c to guard code that should be enabled in either case. Signed-off-by: Daniel A. Steffen --- configure.ac | 22 ++++++++++-------- src/Makefile.am | 5 ----- src/queue.c | 59 +++++++++++++++++++++++++------------------------ src/shims.h | 6 +++-- 4 files changed, 47 insertions(+), 45 deletions(-) diff --git a/configure.ac b/configure.ac index f9b359260..5599cb030 100644 --- a/configure.ac +++ b/configure.ac @@ -307,7 +307,11 @@ AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_pa AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h]) # pthread_workqueues. -# Look for own version first, than see if there is a system version. +# We can either use libdispatch's internal_workqueue or pthread_workqueue. +# If not specifically configured, default to internal_workqueues on +# Linux and pthread_workqueue on all other platforms. +# On any platform, if pthread_workqueue is not available, fall back +# to using internal_workqueue. 
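# (Editorial note, not part of the patch.) The AC_ARG_ENABLE declared just
# below also lets the choice be forced at configure time, e.g.:
#
#     ./configure --enable-internal-libpwq    # always use the internal workqueue
#     ./configure --disable-internal-libpwq   # prefer the system pthread_workqueue,
#                                             # falling back to the internal one if
#                                             # its headers are not found
#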
AC_ARG_ENABLE([internal-libpwq], [AS_HELP_STRING([--enable-internal-libpwq], [Use libdispatch's own implementation of pthread workqueues.])],, @@ -320,15 +324,15 @@ AC_ARG_ENABLE([internal-libpwq], esac] ) AS_IF([test "x$enable_internal_libpwq" = "xyes"], - [AC_DEFINE(DISPATCH_USE_INTERNAL_WORKQUEUE, 1, [Use libdispatch's own implementation of pthread_workqueue API]) - AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) - dispatch_use_internal_workqueue=true - have_pthread_workqueues=true], - [dispatch_use_internal_workqueue=false - AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], + [AC_DEFINE(DISPATCH_USE_INTERNAL_WORKQUEUE, 1, [Use libdispatch's own implementation of pthread workqueues]) + have_pthread_workqueues=false, + dispatch_use_internal_workqueue=true], + [AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) - have_pthread_workqueues=true], - [have_pthread_workqueues=false] + have_pthread_workqueues=true, + dispatch_use_internal_workqueue=false], + [have_pthread_workqueues=false, + dispatch_use_internal_workqueue=true] )] ) AM_CONDITIONAL(DISPATCH_USE_INTERNAL_WORKQUEUE, $dispatch_use_internal_workqueue) diff --git a/src/Makefile.am b/src/Makefile.am index ac2f74cfd..8beaf1e85 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -84,15 +84,10 @@ AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) -if DISPATCH_USE_INTERNAL_WORKQUEUE - PTHREAD_WORKQUEUE_LIBS= - PTHREAD_WORKQUEUE_CFLAGS= -else if HAVE_PTHREAD_WORKQUEUES PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue PTHREAD_WORKQUEUE_CFLAGS= endif -endif if BUILD_OWN_BLOCKS_RUNTIME libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c diff --git a/src/queue.c b/src/queue.c index a828e5713..94ddd42d4 100644 --- a/src/queue.c +++ b/src/queue.c @@ -23,8 +23,10 @@ #include "protocol.h" // _dispatch_send_wakeup_runloop_thread #endif -#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG || DISPATCH_USE_INTERNAL_WORKQUEUE) && \ - !defined(DISPATCH_ENABLE_THREAD_POOL) +#if HAVE_PTHREAD_WORKQUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE +#define DISPATCH_USE_WORKQUEUES 1 +#endif +#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && !defined(DISPATCH_ENABLE_THREAD_POOL) #define DISPATCH_ENABLE_THREAD_POOL 1 #endif #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL @@ -32,11 +34,10 @@ #endif #if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) && \ !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ - !DISPATCH_USE_INTERNAL_WORKQUEUE && \ !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ +#if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* #endif @@ -155,13 +156,13 @@ struct dispatch_root_queue_context_s { union { struct { int volatile dgq_pending; -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES qos_class_t dgq_qos; int dgq_wq_priority, dgq_wq_options; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL void *dgq_ctxt; int32_t volatile dgq_thread_pool_size; @@ 
-183,7 +184,7 @@ typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_MAINTENANCE, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, .dgq_wq_options = 0, @@ -194,7 +195,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_MAINTENANCE, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -205,7 +206,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_BACKGROUND, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, @@ -216,7 +217,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_BACKGROUND, .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -227,7 +228,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = 0, @@ -238,7 +239,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -249,7 +250,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_DEFAULT, .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = 0, @@ -260,7 +261,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_DEFAULT, .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -271,7 +272,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INITIATED, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = 0, @@ -282,7 +283,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INITIATED, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -293,7 +294,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, 
[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INTERACTIVE, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, @@ -304,7 +305,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INTERACTIVE, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, @@ -572,11 +573,11 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq) static inline bool _dispatch_root_queues_init_workq(int *wq_supported) { - int r; + int r; (void)r; bool result = false; *wq_supported = 0; -#if HAVE_PTHREAD_WORKQUEUES - bool disable_wq = false; +#if DISPATCH_USE_WORKQUEUES + bool disable_wq = false; (void)disable_wq; #if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif @@ -676,7 +677,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES return result; } @@ -692,7 +693,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, thread_pool_size = pool_size; } qc->dgq_thread_pool_size = thread_pool_size; -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES if (qc->dgq_qos) { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); (void)dispatch_assume_zero(pthread_attr_setdetachstate( @@ -1906,7 +1907,7 @@ static struct dispatch_pthread_root_queue_context_s _dispatch_mgr_root_queue_pthread_context; static struct dispatch_root_queue_context_s _dispatch_mgr_root_queue_context = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_kworkqueue = (void*)(~0ul), #endif .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context, @@ -2165,7 +2166,7 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); qc->dgq_ctxt = pqc; -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES qc->dgq_kworkqueue = (void*)(~0ul); #endif _dispatch_root_queue_init_pthread_pool(qc, pool_size, true); @@ -4378,7 +4379,7 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor) _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) #endif @@ -4407,7 +4408,7 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor) (void)dispatch_assume_zero(r); return; } -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; if (fastpath(pqc->dpq_thread_mediator.do_vtable)) { @@ -4476,7 +4477,7 @@ _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor) if (!_dispatch_queue_class_probe(dq)) { return; } -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES dispatch_root_queue_context_t qc = dq->do_ctxt; if ( #if DISPATCH_USE_PTHREAD_POOL @@ -4487,7 +4488,7 @@ _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor) "global queue: %p", dq); return; } -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES return _dispatch_global_queue_poke_slow(dq, n, 
floor); } diff --git a/src/shims.h b/src/shims.h index eba277470..8dd23ee24 100644 --- a/src/shims.h +++ b/src/shims.h @@ -41,8 +41,6 @@ #if HAVE_PTHREAD_WORKQUEUES #if __has_include() #include -#elif DISPATCH_USE_INTERNAL_WORKQUEUE -#include "event/workqueue_internal.h" #else #include #endif @@ -51,6 +49,10 @@ #endif #endif // HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_INTERNAL_WORKQUEUE +#include "event/workqueue_internal.h" +#endif + #if HAVE_PTHREAD_NP_H #include #endif From fba90ce0f7839d780b21e336c3c5ce000a45eba0 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Wed, 24 May 2017 21:03:49 -0700 Subject: [PATCH 04/46] Merge pull request #248 from dgrove-oss/queue_config_cleanup clarify config of pthread_workqueue vs. internal_workqueue Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 8a0d1346b..cb0069357 100644 --- a/PATCHES +++ b/PATCHES @@ -333,3 +333,4 @@ github commits starting with 29bdc2f from [3fba60a] APPLIED rdar://32283666 [d6eb245] APPLIED rdar://32283666 [0b6c22e] APPLIED rdar://33531111 +[5a3c02a] APPLIED rdar://33531111 From bde13a1786739d9fd5b6cb28d849f807f8af9be8 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Thu, 1 Jun 2017 01:04:28 -0700 Subject: [PATCH 05/46] Linux build fixes define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT for DISPATCH_USE_INTERNAL_WORKQUEUE remove unused _swift_dispatch_apply_current_root_queue symbol remove duplicate _dispatch_adopt_wlh_anon() in _dispatch_mgr_thread(), this is already set by its caller _dispatch_root_queue_drain() Signed-off-by: Daniel A. Steffen --- src/event/workqueue_internal.h | 2 ++ src/internal.h | 6 +++--- src/queue.c | 2 ++ src/shims/lock.c | 4 ++-- src/source.c | 1 - src/swift/Queue.swift | 3 --- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/event/workqueue_internal.h b/src/event/workqueue_internal.h index 012e554fb..9f8fc3adb 100644 --- a/src/event/workqueue_internal.h +++ b/src/event/workqueue_internal.h @@ -37,6 +37,8 @@ #define WORKQ_NUM_PRIORITIES 6 +#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x1 + #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 void _dispatch_workq_worker_register(dispatch_queue_t root_q, int priority); diff --git a/src/internal.h b/src/internal.h index 3d54e06a7..0536db107 100644 --- a/src/internal.h +++ b/src/internal.h @@ -454,7 +454,7 @@ void _dispatch_log(const char *msg, ...); * For reporting bugs within libdispatch when using the "_debug" version of the * library. */ -#if __GNUC__ +#if __APPLE__ #define dispatch_assert(e) do { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ @@ -472,7 +472,7 @@ static inline void _dispatch_assert(long e, long line) { #define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) #endif /* __GNUC__ */ -#if __GNUC__ +#if __APPLE__ /* * A lot of API return zero upon success and not-zero on fail. 
Let's capture * and log the non-zero value @@ -491,7 +491,7 @@ static inline void _dispatch_assert(long e, long line) { static inline void _dispatch_assert_zero(long e, long line) { if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); } -#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__) #endif /* __GNUC__ */ /* diff --git a/src/queue.c b/src/queue.c index 94ddd42d4..435ac96ea 100644 --- a/src/queue.c +++ b/src/queue.c @@ -4523,10 +4523,12 @@ _dispatch_return_to_kernel(void) void _dispatch_poll_for_events_4launchd(void) { +#if DISPATCH_USE_KEVENT_WORKQUEUE if (_dispatch_get_wlh()) { dispatch_assert(_dispatch_deferred_items_get()->ddi_wlh_servicing); _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); } +#endif } #if HAVE_PTHREAD_WORKQUEUE_NARROWING diff --git a/src/shims/lock.c b/src/shims/lock.c index 29e32f479..617fa016d 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -601,9 +601,9 @@ _dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, #if HAVE_UL_UNFAIR_LOCK _dispatch_unfair_lock_wait(&dgl->dgl_lock, new_value, 0, flags); #elif HAVE_FUTEX - _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG); + _dispatch_futex_wait(&dgl->dgl_lock, new_value, NULL, FUTEX_PRIVATE_FLAG); #else - _dispatch_thread_switch(tid_new, flags, timeout++); + _dispatch_thread_switch(new_value, flags, timeout++); #endif (void)timeout; } diff --git a/src/source.c b/src/source.c index bbfa22178..fd337a9a3 100644 --- a/src/source.c +++ b/src/source.c @@ -2321,7 +2321,6 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, } #endif #if DISPATCH_USE_MGR_THREAD - _dispatch_adopt_wlh_anon(); _dispatch_queue_set_current(&_dispatch_mgr_q); _dispatch_mgr_priority_init(); _dispatch_queue_mgr_lock(&_dispatch_mgr_q); diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index b7628c9cf..9075e9791 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -344,8 +344,5 @@ internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t @_silgen_name("_swift_dispatch_get_main_queue") internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t -@_silgen_name("_swift_dispatch_apply_current_root_queue") -internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t - @_silgen_name("_swift_dispatch_apply_current") internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void) From 6c1c16ad3d28149a707529b285d5f18be2647f28 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Fri, 2 Jun 2017 10:44:14 -0700 Subject: [PATCH 06/46] Merge pull request #254 from apple/das-darwin-libdispatch-890-merge-master Merge darwin/libdispatch-890 to master Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index cb0069357..f9915719c 100644 --- a/PATCHES +++ b/PATCHES @@ -334,3 +334,4 @@ github commits starting with 29bdc2f from [d6eb245] APPLIED rdar://32283666 [0b6c22e] APPLIED rdar://33531111 [5a3c02a] APPLIED rdar://33531111 +[22df1e7] APPLIED rdar://33531111 From f45ac22cf186c81ee942caaf0f92d8b667c1a3cb Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Sat, 3 Jun 2017 19:45:52 -0700 Subject: [PATCH 07/46] attempt to fix autoconf ObjC runtime check Signed-off-by: Daniel A. 
Steffen --- configure.ac | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/configure.ac b/configure.ac index 5599cb030..8f38f0829 100644 --- a/configure.ac +++ b/configure.ac @@ -374,10 +374,9 @@ AC_CHECK_HEADER([Foundation/Foundation.h], [have_foundation=true], [have_foundation=false] ) AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation) -AC_CHECK_HEADER([objc/runtime.h], [ +AC_CHECK_HEADER([objc/NSObject.h], [ AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime]) - have_objc=true], [have_objc=false], - [#include ] + have_objc=true], [have_objc=false] ) AM_CONDITIONAL(USE_OBJC, $have_objc) AC_LANG_POP([Objective C]) @@ -417,7 +416,10 @@ AC_CHECK_FUNC([sem_init], [have_sem_init=true], [have_sem_init=false] ) -AC_CHECK_HEADER(linux/futex.h, [have_futex=true], [have_futex=false]) +AC_CHECK_HEADER([linux/futex.h], [ + AC_DEFINE(HAVE_FUTEX, 1, [Define if linux/futex.h is present]) + have_futex=true], [have_futex=false] +) # # We support both Mach semaphores and POSIX semaphores; if the former are From 0245dd4760083d73034fe17a0cb749590e39db7b Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Sat, 3 Jun 2017 21:27:15 -0700 Subject: [PATCH 08/46] Merge pull request #256 from apple/revert-255-revert-254-das-darwin-libdispatch-890-merge-master Re-merge darwin/libdispatch-890 to master Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index f9915719c..95dae37c9 100644 --- a/PATCHES +++ b/PATCHES @@ -335,3 +335,4 @@ github commits starting with 29bdc2f from [0b6c22e] APPLIED rdar://33531111 [5a3c02a] APPLIED rdar://33531111 [22df1e7] APPLIED rdar://33531111 +[21273de] APPLIED rdar://33531111 From 429caa2a105f2c1c530656b5b1411b5b28053a8c Mon Sep 17 00:00:00 2001 From: SpringsUp Date: Mon, 19 Sep 2016 07:18:53 +0200 Subject: [PATCH 09/46] [Overlay/Queue]Allow setting an optional value in setSpecific Signed-off-by: Daniel A. Steffen --- src/swift/Queue.swift | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 9075e9791..1808f9b06 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -324,10 +324,10 @@ public extension DispatchQueue { return nil } - public func setSpecific(key: DispatchSpecificKey, value: T) { - let v = _DispatchSpecificValue(value: value) + public func setSpecific(key: DispatchSpecificKey, value: T?) { let k = Unmanaged.passUnretained(key).toOpaque() - let p = Unmanaged.passRetained(v).toOpaque() + let v = value.flatMap { _DispatchSpecificValue(value: $0) } + let p = v.flatMap { Unmanaged.passRetained($0).toOpaque() } dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) } } From 5b1738ba580c781670abe956d829e63bc672a10e Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Sun, 4 Jun 2017 02:13:31 -0700 Subject: [PATCH 10/46] Merge pull request #172 from karwa/removespecific [Overlay/Queue]: Allow setting an optional value in setSpecific Signed-off-by: Daniel A. 
Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 95dae37c9..c00fdb96c 100644 --- a/PATCHES +++ b/PATCHES @@ -336,3 +336,4 @@ github commits starting with 29bdc2f from [5a3c02a] APPLIED rdar://33531111 [22df1e7] APPLIED rdar://33531111 [21273de] APPLIED rdar://33531111 +[dc1857c] APPLIED rdar://33531111 From 201f3c73f2fd3e2f6c1caa83a43004a6ffb7cf91 Mon Sep 17 00:00:00 2001 From: David Grove Date: Mon, 29 May 2017 16:45:44 -0400 Subject: [PATCH 11/46] Convert dispatch_workq from legacy priorities to qos Update dispatch_workq (DISPATCH_USE_INTERNAL_WORKQUEUE) to use QoS-based constants instead of legacy priorities. Enhance monitoring code to count runnable threads from highest QoS to lowest and to suppress voluntary oversubscription for lower QoS queues if the total count of runnable worker threads is already over the desired threshold. Signed-off-by: Daniel A. Steffen --- src/event/workqueue.c | 64 +++++++++++----------------------- src/event/workqueue_internal.h | 14 ++------ src/queue.c | 36 +++++++++++++++++-- 3 files changed, 56 insertions(+), 58 deletions(-) diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 0b9bc0ac5..dbc65938c 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -69,7 +69,7 @@ typedef struct dispatch_workq_monitor_s { int num_registered_tids; } dispatch_workq_monitor_s, *dispatch_workq_monitor_t; -static dispatch_workq_monitor_s _dispatch_workq_monitors[WORKQ_NUM_PRIORITIES]; +static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_MAX]; #pragma mark Implementation of the monitoring subsystem. @@ -80,12 +80,13 @@ static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED); static dispatch_once_t _dispatch_workq_init_once_pred; void -_dispatch_workq_worker_register(dispatch_queue_t root_q, int priority) +_dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls) { dispatch_once_f(&_dispatch_workq_init_once_pred, NULL, &_dispatch_workq_init_once); #if HAVE_DISPATCH_WORKQ_MONITORING - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[priority]; + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; dispatch_assert(mon->dq == root_q); dispatch_tid tid = _dispatch_thread_getspecific(tid); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); @@ -97,10 +98,11 @@ _dispatch_workq_worker_register(dispatch_queue_t root_q, int priority) } void -_dispatch_workq_worker_unregister(dispatch_queue_t root_q, int priority) +_dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls) { #if HAVE_DISPATCH_WORKQ_MONITORING - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[priority]; + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; dispatch_tid tid = _dispatch_thread_getspecific(tid); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); for (int i = 0; i < mon->num_registered_tids; i++) { @@ -177,16 +179,10 @@ _dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon) static void _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) { - // TODO: Once we switch away from the legacy priorities to - // newer QoS, we can loop in order of decreasing QoS - // and track the total number of runnable threads seen - // across pools. 
We can then use that number to - // implement a global policy where low QoS queues - // are not eligible for over-subscription if the higher - // QoS queues have already consumed the target - // number of threads. - for (int i = 0; i < WORKQ_NUM_PRIORITIES; i++) { - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; + int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * dispatch_hw_config(active_cpus); + int global_runnable = 0; + for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; dispatch_queue_t dq = mon->dq; if (!_dispatch_queue_class_probe(dq)) { @@ -198,8 +194,10 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) _dispatch_debug("workq: %s has %d runnable wokers (target is %d)", dq->dq_label, mon->num_runnable, mon->target_runnable); + global_runnable += mon->num_runnable; + if (mon->num_runnable == 0) { - // We are below target, and no worker is runnable. + // We have work, but no worker is runnable. // It is likely the program is stalled. Therefore treat // this as if dq were an overcommit queue and call poke // with the limit being the maximum number of workers for dq. @@ -207,7 +205,9 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) _dispatch_debug("workq: %s has no runnable workers; poking with floor %d", dq->dq_label, floor); _dispatch_global_queue_poke(dq, 1, floor); - } else if (mon->num_runnable < mon->target_runnable) { + global_runnable += 1; // account for poke in global estimate + } else if (mon->num_runnable < mon->target_runnable && + global_runnable < global_soft_max) { // We are below target, but some workers are still runnable. // We want to oversubscribe to hit the desired load target. // However, this under-utilization may be transitory so set the @@ -218,42 +218,20 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) _dispatch_debug("workq: %s under utilization target; poking with floor %d", dq->dq_label, floor); _dispatch_global_queue_poke(dq, 1, floor); + global_runnable += 1; // account for poke in global estimate } } } #endif // HAVE_DISPATCH_WORKQ_MONITORING - -// temporary until we switch over to QoS based interface. 
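/*
 * (Editorial worked example, not part of the patch.) The effect of the
 * global soft cap computed above in _dispatch_workq_monitor_pools(),
 * assuming 4 active CPUs and an oversubscribe factor of 2 (the real value
 * of WORKQ_OVERSUBSCRIBE_FACTOR is defined elsewhere in this file):
 *
 *     global_soft_max = 2 * 4 = 8
 *
 * The monitor walks the QoS classes from highest to lowest. If the
 * USER_INITIATED and DEFAULT pools already account for 8 runnable workers,
 * a below-target UTILITY pool is no longer oversubscribed, because
 * global_runnable >= global_soft_max. A pool with queued work but zero
 * runnable workers is still poked regardless of the cap, since a stalled
 * pool is treated like an overcommit queue.
 */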
-static dispatch_queue_t -get_root_queue_from_legacy_priority(int priority) -{ - switch (priority) { - case WORKQ_HIGH_PRIOQUEUE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS]; - case WORKQ_DEFAULT_PRIOQUEUE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS]; - case WORKQ_LOW_PRIOQUEUE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS]; - case WORKQ_BG_PRIOQUEUE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]; - case WORKQ_BG_PRIOQUEUE_CONDITIONAL: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS]; - case WORKQ_HIGH_PRIOQUEUE_CONDITIONAL: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]; - default: - return NULL; - } -} - static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED) { #if HAVE_DISPATCH_WORKQ_MONITORING int target_runnable = dispatch_hw_config(active_cpus); - for (int i = 0; i < WORKQ_NUM_PRIORITIES; i++) { - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = get_root_queue_from_legacy_priority(i); + for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; + mon->dq = _dispatch_get_root_queue(i, false); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; diff --git a/src/event/workqueue_internal.h b/src/event/workqueue_internal.h index 9f8fc3adb..94dfe4e36 100644 --- a/src/event/workqueue_internal.h +++ b/src/event/workqueue_internal.h @@ -27,22 +27,12 @@ #ifndef __DISPATCH_WORKQUEUE_INTERNAL__ #define __DISPATCH_WORKQUEUE_INTERNAL__ -/* Work queue priority attributes. */ -#define WORKQ_HIGH_PRIOQUEUE 0 -#define WORKQ_DEFAULT_PRIOQUEUE 1 -#define WORKQ_LOW_PRIOQUEUE 2 -#define WORKQ_BG_PRIOQUEUE 3 -#define WORKQ_BG_PRIOQUEUE_CONDITIONAL 4 -#define WORKQ_HIGH_PRIOQUEUE_CONDITIONAL 5 - -#define WORKQ_NUM_PRIORITIES 6 - #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x1 #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 -void _dispatch_workq_worker_register(dispatch_queue_t root_q, int priority); -void _dispatch_workq_worker_unregister(dispatch_queue_t root_q, int priority); +void _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls); +void _dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls); #if defined(__linux__) #define HAVE_DISPATCH_WORKQ_MONITORING 1 diff --git a/src/queue.c b/src/queue.c index 435ac96ea..4d506ef05 100644 --- a/src/queue.c +++ b/src/queue.c @@ -37,6 +37,9 @@ !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#define DISPATCH_USE_WORKQ_PRIORITY 1 +#endif #if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* @@ -158,7 +161,10 @@ struct dispatch_root_queue_context_s { int volatile dgq_pending; #if DISPATCH_USE_WORKQUEUES qos_class_t dgq_qos; - int dgq_wq_priority, dgq_wq_options; +#if DISPATCH_USE_WORKQ_PRIORITY + int dgq_wq_priority; +#endif + int dgq_wq_options; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif @@ -186,7 +192,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = 
QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -197,7 +205,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -208,7 +218,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -219,7 +231,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -230,7 +244,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -241,7 +257,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -252,7 +270,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -263,7 +283,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -274,7 +296,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -285,7 +309,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ 
-296,7 +322,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, +#endif .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -307,7 +335,9 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ #if DISPATCH_USE_WORKQUEUES .dgq_qos = QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, +#endif .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -5809,7 +5839,7 @@ _dispatch_worker_thread(void *context) bool manager = (dq == &_dispatch_mgr_root_queue); bool monitored = !(overcommit || manager); if (monitored) { - _dispatch_workq_worker_register(dq, qc->dgq_wq_priority); + _dispatch_workq_worker_register(dq, qc->dgq_qos); } #endif @@ -5823,7 +5853,7 @@ _dispatch_worker_thread(void *context) #if DISPATCH_USE_INTERNAL_WORKQUEUE if (monitored) { - _dispatch_workq_worker_unregister(dq, qc->dgq_wq_priority); + _dispatch_workq_worker_unregister(dq, qc->dgq_qos); } #endif (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); From 8b1e9e788d89c4d8e9554635ad35146f8308e2ef Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Sun, 4 Jun 2017 17:13:35 -0700 Subject: [PATCH 12/46] Merge pull request #253 from dgrove-oss/linux-qos-prioritty Convert dispatch_workq from legacy priorities to qos Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index c00fdb96c..c85ab7779 100644 --- a/PATCHES +++ b/PATCHES @@ -337,3 +337,4 @@ github commits starting with 29bdc2f from [22df1e7] APPLIED rdar://33531111 [21273de] APPLIED rdar://33531111 [dc1857c] APPLIED rdar://33531111 +[56f36b6] APPLIED rdar://33531111 From d015a755ff4782919fe0700a86a28dc715ccdb47 Mon Sep 17 00:00:00 2001 From: John Holdsworth Date: Wed, 21 Jun 2017 01:19:45 +0100 Subject: [PATCH 13/46] Thread detach hook for Java JNI on Android Signed-off-by: Daniel A. Steffen --- private/queue_private.h | 15 +++++++++++++++ src/queue.c | 17 +++++++++++++++++ src/swift/Queue.swift | 9 +++++++++ 3 files changed, 41 insertions(+) diff --git a/private/queue_private.h b/private/queue_private.h index 98c7f5e7b..2b50eb891 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -324,6 +324,21 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); +#ifdef __ANDROID__ +/*! + * @function _dispatch_install_thread_detach_callback + * + * @param callback + * Function to be called before each worker thread exits to detach JVM. + * + * Hook to be able to detach threads from the Java JVM before they exit. + * If JNI has been used on a thread on Android it needs to have been + * "detached" before the thread exits or the application will crash. 
+ */ +DISPATCH_EXPORT +void _dispatch_install_thread_detach_callback(dispatch_function_t cb); +#endif + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/src/queue.c b/src/queue.c index 4d506ef05..2406e7e7b 100644 --- a/src/queue.c +++ b/src/queue.c @@ -885,6 +885,18 @@ gettid(void) if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ } while (0) +#ifdef __ANDROID__ +static void (*_dispatch_thread_detach_callback)(void); + +void +_dispatch_install_thread_detach_callback(dispatch_function_t cb) +{ + if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { + DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); + } +} +#endif + void _libdispatch_tsd_cleanup(void *ctx) { @@ -909,6 +921,11 @@ _libdispatch_tsd_cleanup(void *ctx) _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); +#ifdef __ANDROID__ + if (_dispatch_thread_detach_callback) { + _dispatch_thread_detach_callback(); + } +#endif tsd->tid = 0; } diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 1808f9b06..b946a80f4 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -330,6 +330,15 @@ public extension DispatchQueue { let p = v.flatMap { Unmanaged.passRetained($0).toOpaque() } dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) } + + #if os(Android) + @_silgen_name("_dispatch_install_thread_detach_callback") + private static func _dispatch_install_thread_detach_callback(_ cb: @escaping @convention(c) () -> Void) + + public static func setThreadDetachCallback(_ cb: @escaping @convention(c) () -> Void) { + _dispatch_install_thread_detach_callback(cb) + } + #endif } private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) { From a1a808bd522063497f522edd7ead59109f9fa65a Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Wed, 21 Jun 2017 07:55:55 +0200 Subject: [PATCH 14/46] Merge pull request #259 from johnno1962a/master Thread detach hook for Java JNI on Android Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index c85ab7779..724d7eded 100644 --- a/PATCHES +++ b/PATCHES @@ -338,3 +338,4 @@ github commits starting with 29bdc2f from [21273de] APPLIED rdar://33531111 [dc1857c] APPLIED rdar://33531111 [56f36b6] APPLIED rdar://33531111 +[c87c6bb] APPLIED rdar://33531111 From 495ec35de901ab2ea5adfd15ddfb3387fad59015 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 27 Jun 2017 21:36:10 -0700 Subject: [PATCH 15/46] build: use target_sources instead of custom lists Use the target_sources function to add additional sources to the dispatch target rather than managing lists. This simplifies the handling and modernises the CMake usage. Signed-off-by: Daniel A. 
Steffen --- src/CMakeLists.txt | 86 ++++++++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 8bc572bfb..e9f69fbe9 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,44 +1,6 @@ include(SwiftSupport) -set(dispatch_BLOCK_SOURCES block.cpp) -if(HAVE_OBJC) - list(APPEND dispatch_BLOCK_SOURCES data.m object.m) -endif() -set(dispatch_SWIFT_SOURCES) -if(CMAKE_SWIFT_COMPILER) - set(swift_optimization_flags) - if(CMAKE_BUILD_TYPE MATCHES Release) - set(swift_optimization_flags -O) - endif() - add_swift_library(swiftDispatch - MODULE_NAME - Dispatch - MODULE_LINK_NAME - dispatch - MODULE_PATH - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - OUTPUT - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o - SOURCES - swift/Block.swift - swift/Data.swift - swift/Dispatch.swift - swift/IO.swift - swift/Private.swift - swift/Queue.swift - swift/Source.swift - swift/Time.swift - swift/Wrapper.swift - CFLAGS - -fblocks - -fmodule-map-file=${CMAKE_SOURCE_DIR}/dispatch/module.modulemap - SWIFT_FLAGS - -I ${CMAKE_SOURCE_DIR} - ${swift_optimization_flags}) - list(APPEND dispatch_SWIFT_SOURCES - swift/DispatchStubs.cc;${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) -endif() add_library(dispatch allocator.c apply.c @@ -90,9 +52,51 @@ add_library(dispatch shims/perfmon.h shims/time.h shims/tsd.h - shims/yield.h - ${dispatch_BLOCK_SOURCES} - ${dispatch_SWIFT_SOURCES}) + shims/yield.h) +target_sources(dispatch + PRIVATE + block.cpp) +if(HAVE_OBJC) + target_sources(dispatch + PRIVATE + data.m + object.m) +endif() +if(CMAKE_SWIFT_COMPILER) + set(swift_optimization_flags) + if(CMAKE_BUILD_TYPE MATCHES Release) + set(swift_optimization_flags -O) + endif() + add_swift_library(swiftDispatch + MODULE_NAME + Dispatch + MODULE_LINK_NAME + dispatch + MODULE_PATH + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule + OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o + SOURCES + swift/Block.swift + swift/Data.swift + swift/Dispatch.swift + swift/IO.swift + swift/Private.swift + swift/Queue.swift + swift/Source.swift + swift/Time.swift + swift/Wrapper.swift + CFLAGS + -fblocks + -fmodule-map-file=${CMAKE_SOURCE_DIR}/dispatch/module.modulemap + SWIFT_FLAGS + -I ${CMAKE_SOURCE_DIR} + ${swift_optimization_flags}) + target_sources(dispatch + PRIVATE + swift/DispatchStubs.cc + ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) +endif() target_include_directories(dispatch PRIVATE ${CMAKE_BINARY_DIR} From 10890b721879bfbc01a9d41ad2e4781783bd70c4 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 27 Jun 2017 21:46:53 -0700 Subject: [PATCH 16/46] build: support internal pthread workqueues in cmake Add support to the cmake based build system to use the internal pthread workqueue implementation. This restores parity with the autotools build. Signed-off-by: Daniel A. 
Steffen --- CMakeLists.txt | 53 ++++++++++++++++++++++------------------------ cmake/config.h.in | 2 +- src/CMakeLists.txt | 6 ++++++ 3 files changed, 32 insertions(+), 29 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 755f3c6c6..3ffb02507 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,11 +15,14 @@ set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED) +include(CheckCSourceCompiles) +include(CheckFunctionExists) +include(CheckIncludeFiles) +include(CheckLibraryExists) +include(CheckSymbolExists) include(GNUInstallDirs) -include(ExternalProject) set(WITH_BLOCKS_RUNTIME "" CACHE PATH "Path to blocks runtime") -set(WITH_PTHREAD_WORKQUEUES "" CACHE PATH "Path to pthread-workqueues") include(DispatchAppleOptions) @@ -34,27 +37,27 @@ set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via __thread" ON) set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) -if(EXISTS "${CMAKE_SOURCE_DIR}/libpwq/CMakeLists.txt") - ExternalProject_Add(pwq - SOURCE_DIR - "${CMAKE_SOURCE_DIR}/libpwq" - CMAKE_ARGS - -DCMAKE_INSTALL_PREFIX= - -DCMAKE_INSTALL_LIBDIR=${CMAKE_INSTALL_LIBDIR} - -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} - BUILD_BYPRODUCTS - /${CMAKE_INSTALL_LIBDIR}/${CMAKE_STATIC_LIBRARY_PREFIX}pthread_workqueue${CMAKE_STATIC_LIBRARY_SUFFIX}) - ExternalProject_Get_Property(pwq install_dir) - add_library(PTHREAD::workqueue UNKNOWN IMPORTED) - set_target_properties(PTHREAD::workqueue - PROPERTIES - IMPORTED_LOCATION ${install_dir}/${CMAKE_INSTALL_LIBDIR}/${CMAKE_STATIC_LIBRARY_PREFIX}pthread_workqueue${CMAKE_STATIC_LIBRARY_SUFFIX}) - set(WITH_PTHREAD_WORKQUEUES "${install_dir}" CACHE PATH "Path to pthread-workqueues" FORCE) - set(HAVE_PTHREAD_WORKQUEUES 1) +if(CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL Android OR + CMAKE_SYSTEM_NAME STREQUAL Windows) + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) else() - # TODO(compnerd) support system installed pthread-workqueues - # find_package(pthread_workqueues REQUIRED) - # set(HAVE_PTHREAD_WORKQUEUES 1) + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF) +endif() +option(ENABLE_INTERNAL_PTHREAD_WORKQUEUES "use libdispatch's own implementation of pthread workqueues" ${ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT}) +if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 1) + set(HAVE_PTHREAD_WORKQUEUES 0) +else() + check_include_files(pthread/workqueue_private.h HAVE_PTHREAD_WORKQUEUE_PRIVATE_H) + check_include_files(pthread_workqueue.h HAVE_PTHREAD_WORKQUEUE_H) + if(HAVE_PTHREAD_WORKQUEUE_PRIVATE_H AND HAVE_PTHREAD_WORKQUEUE_H) + set(HAVE_PTHREAD_WORKQUEUES 1) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 0) + else() + set(HAVE_PTHREAD_WORKQUEUES 0) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 1) + endif() endif() if(CMAKE_SYSTEM_NAME STREQUAL Linux OR @@ -78,12 +81,6 @@ else() # find_package(BlocksRuntime REQUIRED) endif() -include(CheckCSourceCompiles) -include(CheckFunctionExists) -include(CheckIncludeFiles) -include(CheckLibraryExists) -include(CheckSymbolExists) - check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) if(_GNU_SOURCE) set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE) diff --git a/cmake/config.h.in b/cmake/config.h.in index 27156dce4..6696e9863 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -1,6 +1,6 @@ /* Define if building pthread work queues from source */ -#cmakedefine 
BUILD_OWN_PTHREAD_WORKQUEUES +#cmakedefine01 DISPATCH_USE_INTERNAL_WORKQUEUE /* Enable usage of thread local storage via __thread */ #cmakedefine01 DISPATCH_USE_THREAD_LOCAL_STORAGE diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index e9f69fbe9..17c89980c 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -53,6 +53,12 @@ add_library(dispatch shims/time.h shims/tsd.h shims/yield.h) +if(DISPATCH_USE_INTERNAL_WORKQUEUE) + target_sources(dispatch + PRIVATE + event/workqueue.c + event/workqueue_internal.h) +endif() target_sources(dispatch PRIVATE block.cpp) From 7cd88671d86b8556948fa2a6f8b4527d9a2552d3 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Tue, 27 Jun 2017 21:58:32 -0700 Subject: [PATCH 17/46] Merge pull request #260 from compnerd/cmake-parity cmake parity updates Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 724d7eded..0b7089248 100644 --- a/PATCHES +++ b/PATCHES @@ -339,3 +339,4 @@ github commits starting with 29bdc2f from [dc1857c] APPLIED rdar://33531111 [56f36b6] APPLIED rdar://33531111 [c87c6bb] APPLIED rdar://33531111 +[b791d23] APPLIED rdar://33531111 From f5e5230292d21a6d06628411889bf9d3536a6d89 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 27 Jun 2017 22:01:36 -0700 Subject: [PATCH 18/46] build: create modulemap symlinks Use a darwin specific modulemap on darwin, and the generic one elsewhere. This fixes the cmake build to create the symlinks like the autotools build and cause the installation to be correct on Darwin as well as the build. Signed-off-by: Daniel A. Steffen --- CMakeLists.txt | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3ffb02507..ed5c139c7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -188,6 +188,23 @@ if(leaks_EXECUTABLE) set(HAVE_LEAKS TRUE) endif() +if(CMAKE_SYSTEM_NAME STREQUAL Darwin) + add_custom_command(OUTPUT + "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + "${CMAKE_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") +else() + add_custom_command(OUTPUT + "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + "${CMAKE_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") +endif() configure_file("${CMAKE_SOURCE_DIR}/cmake/config.h.in" "${CMAKE_BINARY_DIR}/config/config_ac.h") add_definitions(-DHAVE_CONFIG_H) From 66416c376bb06a7cb927d6cc5d73bdd7cb760d7d Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Tue, 27 Jun 2017 22:29:57 -0700 Subject: [PATCH 19/46] Merge pull request #261 from compnerd/module-maps build: switch the module map according to the target Signed-off-by: Daniel A. 
Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 0b7089248..cb8156a77 100644 --- a/PATCHES +++ b/PATCHES @@ -340,3 +340,4 @@ github commits starting with 29bdc2f from [56f36b6] APPLIED rdar://33531111 [c87c6bb] APPLIED rdar://33531111 [b791d23] APPLIED rdar://33531111 +[c2d0c49] APPLIED rdar://33531111 From 5c3c3cef864117522fd6f72a14b826cd2ddaf017 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Tue, 27 Jun 2017 22:26:21 -0700 Subject: [PATCH 20/46] build: add option to use the gold linker This used to be an option in the autotools build. Address the TODO item and add an option to control that. Signed-off-by: Daniel A. Steffen --- CMakeLists.txt | 9 +++++++-- src/CMakeLists.txt | 6 ++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ed5c139c7..1593569bd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,8 +31,13 @@ set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) # TODO(compnerd) swift options -# TODO(compnerd) consider adding a flag for USE_GOLD_LINKER. Currently, we -# expect the user to specify `-fuse-ld=gold` +if(CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL Android) + set(USE_GOLD_LINKER_DEFAULT ON) +else() + set(USE_GOLD_LINKER_DEFAULT OFF) +endif() +option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT}) option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via __thread" ON) set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 17c89980c..bbc7f461a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -179,6 +179,12 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) "-Xlinker -dead_strip" "-Xlinker -alias_list -Xlinker ${CMAKE_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") endif() +if(USE_GOLD_LINKER) + set_property(TARGET dispatch + APPEND_STRING + PROPERTY LINK_FLAGS + -fuse-ld=gold) +endif() install(TARGETS dispatch From 2daa1d75f43b8dbff7501cce7ada1748370d119a Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Tue, 27 Jun 2017 22:31:24 -0700 Subject: [PATCH 21/46] Merge pull request #262 from compnerd/gold build: add option to use the gold linker Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index cb8156a77..a5f48bc75 100644 --- a/PATCHES +++ b/PATCHES @@ -341,3 +341,4 @@ github commits starting with 29bdc2f from [c87c6bb] APPLIED rdar://33531111 [b791d23] APPLIED rdar://33531111 [c2d0c49] APPLIED rdar://33531111 +[1d25040] APPLIED rdar://33531111 From 4b4519417ee593f8a10ab9a26f517a4410720e93 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 28 Jun 2017 12:10:24 -0700 Subject: [PATCH 22/46] build: add dtrace USDT probe support Add the missing support for dtrace header script support. Signed-off-by: Daniel A. 
Steffen --- cmake/modules/DTrace.cmake | 26 ++++++++++++++++++++++++++ src/CMakeLists.txt | 10 ++++++++++ 2 files changed, 36 insertions(+) create mode 100644 cmake/modules/DTrace.cmake diff --git a/cmake/modules/DTrace.cmake b/cmake/modules/DTrace.cmake new file mode 100644 index 000000000..20a28ccaa --- /dev/null +++ b/cmake/modules/DTrace.cmake @@ -0,0 +1,26 @@ + +function(dtrace_usdt_probe script) + set(options) + set(single_parameter_options TARGET_NAME OUTPUT_SOURCES) + set(multiple_parameter_options) + + cmake_parse_arguments("" "${options}" "${single_parameter_options}" "${multiple_parameter_options}" ${ARGN}) + + get_filename_component(script_we ${script} NAME_WE) + + add_custom_command(OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h + COMMAND + ${dtrace_EXECUTABLE} -h -s ${script} -o ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h + DEPENDS + ${script}) + add_custom_target(dtrace-usdt-header-${script_we} + DEPENDS + ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h) + if(_TARGET_NAME) + set(${_TARGET_NAME} dtrace-usdt-header-${script_we} PARENT_SCOPE) + endif() + if(_OUTPUT_SOURCES) + set(${_OUTPUT_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h PARENT_SCOPE) + endif() +endfunction() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index bbc7f461a..ac2b30a9b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,5 +1,6 @@ include(SwiftSupport) +include(DTrace) add_library(dispatch allocator.c @@ -103,11 +104,20 @@ if(CMAKE_SWIFT_COMPILER) swift/DispatchStubs.cc ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) endif() +if(dtrace_EXECUTABLE) + dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d + OUTPUT_SOURCES + dispatch_dtrace_provider_headers) + target_sources(dispatch + PRIVATE + ${dispatch_dtrace_provider_headers}) +endif() target_include_directories(dispatch PRIVATE ${CMAKE_BINARY_DIR} ${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR}/private) if(WITH_PTHREAD_WORKQUEUES) target_include_directories(dispatch From 5f00fb00cc7b2895edae8cb54c720805e71a8ec9 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Thu, 29 Jun 2017 11:36:25 -0700 Subject: [PATCH 23/46] Merge pull request #264 from compnerd/dtrace-usdt-probes build: add dtrace USDT probe support Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index a5f48bc75..89b596d67 100644 --- a/PATCHES +++ b/PATCHES @@ -342,3 +342,4 @@ github commits starting with 29bdc2f from [b791d23] APPLIED rdar://33531111 [c2d0c49] APPLIED rdar://33531111 [1d25040] APPLIED rdar://33531111 +[ab89c6c] APPLIED rdar://33531111 From 90352b30176a8a87e8a1aa547d85f8be17195747 Mon Sep 17 00:00:00 2001 From: Saleem Abdulrasool Date: Wed, 28 Jun 2017 13:25:16 -0700 Subject: [PATCH 24/46] build: remove dead code Remove the supporting code for WITH_PTHREAD_WORKQUEUES which was removed previously. NFC. Signed-off-by: Daniel A. 
Steffen --- src/CMakeLists.txt | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ac2b30a9b..4ce0e33b5 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -119,11 +119,6 @@ target_include_directories(dispatch ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_SOURCE_DIR}/private) -if(WITH_PTHREAD_WORKQUEUES) - target_include_directories(dispatch - SYSTEM BEFORE PRIVATE - "${WITH_PTHREAD_WORKQUEUES}/include") -endif() if(WITH_BLOCKS_RUNTIME) target_include_directories(dispatch SYSTEM BEFORE PRIVATE @@ -174,9 +169,6 @@ if(BSD_OVERLAY_FOUND) target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) endif() target_link_libraries(dispatch PRIVATE Threads::Threads) -if(WITH_PTHREAD_WORKQUEUES) - target_link_libraries(dispatch PRIVATE PTHREAD::workqueue) -endif() if(WITH_BLOCKS_RUNTIME) target_link_libraries(dispatch PRIVATE BlocksRuntime) endif() From 6acca8dec37f2862edee5f8db579797f9384facc Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Thu, 29 Jun 2017 11:39:27 -0700 Subject: [PATCH 25/46] Merge pull request #266 from compnerd/pwq build: remove dead code Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 89b596d67..2eb740567 100644 --- a/PATCHES +++ b/PATCHES @@ -343,3 +343,4 @@ github commits starting with 29bdc2f from [c2d0c49] APPLIED rdar://33531111 [1d25040] APPLIED rdar://33531111 [ab89c6c] APPLIED rdar://33531111 +[e591e7e] APPLIED rdar://33531111 From 6fc4e182407eda0058b18ca034169c67b640fe5c Mon Sep 17 00:00:00 2001 From: Simon Evans Date: Fri, 16 Jun 2017 12:05:25 +0100 Subject: [PATCH 26/46] pr_objc: Expose objc_retainAutoreleasedReturnValue() - This allows it to be used by Foundation so that the duplicate function can be removed. Signed-off-by: Daniel A. Steffen --- src/swift/DispatchStubs.cc | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index de309c737..1eaf4bd93 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -199,8 +199,19 @@ SOURCE(VNODE) #endif SOURCE(WRITE) -// See comment in CFFuntime.c explaining why objc_retainAutoreleasedReturnValue is needed. +#if !USE_OBJC + +// For CF functions with 'Get' semantics, the compiler currently assumes that +// the result is autoreleased and must be retained. It does so on all platforms +// by emitting a call to objc_retainAutoreleasedReturnValue. On Darwin, this is +// implemented by the ObjC runtime. On non-ObjC platforms, there is no runtime, +// and therefore we have to stub it out here ourselves. The compiler will +// eventually call swift_release to balance the retain below. This is a +// workaround until the compiler no longer emits this callout on non-ObjC +// platforms. extern "C" void swift_retain(void *); + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { if (obj) { swift_retain(obj); @@ -208,3 +219,5 @@ extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { } else return NULL; } + +#endif // !USE_OBJC From 695e9699bdf7ad6c59b6229ca9593770dc3ce5b4 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Thu, 29 Jun 2017 17:03:43 -0700 Subject: [PATCH 27/46] Merge pull request #258 from spevans/pr_expose_symbol Export objc_retainAutoreleasedReturnValue() Signed-off-by: Daniel A. 
Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 2eb740567..19857c667 100644 --- a/PATCHES +++ b/PATCHES @@ -344,3 +344,4 @@ github commits starting with 29bdc2f from [1d25040] APPLIED rdar://33531111 [ab89c6c] APPLIED rdar://33531111 [e591e7e] APPLIED rdar://33531111 +[ded5bab] APPLIED rdar://33531111 From 1d62d26b7a9ee8c2c29e4d41df1ea21177e4e702 Mon Sep 17 00:00:00 2001 From: David Grove Date: Thu, 29 Jun 2017 18:24:18 -0400 Subject: [PATCH 28/46] CMake compatibility with Swift build assumptions All of the swift projects that depend on finding libdispatch.so during their builds (swift, foundation, swiftpm) all expect it to be in src/.libs/libdispatch.so (libtool convention). Temporarily add a rule to copy the built library to where libtool would have placed it to decouple using CMake for libdispatch from updating all of the other dependent projects' build expectations. Signed-off-by: Daniel A. Steffen --- src/CMakeLists.txt | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4ce0e33b5..2ec2691fc 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -188,6 +188,15 @@ if(USE_GOLD_LINKER) -fuse-ld=gold) endif() +# Temporary staging; the various swift projects that depend on libdispatch +# all expect libdispatch.so to be in src/.libs/libdispatch.so +# So for now, make a copy so we don't have to do a coordinated commit across +# all the swift projects to change this assumption. +add_custom_command(TARGET dispatch POST_BUILD + COMMAND cmake -E make_directory .libs + COMMAND cmake -E copy $ .libs + COMMENT "Copying libdispatch to .libs") + install(TARGETS dispatch DESTINATION From edb6f9eed9c7bcf919350c536916e27fb97b45e9 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Fri, 30 Jun 2017 18:06:15 -0700 Subject: [PATCH 29/46] Merge pull request #267 from dgrove-oss/cmake-copy-libdisaptch CMake compatibility with Swift build assumptions Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 19857c667..ae36d1bc0 100644 --- a/PATCHES +++ b/PATCHES @@ -345,3 +345,4 @@ github commits starting with 29bdc2f from [ab89c6c] APPLIED rdar://33531111 [e591e7e] APPLIED rdar://33531111 [ded5bab] APPLIED rdar://33531111 +[ce90d0c] APPLIED rdar://33531111 From 945851260bbd5f5f125dd1a43b81b23528b87d0b Mon Sep 17 00:00:00 2001 From: David Grove Date: Fri, 30 Jun 2017 13:47:20 -0400 Subject: [PATCH 30/46] Set CMake defaults to match autoconf Linux behavior. There may be a more elegant way to do this, but we really want to build shared libraries by default on Linux and we want to build the test cases and be able to run them. Signed-off-by: Daniel A. Steffen --- CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1593569bd..f6b078e25 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,6 +31,10 @@ set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) # TODO(compnerd) swift options +option(BUILD_SHARED_LIBS "build shared libraries" ON) + +option(ENABLE_TESTING "build libdispatch tests" ON) + if(CMAKE_SYSTEM_NAME STREQUAL Linux OR CMAKE_SYSTEM_NAME STREQUAL Android) set(USE_GOLD_LINKER_DEFAULT ON) From 1d61b1e7d7070bb5a13a67ed02796b20817773bd Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Fri, 30 Jun 2017 18:07:27 -0700 Subject: [PATCH 31/46] Merge pull request #269 from dgrove-oss/cmake-default-args Set CMake defaults to match autoconf Linux behavior. 
Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ae36d1bc0..772c934d3 100644 --- a/PATCHES +++ b/PATCHES @@ -346,3 +346,4 @@ github commits starting with 29bdc2f from [e591e7e] APPLIED rdar://33531111 [ded5bab] APPLIED rdar://33531111 [ce90d0c] APPLIED rdar://33531111 +[69c8f3e] APPLIED rdar://33531111 From 43b23fc24f8d4130eb77209112488269ba36ec0e Mon Sep 17 00:00:00 2001 From: Kim Topley Date: Thu, 29 Jun 2017 16:23:10 -0700 Subject: [PATCH 32/46] Fix warnings in DispatchQueue.sync() implementation when using a compiler with SE-0176 support. Signed-off-by: Daniel A. Steffen --- src/swift/Queue.swift | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index b946a80f4..bff1bc323 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -216,11 +216,13 @@ public extension DispatchQueue { { var result: T? var error: Swift.Error? - fn { - do { - result = try work() - } catch let e { - error = e + withoutActuallyEscaping(work) { _work in + fn { + do { + result = try _work() + } catch let e { + error = e + } } } if let e = error { From 422d7361d2e79c39aa67561cbce8f5600662433c Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Fri, 30 Jun 2017 18:15:16 -0700 Subject: [PATCH 33/46] Merge pull request #268 from ktopley-apple/dispatch-sync-fixup-overlay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warnings in DispatchQueue.sync() implementation when using a comp… Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 772c934d3..b44a895c6 100644 --- a/PATCHES +++ b/PATCHES @@ -347,3 +347,4 @@ github commits starting with 29bdc2f from [ded5bab] APPLIED rdar://33531111 [ce90d0c] APPLIED rdar://33531111 [69c8f3e] APPLIED rdar://33531111 +[23a3a84] APPLIED rdar://33531111 From c01599944879e58556a6e34585ac6539f5e83000 Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Thu, 13 Jul 2017 14:51:43 -0700 Subject: [PATCH 34/46] Remove dependency on sys_membarrier on linux By not inlining the fastpath of dispatch_once() when the hardware cannot give us the right semantics. Signed-off-by: Daniel A. Steffen --- dispatch/once.h | 12 ++++++++++++ src/once.c | 24 +++++++++++++++++++++--- src/shims/lock.h | 24 ------------------------ 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/dispatch/once.h b/dispatch/once.h index 68acfe803..37a49506d 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -40,6 +40,14 @@ __BEGIN_DECLS DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") typedef long dispatch_once_t; +#if defined(__x86_64__) || defined(__i386__) || defined(__s390x__) +#define DISPATCH_ONCE_INLINE_FASTPATH 1 +#elif defined(__APPLE__) +#define DISPATCH_ONCE_INLINE_FASTPATH 1 +#else +#define DISPATCH_ONCE_INLINE_FASTPATH 0 +#endif + /*! 
* @function dispatch_once * @@ -65,6 +73,7 @@ void dispatch_once(dispatch_once_t *predicate, DISPATCH_NOESCAPE dispatch_block_t block); +#if DISPATCH_ONCE_INLINE_FASTPATH DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void @@ -81,6 +90,7 @@ _dispatch_once(dispatch_once_t *predicate, #undef dispatch_once #define dispatch_once _dispatch_once #endif +#endif // DISPATCH_ONCE_INLINE_FASTPATH API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW @@ -89,6 +99,7 @@ void dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function); +#if DISPATCH_ONCE_INLINE_FASTPATH DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") @@ -105,6 +116,7 @@ _dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, } #undef dispatch_once_f #define dispatch_once_f _dispatch_once_f +#endif // DISPATCH_ONCE_INLINE_FASTPATH __END_DECLS diff --git a/src/once.c b/src/once.c index 75d7a39a5..c01538c9d 100644 --- a/src/once.c +++ b/src/once.c @@ -40,9 +40,15 @@ dispatch_once(dispatch_once_t *val, dispatch_block_t block) } #endif -DISPATCH_NOINLINE -void -dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) +#if DISPATCH_ONCE_INLINE_FASTPATH +#define DISPATCH_ONCE_SLOW_INLINE inline DISPATCH_ALWAYS_INLINE +#else +#define DISPATCH_ONCE_SLOW_INLINE DISPATCH_NOINLINE +#endif // DISPATCH_ONCE_INLINE_FASTPATH + +DISPATCH_ONCE_SLOW_INLINE +static void +dispatch_once_f_slow(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { #if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE dispatch_once_gate_t l = (dispatch_once_gate_t)val; @@ -95,3 +101,15 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) } #endif } + +DISPATCH_NOINLINE +void +dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) +{ +#if !DISPATCH_ONCE_INLINE_FASTPATH + if (likely(os_atomic_load(val, acquire) == DLOCK_ONCE_DONE)) { + return; + } +#endif // !DISPATCH_ONCE_INLINE_FASTPATH + return dispatch_once_f_slow(val, ctxt, func); +} diff --git a/src/shims/lock.h b/src/shims/lock.h index 99c556370..0c089aafd 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -59,9 +59,6 @@ _dispatch_lock_owner(dispatch_lock lock_value) #elif defined(__linux__) #include -#if !defined(__x86_64__) && !defined(__i386__) && !defined(__s390x__) -#include -#endif #include #include /* For SYS_xxx definitions */ @@ -473,28 +470,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_once_t _dispatch_once_xchg_done(dispatch_once_t *pred) { -#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__) - // On Intel, any load is a load-acquire, so we don't need to be fancy - // same for s390x return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release); -#elif defined(__linux__) - if (unlikely(syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0) < 0)) { - /* - * sys_membarrier not supported - * - * Ideally we would call DISPATCH_INTERNAL_CRASH() here, but - * due to ordering constraints in internal.h required by Darwin - * the macro is undefined when this header is included. - * Instead, open-code what would be a call to - * _dispatch_hardware_crash() inside DISPATCH_INTERNAL_CRASH(). 
- */ - __asm__(""); - __builtin_trap(); - } - return os_atomic_xchg(pred, DLOCK_ONCE_DONE, relaxed); -#else -# error dispatch_once algorithm not available for this port -#endif } DISPATCH_ALWAYS_INLINE From 176b84690f3d76fca4f43ad9f3676de3eb42ccfb Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Fri, 14 Jul 2017 12:50:40 -0700 Subject: [PATCH 35/46] Merge pull request #277 from apple/mad/remove-sys-membarrier Remove dependency on sys_membarrier on linux Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index b44a895c6..e3f70486b 100644 --- a/PATCHES +++ b/PATCHES @@ -348,3 +348,4 @@ github commits starting with 29bdc2f from [ce90d0c] APPLIED rdar://33531111 [69c8f3e] APPLIED rdar://33531111 [23a3a84] APPLIED rdar://33531111 +[79b7529] APPLIED rdar://33531111 From b7a87c789718aa99fd5c1ac3f91afab8f7de70fe Mon Sep 17 00:00:00 2001 From: Julien Blache Date: Fri, 14 Jul 2017 11:09:31 -0700 Subject: [PATCH 36/46] Actually update epoll events if needed when unregistering _dispatch_epoll_update() was being called without dmn->dmn_events being updated beforehand. Signed-off-by: Pierre Habouzit Signed-off-by: Daniel A. Steffen --- src/event/event_epoll.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index 68140d50c..419cb07d6 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -316,6 +316,7 @@ _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) if (events == dmn->dmn_events) { // nothing to do } else if (events & (EPOLLIN | EPOLLOUT)) { + dmn->dmn_events = events; _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); } else { epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); From 7bf2e83e9d1bd97c2144480ecd70d0aa06685681 Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Fri, 14 Jul 2017 15:39:20 -0700 Subject: [PATCH 37/46] Merge pull request #280 from apple/mad/epoll-fixes Actually update epoll events if needed when unregistering Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index e3f70486b..ffe6994e1 100644 --- a/PATCHES +++ b/PATCHES @@ -349,3 +349,4 @@ github commits starting with 29bdc2f from [69c8f3e] APPLIED rdar://33531111 [23a3a84] APPLIED rdar://33531111 [79b7529] APPLIED rdar://33531111 +[f8e71eb] APPLIED rdar://33531111 From 3015c5140d679e252a01cbc4b612d3ed9eacfe80 Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Fri, 14 Jul 2017 16:44:06 -0700 Subject: [PATCH 38/46] the buffer size ioctls on linux return EINVAL when it's not supported Make sure we don't keep doing these syscalls and log stupid errors about it failing forever. Signed-off-by: Daniel A. 
Steffen --- src/event/event_epoll.c | 36 +++++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index 419cb07d6..c003bacaa 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -51,7 +51,8 @@ typedef struct dispatch_muxnote_s { int dmn_ident; uint32_t dmn_events; int16_t dmn_filter; - bool dmn_socket_listener; + bool dmn_skip_outq_ioctl; + bool dmn_skip_inq_ioctl; } *dispatch_muxnote_t; typedef struct dispatch_epoll_timeout_s { @@ -143,7 +144,7 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) struct stat sb; int fd = du._du->du_ident; int16_t filter = du._du->du_filter; - bool socket_listener = false; + bool skip_outq_ioctl = false, skip_inq_ioctl = false; sigset_t sigmask; switch (filter) { @@ -173,11 +174,15 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) if (fd < 0) { return NULL; } + // Linux doesn't support output queue size ioctls for regular files + skip_outq_ioctl = true; } else if (S_ISSOCK(sb.st_mode)) { socklen_t vlen = sizeof(int); int v; + // Linux doesn't support saying how many clients are ready to be + // accept()ed for sockets if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &v, &vlen) == 0) { - socket_listener = (bool)v; + skip_inq_ioctl = (bool)v; } } break; @@ -193,7 +198,8 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) dmn->dmn_ident = du._du->du_ident; dmn->dmn_filter = filter; dmn->dmn_events = events; - dmn->dmn_socket_listener = socket_listener; + dmn->dmn_skip_outq_ioctl = skip_outq_ioctl; + dmn->dmn_skip_inq_ioctl = skip_inq_ioctl; return dmn; } @@ -480,16 +486,28 @@ _dispatch_event_merge_signal(dispatch_muxnote_t dmn) static uintptr_t _dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) { - unsigned long op = writer ? SIOCOUTQ : SIOCINQ; int n; - if (!writer && dmn->dmn_socket_listener) { - // Linux doesn't support saying how many clients are ready to be - // accept()ed + if (writer ? dmn->dmn_skip_outq_ioctl : dmn->dmn_skip_inq_ioctl) { return 1; } - if (dispatch_assume_zero(ioctl(dmn->dmn_ident, op, &n))) { + if (ioctl(dmn->dmn_ident, writer ? SIOCOUTQ : SIOCINQ, &n) != 0) { + switch (errno) { + case EINVAL: + case ENOTTY: + // this file descriptor actually doesn't support the buffer + // size ioctl, remember that for next time to avoid the syscall. + break; + default: + dispatch_assume_zero(errno); + break; + } + if (writer) { + dmn->dmn_skip_outq_ioctl = true; + } else { + dmn->dmn_skip_inq_ioctl = true; + } return 1; } return (uintptr_t)n; From cfdc94d08c4e26a98554b8ae12600dce35aed59d Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Mon, 17 Jul 2017 10:30:31 -0700 Subject: [PATCH 39/46] Merge pull request #281 from apple/mad/epoll-fixes-2 the buffer size ioctls on linux return EINVAL when it's not supported Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index ffe6994e1..55a3dc367 100644 --- a/PATCHES +++ b/PATCHES @@ -350,3 +350,4 @@ github commits starting with 29bdc2f from [23a3a84] APPLIED rdar://33531111 [79b7529] APPLIED rdar://33531111 [f8e71eb] APPLIED rdar://33531111 +[8947dcf] APPLIED rdar://33531111 From 6a54c471b342013b2df784003e4fd9d51267aafc Mon Sep 17 00:00:00 2001 From: Pierre Habouzit Date: Fri, 14 Jul 2017 11:57:58 -0700 Subject: [PATCH 40/46] Fix improper double-fire of signal sources on Linux Signed-off-by: Daniel A. 
Steffen --- src/event/event_epoll.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index c003bacaa..c86421b4e 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -474,12 +474,24 @@ _dispatch_event_merge_signal(dispatch_muxnote_t dmn) { dispatch_unote_linkage_t dul, dul_next; struct signalfd_siginfo si; - - dispatch_assume(read(dmn->dmn_fd, &si, sizeof(si)) == sizeof(si)); - - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { - dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0); + ssize_t rc; + + // Linux has the weirdest semantics around signals: if it finds a thread + // that has not masked a process wide-signal, it may deliver it to this + // thread, meaning that the signalfd may have been made readable, but the + // signal consumed through the legacy delivery mechanism. + // + // Because of this we can get a misfire of the signalfd yielding EAGAIN the + // first time around. The _dispatch_muxnote_signal_block_and_raise() hack + // will kick in, the thread with the wrong mask will be fixed up, and the + // signal delivered to us again properly. + if ((rc = read(dmn->dmn_fd, &si, sizeof(si))) == sizeof(si)) { + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0); + } + } else { + dispatch_assume(rc == -1 && errno == EAGAIN); } } From 8c5fde1a8c3c58baf5da8e591a4a4efa2140f1fa Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Mon, 17 Jul 2017 18:29:53 -0700 Subject: [PATCH 41/46] Merge pull request #279 from apple/mad/greedy-signalfd Fix improper double-fire of signal sources on Linux Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 55a3dc367..6863bda9a 100644 --- a/PATCHES +++ b/PATCHES @@ -351,3 +351,4 @@ github commits starting with 29bdc2f from [79b7529] APPLIED rdar://33531111 [f8e71eb] APPLIED rdar://33531111 [8947dcf] APPLIED rdar://33531111 +[5ad9208] APPLIED rdar://33531111 From 887ac1a00325689900767aebf0b968761c95e23f Mon Sep 17 00:00:00 2001 From: David Grove Date: Wed, 5 Jul 2017 18:06:31 -0400 Subject: [PATCH 42/46] fixes for compiler warnings work in progress on cleaning up code so it can be compiled with the same set of warning flags used on Darwin. This is an initial pass through the C files in src to resolve warnings. Most warnings are related to implicit size/sign conversions between integral types and missing explicit prototypes for non-static functions. Signed-off-by: Daniel A. 
Steffen --- src/BlocksRuntime/runtime.c | 10 +++++++- src/data.c | 2 ++ src/event/event_epoll.c | 48 +++++++++++++++++++------------------ src/event/workqueue.c | 16 ++++++------- src/internal.h | 20 +++++++++++----- src/io.c | 6 ++--- src/queue.c | 6 ++++- src/shims/hw_config.h | 6 ++--- src/shims/lock.c | 7 +++--- src/shims/lock.h | 6 ++--- src/swift/DispatchStubs.cc | 24 +++++++++++++++++++ src/voucher.c | 2 ++ 12 files changed, 101 insertions(+), 52 deletions(-) diff --git a/src/BlocksRuntime/runtime.c b/src/BlocksRuntime/runtime.c index 8ec64cdc2..8c98e8d1e 100644 --- a/src/BlocksRuntime/runtime.c +++ b/src/BlocksRuntime/runtime.c @@ -148,6 +148,8 @@ GC support stub routines static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) { + (void)initialCountIsOne; + (void)isObject; return malloc(size); } @@ -156,6 +158,8 @@ static void _Block_assign_default(void *value, void **destptr) { } static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) { + (void)ptr; + (void)hasRefcount; } #if HAVE_OBJC @@ -163,9 +167,11 @@ static void _Block_do_nothing(const void *aBlock) { } #endif static void _Block_retain_object_default(const void *ptr) { + (void)ptr; } static void _Block_release_object_default(const void *ptr) { + (void)ptr; } static void _Block_assign_weak_default(const void *ptr, void *dest) { @@ -193,7 +199,9 @@ static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) } #endif -static void _Block_destructInstance_default(const void *aBlock) {} +static void _Block_destructInstance_default(const void *aBlock) { + (void)aBlock; +} /************************************************************************** GC support callout functions - initially set to stub routines diff --git a/src/data.c b/src/data.c index 240309f45..3efab2f89 100644 --- a/src/data.c +++ b/src/data.c @@ -138,6 +138,8 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, mach_vm_size_t vm_size = size; mach_vm_address_t vm_addr = (uintptr_t)buffer; mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); +#else + (void)size; #endif } else { if (!queue) { diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index c86421b4e..add4dde65 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -48,7 +48,7 @@ typedef struct dispatch_muxnote_s { TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; int dmn_fd; - int dmn_ident; + uint32_t dmn_ident; uint32_t dmn_events; int16_t dmn_filter; bool dmn_skip_outq_ioctl; @@ -85,9 +85,9 @@ static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = { DISPATCH_ALWAYS_INLINE static inline struct dispatch_muxnote_bucket_s * -_dispatch_muxnote_bucket(int ident) +_dispatch_muxnote_bucket(uint32_t ident) { - return &_dispatch_sources[DSL_HASH((uint32_t)ident)]; + return &_dispatch_sources[DSL_HASH(ident)]; } #define _dispatch_unote_muxnote_bucket(du) \ _dispatch_muxnote_bucket(du._du->du_ident) @@ -95,7 +95,7 @@ _dispatch_muxnote_bucket(int ident) DISPATCH_ALWAYS_INLINE static inline dispatch_muxnote_t _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, - uint64_t ident, int16_t filter) + uint32_t ident, int16_t filter) { dispatch_muxnote_t dmn; if (filter == EVFILT_WRITE) filter = EVFILT_READ; @@ -112,7 +112,7 @@ _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, static void _dispatch_muxnote_dispose(dispatch_muxnote_t dmn) { - if (dmn->dmn_filter != EVFILT_READ || 
dmn->dmn_fd != dmn->dmn_ident) { + if (dmn->dmn_filter != EVFILT_READ || (uint32_t)dmn->dmn_fd != dmn->dmn_ident) { close(dmn->dmn_fd); } free(dmn); @@ -142,26 +142,27 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) dispatch_muxnote_t dmn; struct stat sb; - int fd = du._du->du_ident; + int fd = (int)du._du->du_ident; int16_t filter = du._du->du_filter; bool skip_outq_ioctl = false, skip_inq_ioctl = false; sigset_t sigmask; switch (filter) { - case EVFILT_SIGNAL: - if (!sigismember(&signals_with_unotes, du._du->du_ident)) { + case EVFILT_SIGNAL: { + int signo = (int)du._du->du_ident; + if (!sigismember(&signals_with_unotes, signo)) { manager_thread = pthread_self(); - sigaddset(&signals_with_unotes, du._du->du_ident); - sigaction(du._du->du_ident, &sa, NULL); + sigaddset(&signals_with_unotes, signo); + sigaction(signo, &sa, NULL); } sigemptyset(&sigmask); - sigaddset(&sigmask, du._du->du_ident); + sigaddset(&sigmask, signo); fd = signalfd(-1, &sigmask, SFD_NONBLOCK | SFD_CLOEXEC); if (fd < 0) { return NULL; } break; - + } case EVFILT_WRITE: filter = EVFILT_READ; case EVFILT_READ: @@ -290,7 +291,7 @@ _dispatch_unote_resume(dispatch_unote_t du) } bool -_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) +_dispatch_unote_unregister(dispatch_unote_t du, DISPATCH_UNUSED uint32_t flags) { switch (du._du->du_filter) { case DISPATCH_EVFILT_CUSTOM_ADD: @@ -313,10 +314,10 @@ _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) dul->du_muxnote = NULL; if (TAILQ_EMPTY(&dmn->dmn_readers_head)) { - events &= ~EPOLLIN; + events &= (uint32_t)(~EPOLLIN); } if (TAILQ_EMPTY(&dmn->dmn_writers_head)) { - events &= ~EPOLLOUT; + events &= (uint32_t)(~EPOLLOUT); } if (events == dmn->dmn_events) { @@ -350,7 +351,8 @@ _dispatch_event_merge_timer(dispatch_clock_t clock) } static void -_dispatch_timeout_program(uint32_t tidx, uint64_t target, uint64_t leeway) +_dispatch_timeout_program(uint32_t tidx, uint64_t target, + DISPATCH_UNUSED uint64_t leeway) { dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock]; @@ -358,24 +360,24 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, uint64_t leeway) .events = EPOLLONESHOT | EPOLLIN, .data = { .u32 = timer->det_ident }, }; - unsigned long op; + int op; if (target >= INT64_MAX && !timer->det_registered) { return; } if (unlikely(timer->det_fd < 0)) { - clockid_t clock; + clockid_t clockid; int fd; switch (DISPATCH_TIMER_CLOCK(tidx)) { case DISPATCH_CLOCK_MACH: - clock = CLOCK_MONOTONIC; + clockid = CLOCK_MONOTONIC; break; case DISPATCH_CLOCK_WALL: - clock = CLOCK_REALTIME; + clockid = CLOCK_REALTIME; break; } - fd = timerfd_create(clock, TFD_NONBLOCK | TFD_CLOEXEC); + fd = timerfd_create(clockid, TFD_NONBLOCK | TFD_CLOEXEC); if (!dispatch_assume(fd >= 0)) { return; } @@ -451,7 +453,7 @@ _dispatch_epoll_init(void *context DISPATCH_UNUSED) .events = EPOLLIN | EPOLLFREE, .data = { .u32 = DISPATCH_EPOLL_EVENTFD, }, }; - unsigned long op = EPOLL_CTL_ADD; + int op = EPOLL_CTL_ADD; if (epoll_ctl(_dispatch_epfd, op, _dispatch_eventfd, &ev) < 0) { DISPATCH_INTERNAL_CRASH(errno, "epoll_ctl() failed"); } @@ -504,7 +506,7 @@ _dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) return 1; } - if (ioctl(dmn->dmn_ident, writer ? SIOCOUTQ : SIOCINQ, &n) != 0) { + if (ioctl((int)dmn->dmn_ident, writer ? 
SIOCOUTQ : SIOCINQ, &n) != 0) { switch (errno) { case EINVAL: case ENOTTY: diff --git a/src/event/workqueue.c b/src/event/workqueue.c index dbc65938c..73362a58a 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -22,9 +22,6 @@ #if DISPATCH_USE_INTERNAL_WORKQUEUE -// forward looking typedef; not yet defined in dispatch -typedef pid_t dispatch_tid; - /* * dispatch_workq monitors the thread pool that is * executing the work enqueued on libdispatch's pthread @@ -88,7 +85,7 @@ _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls) dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; dispatch_assert(mon->dq == root_q); - dispatch_tid tid = _dispatch_thread_getspecific(tid); + dispatch_tid tid = _dispatch_tid_self(); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); dispatch_assert(mon->num_registered_tids < WORKQ_MAX_TRACKED_TIDS-1); int worker_id = mon->num_registered_tids++; @@ -103,7 +100,8 @@ _dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls) #if HAVE_DISPATCH_WORKQ_MONITORING dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; - dispatch_tid tid = _dispatch_thread_getspecific(tid); + dispatch_assert(mon->dq == root_q); + dispatch_tid tid = _dispatch_tid_self(); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); for (int i = 0; i < mon->num_registered_tids; i++) { if (mon->registered_tids[i] == tid) { @@ -138,10 +136,10 @@ _dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon) for (int i = 0; i < mon->num_registered_tids; i++) { dispatch_tid tid = mon->registered_tids[i]; int fd; - size_t bytes_read = -1; + ssize_t bytes_read = -1; int r = snprintf(path, sizeof(path), "/proc/%d/stat", tid); - dispatch_assert(r > 0 && r < sizeof(path)); + dispatch_assert(r > 0 && r < (int)sizeof(path)); fd = open(path, O_RDONLY | O_NONBLOCK); if (unlikely(fd == -1)) { @@ -179,7 +177,7 @@ _dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon) static void _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) { - int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * dispatch_hw_config(active_cpus); + int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * (int)dispatch_hw_config(active_cpus); int global_runnable = 0; for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; @@ -228,7 +226,7 @@ static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED) { #if HAVE_DISPATCH_WORKQ_MONITORING - int target_runnable = dispatch_hw_config(active_cpus); + int target_runnable = (int)dispatch_hw_config(active_cpus); for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; mon->dq = _dispatch_get_root_queue(i, false); diff --git a/src/internal.h b/src/internal.h index 0536db107..3f481aad7 100644 --- a/src/internal.h +++ b/src/internal.h @@ -394,9 +394,9 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); DISPATCH_EXPORT DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); -#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); +#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); #endif // HAVE_MACH @@ -466,7 +466,9 @@ void _dispatch_log(const char *msg, ...); } \ } while (0) #else -static inline void _dispatch_assert(long e, 
long line) { +static inline void +_dispatch_assert(long e, size_t line) +{ if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e); } #define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) @@ -488,7 +490,9 @@ static inline void _dispatch_assert(long e, long line) { } \ } while (0) #else -static inline void _dispatch_assert_zero(long e, long line) { +static inline void +_dispatch_assert_zero(long e, size_t line) +{ if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); } #define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__) @@ -512,7 +516,9 @@ static inline void _dispatch_assert_zero(long e, long line) { _e; \ }) #else -static inline long _dispatch_assume(long e, long line) { +static inline long +_dispatch_assume(long e, long line) +{ if (!e) _dispatch_bug(line, e); return e; } @@ -535,7 +541,9 @@ static inline long _dispatch_assume(long e, long line) { _e; \ }) #else -static inline long _dispatch_assume_zero(long e, long line) { +static inline long +_dispatch_assume_zero(long e, long line) +{ if (e) _dispatch_bug(line, e); return e; } @@ -850,7 +858,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // -#define _dispatch_set_crash_log_cause_and_message(ac, msg) +#define _dispatch_set_crash_log_cause_and_message(ac, msg) ((void)(ac)) #define _dispatch_set_crash_log_message(msg) #define _dispatch_set_crash_log_message_dynamic(msg) diff --git a/src/io.c b/src/io.c index 290437371..155b6cf02 100644 --- a/src/io.c +++ b/src/io.c @@ -25,7 +25,7 @@ #endif #ifndef PAGE_SIZE -#define PAGE_SIZE getpagesize() +#define PAGE_SIZE ((size_t)getpagesize()) #endif #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA @@ -1372,7 +1372,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) break; ); } - int32_t dev = major(st.st_dev); + dev_t dev = major(st.st_dev); // We have to get the disk on the global dev queue. 
The // barrier queue cannot continue until that is complete dispatch_suspend(fd_entry->barrier_queue); @@ -2167,7 +2167,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) op->advise_offset += advise.ra_count; #ifdef __linux__ _dispatch_io_syscall_switch(err, - readahead(op->fd_entry->fd, advise.ra_offset, advise.ra_count), + readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count), case EINVAL: break; // fd does refer to a non-supported filetype default: (void)dispatch_assume_zero(err); break; ); diff --git a/src/queue.c b/src/queue.c index 2406e7e7b..fe26ab71d 100644 --- a/src/queue.c +++ b/src/queue.c @@ -5329,6 +5329,8 @@ _dispatch_root_queue_push(dispatch_queue_t rq, dispatch_object_t dou, if (_dispatch_root_queue_push_needs_override(rq, qos)) { return _dispatch_root_queue_push_override(rq, dou, qos); } +#else + (void)qos; #endif _dispatch_root_queue_push_inline(rq, dou, dou, 1); } @@ -5870,7 +5872,7 @@ _dispatch_worker_thread(void *context) #if DISPATCH_USE_INTERNAL_WORKQUEUE if (monitored) { - _dispatch_workq_worker_unregister(dq, qc->dgq_qos); + _dispatch_workq_worker_unregister(dq, qc->dgq_qos); } #endif (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); @@ -5962,6 +5964,7 @@ _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) _dispatch_runloop_queue_wakeup(dq, 0, false); } +#if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { @@ -5970,6 +5973,7 @@ _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) } return _dispatch_runloop_queue_get_handle(dq); } +#endif static void _dispatch_runloop_queue_handle_init(void *ctxt) diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 26856bce9..485dad663 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -101,7 +101,7 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) switch (c) { case _dispatch_hw_config_logical_cpus: case _dispatch_hw_config_physical_cpus: - return sysconf(_SC_NPROCESSORS_CONF); + return (uint32_t)sysconf(_SC_NPROCESSORS_CONF); case _dispatch_hw_config_active_cpus: { #ifdef __USE_GNU @@ -110,9 +110,9 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) // is restricted to a subset of the online cpus (eg via numactl). 
cpu_set_t cpuset; if (pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) == 0) - return CPU_COUNT(&cpuset); + return (uint32_t)CPU_COUNT(&cpuset); #endif - return sysconf(_SC_NPROCESSORS_ONLN); + return (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); } } #else diff --git a/src/shims/lock.c b/src/shims/lock.c index 617fa016d..24af953c3 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -382,7 +382,7 @@ _dispatch_futex(uint32_t *uaddr, int op, uint32_t val, const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3, int opflags) { - return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); + return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); } static int @@ -401,7 +401,7 @@ _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) { int rc; _dlock_syscall_switch(err, - rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags), + rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); ); @@ -412,7 +412,7 @@ _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, int opflags) { _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout, + _dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout, NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); @@ -606,6 +606,7 @@ _dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, _dispatch_thread_switch(new_value, flags, timeout++); #endif (void)timeout; + (void)flags; } } diff --git a/src/shims/lock.h b/src/shims/lock.h index 0c089aafd..37a3ecfc8 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -62,7 +62,7 @@ _dispatch_lock_owner(dispatch_lock lock_value) #include #include /* For SYS_xxx definitions */ -typedef pid_t dispatch_tid; +typedef uint32_t dispatch_tid; typedef uint32_t dispatch_lock; #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) @@ -174,8 +174,8 @@ typedef sem_t _dispatch_sema4_t; #define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy); -#define _dispatch_sema4_is_created(sema) 1 -#define _dispatch_sema4_create_slow(sema, policy) ((void)0) +#define _dispatch_sema4_is_created(sema) ((void)sema, 1) +#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy) #elif USE_WIN32_SEM diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index 1eaf4bd93..9c667d570 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -65,6 +65,29 @@ static void _dispatch_overlay_constructor() { #define SWIFT_CC_swift #endif +extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void); +extern "C" void _swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)); +extern "C" dispatch_queue_t _swift_dispatch_get_main_queue(void); +extern "C" dispatch_data_t _swift_dispatch_data_empty(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_default(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_free(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_munmap(void); +extern "C" dispatch_block_t _swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block); +extern "C" dispatch_block_t _swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t 
block); +extern "C" void _swift_dispatch_block_cancel(dispatch_block_t block); +extern "C" long _swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); +extern "C" void _swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block); +extern "C" long _swift_dispatch_block_testcancel(dispatch_block_t block); +extern "C" void _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_release(dispatch_object_t obj); +extern "C" void _swift_dispatch_retain(dispatch_object_t obj); +#if !USE_OBJC +extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); +#endif + + SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void) { @@ -174,6 +197,7 @@ _swift_dispatch_retain(dispatch_object_t obj) { } #define SOURCE(t) \ + extern "C" dispatch_source_type_t _swift_dispatch_source_type_##t(void); \ SWIFT_CC(swift) \ DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t \ _swift_dispatch_source_type_##t(void) { \ diff --git a/src/voucher.c b/src/voucher.c index 5beadf0f1..e4128a289 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -1550,12 +1550,14 @@ _voucher_create_accounting_voucher(voucher_t voucher) return NULL; } +#if HAVE_MACH voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { (void)msg; return NULL; } +#endif #if VOUCHER_ENABLE_GET_MACH_VOUCHER mach_voucher_t From 54651ca2cf87ec84340c2b803bfea72032a68f6e Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Mon, 24 Jul 2017 14:14:03 -0700 Subject: [PATCH 43/46] Merge pull request #273 from dgrove-oss/match-darwin-cflags-round1 fixes to src to prep for enabling additional compiler warnings Signed-off-by: Daniel A. Steffen --- PATCHES | 1 + 1 file changed, 1 insertion(+) diff --git a/PATCHES b/PATCHES index 6863bda9a..c3d28b330 100644 --- a/PATCHES +++ b/PATCHES @@ -352,3 +352,4 @@ github commits starting with 29bdc2f from [f8e71eb] APPLIED rdar://33531111 [8947dcf] APPLIED rdar://33531111 [5ad9208] APPLIED rdar://33531111 +[698d085] APPLIED rdar://33531111 From 1df729dd73359680417c29cdfb87450df8ccd402 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Thu, 27 Jul 2017 00:12:42 -0700 Subject: [PATCH 44/46] Merge libdispatch-913.1.4 Signed-off-by: Daniel A. 
Steffen --- dispatch/queue.h | 2 +- libdispatch.xcodeproj/project.pbxproj | 4 + man/dispatch_apply.3 | 41 +++- os/voucher_private.h | 17 +- private/private.h | 3 + private/source_private.h | 16 ++ src/apply.c | 16 +- src/event/event_config.h | 24 ++ src/event/event_kevent.c | 20 +- src/init.c | 1 + src/inline_internal.h | 46 +++- src/internal.h | 40 ++-- src/mach.c | 30 ++- src/object.c | 20 +- src/object.m | 8 + src/object_internal.h | 9 +- src/provider.d | 38 +++ src/queue.c | 265 ++++++++++++++------- src/queue_internal.h | 6 +- src/shims.h | 50 +++- src/shims/priority.h | 2 +- src/source.c | 61 +++-- src/swift/Source.swift | 322 +++++++++++++++++++++++++- src/swift/Time.swift | 12 + src/trace.h | 8 - src/voucher.c | 7 + src/voucher_internal.h | 60 +++-- tools/voucher_trace.d | 78 +++++++ xcodeconfig/libdispatch.order | 12 + 29 files changed, 1017 insertions(+), 201 deletions(-) create mode 100755 tools/voucher_trace.d diff --git a/dispatch/queue.h b/dispatch/queue.h index 606bd30e8..8dab75f9d 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -223,7 +223,7 @@ dispatch_sync_f(dispatch_queue_t queue, * @abstract * Constant to pass to dispatch_apply() or dispatch_apply_f() to request that * the system automatically use worker threads that match the configuration of - * the current thread most closely. + * the current thread as closely as possible. * * @discussion * When submitting a block for parallel invocation, passing this constant as the diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 12d05d0c6..e7134e709 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -741,10 +741,12 @@ 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; + B69878521F06F8790088F94F /* dispatch_signals.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_signals.c; sourceTree = ""; }; B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_thread_request.c; sourceTree = ""; }; B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_create.c; sourceTree = ""; }; B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = ""; }; B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = ""; }; + B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = ""; }; C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -1013,6 +1015,7 @@ 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, 6E326B441C239B61002A6505 /* dispatch_priority.c */, 6E326AB51C225477002A6505 /* dispatch_proc.c */, + B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */, 6E326AB31C224870002A6505 /* dispatch_qos.c */, B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */, 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, @@ -1022,6 +1025,7 @@ 6E326ADC1C234396002A6505 /* dispatch_readsync.c */, 6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */, 6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */, + B69878521F06F8790088F94F /* dispatch_signals.c */, 6EA2CB841C005DEF0076794A /* dispatch_source.c */, 6E326AE01C234780002A6505 /* dispatch_starfish.c */, 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */, diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 5a43a0a13..57c99a8a7 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2017 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_apply 3 .Os Darwin @@ -20,21 +20,32 @@ The .Fn dispatch_apply function provides data-level concurrency through a "for (;;)" loop like primitive: .Bd -literal -dispatch_queue_t the_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); size_t iterations = 10; // 'idx' is zero indexed, just like: // for (idx = 0; idx < iterations; idx++) -dispatch_apply(iterations, the_queue, ^(size_t idx) { +dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) { printf("%zu\\n", idx); }); .Ed .Pp +Although any queue can be used, it is strongly recommended to use +.Vt DISPATCH_APPLY_AUTO +as the +.Vt queue +argument to both +.Fn dispatch_apply +and +.Fn dispatch_apply_f , +as shown in the example above, since this allows the system to automatically use worker threads +that match the configuration of the current thread as closely as possible. +No assumptions should be made about which global concurrent queue will be used. +.Pp Like a "for (;;)" loop, the .Fn dispatch_apply function is synchronous. -If asynchronous behavior is desired, please wrap the call to +If asynchronous behavior is desired, wrap the call to .Fn dispatch_apply with a call to .Fn dispatch_async @@ -49,7 +60,7 @@ achieved (perhaps using a power of two search): .Bd -literal #define STRIDE 3 -dispatch_apply(count / STRIDE, queue, ^(size_t idx) { +dispatch_apply(count / STRIDE, DISPATCH_APPLY_AUTO, ^(size_t idx) { size_t j = idx * STRIDE; size_t j_stop = j + STRIDE; do { @@ -74,12 +85,21 @@ This is in contrast to asynchronous functions which must retain both the block and target queue for the duration of the asynchronous operation (as the calling function may immediately release its interest in these objects). .Sh FUNDAMENTALS -Conceptually, .Fn dispatch_apply -is a convenient wrapper around +and +.Fn dispatch_apply_f +attempt to quickly create enough worker threads to efficiently iterate work in parallel. +By contrast, a loop that passes work items individually to .Fn dispatch_async -and a semaphore to wait for completion. -In practice, the dispatch library optimizes this function. 
+or +.Fn dispatch_async_f +will incur more overhead and does not express the desired parallel execution semantics to +the system, so may not create an optimal number of worker threads for a parallel workload. +For this reason, prefer to use +.Fn dispatch_apply +or +.Fn dispatch_apply_f +when parallel execution is important. .Pp The .Fn dispatch_apply @@ -99,5 +119,4 @@ use a for-loop around invocations of .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , -.Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 +.Xr dispatch_queue_create 3 diff --git a/os/voucher_private.h b/os/voucher_private.h index 6675a0edb..aecbbc9ff 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -202,8 +202,23 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * This flag is ignored if a specific voucher object is assigned with the * dispatch_block_create_with_voucher* functions, and is equivalent to passing * the NULL voucher to these functions. + * + * @const DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE + * Flag indicating that this dispatch block object should try to reset the + * recorded maximum QoS of all currently enqueued items on a serial dispatch + * queue at the base of a queue hierarchy. + * + * This is only works if the queue becomes empty by dequeuing the block in + * question, and then allows that block to enqueue more work on this hierarchy + * without perpetuating QoS overrides resulting from items previously executed + * on the hierarchy. + * + * A dispatch block object created with this flag set cannot be used with + * dispatch_block_wait() or dispatch_block_cancel(). */ -#define DISPATCH_BLOCK_NO_VOUCHER (0x40) +#define DISPATCH_BLOCK_NO_VOUCHER (0x40ul) + +#define DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE (0x80ul) /*! * @function dispatch_block_create_with_voucher diff --git a/private/private.h b/private/private.h index cc9d57842..ed9f876cc 100644 --- a/private/private.h +++ b/private/private.h @@ -43,6 +43,9 @@ #include #endif #include +#if TARGET_OS_MAC +#include +#endif #ifndef __DISPATCH_BUILDING_DISPATCH__ #include diff --git a/private/source_private.h b/private/source_private.h index 019f648a6..ad22e6a6a 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -102,6 +102,13 @@ DISPATCH_SOURCE_TYPE_DECL(memorystatus); API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(sock); +/*! + * @const DISPATCH_SOURCE_TYPE_NW_CHANNEL + * @discussion A dispatch source that monitors events on a network channel. + */ +#define DISPATCH_SOURCE_TYPE_NW_CHANNEL (&_dispatch_source_type_nw_channel) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(nw_channel); __END_DECLS @@ -165,6 +172,15 @@ enum { DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, }; +/*! + * @enum dispatch_source_nw_channel_flags_t + * + * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE + * Received network channel flow advisory. + */ +enum { + DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001, +}; /*! * @enum dispatch_source_vfs_flags_t diff --git a/src/apply.c b/src/apply.c index 9d6452225..6f44cf90b 100644 --- a/src/apply.c +++ b/src/apply.c @@ -253,12 +253,23 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (unlikely(iterations == 0)) { return; } - int32_t thr_cnt = (int32_t)dispatch_hw_config(active_cpus); dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? 
dtctxt->dtc_apply_nesting : 0; dispatch_queue_t old_dq = _dispatch_queue_get_current(); + if (likely(dq == DISPATCH_APPLY_AUTO)) { + dq = _dispatch_apply_root_queue(old_dq); + } + dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority); + if (unlikely(dq->do_targetq)) { + // if the queue passed-in is not a root queue, use the current QoS + // since the caller participates in the work anyway + qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + } + int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + if (likely(!nested)) { nested = iterations; } else { @@ -269,9 +280,6 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (iterations < (size_t)thr_cnt) { thr_cnt = (int32_t)iterations; } - if (likely(dq == DISPATCH_APPLY_AUTO)) { - dq = _dispatch_apply_root_queue(old_dq); - } struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, diff --git a/src/event/event_config.h b/src/event/event_config.h index 2ac3c428d..60f776f95 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -50,6 +50,25 @@ #define DISPATCH_MACHPORT_DEBUG 0 #endif +#ifndef DISPATCH_TIMER_ASSERTIONS +#if DISPATCH_DEBUG +#define DISPATCH_TIMER_ASSERTIONS 1 +#else +#define DISPATCH_TIMER_ASSERTIONS 0 +#endif +#endif + +#if DISPATCH_TIMER_ASSERTIONS +#define DISPATCH_TIMER_ASSERT(a, op, b, text) ({ \ + typeof(a) _a = (a); \ + if (unlikely(!(_a op (b)))) { \ + DISPATCH_CLIENT_CRASH(_a, "Timer: " text); \ + } \ + }) +#else +#define DISPATCH_TIMER_ASSERT(a, op, b, text) ((void)0) +#endif + #ifndef EV_VANISHED #define EV_VANISHED 0x0200 #endif @@ -105,6 +124,11 @@ # ifndef VQ_DESIRED_DISK # undef HAVE_DECL_VQ_DESIRED_DISK # endif // VQ_DESIRED_DISK + +# if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__) +# define EVFILT_NW_CHANNEL (-16) +# define NOTE_FLOW_ADV_UPDATE 0x1 +# endif #else // DISPATCH_EVENT_BACKEND_KEVENT # define EV_ADD 0x0001 # define EV_DELETE 0x0002 diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index c15a397b4..8fe76d55c 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -671,8 +671,9 @@ _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, r = 0; } else if (flags & KEVENT_FLAG_ERROR_EVENTS) { for (i = 0, r = 0; i < n; i++) { - if ((ke_out[i].flags & EV_ERROR) && (r = (int)ke_out[i].data)) { + if ((ke_out[i].flags & EV_ERROR) && ke_out[i].data) { _dispatch_kevent_drain(&ke_out[i]); + r = (int)ke_out[i].data; } } } else { @@ -1407,6 +1408,17 @@ const dispatch_source_type_s _dispatch_source_type_sock = { }; #endif // EVFILT_SOCK +#ifdef EVFILT_NW_CHANNEL +const dispatch_source_type_s _dispatch_source_type_nw_channel = { + .dst_kind = "nw_channel", + .dst_filter = EVFILT_NW_CHANNEL, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, + .dst_mask = NOTE_FLOW_ADV_UPDATE, + .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; +#endif // EVFILT_NW_CHANNEL #if DISPATCH_USE_MEMORYSTATUS @@ -1609,9 +1621,9 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) if (!tlr) { DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); } - if (tlr->msgh_audit.val[DISPATCH_MACH_AUDIT_TOKEN_PID] != 0) { - (void)dispatch_assume_zero( - tlr->msgh_audit.val[DISPATCH_MACH_AUDIT_TOKEN_PID]); + if (hdr->msgh_id <= MACH_NOTIFY_LAST + && dispatch_assume_zero(tlr->msgh_audit.val[ + DISPATCH_MACH_AUDIT_TOKEN_PID])) { mach_msg_destroy(hdr); return; } 
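For illustration only (not part of the patch series): with the dispatch_apply.3 and apply.c changes above, callers are steered toward DISPATCH_APPLY_AUTO so the runtime can size the worker pool from the requested QoS's maximum parallelism rather than a raw active-CPU count. A minimal caller following that guidance, assuming a Blocks-capable compiler and linking against libdispatch, might look like this sketch:

    #include <dispatch/dispatch.h>
    #include <stdio.h>

    int main(void)
    {
        size_t iterations = 10;
        // 'idx' is zero indexed, just like:
        //   for (idx = 0; idx < iterations; idx++)
        // DISPATCH_APPLY_AUTO lets the system use worker threads that match
        // the configuration of the current thread as closely as possible.
        dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) {
            printf("%zu\n", idx);
        });
        return 0;
    }

As the updated man page notes, dispatch_apply() is synchronous; wrap the call in dispatch_async() if asynchronous behavior is desired, and make no assumptions about which global concurrent queue ends up being used.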
diff --git a/src/init.c b/src/init.c
index dea5e8769..6672fac45 100644
--- a/src/init.c
+++ b/src/init.c
@@ -897,6 +897,7 @@ void
 _dispatch_temporary_resource_shortage(void)
 {
 	sleep(1);
+	asm("");  // prevent tailcall
 }
 
 void *
diff --git a/src/inline_internal.h b/src/inline_internal.h
index 0ed9e51a8..4103c688f 100644
--- a/src/inline_internal.h
+++ b/src/inline_internal.h
@@ -1740,7 +1740,7 @@ static inline dispatch_priority_t
 _dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq)
 {
 	dispatch_priority_t old_dbp = _dispatch_get_basepri();
-	dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE);
+	dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT));
 	_dispatch_reset_basepri(assumed_rq->dq_priority);
 	_dispatch_queue_set_current(assumed_rq);
 	return old_dbp;
@@ -2108,11 +2108,25 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq,
 	if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) &&
 			(tpri & rootqueue_flag)) {
+		if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) {
+			pri &= DISPATCH_PRIORITY_OVERRIDE_MASK;
+		} else {
+			pri = 0;
+		}
 		if (tpri & defaultqueue_flag) {
-			dq->dq_priority = 0;
+			// base queues need to know they target
+			// the default root queue so that _dispatch_queue_override_qos()
+			// in _dispatch_queue_class_wakeup() can fall back to QOS_DEFAULT
+			// if no other priority was provided.
+			pri |= defaultqueue_flag;
 		} else {
-			dq->dq_priority = (tpri & ~rootqueue_flag) | inherited_flag;
+			pri |= (tpri & ~rootqueue_flag) | inherited_flag;
 		}
+		dq->dq_priority = pri;
+	} else if (pri & defaultqueue_flag) {
+		// the DEFAULTQUEUE flag is only set on queues due to the code above,
+		// and must never be kept if we don't target a global root queue.
+		dq->dq_priority = (pri & ~defaultqueue_flag);
 	}
 #else
 	(void)dq; (void)tq;
@@ -2272,7 +2286,9 @@ static inline dispatch_qos_t
 _dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos)
 {
 	if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) {
-		return qos;
+		// queues targeting the default root queue use any asynchronous
+		// workitem priority available and fall back to QOS_DEFAULT otherwise.
+		return qos ? qos : DISPATCH_QOS_DEFAULT;
 	}
 	// for asynchronous workitems, queue priority is the floor for overrides
 	return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority));
@@ -2338,14 +2354,20 @@ _dispatch_block_has_private_data(const dispatch_block_t block)
 }
 
 DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT
-static inline bool
-_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags)
-{
-	if (flags & DISPATCH_BLOCK_HAS_PRIORITY) {
-		return (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ||
-				!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS);
-	}
-	return false;
+static inline pthread_priority_t
+_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags,
+		pthread_priority_t new_pri)
+{
+	pthread_priority_t old_pri, p = 0; // 0 means do not change priority.
+ if ((flags & DISPATCH_BLOCK_HAS_PRIORITY) + && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) { + old_pri = _dispatch_get_priority(); + new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!p || p >= new_pri) p = 0; + } + return p; } DISPATCH_ALWAYS_INLINE diff --git a/src/internal.h b/src/internal.h index 3f481aad7..286e53458 100644 --- a/src/internal.h +++ b/src/internal.h @@ -459,9 +459,9 @@ void _dispatch_log(const char *msg, ...); if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (!_e) { \ - __assert_rtn(__func__, __FILE__, __LINE__, #e); \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ + _dispatch_abort(__LINE__, (long)_e); \ } \ } \ } while (0) @@ -483,9 +483,9 @@ _dispatch_assert(long e, size_t line) if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (_e) { \ - __assert_rtn(__func__, __FILE__, __LINE__, #e); \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && _e)) { \ + _dispatch_abort(__LINE__, (long)_e); \ } \ } \ } while (0) @@ -506,8 +506,8 @@ _dispatch_assert_zero(long e, size_t line) */ #if __GNUC__ #define dispatch_assume(e) ({ \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (!_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(!_e)) { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } \ @@ -531,8 +531,8 @@ _dispatch_assume(long e, long line) */ #if __GNUC__ #define dispatch_assume_zero(e) ({ \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(_e)) { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } \ @@ -558,8 +558,8 @@ _dispatch_assume_zero(long e, long line) if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ abort(); \ } \ @@ -567,8 +567,8 @@ _dispatch_assume_zero(long e, long line) } while (0) #else #define dispatch_debug_assert(e, msg, args...) 
do { \ - long _e = (long)fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \ abort(); \ } \ @@ -626,7 +626,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_fork_becomes_unsafe(void) { - if (!fastpath(_dispatch_is_multithreaded_inline())) { + if (unlikely(!_dispatch_is_multithreaded_inline())) { _dispatch_fork_becomes_unsafe_slow(); DISPATCH_COMPILER_CAN_ASSUME(_dispatch_is_multithreaded_inline()); } @@ -732,6 +732,14 @@ extern bool _dispatch_memory_warn; #endif // HAVE_SYS_GUARDED_H +#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + +#include "provider.h" +#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION + #if __has_include() #include #ifndef DBG_DISPATCH @@ -754,7 +762,7 @@ extern bool _dispatch_memory_warn; #define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 #endif #if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && \ - (DISPATCH_INTROSPECTION || DISPATCH_PROFILE) + (DISPATCH_INTROSPECTION || DISPATCH_PROFILE || DISPATCH_DEBUG) #define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 1 #endif diff --git a/src/mach.c b/src/mach.c index 0f9e9a8f3..699492da0 100644 --- a/src/mach.c +++ b/src/mach.c @@ -59,7 +59,7 @@ static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, dispatch_qos_t qos); static void _dispatch_mach_cancel(dispatch_mach_t dm); -static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, +static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos); static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg); @@ -73,6 +73,9 @@ static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, mach_port_t send); +// For tests only. 
+DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); + dispatch_source_t _dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, const struct dispatch_continuation_s *dc) @@ -153,6 +156,13 @@ dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks) } } +void +_dispatch_mach_hooks_install_default(void) +{ + os_atomic_store(&_dispatch_mach_xpc_hooks, + &_dispatch_mach_xpc_hooks_default, relaxed); +} + #pragma mark - #pragma mark dispatch_mach_t @@ -431,6 +441,9 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, if (!drq) { pri = dm->dq_priority; wlh = dm->dm_recv_refs->du_wlh; + } else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { + pri = DISPATCH_PRIORITY_FLAG_MANAGER; + wlh = (dispatch_wlh_t)drq; } else if (dx_hastypeflag(drq, QUEUE_ROOT)) { pri = drq->dq_priority; wlh = DISPATCH_WLH_ANON; @@ -1386,7 +1399,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { qos = _dmsr_state_max_qos(new_state); - _dispatch_mach_send_barrier_drain_push(dm, qos); + _dispatch_mach_push_send_barrier_drain(dm, qos); } else { if (needs_mgr || dm->dm_needs_mgr) { qos = _dmsr_state_max_qos(new_state); @@ -1472,7 +1485,7 @@ _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, DISPATCH_NOINLINE static void -_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, dispatch_qos_t qos) +_dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); @@ -1534,7 +1547,7 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, dispatch_wakeup_flags_t wflags = 0; if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { - _dispatch_mach_send_barrier_drain_push(dm, qos); + _dispatch_mach_push_send_barrier_drain(dm, qos); } else if (wakeup || dmsr->dmsr_disconnect_cnt || (dm->dq_atomic_flags & DSF_CANCELED)) { wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2; @@ -1739,6 +1752,7 @@ _dispatch_mach_priority_propagate(mach_msg_option_t options, } *msg_pp = _dispatch_priority_compute_propagated(0, flags); // TODO: remove QoS contribution of sync IPC messages to send queue + // rdar://31848737 return _dispatch_qos_from_pp(*msg_pp); } @@ -2216,7 +2230,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; dispatch_qos_t qos; _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); @@ -2231,7 +2245,7 @@ void dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; dispatch_qos_t qos; _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); @@ -2247,7 +2261,7 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); @@ -2259,7 +2273,7 @@ void 
dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); diff --git a/src/object.c b/src/object.c index 43f580bd2..86d100507 100644 --- a/src/object.c +++ b/src/object.c @@ -236,8 +236,9 @@ void * dispatch_get_context(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return NULL; } return dou._do->do_ctxt; @@ -247,8 +248,9 @@ void dispatch_set_context(dispatch_object_t dou, void *context) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return; } dou._do->do_ctxt = context; @@ -258,8 +260,9 @@ void dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return; } dou._do->do_finalizer = finalizer; @@ -271,8 +274,9 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq); if (dx_vtable(dou._do)->do_set_targetq) { dx_vtable(dou._do)->do_set_targetq(dou._do, tq); - } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && - !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + } else if (likely(dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + !dx_hastypeflag(dou._do, QUEUE_ROOT) && + !dx_hastypeflag(dou._do, QUEUE_BASE))) { if (slowpath(!tq)) { tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); } diff --git a/src/object.m b/src/object.m index cc97cc3db..efee82947 100644 --- a/src/object.m +++ b/src/object.m @@ -387,6 +387,14 @@ @implementation OS_OBJECT_CLASS(voucher) DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() +-(id)retain { + return (id)_voucher_retain_inline((struct voucher_s *)self); +} + +-(oneway void)release { + return _voucher_release_inline((struct voucher_s *)self); +} + - (void)_xref_dispose { return _voucher_xref_dispose(self); // calls _os_object_release_internal() } diff --git a/src/object_internal.h b/src/object_internal.h index 0060f27f8..4504f6587 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -355,6 +355,8 @@ enum { _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks _DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x0100, // bit set for any root queues + _DISPATCH_QUEUE_BASE_TYPEFLAG = 0x0200, // base of a hierarchy + // targets a root queue #define DISPATCH_CONTINUATION_TYPE(name) \ (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE) @@ -372,10 +374,11 @@ enum { 
 	DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE |
 			_DISPATCH_QUEUE_ROOT_TYPEFLAG,
 	DISPATCH_QUEUE_NETWORK_EVENT_TYPE = 5 | _DISPATCH_QUEUE_TYPE |
-			_DISPATCH_QUEUE_ROOT_TYPEFLAG,
+			_DISPATCH_QUEUE_BASE_TYPEFLAG,
 	DISPATCH_QUEUE_RUNLOOP_TYPE = 6 | _DISPATCH_QUEUE_TYPE |
-			_DISPATCH_QUEUE_ROOT_TYPEFLAG,
-	DISPATCH_QUEUE_MGR_TYPE = 7 | _DISPATCH_QUEUE_TYPE,
+			_DISPATCH_QUEUE_BASE_TYPEFLAG,
+	DISPATCH_QUEUE_MGR_TYPE = 7 | _DISPATCH_QUEUE_TYPE |
+			_DISPATCH_QUEUE_BASE_TYPEFLAG,
 	DISPATCH_QUEUE_SPECIFIC_TYPE = 8 | _DISPATCH_QUEUE_TYPE,
 
 	DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE,
diff --git a/src/provider.d b/src/provider.d
index ede3c56b3..13bcf7a93 100644
--- a/src/provider.d
+++ b/src/provider.d
@@ -101,3 +101,41 @@ provider dispatch {
 #pragma D attributes Private/Private/Common provider dispatch function
 #pragma D attributes Evolving/Evolving/Common provider dispatch name
 #pragma D attributes Evolving/Evolving/Common provider dispatch args
+
+typedef struct voucher_s *voucher_t;
+
+/*
+ * Probes for vouchers
+ */
+provider voucher {
+
+	/*
+	 * Voucher lifetime:
+	 *
+	 * voucher$target:::create	A new voucher is being created
+	 * voucher$target:::dispose	A voucher is being freed
+	 * voucher$target:::retain	A voucher is being retained
+	 * voucher$target:::release	A voucher is being released
+	 */
+	probe create(voucher_t voucher, mach_port_t kv, uint64_t activity_id);
+	probe dispose(voucher_t voucher);
+	probe retain(voucher_t voucher, int resulting_refcnt);
+	probe release(voucher_t voucher, int resulting_refcnt);
+
+	/*
+	 * Thread adoption:
+	 *
+	 * voucher$target:::adopt	A voucher is being adopted by the current thread
+	 * voucher$target:::orphan	A voucher is being orphaned by the current thread
+	 */
+	probe adopt(voucher_t voucher);
+	probe orphan(voucher_t voucher);
+
+};
+
+#pragma D attributes Evolving/Evolving/Common provider voucher provider
+#pragma D attributes Private/Private/Common provider voucher module
+#pragma D attributes Private/Private/Common provider voucher function
+#pragma D attributes Evolving/Evolving/Common provider voucher name
+#pragma D attributes Evolving/Evolving/Common provider voucher args
+
diff --git a/src/queue.c b/src/queue.c
index fe26ab71d..7bd1dbc41 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -26,20 +26,32 @@
 #if HAVE_PTHREAD_WORKQUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE
 #define DISPATCH_USE_WORKQUEUES 1
 #endif
-#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && !defined(DISPATCH_ENABLE_THREAD_POOL)
+#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \
+		!defined(DISPATCH_ENABLE_THREAD_POOL)
 #define DISPATCH_ENABLE_THREAD_POOL 1
 #endif
 #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL
 #define DISPATCH_USE_PTHREAD_POOL 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) && \
-		!HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
+#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || \
+		DISPATCH_DEBUG) && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \
 		!defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK)
 #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1
 #endif
-#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK
+#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && (DISPATCH_DEBUG || \
+		(!DISPATCH_USE_KEVENT_WORKQUEUE && !HAVE_PTHREAD_WORKQUEUE_QOS)) && \
+		!defined(DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP)
+#define DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1
+#endif
+#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || \
DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || \ + DISPATCH_USE_INTERNAL_WORKQUEUE +#if !DISPATCH_USE_INTERNAL_WORKQUEUE #define DISPATCH_USE_WORKQ_PRIORITY 1 #endif +#define DISPATCH_USE_WORKQ_OPTIONS 1 +#endif + #if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* @@ -69,7 +81,7 @@ static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_worker_thread3(pthread_priority_t priority); #endif -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static void _dispatch_worker_thread2(int priority, int options, void *context); #endif #endif @@ -164,7 +176,9 @@ struct dispatch_root_queue_context_s { #if DISPATCH_USE_WORKQ_PRIORITY int dgq_wq_priority; #endif +#if DISPATCH_USE_WORKQ_OPTIONS int dgq_wq_options; +#endif #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif @@ -195,8 +209,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], @@ -208,8 +224,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], @@ -221,8 +239,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], @@ -234,8 +254,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], @@ -247,8 +269,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], @@ -260,8 +284,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], @@ -273,8 +299,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = 
WORKQ_DEFAULT_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], @@ -286,8 +314,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], @@ -299,8 +329,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], @@ -312,8 +344,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], @@ -325,8 +359,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], @@ -338,8 +374,10 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, #endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], @@ -419,7 +457,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { ), }; -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], @@ -442,7 +480,7 @@ static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], }; -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; @@ -650,7 +688,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) } } #endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!result && !disable_wq) { pthread_workqueue_setdispatchoffset_np( offsetof(struct dispatch_queue_s, dq_serialnum)); @@ -660,7 +698,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif result = !r; } -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 
#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL if (!result) { #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK @@ -788,7 +826,7 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_COUNT); dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP dispatch_assert(sizeof(_dispatch_wq2root_queues) / sizeof(_dispatch_wq2root_queues[0][0]) == WORKQ_NUM_PRIOQUEUE * 2); @@ -1420,7 +1458,7 @@ _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free) uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + if (dx_hastypeflag(dq, QUEUE_ROOT)) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; @@ -1799,23 +1837,24 @@ _dispatch_queue_set_width2(void *ctxt) uint32_t tmp; dispatch_queue_t dq = _dispatch_queue_get_current(); - if (w > 0) { - tmp = (unsigned int)w; - } else switch (w) { - case 0: - tmp = 1; - break; - case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = dispatch_hw_config(physical_cpus); - break; - case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = dispatch_hw_config(active_cpus); - break; - default: - // fall through - case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - tmp = dispatch_hw_config(logical_cpus); - break; + if (w >= 0) { + tmp = w ? (unsigned int)w : 1; + } else { + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_PHYSICAL); + break; + case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + break; + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + default: + tmp = _dispatch_qos_max_parallelism(qos, 0); + break; + } } if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { tmp = DISPATCH_QUEUE_WIDTH_MAX; @@ -1832,8 +1871,9 @@ _dispatch_queue_set_width2(void *ctxt) void dispatch_queue_set_width(dispatch_queue_t dq, long width) { - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dq, QUEUE_ROOT))) { + if (unlikely(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dq, QUEUE_ROOT) || + dx_hastypeflag(dq, QUEUE_BASE))) { return; } @@ -1848,8 +1888,15 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width) DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } - _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, - _dispatch_queue_set_width2); + if (likely((int)width >= 0)) { + _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, + _dispatch_queue_set_width2); + } else { + // The negative width constants need to execute on the queue to + // query the queue QoS + _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, + _dispatch_queue_set_width2); + } } static void @@ -2265,7 +2312,7 @@ dispatch_pthread_root_queue_copy_current(void) { dispatch_queue_t dq = _dispatch_queue_get_current(); if (!dq) return NULL; - while (slowpath(dq->do_targetq)) { + while (unlikely(dq->do_targetq)) { dq = dq->do_targetq; } if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || @@ -2911,9 +2958,9 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = 0, p = 0; - if 
(_dispatch_block_invoke_should_set_priority(flags)) { - op = _dispatch_get_priority(); - p = dbpd->dbpd_priority; + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { @@ -2966,9 +3013,53 @@ _dispatch_block_sync_invoke(void *block) } } -DISPATCH_ALWAYS_INLINE static void -_dispatch_block_async_invoke2(dispatch_block_t b, bool release) +_dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq, + dispatch_qos_t qos) +{ + uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); + + // Only dispatch queues can reach this point (as opposed to sources or more + // complex objects) which allows us to handle the DIRTY bit protocol by only + // looking at the tail + dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE); + +again: + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + dispatch_assert(_dq_state_is_base_wlh(old_state)); + if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { + // Nothing to do if the QoS isn't going down + os_atomic_rmw_loop_give_up(return); + } + if (_dq_state_is_dirty(old_state)) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + if (!dq->dq_items_tail) { + goto again; + } + return; + }); + } + + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state |= qos_bits; + }); + + _dispatch_deferred_items_get()->ddi_wlh_needs_update = true; + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); +} + +#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 +#define DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET 0x2 + +DISPATCH_NOINLINE +static void +_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); unsigned int atomic_flags = dbpd->dbpd_atomic_flags; @@ -2976,6 +3067,17 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " "run more than once and waited for"); } + + if (unlikely((dbpd->dbpd_flags & + DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE) && + !(invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET))) { + dispatch_queue_t dq = _dispatch_get_current_queue(); + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + if ((dispatch_wlh_t)dq == _dispatch_get_wlh() && !dq->dq_items_tail) { + _dispatch_block_async_invoke_reset_max_qos(dq, qos); + } + } + if (!slowpath(atomic_flags & DBF_CANCELED)) { dbpd->dbpd_block(); } @@ -2984,13 +3086,14 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) dispatch_group_leave(_dbpd_group(dbpd)); } } - os_mpsc_queue_t oq; - oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + + os_mpsc_queue_t oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); if (oq) { // balances dispatch_{,barrier_,group_}async _os_object_release_internal_n_inline(oq->_as_os_obj, 2); } - if (release) { + + if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { Block_release(b); } } @@ -2998,20 +3101,35 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) static void _dispatch_block_async_invoke(void *block) { - _dispatch_block_async_invoke2(block, false); + 
_dispatch_block_async_invoke2(block, 0); } static void _dispatch_block_async_invoke_and_release(void *block) { - _dispatch_block_async_invoke2(block, true); + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); +} + +static void +_dispatch_block_async_invoke_and_release_mach_barrier(void *block) +{ + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE | + DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_supports_wait_and_cancel(dispatch_block_private_data_t dbpd) +{ + return dbpd && !(dbpd->dbpd_flags & + DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE); } void dispatch_block_cancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_cancel()"); } @@ -3022,7 +3140,7 @@ long dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_testcancel()"); } @@ -3033,7 +3151,7 @@ long dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_wait()"); } @@ -3128,7 +3246,10 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, _os_object_retain_internal_n_inline(oq->_as_os_obj, 2); } - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + if (dc_flags & DISPATCH_OBJ_MACH_BARRIER) { + dispatch_assert(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dc->dc_func = _dispatch_block_async_invoke_and_release_mach_barrier; + } else if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { dc->dc_func = _dispatch_block_async_invoke_and_release; } else { dc->dc_func = _dispatch_block_async_invoke; @@ -3156,28 +3277,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, dc->dc_flags = dc_flags; } -void -_dispatch_continuation_update_bits(dispatch_continuation_t dc, - uintptr_t dc_flags) -{ - dc->dc_flags = dc_flags; - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { - if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { - dc->dc_func = _dispatch_block_async_invoke_and_release; - } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { - dc->dc_func = _dispatch_call_block_and_release; - } - } else { - if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { - dc->dc_func = _dispatch_block_async_invoke; - } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { - dc->dc_func = _dispatch_Block_invoke(dc->dc_ctxt); - } - } -} - #endif // __BLOCKS__ - #pragma mark - #pragma mark dispatch_barrier_async @@ -4092,15 +4192,19 @@ _dispatch_sync_block_with_private_data(dispatch_queue_t dq, dispatch_block_t work, dispatch_block_flags_t flags) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); - pthread_priority_t op = 0; + pthread_priority_t op = 0, p = 0; flags |= dbpd->dbpd_flags; - if (_dispatch_block_invoke_should_set_priority(flags)) { - voucher_t v = DISPATCH_NO_VOUCHER; - op = _dispatch_get_priority(); - v = _dispatch_set_priority_and_voucher(dbpd->dbpd_priority, v, 0); - dispatch_assert(v == DISPATCH_NO_VOUCHER); + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); 
+ if (op) { + p = dbpd->dbpd_priority; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; } + ov = _dispatch_set_priority_and_voucher(p, v, 0); + // balanced in d_block_sync_invoke or d_block_wait if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq->_as_oq, relaxed)) { _dispatch_retain_2(dq); @@ -4110,7 +4214,7 @@ _dispatch_sync_block_with_private_data(dispatch_queue_t dq, } else { dispatch_sync_f(dq, work, _dispatch_block_sync_invoke); } - _dispatch_reset_priority_and_voucher(op, DISPATCH_NO_VOUCHER); + _dispatch_reset_priority_and_voucher(op, ov); } void @@ -4448,7 +4552,7 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor) #if HAVE_PTHREAD_WORKQUEUE_QOS r = _pthread_workqueue_addthreads(remaining, _dispatch_priority_to_pp(dq->dq_priority)); -#elif HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#elif DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, qc->dgq_wq_options, remaining); #endif @@ -5250,7 +5354,7 @@ _dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, } apply_again: - if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + if (dx_hastypeflag(tq, QUEUE_ROOT)) { if (_dispatch_root_queue_push_queue_override_needed(tq, qos)) { _dispatch_root_queue_push_override_stealer(tq, dq, qos); } @@ -5675,6 +5779,9 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi goto park; } dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 + goto park; + } if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { _dispatch_retain(dq); _dispatch_trace_continuation_push(dq->do_targetq, dq); @@ -5810,7 +5917,7 @@ _dispatch_worker_thread3(pthread_priority_t pp) } #endif // HAVE_PTHREAD_WORKQUEUE_QOS -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol static void @@ -5823,7 +5930,7 @@ _dispatch_worker_thread2(int priority, int options, return _dispatch_worker_thread4(dq); } -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL diff --git a/src/queue_internal.h b/src/queue_internal.h index c1d0f6e5a..1a590e27a 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -832,6 +832,8 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); #define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul // use the voucher from the continuation even if the queue has voucher set #define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul +// never set on continuations, used by mach.c only +#define DISPATCH_OBJ_MACH_BARRIER 0x1000000ul typedef struct dispatch_continuation_s { struct dispatch_object_s _as_do[0]; @@ -975,7 +977,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t; #ifdef __BLOCKS__ -#define DISPATCH_BLOCK_API_MASK (0x80u - 1) +#define DISPATCH_BLOCK_API_MASK (0x100u - 1) #define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31) #define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30) @@ -1022,8 +1024,6 @@ void _dispatch_block_sync_invoke(void *block); void _dispatch_continuation_init_slow(dispatch_continuation_t dc, dispatch_queue_class_t dqu, dispatch_block_flags_t flags); -void _dispatch_continuation_update_bits(dispatch_continuation_t dc, - uintptr_t dc_flags); long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); 
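
A minimal sketch of the wait/cancel contract that the _dispatch_block_supports_wait_and_cancel() check above enforces. Again an illustration rather than part of the patch series, using only public API in plain (non-ARC) C; a block carrying the new DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE flag would now crash in dispatch_block_wait() just like a block that was never run through the dispatch_block_create*() family:

#include <dispatch/dispatch.h>
#include <Block.h>

int main(void)
{
	dispatch_queue_t q = dispatch_queue_create("com.example.queue", NULL);
	// dispatch_block_create() attaches the private data object that
	// _dispatch_block_get_data() expects; waiting on a bare block
	// literal is a client crash.
	dispatch_block_t b = dispatch_block_create(0, ^{
		/* work */
	});
	dispatch_async(q, b);
	// Returns zero once the block has finished, nonzero on timeout.
	long timed_out = dispatch_block_wait(b,
			dispatch_time(DISPATCH_TIME_NOW, (int64_t)NSEC_PER_SEC));
	(void)timed_out;
	Block_release(b);
	dispatch_release(q);
	return 0;
}
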
diff --git a/src/shims.h b/src/shims.h index 8dd23ee24..28e1c53a9 100644 --- a/src/shims.h +++ b/src/shims.h @@ -36,6 +36,7 @@ #include "shims/android_stubs.h" #endif +#include "shims/hw_config.h" #include "shims/priority.h" #if HAVE_PTHREAD_WORKQUEUES @@ -147,6 +148,51 @@ _pthread_workqueue_should_narrow(pthread_priority_t priority) } #endif +#if HAVE_PTHREAD_QOS_H && __has_include() && \ + defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ + DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 +#define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL +#else +#define DISPATCH_MAX_PARALLELISM_PHYSICAL 0x1 +#endif +#define DISPATCH_MAX_PARALLELISM_ACTIVE 0x2 +_Static_assert(!(DISPATCH_MAX_PARALLELISM_PHYSICAL & + DISPATCH_MAX_PARALLELISM_ACTIVE), "Overlapping parallelism flags"); + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags) +{ + uint32_t p; + int r = 0; + + if (qos) { +#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM + r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos), + flags & PTHREAD_MAX_PARALLELISM_PHYSICAL); +#endif + } + if (likely(r > 0)) { + p = (uint32_t)r; + } else { + p = (flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) ? + dispatch_hw_config(physical_cpus) : + dispatch_hw_config(logical_cpus); + } + if (flags & DISPATCH_MAX_PARALLELISM_ACTIVE) { + uint32_t active_cpus = dispatch_hw_config(active_cpus); + if ((flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) && + active_cpus < dispatch_hw_config(logical_cpus)) { + active_cpus /= dispatch_hw_config(logical_cpus) / + dispatch_hw_config(physical_cpus); + } + if (active_cpus < p) p = active_cpus; + } + return p; +} + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -174,7 +220,6 @@ void __builtin_trap(void); #include "shims/yield.h" #include "shims/lock.h" -#include "shims/hw_config.h" #include "shims/perfmon.h" #include "shims/getprogname.h" @@ -228,7 +273,8 @@ _dispatch_mempcpy(void *ptr, const void *data, size_t len) #define _dispatch_clear_stack(s) do { \ void *a[(s)/sizeof(void*) ? 
(s)/sizeof(void*) : 1]; \ a[0] = pthread_get_stackaddr_np(pthread_self()); \ - bzero((void*)&a[1], (size_t)(a[0] - (void*)&a[1])); \ + void* volatile const p = (void*)&a[1]; /* */ \ + bzero((void*)p, (size_t)(a[0] - (void*)&a[1])); \ } while (0) #else #define _dispatch_clear_stack(s) diff --git a/src/shims/priority.h b/src/shims/priority.h index 948e4c7af..3e85ff54c 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -94,7 +94,7 @@ typedef uint16_t dispatch_priority_requested_t; #define DISPATCH_PRIORITY_OVERRIDE_SHIFT 16 #define DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) -#define DISPATCH_PRIORITY_SATURATED_OVERRIDE DISPATCH_PRIORITY_OVERRIDE_MASK +#define DISPATCH_PRIORITY_SATURATED_OVERRIDE ((dispatch_priority_t)0x000f0000) #define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG #define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG diff --git a/src/source.c b/src/source.c index fd337a9a3..6b9750722 100644 --- a/src/source.c +++ b/src/source.c @@ -521,6 +521,22 @@ _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, } } +DISPATCH_NOINLINE +static void +_dispatch_source_refs_finalize_unregistration(dispatch_source_t ds) +{ + dispatch_queue_flags_t dqf; + dispatch_source_refs_t dr = ds->ds_refs; + + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, + DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); + if (dqf & DSF_CANCEL_WAITER) { + _dispatch_wake_by_address(&ds->dq_atomic_flags); + } + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); + _dispatch_release_tailcall(ds); // the retain is done at creation time +} + void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) { @@ -549,14 +565,8 @@ _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) } } - dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, - DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); - if (dqf & DSF_CANCEL_WAITER) { - _dispatch_wake_by_address(&ds->dq_atomic_flags); - } ds->ds_is_installed = true; - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); - _dispatch_release_tailcall(ds); // the retain is done at creation time + _dispatch_source_refs_finalize_unregistration(ds); } DISPATCH_ALWAYS_INLINE @@ -619,8 +629,9 @@ _dispatch_source_refs_register(dispatch_source_t ds, dispatch_wlh_t wlh, if (unlikely(!_dispatch_source_tryarm(ds) || !_dispatch_unote_register(dr, wlh, pri))) { - _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, - DSF_ARMED | DSF_DEFERRED_DELETE); + // Do the parts of dispatch_source_refs_unregister() that + // are required after this partial initialization. 
+ _dispatch_source_refs_finalize_unregistration(ds); } else { _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr); } @@ -1761,8 +1772,10 @@ _dispatch_timer_heap_insert(dispatch_timer_heap_t dth, { uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT; - dispatch_assert(dt->dt_heap_entry[DTH_TARGET_ID] == DTH_INVALID_ID); - dispatch_assert(dt->dt_heap_entry[DTH_DEADLINE_ID] == DTH_INVALID_ID); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==, + DTH_INVALID_ID, "deadline idx"); if (idx == 0) { dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID; @@ -1786,12 +1799,16 @@ _dispatch_timer_heap_remove(dispatch_timer_heap_t dth, { uint32_t idx = (dth->dth_count -= DTH_ID_COUNT); - dispatch_assert(dt->dt_heap_entry[DTH_TARGET_ID] != DTH_INVALID_ID); - dispatch_assert(dt->dt_heap_entry[DTH_DEADLINE_ID] != DTH_INVALID_ID); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); if (idx == 0) { - dispatch_assert(dth->dth_min[DTH_TARGET_ID] == dt); - dispatch_assert(dth->dth_min[DTH_DEADLINE_ID] == dt); + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt, + "target slot"); + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt, + "deadline slot"); dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL; goto clear_heap_entry; } @@ -1819,8 +1836,11 @@ static inline void _dispatch_timer_heap_update(dispatch_timer_heap_t dth, dispatch_timer_source_refs_t dt) { - dispatch_assert(dt->dt_heap_entry[DTH_TARGET_ID] != DTH_INVALID_ID); - dispatch_assert(dt->dt_heap_entry[DTH_DEADLINE_ID] != DTH_INVALID_ID); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]); _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]); @@ -1875,7 +1895,7 @@ _dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx) { dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; if (_dispatch_unote_registered(dt)) { - dispatch_assert(dt->du_ident == tidx); + DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx"); _dispatch_timer_heap_update(heap, dt); } else { dt->du_ident = tidx; @@ -2051,7 +2071,10 @@ _dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx) uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) { - dispatch_assert(tidx == dr->du_ident && dr->dt_timer.target); + DISPATCH_TIMER_ASSERT(dr->du_filter, ==, DISPATCH_EVFILT_TIMER, + "invalid filter"); + DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx"); + DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target"); ds = _dispatch_source_from_refs(dr); if (dr->dt_timer.target > now) { // Done running timers for now. diff --git a/src/swift/Source.swift b/src/swift/Source.swift index a3a7e7903..421a6e9bb 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -279,28 +279,342 @@ public extension DispatchSourceProcess { #endif public extension DispatchSourceTimer { + /// + /// Sets the deadline and leeway for a timer event that fires once. 
+ /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared and the next timer event will occur at `deadline`. + /// + /// Delivery of the timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// - note: Delivery of the timer event does not cancel the timer source. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, ~0, UInt64(leeway.rawValue)) } + /// + /// Sets the deadline and leeway for a timer event that fires once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared and the next timer event will occur at `wallDeadline`. + /// + /// Delivery of the timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// - note: Delivery of the timer event does not cancel the timer source. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, ~0, UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. 
+ /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `interval` units of + /// time thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter interval: the interval for the timer. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `interval` seconds + /// thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. 
+ /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter interval: the interval for the timer in seconds. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `interval` units of + /// time thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter interval: the interval for the timer. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval.rawValue), UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. 
The next timer event will occur at `wallDeadline` and every `interval` seconds + /// thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter interval: the interval for the timer in seconds. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + } + + /// + /// Sets the deadline, repeat interval and leeway for a timer event. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `repeating` units of + /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`, + /// or is defaulted, the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. 
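+ ///
+ /// A minimal usage sketch (editor's illustration, not part of the original patch;
+ /// the queue, handler and values shown are hypothetical):
+ ///
+ ///     let timer = DispatchSource.makeTimerSource(queue: DispatchQueue.global())
+ ///     timer.setEventHandler { print("fired") }
+ ///     // first fire one second from now, then every five seconds, allowing
+ ///     // the system up to 100ms of coalescing delay
+ ///     timer.schedule(deadline: .now() + 1, repeating: .seconds(5), leeway: .milliseconds(100))
+ ///     timer.resume()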
+ /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the first timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire + /// only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(deadline: DispatchTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) + } + + /// + /// Sets the deadline, repeat interval and leeway for a timer event. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `repeating` seconds + /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`, + /// the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer + /// should fire only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(deadline: DispatchTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + } + + /// + /// Sets the deadline, repeat interval and leeway for a timer event. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` units of + /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`, + /// or is defaulted, the timer fires only once. 
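+ ///
+ /// For instance (editor's sketch, not part of the original patch; the source and
+ /// handler shown are hypothetical), omitting `repeating` arms a one-shot
+ /// wall-clock timer:
+ ///
+ ///     let timer = DispatchSource.makeTimerSource()
+ ///     timer.setEventHandler { print("fires once") }
+ ///     timer.schedule(wallDeadline: .now() + .seconds(10))
+ ///     timer.resume()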
+ ///
+ /// Delivery of a timer event may be delayed by the system in order to improve power consumption and
+ /// system performance. The upper limit to the allowable delay may be configured with the `leeway`
+ /// argument; the lower limit is under the control of the system.
+ ///
+ /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+ /// `leeway`. For the subsequent timer fires at `wallDeadline + N * repeating`, the upper
+ /// limit is the smaller of `leeway` and `repeating/2`.
+ ///
+ /// The lower limit to the allowable delay may vary with process state such as visibility of the
+ /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+ /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+ /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+ /// this flag is specified.
+ ///
+ /// Calling this method has no effect if the timer source has already been canceled.
+ ///
+ /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the
+ /// leeway and other considerations described above. The deadline is based on
+ /// `gettimeofday(3)`.
+ /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire
+ /// only once.
+ /// - parameter leeway: the leeway for the timer.
+ ///
+ @available(swift, introduced: 4)
+ public func schedule(wallDeadline: DispatchWallTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) {
+ dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue))
+ }
+
+ ///
+ /// Sets the deadline, repeat interval and leeway for a timer event.
+ ///
+ /// Once this function returns, any pending source data accumulated for the previous timer values
+ /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` seconds
+ /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`,
+ /// the timer fires only once.
+ ///
+ /// Delivery of a timer event may be delayed by the system in order to improve power consumption
+ /// and system performance. The upper limit to the allowable delay may be configured with the `leeway`
+ /// argument; the lower limit is under the control of the system.
+ ///
+ /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to
+ /// `leeway`. For the subsequent timer fires at `wallDeadline + N * repeating`, the upper
+ /// limit is the smaller of `leeway` and `repeating/2`.
+ ///
+ /// The lower limit to the allowable delay may vary with process state such as visibility of the
+ /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system
+ /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller
+ /// than the current lower limit. Note that a minimal amount of delay is to be expected even if
+ /// this flag is specified.
+ ///
+ /// Calling this method has no effect if the timer source has already been canceled.
+ ///
+ /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the
+ /// leeway and other considerations described above. The deadline is based on
+ /// `gettimeofday(3)`.
+ /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer + /// should fire only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(wallDeadline: DispatchWallTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } } diff --git a/src/swift/Time.swift b/src/swift/Time.swift index 8178ffd6c..d7d49c96b 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -124,6 +124,8 @@ public enum DispatchTimeInterval { case milliseconds(Int) case microseconds(Int) case nanoseconds(Int) + @_downgrade_exhaustivity_check + case never internal var rawValue: Int64 { switch self { @@ -131,6 +133,16 @@ public enum DispatchTimeInterval { case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC) case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC) case .nanoseconds(let ns): return Int64(ns) + case .never: return Int64.max + } + } + + public static func ==(lhs: DispatchTimeInterval, rhs: DispatchTimeInterval) -> Bool { + switch (lhs, rhs) { + case (.never, .never): return true + case (.never, _): return false + case (_, .never): return false + default: return lhs.rawValue == rhs.rawValue } } } diff --git a/src/trace.h b/src/trace.h index 872cd6ff5..c670f60b7 100644 --- a/src/trace.h +++ b/src/trace.h @@ -29,14 +29,6 @@ #if DISPATCH_PURE_C -#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION -typedef struct dispatch_trace_timer_params_s { - int64_t deadline, interval, leeway; -} *dispatch_trace_timer_params_t; - -#include "provider.h" -#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION - #if DISPATCH_USE_DTRACE_INTROSPECTION #define _dispatch_trace_callout(_c, _f, _dcc) do { \ if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ diff --git a/src/voucher.c b/src/voucher.c index e4128a289..458e2f0a4 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -85,6 +85,7 @@ voucher_create(voucher_recipe_t recipe) if (extra) { memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra); } + _voucher_trace(CREATE, voucher, MACH_PORT_NULL, 0); return voucher; } #endif @@ -585,6 +586,7 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) } } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); _voucher_insert(v); _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); return v; @@ -619,6 +621,7 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, "voucher[%p]", v, kv, ov); _dispatch_voucher_debug_machport(kv); } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); return v; } @@ -676,6 +679,7 @@ _voucher_create_without_importance(voucher_t ov) _dispatch_voucher_debug("kvoucher[0x%08x] create without importance " "from voucher[%p]", v, kv, ov); } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); return v; } @@ -711,6 +715,7 @@ _voucher_create_accounting_voucher(voucher_t ov) v->v_kvbase = _voucher_retain(ov); _voucher_dealloc_mach_voucher(kv); // borrow base reference } + _voucher_trace(CREATE, v, kv, v->v_activity); _voucher_insert(v); _dispatch_voucher_debug("kvoucher[0x%08x] create accounting voucher " "from voucher[%p]", v, kv, ov); @@ -774,6 +779,7 @@ _voucher_xref_dispose(voucher_t voucher) void _voucher_dispose(voucher_t voucher) { + _voucher_trace(DISPOSE, voucher); 
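+	// editor's annotation (not part of the original patch): this fires the
+	// DTrace "dispose" probe that tools/voucher_trace.d (added below) reports
+	// as a FREE event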
_dispatch_voucher_debug("dispose", voucher); if (slowpath(_voucher_hash_is_enqueued(voucher))) { _dispatch_voucher_debug("corruption", voucher); @@ -1237,6 +1243,7 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, } done: *trace_id = ftid.ftid_value; + _voucher_trace(CREATE, v, v->v_kvoucher, va_id); return v; } diff --git a/src/voucher_internal.h b/src/voucher_internal.h index a0ddd4db4..772c8c434 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -262,6 +262,16 @@ typedef struct voucher_recipe_s { #define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif +#if DISPATCH_USE_DTRACE +#define _voucher_trace(how, ...) ({ \ + if (unlikely(VOUCHER_##how##_ENABLED())) { \ + VOUCHER_##how(__VA_ARGS__); \ + } \ + }) +#else +#define _voucher_trace(how, ...) ((void)0) +#endif + #ifndef DISPATCH_VOUCHER_OBJC_DEBUG #if DISPATCH_INTROSPECTION || DISPATCH_DEBUG #define DISPATCH_VOUCHER_OBJC_DEBUG 1 @@ -270,36 +280,29 @@ typedef struct voucher_recipe_s { #endif #endif // DISPATCH_VOUCHER_OBJC_DEBUG -#if DISPATCH_PURE_C - DISPATCH_ALWAYS_INLINE -static inline voucher_t -_voucher_retain(voucher_t voucher) +static inline struct voucher_s * +_voucher_retain_inline(struct voucher_s *voucher) { -#if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RETAIN, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); if (unlikely(xref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Voucher resurrection"); } -#else - os_retain(voucher); - _dispatch_voucher_debug("retain -> %d", voucher, - voucher->os_obj_xref_cnt + 1); -#endif // DISPATCH_DEBUG return voucher; } DISPATCH_ALWAYS_INLINE static inline void -_voucher_release(voucher_t voucher) +_voucher_release_inline(struct voucher_s *voucher) { -#if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RELEASE, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (likely(xref_cnt >= 0)) { return; @@ -308,10 +311,31 @@ _voucher_release(voucher_t voucher) _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } return _os_object_xref_dispose((_os_object_t)voucher); +} + +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_retain(voucher_t voucher) +{ +#if DISPATCH_VOUCHER_OBJC_DEBUG + os_retain(voucher); #else - _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); - return os_release(voucher); -#endif // DISPATCH_DEBUG + _voucher_retain_inline(voucher); +#endif // DISPATCH_VOUCHER_OBJC_DEBUG + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release(voucher_t voucher) +{ +#if DISPATCH_VOUCHER_OBJC_DEBUG + os_release(voucher); +#else + _voucher_release_inline(voucher); +#endif // DISPATCH_VOUCHER_OBJC_DEBUG } DISPATCH_ALWAYS_INLINE @@ -322,13 +346,13 @@ _voucher_release_no_dispose(voucher_t voucher) // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RELEASE, voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); 
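+	// editor's annotation (not part of the original patch): os_obj_xref_cnt is
+	// zero-based, so xref_cnt + 1 above is the conventional reference count
+	// reported by both the RELEASE probe and the debug log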
 	if (likely(xref_cnt >= 0)) {
 		return;
 	}
 	_OS_OBJECT_CLIENT_CRASH("Voucher over-release");
 #else
-	_dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt);
 	return os_release(voucher);
 #endif // DISPATCH_DEBUG
 }
@@ -371,8 +395,10 @@ static inline mach_voucher_t
 _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher)
 {
 	if (ov == voucher) return VOUCHER_NO_MACH_VOUCHER;
-	_dispatch_voucher_debug("swap from voucher[%p]", voucher, ov);
+	if (ov) _voucher_trace(ORPHAN, ov);
 	_dispatch_thread_setspecific(dispatch_voucher_key, voucher);
+	if (voucher) _voucher_trace(ADOPT, voucher);
+	_dispatch_voucher_debug("swap from voucher[%p]", voucher, ov);
 	mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL;
 	mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL;
 #if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS
diff --git a/tools/voucher_trace.d b/tools/voucher_trace.d
new file mode 100755
index 000000000..890198e66
--- /dev/null
+++ b/tools/voucher_trace.d
@@ -0,0 +1,78 @@
+#!/usr/sbin/dtrace -s
+
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+/*
+ * Usage: voucher_trace.d -p [pid]
+ *        traced process must have been executed with
+ *        DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with
+ *        DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug
+ */
+
+#pragma D option quiet
+#pragma D option zdefs
+#pragma D option bufsize=16m
+
+BEGIN {
+	printf("Starting to trace voucher operations...\n");
+}
+
+voucher$target:libdispatch*.dylib::create
+{
+	printf("ALLOC voucher 0x%p, thread %#llx, ref 1, port %#x, aid %#llx", arg0, tid, arg1, arg2);
+	ustack(10);
+	printf("\n");
+}
+
+voucher$target:libdispatch*.dylib::dispose
+{
+	printf("FREE voucher 0x%p, thread %#llx, ref 0", arg0, tid);
+	ustack(10);
+	printf("\n");
+}
+
+voucher$target:libdispatch*.dylib::retain
+{
+	printf("RETAIN voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1);
+	ustack(10);
+	printf("\n");
+}
+
+voucher$target:libdispatch*.dylib::release
+{
+	printf("RELEASE voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1);
+	ustack(10);
+	printf("\n");
+}
+
+voucher$target:libdispatch*.dylib::adopt
+{
+	printf("ADOPT voucher 0x%p, thread %#llx", arg0, tid);
+	ustack(10);
+	printf("\n");
+}
+
+voucher$target:libdispatch*.dylib::orphan
+{
+	printf("ORPHAN voucher 0x%p, thread %#llx", arg0, tid);
+	ustack(10);
+	printf("\n");
+}
diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order
index a25ecc980..9642ca4dd 100644
--- a/xcodeconfig/libdispatch.order
+++ b/xcodeconfig/libdispatch.order
@@ -71,6 +71,18 @@ _OBJC_METACLASS_$_OS_dispatch_queue
 _OBJC_METACLASS_$_OS_dispatch_queue_serial
 _OBJC_METACLASS_$_OS_dispatch_queue_concurrent
 _OBJC_METACLASS_$_OS_dispatch_queue_root
+_OBJC_METACLASS_$_OS_dispatch_queue_main
+_OBJC_METACLASS_$_OS_dispatch_queue_runloop
+_OBJC_METACLASS_$_OS_dispatch_queue_mgr
+_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue +_OBJC_METACLASS_$_OS_dispatch_queue_attr +_OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_mach +_OBJC_METACLASS_$_OS_dispatch_mach_msg +_OBJC_METACLASS_$_OS_dispatch_io +_OBJC_METACLASS_$_OS_dispatch_operation +_OBJC_METACLASS_$_OS_dispatch_disk +_OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher #_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data From 0eaaab669e3d891419ad8a54b7e5599d7d4b2216 Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Mon, 31 Jul 2017 17:06:05 -0700 Subject: [PATCH 45/46] fix linux build failure Signed-off-by: Daniel A. Steffen --- src/internal.h | 4 ++++ src/queue.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/internal.h b/src/internal.h index 286e53458..417ae20ed 100644 --- a/src/internal.h +++ b/src/internal.h @@ -732,6 +732,8 @@ extern bool _dispatch_memory_warn; #endif // HAVE_SYS_GUARDED_H +#if DISPATCH_PURE_C + #if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { int64_t deadline, interval, leeway; @@ -740,6 +742,8 @@ typedef struct dispatch_trace_timer_params_s { #include "provider.h" #endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION +#endif // DISPATCH_PURE_C + #if __has_include() #include #ifndef DBG_DISPATCH diff --git a/src/queue.c b/src/queue.c index 7bd1dbc41..33e500957 100644 --- a/src/queue.c +++ b/src/queue.c @@ -3013,6 +3013,7 @@ _dispatch_block_sync_invoke(void *block) } } +#if DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq, dispatch_qos_t qos) @@ -3053,6 +3054,7 @@ _dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq, _dispatch_deferred_items_get()->ddi_wlh_needs_update = true; _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE #define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 #define DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET 0x2 @@ -3068,6 +3070,7 @@ _dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) "run more than once and waited for"); } +#if DISPATCH_USE_KEVENT_WORKQUEUE if (unlikely((dbpd->dbpd_flags & DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE) && !(invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET))) { @@ -3077,6 +3080,7 @@ _dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) _dispatch_block_async_invoke_reset_max_qos(dq, qos); } } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE if (!slowpath(atomic_flags & DBF_CANCELED)) { dbpd->dbpd_block(); From 17c153ac4e15690e873731fba0938fac6e1371cf Mon Sep 17 00:00:00 2001 From: "Daniel A. Steffen" Date: Mon, 31 Jul 2017 18:52:30 -0700 Subject: [PATCH 46/46] better fix for provider.h Signed-off-by: Daniel A. Steffen --- src/internal.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/internal.h b/src/internal.h index 417ae20ed..84e33e350 100644 --- a/src/internal.h +++ b/src/internal.h @@ -732,18 +732,20 @@ extern bool _dispatch_memory_warn; #endif // HAVE_SYS_GUARDED_H -#if DISPATCH_PURE_C - #if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { int64_t deadline, interval, leeway; } *dispatch_trace_timer_params_t; +#ifdef __cplusplus +extern "C++" { +#endif #include "provider.h" +#ifdef __cplusplus +} +#endif #endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION -#endif // DISPATCH_PURE_C - #if __has_include() #include #ifndef DBG_DISPATCH