iOS dispatch_sync: synchronization implementation and deadlock

  • dispatch_sync call flow (pseudocode)

dispatch_sync(args) {
	uintptr_t dc_flags = DC_FLAG_BLOCK;
	// Special case: blocks that carry private data
	if (unlikely(_dispatch_block_has_private_data(work))) {
		// See the _dispatch_sync_block_with_privdata code block below
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	// Calls _dispatch_sync_f()
	_dispatch_sync_f(args) {
		_dispatch_sync_f_inline(args) {
			// Serial queues come through here
			if (likely(dq->dq_width == 1)) {
				// Barrier path
				return _dispatch_barrier_sync_f(args) {
					_dispatch_barrier_sync_f_inline(args) {
						// See the _dispatch_barrier_sync_f_inline code block below
					}
				}
			}
			// ... other cases handled below
		}
	}
}
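
  • Example: the deadlock this path guards against

Before walking through the individual code blocks, a minimal usage sketch shows the scenario the article analyzes. This is illustrative user code, not libdispatch source; the queue label is arbitrary.

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
	// A serial queue: dq_width == 1, so dispatch_sync takes the barrier path.
	dispatch_queue_t q = dispatch_queue_create("com.example.serial", DISPATCH_QUEUE_SERIAL);

	dispatch_sync(q, ^{
		printf("outer block\n");
		// q is already drain-locked by this thread, so libdispatch detects
		// the deadlock in __DISPATCH_WAIT_FOR_QUEUE__ and crashes with
		// "dispatch_sync called on queue already owned by current thread".
		dispatch_sync(q, ^{
			printf("never reached\n");
		});
	});
	return 0;
}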

  • dispatch_sync code block
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
	uintptr_t dc_flags = DC_FLAG_BLOCK;
	if (unlikely(_dispatch_block_has_private_data(work))) {
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	_dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
  • _dispatch_sync_f code block
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
		uintptr_t dc_flags)
{
	_dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}

  • _dispatch_sync_f_inline code block
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	// Serial queues (dq_width == 1) come through here
	if (likely(dq->dq_width == 1)) {
		// Barrier path
		return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
	}

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
	if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
	}

	if (unlikely(dq->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
			_dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}
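
  • Example: dq_width on a concurrent queue

The dq_width == 1 check is why only serial queues take the barrier path. On a concurrent queue the fast path merely reserves one unit of width, so a plain (non-barrier) sync issued from a block already running on that queue completes instead of deadlocking. An illustrative sketch (user code, not libdispatch source):

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
	dispatch_queue_t cq = dispatch_queue_create("com.example.concurrent",
			DISPATCH_QUEUE_CONCURRENT);
	dispatch_sync(cq, ^{
		// dq_width > 1: the inner sync only reserves width, takes no
		// barrier, and therefore does not trip the ownership check.
		dispatch_sync(cq, ^{
			printf("nested sync on a concurrent queue is fine\n");
		});
	});
	return 0;
}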


  • dispatch_barrier_sync code block

void
dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
{
	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
	if (unlikely(_dispatch_block_has_private_data(work))) {
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

  • _dispatch_barrier_sync_f code block
static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}
  • _dispatch_barrier_sync_f_inline code block

static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	// Get the current thread ID (a mach/pthread tid)
	dispatch_tid tid = _dispatch_tid_self();

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// The more correct thing to do would be to merge the qos of the thread
	// that just acquired the barrier lock into the queue state.
	//
	// However this is too expensive for the fast path, so skip doing it.
	// The chosen tradeoff is that if an enqueue on a lower priority thread
	// contends with this fast path, this thread may receive a useless override.
	//
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE

	// Deadlock / thread synchronization: try to take the barrier lock by
	// atomically reading and updating the queue state
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
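
  • Example: dispatch_barrier_sync usage

dispatch_barrier_sync sets DC_FLAG_BARRIER up front and always tries to take the barrier lock, even on a concurrent queue. A typical use is the writer in a readers/writer setup (illustrative user code):

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
	dispatch_queue_t cq = dispatch_queue_create("com.example.rw",
			DISPATCH_QUEUE_CONCURRENT);
	// Readers may overlap with each other.
	dispatch_async(cq, ^{ printf("reader 1\n"); });
	dispatch_async(cq, ^{ printf("reader 2\n"); });
	// The barrier waits for in-flight readers, runs exclusively, and the
	// caller blocks until it has finished.
	dispatch_barrier_sync(cq, ^{ printf("writer (exclusive)\n"); });
	printf("writer done\n");
	return 0;
}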

  • _dispatch_sync_block_with_privdata code block
static void
_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work,
		uintptr_t dc_flags)
{
	dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work);
	pthread_priority_t op = 0, p = 0;
	dispatch_block_flags_t flags = dbpd->dbpd_flags;

	if (flags & DISPATCH_BLOCK_BARRIER) {
		dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER;
	} else {
		dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA;
	}

	op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority);
	if (op) {
		p = dbpd->dbpd_priority;
	}
	voucher_t ov, v = DISPATCH_NO_VOUCHER;
	if (flags & DISPATCH_BLOCK_HAS_VOUCHER) {
		v = dbpd->dbpd_voucher;
	}
	ov = _dispatch_set_priority_and_voucher(p, v, 0);

	// balanced in d_block_sync_invoke or d_block_wait
	if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) {
		_dispatch_retain_2(dq);
	}
	if (dc_flags & DC_FLAG_BARRIER) {
		_dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke,
				dc_flags);
	} else {
		_dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags);
	}
	_dispatch_reset_priority_and_voucher(op, ov);
}
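
  • Example: a block with private data

The private-data path is taken for blocks created with dispatch_block_create(), which wraps the user block together with flags, QoS, and voucher state; that wrapper is what dbpd_flags, dbpd_priority, and dbpd_voucher above unpack. A small illustrative sketch (user code, not libdispatch source):

#include <dispatch/dispatch.h>
#include <Block.h>
#include <stdio.h>

int main(void) {
	dispatch_queue_t cq = dispatch_queue_create("com.example.rw",
			DISPATCH_QUEUE_CONCURRENT);
	// dispatch_block_create() attaches private data to the block, so
	// dispatch_sync routes it through _dispatch_sync_block_with_privdata;
	// DISPATCH_BLOCK_BARRIER makes it take the DC_FLAG_BARRIER branch.
	dispatch_block_t b = dispatch_block_create(DISPATCH_BLOCK_BARRIER, ^{
		printf("barrier block with private data\n");
	});
	dispatch_sync(cq, b);
	Block_release(b);
	return 0;
}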

  • _dispatch_queue_try_acquire_barrier_sync code block

static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid)
{
	return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0);
}
  • _dispatch_queue_try_acquire_barrier_sync_and_suspend code block

static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq,
		uint32_t tid, uint64_t suspend_count)
{
	uint64_t init  = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
	uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
			_dispatch_lock_value_from_tid(tid) |
			(suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
	uint64_t old_state, new_state;
	// Atomically read the queue state and, if the queue is idle, install the
	// barrier bits tagged with the current thread's tid
	return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
		uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
		if (old_state != (init | role)) {
			os_atomic_rmw_loop_give_up(break);
		}
		new_state = value | role;
	});
}
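
  • Example: a simplified model of the rmw loop

os_atomic_rmw_loop2o is a compare-and-swap retry loop over dq_state: either the queue is idle (init | role) and the barrier/owner bits are installed atomically, or the loop gives up and the caller falls into the slow path. A stand-alone model in portable C11 (the function and parameter names here are hypothetical, for illustration only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Hypothetical stand-in for the dq_state CAS loop: move the state from
// "idle" (init | role) to "barrier held", or report failure.
static bool try_acquire_barrier(_Atomic uint64_t *dq_state,
		uint64_t init, uint64_t role_mask, uint64_t barrier_value)
{
	uint64_t old_state = atomic_load_explicit(dq_state, memory_order_relaxed);
	for (;;) {
		uint64_t role = old_state & role_mask;
		if (old_state != (init | role)) {
			return false;   // queue busy or suspended: give up (slow path)
		}
		uint64_t new_state = barrier_value | role;
		// On failure, old_state is reloaded and the loop retries.
		if (atomic_compare_exchange_weak_explicit(dq_state, &old_state,
				new_state, memory_order_acquire, memory_order_relaxed)) {
			return true;    // barrier acquired for this thread
		}
	}
}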

  • _dispatch_sync_f_slow code block

static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
		dispatch_function_t func, uintptr_t top_dc_flags,
		dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
	dispatch_queue_t top_dq = top_dqu._dq;
	dispatch_queue_t dq = dqu._dq;
	if (unlikely(!dq->do_targetq)) {
		return _dispatch_sync_function_invoke(dq, ctxt, func);
	}

	pthread_priority_t pp = _dispatch_get_priority();
	struct dispatch_sync_context_s dsc = {
		.dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
		.dc_func     = _dispatch_async_and_wait_invoke,
		.dc_ctxt     = &dsc,
		.dc_other    = top_dq,
		.dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
		.dc_voucher  = _voucher_get(),
		.dsc_func    = func,
		.dsc_ctxt    = ctxt,
		.dsc_waiter  = _dispatch_tid_self(),
	};

	// Push the waiter onto the queue (FIFO, executed in order)
	_dispatch_trace_item_push(top_dq, &dsc);
	// Deadlock detection happens inside this wait
	__DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

	if (dsc.dsc_func == NULL) {
		dispatch_queue_t stop_dq = dsc.dc_other;
		return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
	}

	_dispatch_introspection_sync_begin(top_dq);
	_dispatch_trace_item_pop(top_dq, &dsc);
	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func, top_dc_flags
			DISPATCH_TRACE_ARG(&dsc));
}

  • _dispatch_sync_recurse code block
static void
_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();
	dispatch_queue_t tq = dq->do_targetq;

	do {
		if (likely(tq->dq_width == 1)) {
			if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq,
						DC_FLAG_BARRIER);
			}
		} else {
			dispatch_queue_concurrent_t dl = upcast(tq)._dl;
			if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0);
			}
		}
		tq = tq->do_targetq;
	} while (unlikely(tq->do_targetq));

	_dispatch_introspection_sync_begin(dq);
	_dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags)));
}

  • __DISPATCH_WAIT_FOR_QUEUE__ code block

static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
	uint64_t dq_state = _dispatch_wait_prepare(dq);
	// If the queue's drain lock is already held by the calling thread,
	// this is a guaranteed deadlock: crash instead of hanging
	if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
		DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
				"dispatch_sync called on queue "
				"already owned by current thread");
	}

	// Blocks submitted to the main thread MUST run on the main thread, and
	// dispatch_async_and_wait also executes on the remote context rather than
	// the current thread.
	//
	// For both these cases we need to save the frame linkage for the sake of
	// _dispatch_async_and_wait_invoke
	_dispatch_thread_frame_save_state(&dsc->dsc_dtf);

	if (_dq_state_is_suspended(dq_state) ||
			_dq_state_is_base_anon(dq_state)) {
		dsc->dc_data = DISPATCH_WLH_ANON;
	} else if (_dq_state_is_base_wlh(dq_state)) {
		dsc->dc_data = (dispatch_wlh_t)dq;
	} else {
		_dispatch_wait_compute_wlh(upcast(dq)._dl, dsc);
	}

	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		dsc->dsc_override_qos_floor = dsc->dsc_override_qos =
				(uint8_t)_dispatch_get_basepri_override_qos_floor();
		_dispatch_thread_event_init(&dsc->dsc_event);
	}
	dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority));
	_dispatch_trace_runtime_event(sync_wait, dq, 0);
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_wait(&dsc->dsc_event); // acquire
	} else {
		_dispatch_event_loop_wait_for_ownership(dsc);
	}
	if (dsc->dc_data == DISPATCH_WLH_ANON) {
		_dispatch_thread_event_destroy(&dsc->dsc_event);
		// If _dispatch_sync_waiter_wake() gave this thread an override,
		// ensure that the root queue sees it.
		if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
			_dispatch_set_basepri_override_qos(dsc->dsc_override_qos);
		}
	}
}
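
  • Example: a deadlock the tid check cannot catch

The crash above fires only when the drain lock is held by the calling thread itself. A mutual wait between two serial queues drained by two different threads passes the check on both sides and simply hangs; both workers park in __DISPATCH_WAIT_FOR_QUEUE__ forever. An illustrative sketch (user code; the semaphores just make the interleaving deterministic):

#include <dispatch/dispatch.h>
#include <stdio.h>

int main(void) {
	dispatch_queue_t a = dispatch_queue_create("com.example.a", DISPATCH_QUEUE_SERIAL);
	dispatch_queue_t b = dispatch_queue_create("com.example.b", DISPATCH_QUEUE_SERIAL);
	dispatch_semaphore_t a_busy = dispatch_semaphore_create(0);
	dispatch_semaphore_t b_busy = dispatch_semaphore_create(0);

	dispatch_async(a, ^{
		dispatch_semaphore_signal(a_busy);
		dispatch_semaphore_wait(b_busy, DISPATCH_TIME_FOREVER);
		// b's drain lock is held by another thread: wait forever
		dispatch_sync(b, ^{ printf("never runs\n"); });
	});
	dispatch_async(b, ^{
		dispatch_semaphore_signal(b_busy);
		dispatch_semaphore_wait(a_busy, DISPATCH_TIME_FOREVER);
		// a's drain lock is held by another thread: wait forever
		dispatch_sync(a, ^{ printf("never runs\n"); });
	});
	dispatch_main();
}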

  • _dq_state_drain_locked_by code block

static inline bool
_dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid)
{
	return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid);
}
  • _dispatch_lock_is_locked_by code block

static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
	// equivalent to _dispatch_lock_owner(lock_value) == tid
	// XOR yields 0 in every bit where the two values agree, so after masking
	// out the non-owner bits, 0 means the lock is owned by tid
	return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
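
  • Example: the XOR ownership test with concrete values

A worked example of the XOR/mask test. The mask value and lock layout below are simplified stand-ins for illustration; the real DLOCK_OWNER_MASK lives in libdispatch's lock headers.

#include <stdint.h>
#include <stdio.h>

// Simplified model: the owner tid occupies the masked bits of the lock
// word, and the low bits carry unrelated state flags.
#define OWNER_MASK 0xfffffffcu

static int is_locked_by(uint32_t lock_value, uint32_t tid) {
	// lock_value ^ tid is 0 in every bit where the two agree, so after
	// masking, 0 means "owned by tid".
	return ((lock_value ^ tid) & OWNER_MASK) == 0;
}

int main(void) {
	uint32_t tid = 0x1a2b3c00u;
	uint32_t lock_value = tid | 0x1u; // owner bits plus one state flag bit
	printf("%d\n", is_locked_by(lock_value, tid));         // 1: same owner
	printf("%d\n", is_locked_by(lock_value, 0x99999900u)); // 0: different owner
	return 0;
}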

  • DC_FLAG macro definitions
// continuation is a dispatch_sync or dispatch_barrier_sync
#define DC_FLAG_SYNC_WAITER				0x001ul
// continuation acts as a barrier
#define DC_FLAG_BARRIER					0x002ul
// continuation resources are freed on run
// this is set on async or for non event_handler source handlers
#define DC_FLAG_CONSUME					0x004ul
// continuation has a group in dc_data
#define DC_FLAG_GROUP_ASYNC				0x008ul
// continuation function is a block (copied in dc_ctxt)
#define DC_FLAG_BLOCK					0x010ul
// continuation function is a block with private data, implies BLOCK_BIT
#define DC_FLAG_BLOCK_WITH_PRIVATE_DATA	0x020ul
// source handler requires fetching context from source
#define DC_FLAG_FETCH_CONTEXT			0x040ul
// continuation is a dispatch_async_and_wait
#define DC_FLAG_ASYNC_AND_WAIT			0x080ul
// bit used to make sure dc_flags is never 0 for allocated continuations
#define DC_FLAG_ALLOCATED				0x100ul
// continuation is an internal implementation detail that should not be
// introspected
#define DC_FLAG_NO_INTROSPECTION		0x200ul
// never set on continuations, used by mach.c only
#define DC_FLAG_MACH_BARRIER		0x1000000ul
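
  • Example: combining DC_FLAG bits

These flags are plain bit masks: call sites combine them with OR (as in DC_FLAG_BARRIER | DC_FLAG_BLOCK at the top of dispatch_barrier_sync) and branch on them with AND. A minimal demonstration using the values above:

#include <stdio.h>

#define DC_FLAG_BARRIER 0x002ul
#define DC_FLAG_BLOCK   0x010ul

int main(void) {
	// dispatch_barrier_sync sets both flags up front:
	unsigned long dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK; // == 0x012
	if (dc_flags & DC_FLAG_BARRIER) {
		printf("takes the barrier branch\n"); // printed
	}
	return 0;
}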

Source: blog.csdn.net/FlyingKuiKui/article/details/122393384