*Referrers [#f0cf6b42]
#backlinks

*Description [#ofe93605]
-Path: [[linux-2.6.33/kernel/mutex.c]]

-Slow path for acquiring a mutex (possibly interruptibly). It is shared by the mutex_lock(), mutex_lock_interruptible() and mutex_lock_killable() families, which differ only in the sleeping state they pass in.
--With CONFIG_MUTEX_SPIN_ON_OWNER, the task first spins optimistically while the current lock owner keeps running on another CPU, in the hope that the lock is released soon.
--Otherwise (or once spinning gives up) the task is appended FIFO to lock->wait_list, marks the lock as contended by setting lock->count to -1, and sleeps in the requested state until it obtains the lock or, for signal-sensitive states, a signal arrives.
--A paraphrased caller sketch and a userspace model of the lock->count protocol follow the implementation below.

**Arguments [#d2325e80]
-struct mutex *lock
--The mutex to acquire.
--[[linux-2.6.33/mutex]]
-long state
--Task state used while sleeping on the wait list, e.g. TASK_UNINTERRUPTIBLE for mutex_lock() or TASK_INTERRUPTIBLE for mutex_lock_interruptible(). It also determines whether a pending signal aborts the wait (see signal_pending_state()).
-unsigned int subclass
--Lockdep subclass used to annotate nested locking; 0 for the ordinary entry points.
-unsigned long ip
--Caller's instruction pointer, passed to the lockdep / lock-statistics hooks (mutex_acquire(), lock_contended(), lock_acquired()).

**Return value [#m775187e]
-int
--0 once the mutex has been acquired; -EINTR if the wait was aborted by a signal, which can only happen when state allows signals (e.g. via mutex_lock_interruptible() or mutex_lock_killable()).

**References [#o2e836ab]

*Implementation [#z99d90a8]
 /*
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched

-
--[[linux-2.6.33/__sched]]

 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                     unsigned long ip)
 {
         struct task_struct *task = current;
         struct mutex_waiter waiter;
         unsigned long flags;

-
--[[linux-2.6.33/current(global)]]
-
--[[linux-2.6.33/task_struct]]
-
--[[linux-2.6.33/mutex_waiter]]

         preempt_disable();
         mutex_acquire(&lock->dep_map, subclass, 0, ip);

-
--[[linux-2.6.33/preempt_disable()]]
-
--[[linux-2.6.33/mutex_acquire()]]

 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER

-
--[[linux-2.6.33/CONFIG_MUTEX_SPIN_ON_OWNER]]

         /*
          * Optimistic spinning.
          *
          * We try to spin for acquisition when we find that there are no
          * pending waiters and the lock owner is currently running on a
          * (different) CPU.
          *
          * The rationale is that if the lock owner is running, it is likely to
          * release the lock soon.
          *
          * Since this needs the lock owner, and this mutex implementation
          * doesn't track the owner atomically in the lock field, we need to
          * track it non-atomically.
          *
          * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
          * to serialize everything.
          */
         for (;;) {
                 struct thread_info *owner;

-
--[[linux-2.6.33/thread_info]]

                 /*
                  * If there's an owner, wait for it to either
                  * release the lock or go to sleep.
                  */
                 owner = ACCESS_ONCE(lock->owner);
                 if (owner && !mutex_spin_on_owner(lock, owner))
                         break;

-
--[[linux-2.6.33/ACCESS_ONCE()]]
-
--[[linux-2.6.33/mutex_spin_on_owner()]]

                 if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {

-
--[[linux-2.6.33/atomic_cmpxchg()]]

                         lock_acquired(&lock->dep_map, ip);

-
--[[linux-2.6.33/lock_acquired()]]

                         mutex_set_owner(lock);

-
--[[linux-2.6.33/mutex_set_owner()]]

                         preempt_enable();

-
--[[linux-2.6.33/preempt_enable()]]

                         return 0;
                 }

                 /*
                  * When there's no owner, we might have preempted between the
                  * owner acquiring the lock and setting the owner field. If
                  * we're an RT task that will live-lock because we won't let
                  * the owner complete.
                  */
                 if (!owner && (need_resched() || rt_task(task)))
                         break;

-
--[[linux-2.6.33/need_resched()]]
-
--[[linux-2.6.33/rt_task()]]

                 /*
                  * The cpu_relax() call is a compiler barrier which forces
                  * everything in this loop to be re-loaded. We don't need
                  * memory barriers as we'll eventually observe the right
                  * values at the cost of a few extra spins.
                  */
                 cpu_relax();

-
--[[linux-2.6.33/cpu_relax()]]

         }
 #endif
         spin_lock_mutex(&lock->wait_lock, flags);

-
--[[linux-2.6.33/spin_lock_mutex()]]

         debug_mutex_lock_common(lock, &waiter);
         debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

-
--[[linux-2.6.33/debug_mutex_lock_common()]]
-
--[[linux-2.6.33/debug_mutex_add_waiter()]]
-
--[[linux-2.6.33/task_thread_info()]]

         /* add waiting tasks to the end of the waitqueue (FIFO): */
         list_add_tail(&waiter.list, &lock->wait_list);
         waiter.task = task;

-
--[[linux-2.6.33/list_add_tail()]]

         if (atomic_xchg(&lock->count, -1) == 1)
                 goto done;

-
--[[linux-2.6.33/atomic_xchg()]]

         lock_contended(&lock->dep_map, ip);

-
--[[linux-2.6.33/lock_contended()]]

         for (;;) {
                 /*
                  * Lets try to take the lock again - this is needed even if
                  * we get here for the first time (shortly after failing to
                  * acquire the lock), to make sure that we get a wakeup once
                  * it's unlocked. Later on, if we sleep, this is the
                  * operation that gives us the lock. We xchg it to -1, so
                  * that when we release the lock, we properly wake up the
                  * other waiters:
                  */
                 if (atomic_xchg(&lock->count, -1) == 1)
                         break;

                 /*
                  * got a signal? (This code gets eliminated in the
                  * TASK_UNINTERRUPTIBLE case.)
                  */
                 if (unlikely(signal_pending_state(state, task))) {

-
--[[linux-2.6.33/unlikely()]]
-
--[[linux-2.6.33/signal_pending_state()]]

                         mutex_remove_waiter(lock, &waiter, task_thread_info(task));

-
--[[linux-2.6.33/mutex_remove_waiter()]]
-
--[[linux-2.6.33/task_thread_info()]]

                         mutex_release(&lock->dep_map, 1, ip);
                         spin_unlock_mutex(&lock->wait_lock, flags);

-
--[[linux-2.6.33/mutex_release()]]
-
--[[linux-2.6.33/spin_unlock_mutex()]]

                         debug_mutex_free_waiter(&waiter);
                         preempt_enable();

-
--[[linux-2.6.33/debug_mutex_free_waiter()]]
-
--[[linux-2.6.33/preempt_enable()]]

                         return -EINTR;
                 }
                 __set_task_state(task, state);

-
--[[linux-2.6.33/__set_task_state()]]

                 /* didnt get the lock, go to sleep: */
                 spin_unlock_mutex(&lock->wait_lock, flags);
                 preempt_enable_no_resched();

-
--[[linux-2.6.33/spin_unlock_mutex()]]
-
--[[linux-2.6.33/preempt_enable_no_resched()]]

                 schedule();

-
--[[linux-2.6.33/schedule()]]

                 preempt_disable();
                 spin_lock_mutex(&lock->wait_lock, flags);

-
--[[linux-2.6.33/preempt_disable()]]
-
--[[linux-2.6.33/spin_lock_mutex()]]

         }

 done:
         lock_acquired(&lock->dep_map, ip);

-
--[[linux-2.6.33/lock_acquired()]]

         /* got the lock - rejoice! */
         mutex_remove_waiter(lock, &waiter, current_thread_info());
         mutex_set_owner(lock);

-
--[[linux-2.6.33/mutex_remove_waiter()]]
-
--[[linux-2.6.33/current_thread_info()]]
-
--[[linux-2.6.33/mutex_set_owner()]]

         /* set it to 0 if there are no waiters left: */
         if (likely(list_empty(&lock->wait_list)))
                 atomic_set(&lock->count, 0);

-
--[[linux-2.6.33/likely()]]
-
--[[linux-2.6.33/list_empty()]]
-
--[[linux-2.6.33/atomic_set()]]

         spin_unlock_mutex(&lock->wait_lock, flags);

-
--[[linux-2.6.33/spin_unlock_mutex()]]

         debug_mutex_free_waiter(&waiter);
         preempt_enable();

-
--[[linux-2.6.33/debug_mutex_free_waiter()]]
-
--[[linux-2.6.33/preempt_enable()]]

         return 0;
 }
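For orientation, the sketch below paraphrases how the slow-path wrappers in kernel/mutex.c are expected to reach __mutex_lock_common() and where the state, subclass and ip arguments come from. It is not the verbatim 2.6.33 source: the wrapper names do appear in this file, but their signatures and the architecture-specific __mutex_fastpath_lock() hooks that call them are simplified here, so check the tree for the exact code.

 /* Paraphrased caller sketch (simplified; see kernel/mutex.c for the real
  * wrappers and the fastpath hooks that invoke them on contention). */

 static noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);

         /* mutex_lock(): sleep uninterruptibly until the mutex is acquired */
         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
 }

 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
         struct mutex *lock = container_of(lock_count, struct mutex, count);

         /* mutex_lock_interruptible(): a signal makes this return -EINTR */
         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }

mutex_lock_killable() is expected to pass TASK_KILLABLE in the same way, and the lockdep variant mutex_lock_nested() forwards its subclass argument instead of 0.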
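The key to the atomic_cmpxchg()/atomic_xchg() calls above is the lock->count protocol: 1 means unlocked, 0 means locked with no waiters, and -1 means locked with possible waiters, so the unlock path knows whether anyone must be woken. The following is a minimal, self-contained userspace model of just that counter protocol. It is an illustration only: the names toy_mutex, toy_lock and toy_unlock are invented for this sketch, it yields instead of sleeping, and it omits the wait list, optimistic spinning and wakeup handling of the real implementation.

 /* toy_count_protocol.c: userspace illustration of the 1/0/-1 counter
  * protocol only, NOT the kernel implementation.  Build: gcc -pthread */
 #include <pthread.h>
 #include <sched.h>
 #include <stdatomic.h>
 #include <stdio.h>

 /* 1 = unlocked, 0 = locked (no waiters), -1 = locked (waiters possible) */
 struct toy_mutex {
         atomic_int count;
 };

 static void toy_lock(struct toy_mutex *m)
 {
         int unlocked = 1;

         /* fast path: 1 -> 0, like atomic_cmpxchg(&lock->count, 1, 0) */
         if (atomic_compare_exchange_strong(&m->count, &unlocked, 0))
                 return;

         /* slow path: force the count to -1 so the unlocker knows someone
          * is waiting; retry until the xchg observes 1 (i.e. unlocked).
          * The real code sleeps via schedule(); this model just yields. */
         while (atomic_exchange(&m->count, -1) != 1)
                 sched_yield();
 }

 static void toy_unlock(struct toy_mutex *m)
 {
         /* restoring 1 releases the lock; in the kernel, an old value < 1
          * here is what sends the unlocker into its wakeup slow path. */
         atomic_exchange(&m->count, 1);
 }

 static struct toy_mutex m = { .count = 1 };
 static long shared;

 static void *worker(void *arg)
 {
         (void)arg;
         for (int i = 0; i < 100000; i++) {
                 toy_lock(&m);
                 shared++;               /* protected by the toy mutex */
                 toy_unlock(&m);
         }
         return NULL;
 }

 int main(void)
 {
         pthread_t t[4];

         for (int i = 0; i < 4; i++)
                 pthread_create(&t[i], NULL, worker, NULL);
         for (int i = 0; i < 4; i++)
                 pthread_join(t[i], NULL);

         /* with correct mutual exclusion this prints 400000 */
         printf("shared = %ld\n", shared);
         return 0;
 }

Only the thread whose compare-exchange or exchange actually observes 1 acquires the lock, which is the same invariant the kernel relies on; everything else (fair FIFO ordering, sleeping, waking exactly one waiter) is handled by the wait-list code shown in the implementation above.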
*Comments [#qed6f128]