@@ -51,321 +51,6 @@ LIST_HEAD(debug_mutex_held_locks);
  */
 int debug_mutex_on = 1;
 
-static void printk_task(struct task_struct *p)
-{
-	if (p)
-		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
-	else
-		printk("<none>");
-}
-
-static void printk_ti(struct thread_info *ti)
-{
-	if (ti)
-		printk_task(ti->task);
-	else
-		printk("<none>");
-}
-
-static void printk_task_short(struct task_struct *p)
-{
-	if (p)
-		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
-	else
-		printk("<none>");
-}
-
-static void printk_lock(struct mutex *lock, int print_owner)
-{
-	printk(" [%p] {%s}\n", lock, lock->name);
-
-	if (print_owner && lock->owner) {
-		printk(".. held by: ");
-		printk_ti(lock->owner);
-		printk("\n");
-	}
-	if (lock->owner) {
-		printk("... acquired at: ");
-		print_symbol("%s\n", lock->acquire_ip);
-	}
-}
-
-/*
- * printk locks held by a task:
- */
-static void show_task_locks(struct task_struct *p)
-{
-	switch (p->state) {
-	case TASK_RUNNING:		printk("R"); break;
-	case TASK_INTERRUPTIBLE:	printk("S"); break;
-	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
-	case TASK_STOPPED:		printk("T"); break;
-	case EXIT_ZOMBIE:		printk("Z"); break;
-	case EXIT_DEAD:			printk("X"); break;
-	default:			printk("?"); break;
-	}
-	printk_task(p);
-	if (p->blocked_on) {
-		struct mutex *lock = p->blocked_on->lock;
-
-		printk(" blocked on mutex:");
-		printk_lock(lock, 1);
-	} else
-		printk(" (not blocked on mutex)\n");
-}
-
-/*
- * printk all locks held in the system (if filter == NULL),
- * or all locks belonging to a single task (if filter != NULL):
- */
-void show_held_locks(struct task_struct *filter)
-{
-	struct list_head *curr, *cursor = NULL;
-	struct mutex *lock;
-	struct thread_info *t;
-	unsigned long flags;
-	int count = 0;
-
-	if (filter) {
-		printk("------------------------------\n");
-		printk("| showing all locks held by: | (");
-		printk_task_short(filter);
-		printk("):\n");
-		printk("------------------------------\n");
-	} else {
-		printk("---------------------------\n");
-		printk("| showing all locks held: |\n");
-		printk("---------------------------\n");
-	}
-
-	/*
-	 * Play safe and acquire the global trace lock. We
-	 * cannot printk with that lock held so we iterate
-	 * very carefully:
-	 */
-next:
-	debug_spin_lock_save(&debug_mutex_lock, flags);
-	list_for_each(curr, &debug_mutex_held_locks) {
-		if (cursor && curr != cursor)
-			continue;
-		lock = list_entry(curr, struct mutex, held_list);
-		t = lock->owner;
-		if (filter && (t != filter->thread_info))
-			continue;
-		count++;
-		cursor = curr->next;
-		debug_spin_unlock_restore(&debug_mutex_lock, flags);
-
-		printk("\n#%03d: ", count);
-		printk_lock(lock, filter ? 0 : 1);
-		goto next;
-	}
-	debug_spin_unlock_restore(&debug_mutex_lock, flags);
-	printk("\n");
-}
-
-void mutex_debug_show_all_locks(void)
-{
-	struct task_struct *g, *p;
-	int count = 10;
-	int unlock = 1;
-
-	printk("\nShowing all blocking locks in the system:\n");
-
-	/*
-	 * Here we try to get the tasklist_lock as hard as possible,
-	 * if not successful after 2 seconds we ignore it (but keep
-	 * trying). This is to enable a debug printout even if a
-	 * tasklist_lock-holding task deadlocks or crashes.
-	 */
-retry:
-	if (!read_trylock(&tasklist_lock)) {
-		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
-		if (count) {
-			count--;
-			printk(" #%d", 10 - count);
-			mdelay(200);
-			goto retry;
-		}
-		printk(" ignoring it.\n");
-		unlock = 0;
-	}
-	if (count != 10)
-		printk(" locked it.\n");
-
-	do_each_thread(g, p) {
-		show_task_locks(p);
-		if (!unlock)
-			if (read_trylock(&tasklist_lock))
-				unlock = 1;
-	} while_each_thread(g, p);
-
-	printk("\n");
-	show_held_locks(NULL);
-	printk("=============================================\n\n");
-
-	if (unlock)
-		read_unlock(&tasklist_lock);
-}
-
-static void report_deadlock(struct task_struct *task, struct mutex *lock,
-			    struct mutex *lockblk, unsigned long ip)
-{
-	printk("\n%s/%d is trying to acquire this lock:\n",
-		current->comm, current->pid);
-	printk_lock(lock, 1);
-	printk("... trying at: ");
-	print_symbol("%s\n", ip);
-	show_held_locks(current);
-
-	if (lockblk) {
-		printk("but %s/%d is deadlocking current task %s/%d!\n\n",
-			task->comm, task->pid, current->comm, current->pid);
-		printk("\n%s/%d is blocked on this lock:\n",
-			task->comm, task->pid);
-		printk_lock(lockblk, 1);
-
-		show_held_locks(task);
-
-		printk("\n%s/%d's [blocked] stackdump:\n\n",
-			task->comm, task->pid);
-		show_stack(task, NULL);
-	}
-
-	printk("\n%s/%d's [current] stackdump:\n\n",
-		current->comm, current->pid);
-	dump_stack();
-	mutex_debug_show_all_locks();
-	printk("[ turning off deadlock detection. Please report this. ]\n\n");
-	local_irq_disable();
-}
-
-/*
- * Recursively check for mutex deadlocks:
- */
-static int check_deadlock(struct mutex *lock, int depth,
-			  struct thread_info *ti, unsigned long ip)
-{
-	struct mutex *lockblk;
-	struct task_struct *task;
-
-	if (!debug_mutex_on)
-		return 0;
-
-	ti = lock->owner;
-	if (!ti)
-		return 0;
-
-	task = ti->task;
-	lockblk = NULL;
-	if (task->blocked_on)
-		lockblk = task->blocked_on->lock;
-
-	/* Self-deadlock: */
-	if (current == task) {
-		DEBUG_OFF();
-		if (depth)
-			return 1;
-		printk("\n==========================================\n");
-		printk(  "[ BUG: lock recursion deadlock detected! |\n");
-		printk(  "------------------------------------------\n");
-		report_deadlock(task, lock, NULL, ip);
-		return 0;
-	}
-
-	/* Ugh, something corrupted the lock data structure? */
-	if (depth > 20) {
-		DEBUG_OFF();
-		printk("\n===========================================\n");
-		printk(  "[ BUG: infinite lock dependency detected!? |\n");
-		printk(  "-------------------------------------------\n");
-		report_deadlock(task, lock, lockblk, ip);
-		return 0;
-	}
-
-	/* Recursively check for dependencies: */
-	if (lockblk && check_deadlock(lockblk, depth + 1, ti, ip)) {
-		printk("\n============================================\n");
-		printk(  "[ BUG: circular locking deadlock detected! ]\n");
-		printk(  "--------------------------------------------\n");
-		report_deadlock(task, lock, lockblk, ip);
-		return 0;
-	}
-	return 0;
-}
-
-/*
- * Called when a task exits, this function checks whether the
- * task is holding any locks, and reports the first one if so:
- */
-void mutex_debug_check_no_locks_held(struct task_struct *task)
-{
-	struct list_head *curr, *next;
-	struct thread_info *t;
-	unsigned long flags;
-	struct mutex *lock;
-
-	if (!debug_mutex_on)
-		return;
-
-	debug_spin_lock_save(&debug_mutex_lock, flags);
-	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
-		lock = list_entry(curr, struct mutex, held_list);
-		t = lock->owner;
-		if (t != task->thread_info)
-			continue;
-		list_del_init(curr);
-		DEBUG_OFF();
-		debug_spin_unlock_restore(&debug_mutex_lock, flags);
-
-		printk("BUG: %s/%d, lock held at task exit time!\n",
-			task->comm, task->pid);
-		printk_lock(lock, 1);
-		if (lock->owner != task->thread_info)
-			printk("exiting task is not even the owner??\n");
-		return;
-	}
-	debug_spin_unlock_restore(&debug_mutex_lock, flags);
-}
-
-/*
- * Called when kernel memory is freed (or unmapped), or if a mutex
- * is destroyed or reinitialized - this code checks whether there is
- * any held lock in the memory range of <from> to <to>:
- */
-void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	struct list_head *curr, *next;
-	const void *to = from + len;
-	unsigned long flags;
-	struct mutex *lock;
-	void *lock_addr;
-
-	if (!debug_mutex_on)
-		return;
-
-	debug_spin_lock_save(&debug_mutex_lock, flags);
-	list_for_each_safe(curr, next, &debug_mutex_held_locks) {
-		lock = list_entry(curr, struct mutex, held_list);
-		lock_addr = lock;
-		if (lock_addr < from || lock_addr >= to)
-			continue;
-		list_del_init(curr);
-		DEBUG_OFF();
-		debug_spin_unlock_restore(&debug_mutex_lock, flags);
-
-		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
-			current->comm, current->pid, lock, from, to);
-		dump_stack();
-		printk_lock(lock, 1);
-		if (lock->owner != current_thread_info())
-			printk("freeing task is not even the owner??\n");
-		return;
-	}
-	debug_spin_unlock_restore(&debug_mutex_lock, flags);
-}
-
 /*
  * Must be called with lock->wait_lock held.
  */
@@ -405,7 +90,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 			    struct thread_info *ti __IP_DECL__)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
-	check_deadlock(lock, 0, ti, ip);
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
 	waiter->lock = lock;
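
For readers following the removed code: the heart of the deleted detector is the recursive check_deadlock() walk over the "lock owner -> lock that owner is blocked on" chain. The snippet below is a minimal user-space sketch of that walk, not kernel code; the types and names (struct task, struct lock, would_deadlock) are simplified stand-ins, and it only illustrates the cycle check, not the locking, reporting, or depth-handling details around it.

/*
 * Simplified illustration of the owner/blocked-on walk performed by the
 * removed check_deadlock(). All types here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct task;

struct lock {
	const char	*name;
	struct task	*owner;		/* NULL if currently unlocked */
};

struct task {
	const char	*comm;
	struct lock	*blocked_on;	/* lock this task is waiting for */
};

/*
 * Follow the chain lock -> owner -> lock that owner is blocked on -> ...
 * Return 1 if the chain leads back to 'self' (taking 'lock' would
 * deadlock), 0 otherwise. The depth limit mirrors the removed code's
 * guard against corrupted lock data (depth > 20).
 */
static int would_deadlock(struct task *self, struct lock *lock, int depth)
{
	struct task *owner = lock->owner;

	if (!owner)
		return 0;		/* lock is free, no cycle possible */
	if (owner == self)
		return 1;		/* chain came back to us */
	if (depth > 20)
		return 1;		/* treat runaway chains as suspect */
	if (!owner->blocked_on)
		return 0;		/* owner is running, chain ends */
	return would_deadlock(self, owner->blocked_on, depth + 1);
}

int main(void)
{
	struct task a = { "task-a", NULL };
	struct task b = { "task-b", NULL };
	struct lock l1 = { "l1", &a };
	struct lock l2 = { "l2", &b };

	/* a holds l1 and waits for l2; b holds l2 and now wants l1: */
	a.blocked_on = &l2;

	printf("b taking l1 would deadlock: %d\n",
	       would_deadlock(&b, &l1, 0));
	return 0;
}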