/*
 * Kernel Debugger Architecture Independent Support Functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
 * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
 * 03/02/13    added new 2.5 kallsyms <[email protected]>
 */

#include <stdarg.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kallsyms.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/kdb.h>
#include <linux/slab.h>
#include "kdb_private.h"

/*
 * kdbgetsymval - Return the address of the given symbol.
 *
 * Parameters:
 *	symname	Character string containing symbol name
 *	symtab	Structure to receive results
 * Returns:
 *	0	Symbol not found, symtab zero filled
 *	1	Symbol mapped to module/symbol/section, data in symtab
 */
int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
{
	if (KDB_DEBUG(AR))
		kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
			   symtab);
	memset(symtab, 0, sizeof(*symtab));
	symtab->sym_start = kallsyms_lookup_name(symname);
	if (symtab->sym_start) {
		if (KDB_DEBUG(AR))
			kdb_printf("kdbgetsymval: returns 1, "
				   "symtab->sym_start=0x%lx\n",
				   symtab->sym_start);
		return 1;
	}
	if (KDB_DEBUG(AR))
		kdb_printf("kdbgetsymval: returns 0\n");
	return 0;
}
EXPORT_SYMBOL(kdbgetsymval);
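
/*
 * Illustrative sketch (not part of the original file): how a kdb command
 * handler might use kdbgetsymval() to resolve a symbol name typed by the
 * user.  The symbol name used here is only an example.
 *
 *	kdb_symtab_t symtab;
 *
 *	if (kdbgetsymval("schedule", &symtab))
 *		kdb_printf("schedule is at 0x%lx\n", symtab.sym_start);
 *	else
 *		kdb_printf("symbol not found\n");
 */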

static char *kdb_name_table[100];	/* arbitrary size */

/*
 * kdbnearsym - Return the name of the symbol with the nearest address
 *	less than 'addr'.
 *
 * Parameters:
 *	addr	Address to check for symbol near
 *	symtab	Structure to receive results
 * Returns:
 *	0	No sections contain this address, symtab zero filled
 *	1	Address mapped to module/symbol/section, data in symtab
 * Remarks:
 *	2.6 kallsyms has a "feature" where it unpacks the name into a
 *	string.  If that string is reused before the caller expects it
 *	then the caller sees its string change without warning.  To
 *	avoid cluttering up the main kdb code with lots of kdb_strdup,
 *	tests and kfree calls, kdbnearsym maintains an LRU list of the
 *	last few unique strings.  The list is sized large enough to
 *	hold active strings, no kdb caller of kdbnearsym makes more
 *	than ~20 later calls before using a saved value.
 */
int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
{
	int ret = 0;
	unsigned long symbolsize = 0;
	unsigned long offset = 0;
#define knt1_size 128		/* must be >= kallsyms table size */
	char *knt1 = NULL;

	if (KDB_DEBUG(AR))
		kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
	memset(symtab, 0, sizeof(*symtab));

	if (addr < 4096)
		goto out;
	knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
	if (!knt1) {
		kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
			   addr);
		goto out;
	}
	symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
				(char **)(&symtab->mod_name), knt1);
	if (offset > 8*1024*1024) {
		symtab->sym_name = NULL;
		addr = offset = symbolsize = 0;
	}
	symtab->sym_start = addr - offset;
	symtab->sym_end = symtab->sym_start + symbolsize;
	ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';

	if (ret) {
		int i;
		/* Another 2.6 kallsyms "feature".  Sometimes the sym_name is
		 * set but the buffer passed into kallsyms_lookup is not used,
		 * so it contains garbage.  The caller has to work out which
		 * buffer needs to be saved.
		 *
		 * What was Rusty smoking when he wrote that code?
		 */
		if (symtab->sym_name != knt1) {
			strncpy(knt1, symtab->sym_name, knt1_size);
			knt1[knt1_size-1] = '\0';
		}
		for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
			if (kdb_name_table[i] &&
			    strcmp(kdb_name_table[i], knt1) == 0)
				break;
		}
		if (i >= ARRAY_SIZE(kdb_name_table)) {
			debug_kfree(kdb_name_table[0]);
			memmove(kdb_name_table, kdb_name_table+1,
				sizeof(kdb_name_table[0]) *
				(ARRAY_SIZE(kdb_name_table)-1));
		} else {
			debug_kfree(knt1);
			knt1 = kdb_name_table[i];
			memmove(kdb_name_table+i, kdb_name_table+i+1,
				sizeof(kdb_name_table[0]) *
				(ARRAY_SIZE(kdb_name_table)-i-1));
		}
		i = ARRAY_SIZE(kdb_name_table) - 1;
		kdb_name_table[i] = knt1;
		symtab->sym_name = kdb_name_table[i];
		knt1 = NULL;
	}

	if (symtab->mod_name == NULL)
		symtab->mod_name = "kernel";
	if (KDB_DEBUG(AR))
		kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
			   "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
			   symtab->sym_start, symtab->mod_name, symtab->sym_name,
			   symtab->sym_name);

out:
	debug_kfree(knt1);
	return ret;
}
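
/*
 * Illustrative sketch (not part of the original file): resolving a return
 * address back to "symbol+offset" form.  The helper name is hypothetical.
 *
 *	static void example_print_caller(unsigned long ret_addr)
 *	{
 *		kdb_symtab_t symtab;
 *
 *		if (kdbnearsym(ret_addr, &symtab))
 *			kdb_printf("%s+0x%lx\n", symtab.sym_name,
 *				   ret_addr - symtab.sym_start);
 *		else
 *			kdb_printf("0x%lx\n", ret_addr);
 *	}
 */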

void kdbnearsym_cleanup(void)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
		if (kdb_name_table[i]) {
			debug_kfree(kdb_name_table[i]);
			kdb_name_table[i] = NULL;
		}
	}
}

static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];

/*
 * kallsyms_symbol_complete
 *
 * Parameters:
 *	prefix_name	prefix of a symbol name to lookup
 *	max_len		maximum length that can be returned
 * Returns:
 *	Number of symbols which match the given prefix.
 * Notes:
 *	prefix_name is changed to contain the longest unique prefix that
 *	starts with this prefix (tab completion).
 */
int kallsyms_symbol_complete(char *prefix_name, int max_len)
{
	loff_t pos = 0;
	int prefix_len = strlen(prefix_name), prev_len = 0;
	int i, number = 0;
	const char *name;

	while ((name = kdb_walk_kallsyms(&pos))) {
		if (strncmp(name, prefix_name, prefix_len) == 0) {
			strcpy(ks_namebuf, name);
			/* Work out the longest name that matches the prefix */
			if (++number == 1) {
				prev_len = min_t(int, max_len-1,
						 strlen(ks_namebuf));
				memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
				ks_namebuf_prev[prev_len] = '\0';
				continue;
			}
			for (i = 0; i < prev_len; i++) {
				if (ks_namebuf[i] != ks_namebuf_prev[i]) {
					prev_len = i;
					ks_namebuf_prev[i] = '\0';
					break;
				}
			}
		}
	}
	if (prev_len > prefix_len)
		memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
	return number;
}
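
/*
 * Illustrative example (not part of the original file): if the user has
 * typed "kallsyms_sym" and presses tab, and the only matching symbols were
 * kallsyms_symbol_complete and kallsyms_symbol_next, then
 *
 *	char buf[KSYM_NAME_LEN] = "kallsyms_sym";
 *	int n = kallsyms_symbol_complete(buf, sizeof(buf));
 *
 * would return n == 2 and extend buf to "kallsyms_symbol_", the longest
 * common prefix of the matches.  The matching symbol set here is an
 * assumption made for the sake of the example.
 */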

/*
 * kallsyms_symbol_next
 *
 * Parameters:
 *	prefix_name	prefix of a symbol name to lookup
 *	flag		0 means search from the head, 1 means continue search.
 *	buf_size	maximum length that can be written to prefix_name
 *			buffer
 * Returns:
 *	Number of characters written to prefix_name (the strscpy() result)
 *	if a symbol matches the given prefix.
 *	0 if no string found
 */
int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
	int prefix_len = strlen(prefix_name);
	static loff_t pos;
	const char *name;

	if (!flag)
		pos = 0;

	while ((name = kdb_walk_kallsyms(&pos))) {
		if (!strncmp(name, prefix_name, prefix_len))
			return strscpy(prefix_name, name, buf_size);
	}
	return 0;
}

/*
 * kdb_symbol_print - Standard method for printing a symbol name and offset.
 * Inputs:
 *	addr	Address to be printed.
 *	symtab	Address of symbol data, if NULL this routine does its
 *		own lookup.
 *	punc	Punctuation for string, bit field.
 * Remarks:
 *	The string and its punctuation are only printed if the address
 *	is inside the kernel, except that the value is always printed
 *	when requested.
 */
void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
		      unsigned int punc)
{
	kdb_symtab_t symtab, *symtab_p2;
	if (symtab_p) {
		symtab_p2 = (kdb_symtab_t *)symtab_p;
	} else {
		symtab_p2 = &symtab;
		kdbnearsym(addr, symtab_p2);
	}
	if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
		return;
	if (punc & KDB_SP_SPACEB)
		kdb_printf(" ");
	if (punc & KDB_SP_VALUE)
		kdb_printf(kdb_machreg_fmt0, addr);
	if (symtab_p2->sym_name) {
		if (punc & KDB_SP_VALUE)
			kdb_printf(" ");
		if (punc & KDB_SP_PAREN)
			kdb_printf("(");
		if (strcmp(symtab_p2->mod_name, "kernel"))
			kdb_printf("[%s]", symtab_p2->mod_name);
		kdb_printf("%s", symtab_p2->sym_name);
		if (addr != symtab_p2->sym_start)
			kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
		if (punc & KDB_SP_SYMSIZE)
			kdb_printf("/0x%lx",
				   symtab_p2->sym_end - symtab_p2->sym_start);
		if (punc & KDB_SP_PAREN)
			kdb_printf(")");
	}
	if (punc & KDB_SP_SPACEA)
		kdb_printf(" ");
	if (punc & KDB_SP_NEWLINE)
		kdb_printf("\n");
}
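
/*
 * Illustrative example (not part of the original file): with punc set to
 * KDB_SP_VALUE | KDB_SP_SYMSIZE | KDB_SP_NEWLINE, an address 0x10 bytes
 * into a 0x40 byte function foo() in module bar prints roughly as
 *
 *	<address> [bar]foo+0x10/0x40
 *
 * followed by a newline.  The module name, symbol name and sizes here are
 * assumptions made for the sake of the example.
 */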

/*
 * kdb_strdup - kdb equivalent of strdup, for disasm code.
 * Inputs:
 *	str	The string to duplicate.
 *	type	Flags to kmalloc for the new string.
 * Returns:
 *	Address of the new string, NULL if storage could not be allocated.
 * Remarks:
 *	This is not in lib/string.c because it uses kmalloc which is not
 *	available when string.o is used in boot loaders.
 */
char *kdb_strdup(const char *str, gfp_t type)
{
	int n = strlen(str)+1;
	char *s = kmalloc(n, type);
	if (!s)
		return NULL;
	return strcpy(s, str);
}

/*
 * kdb_getarea_size - Read an area of data.  The kdb equivalent of
 *	copy_from_user, with kdb messages for invalid addresses.
 * Inputs:
 *	res	Pointer to the area to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getarea_size(void *res, unsigned long addr, size_t size)
{
	int ret = probe_kernel_read((char *)res, (char *)addr, size);
	if (ret) {
		if (!KDB_STATE(SUPPRESS)) {
			kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
			KDB_STATE_SET(SUPPRESS);
		}
		ret = KDB_BADADDR;
	} else {
		KDB_STATE_CLEAR(SUPPRESS);
	}
	return ret;
}

/*
 * kdb_putarea_size - Write an area of data.  The kdb equivalent of
 *	copy_to_user, with kdb messages for invalid addresses.
 * Inputs:
 *	addr	Address of the area to write to.
 *	res	Pointer to the area holding the data.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_putarea_size(unsigned long addr, void *res, size_t size)
{
	int ret = probe_kernel_read((char *)addr, (char *)res, size);
	if (ret) {
		if (!KDB_STATE(SUPPRESS)) {
			kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
			KDB_STATE_SET(SUPPRESS);
		}
		ret = KDB_BADADDR;
	} else {
		KDB_STATE_CLEAR(SUPPRESS);
	}
	return ret;
}

/*
 * kdb_getphys - Read data from a physical address.  Validate the
 *	address is in range, use kmap_atomic() to get data
 *	similar to kdb_getarea() - but for phys addresses
 * Inputs:
 *	res	Pointer to the word to receive the result
 *	addr	Physical address of the area to copy
 *	size	Size of the area
 * Returns:
 *	0 for success, non-zero if the pfn is invalid.
 */
static int kdb_getphys(void *res, unsigned long addr, size_t size)
{
	unsigned long pfn;
	void *vaddr;
	struct page *page;

	pfn = (addr >> PAGE_SHIFT);
	if (!pfn_valid(pfn))
		return 1;
	page = pfn_to_page(pfn);
	vaddr = kmap_atomic(page);
	memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
	kunmap_atomic(vaddr);

	return 0;
}

/*
 * kdb_getphysword
 * Inputs:
 *	word	Pointer to the word to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
{
	int diag;
	__u8  w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	*word = 0;	/* Default value if addr or size is invalid */

	switch (size) {
	case 1:
		diag = kdb_getphys(&w1, addr, sizeof(w1));
		if (!diag)
			*word = w1;
		break;
	case 2:
		diag = kdb_getphys(&w2, addr, sizeof(w2));
		if (!diag)
			*word = w2;
		break;
	case 4:
		diag = kdb_getphys(&w4, addr, sizeof(w4));
		if (!diag)
			*word = w4;
		break;
	case 8:
		if (size <= sizeof(*word)) {
			diag = kdb_getphys(&w8, addr, sizeof(w8));
			if (!diag)
				*word = w8;
			break;
		}
		/* fall through */
	default:
		diag = KDB_BADWIDTH;
		kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
	}
	return diag;
}

/*
 * kdb_getword - Read a binary value.  Unlike kdb_getarea, this treats
 *	data as numbers.
 * Inputs:
 *	word	Pointer to the word to receive the result.
 *	addr	Address of the area to copy.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
{
	int diag;
	__u8  w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	*word = 0;	/* Default value if addr or size is invalid */
	switch (size) {
	case 1:
		diag = kdb_getarea(w1, addr);
		if (!diag)
			*word = w1;
		break;
	case 2:
		diag = kdb_getarea(w2, addr);
		if (!diag)
			*word = w2;
		break;
	case 4:
		diag = kdb_getarea(w4, addr);
		if (!diag)
			*word = w4;
		break;
	case 8:
		if (size <= sizeof(*word)) {
			diag = kdb_getarea(w8, addr);
			if (!diag)
				*word = w8;
			break;
		}
		/* fall through */
	default:
		diag = KDB_BADWIDTH;
		kdb_printf("kdb_getword: bad width %ld\n", (long) size);
	}
	return diag;
}
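
/*
 * Illustrative sketch (not part of the original file): reading a 4 byte
 * value from a kernel virtual address and printing it.  The helper name
 * is hypothetical.
 *
 *	static void example_dump_u32(unsigned long addr)
 *	{
 *		unsigned long val;
 *
 *		if (!kdb_getword(&val, addr, 4))
 *			kdb_printf("0x%lx: 0x%08lx\n", addr, val);
 *		else
 *			kdb_printf("0x%lx: unreadable\n", addr);
 *	}
 */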

/*
 * kdb_putword - Write a binary value.  Unlike kdb_putarea, this
 *	treats data as numbers.
 * Inputs:
 *	addr	Address of the area to write to.
 *	word	The value to set.
 *	size	Size of the area.
 * Returns:
 *	0 for success, < 0 for error.
 */
int kdb_putword(unsigned long addr, unsigned long word, size_t size)
{
	int diag;
	__u8  w1;
	__u16 w2;
	__u32 w4;
	__u64 w8;
	switch (size) {
	case 1:
		w1 = word;
		diag = kdb_putarea(addr, w1);
		break;
	case 2:
		w2 = word;
		diag = kdb_putarea(addr, w2);
		break;
	case 4:
		w4 = word;
		diag = kdb_putarea(addr, w4);
		break;
	case 8:
		if (size <= sizeof(word)) {
			w8 = word;
			diag = kdb_putarea(addr, w8);
			break;
		}
		/* fall through */
	default:
		diag = KDB_BADWIDTH;
		kdb_printf("kdb_putword: bad width %ld\n", (long) size);
	}
	return diag;
}

/*
 * kdb_task_state_string - Convert a string containing any of the
 *	letters DRSTCZEUIMA to a mask for the process state field and
 *	return the value.  If no argument is supplied, return the mask
 *	that corresponds to environment variable PS, DRSTCZEU by
 *	default.
 * Inputs:
 *	s	String to convert
 * Returns:
 *	Mask for process state.
 * Notes:
 *	The mask folds data from several sources into a single long value, so
 *	be careful not to overlap the bits.  TASK_* bits are in the LSB,
 *	special cases like UNRUNNABLE are in the MSB.  As of 2.6.10-rc1 there
 *	is no overlap between TASK_* and EXIT_* but that may not always be
 *	true, so EXIT_* bits are shifted left 16 bits before being stored in
 *	the mask.
 */

/* unrunnable is < 0 */
#define UNRUNNABLE	(1UL << (8*sizeof(unsigned long) - 1))
#define RUNNING		(1UL << (8*sizeof(unsigned long) - 2))
#define IDLE		(1UL << (8*sizeof(unsigned long) - 3))
#define DAEMON		(1UL << (8*sizeof(unsigned long) - 4))

unsigned long kdb_task_state_string(const char *s)
{
	long res = 0;
	if (!s) {
		s = kdbgetenv("PS");
		if (!s)
			s = "DRSTCZEU";	/* default value for ps */
	}
	while (*s) {
		switch (*s) {
		case 'D':
			res |= TASK_UNINTERRUPTIBLE;
			break;
		case 'R':
			res |= RUNNING;
			break;
		case 'S':
			res |= TASK_INTERRUPTIBLE;
			break;
		case 'T':
			res |= TASK_STOPPED;
			break;
		case 'C':
			res |= TASK_TRACED;
			break;
		case 'Z':
			res |= EXIT_ZOMBIE << 16;
			break;
		case 'E':
			res |= EXIT_DEAD << 16;
			break;
		case 'U':
			res |= UNRUNNABLE;
			break;
		case 'I':
			res |= IDLE;
			break;
		case 'M':
			res |= DAEMON;
			break;
		case 'A':
			res = ~0UL;
			break;
		default:
			kdb_printf("%s: unknown flag '%c' ignored\n",
				   __func__, *s);
			break;
		}
		++s;
	}
	return res;
}
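
/*
 * Illustrative example (not part of the original file): the default "ps"
 * selection corresponds to
 *
 *	mask = kdb_task_state_string("DRSTCZEU");
 *
 * i.e. TASK_UNINTERRUPTIBLE | RUNNING | TASK_INTERRUPTIBLE | TASK_STOPPED |
 * TASK_TRACED | (EXIT_ZOMBIE << 16) | (EXIT_DEAD << 16) | UNRUNNABLE,
 * while kdb_task_state_string("A") selects every task (~0UL).
 */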

/*
 * kdb_task_state_char - Return the character that represents the task state.
 * Inputs:
 *	p	struct task for the process
 * Returns:
 *	One character to represent the task state.
 */
char kdb_task_state_char(const struct task_struct *p)
{
	int cpu;
	char state;
	unsigned long tmp;

	if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
		return 'E';

	cpu = kdb_process_cpu(p);
	state = (p->state == 0) ? 'R' :
		(p->state < 0) ? 'U' :
		(p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
		(p->state & TASK_STOPPED) ? 'T' :
		(p->state & TASK_TRACED) ? 'C' :
		(p->exit_state & EXIT_ZOMBIE) ? 'Z' :
		(p->exit_state & EXIT_DEAD) ? 'E' :
		(p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
	if (is_idle_task(p)) {
		/* Idle task.  Is it really idle, apart from the kdb
		 * interrupt? */
		if (!kdb_task_has_cpu(p) || kgdb_info[cpu].irq_depth == 1) {
			if (cpu != kdb_initial_cpu)
				state = 'I';	/* idle task */
		}
	} else if (!p->mm && state == 'S') {
		state = 'M';	/* sleeping system daemon */
	}
	return state;
}

/*
 * kdb_task_state - Return true if a process has the desired state
 *	given by the mask.
 * Inputs:
 *	p	struct task for the process
 *	mask	mask from kdb_task_state_string to select processes
 * Returns:
 *	True if the process matches at least one criterion defined by the mask.
 */
unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
{
	char state[] = { kdb_task_state_char(p), '\0' };
	return (mask & kdb_task_state_string(state)) != 0;
}

/*
 * kdb_print_nameval - Print a name and its value, converting the
 *	value to a symbol lookup if possible.
 * Inputs:
 *	name	field name to print
 *	val	value of field
 */
void kdb_print_nameval(const char *name, unsigned long val)
{
	kdb_symtab_t symtab;
	kdb_printf(" %-11.11s ", name);
	if (kdbnearsym(val, &symtab))
		kdb_symbol_print(val, &symtab,
				 KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
	else
		kdb_printf("0x%lx\n", val);
}

/* Last ditch allocator for debugging, so we can still debug even when
 * the GFP_ATOMIC pool has been exhausted.  The algorithms are tuned
 * for space usage, not for speed.  There is one smallish memory pool;
 * the free chain is always kept in ascending address order to allow
 * coalescing, and allocations are done by brute force best fit.
 */

struct debug_alloc_header {
	u32 next;	/* offset of next header from start of pool */
	u32 size;
	void *caller;
};

/* The memory returned by this allocator must be aligned, which means
 * so must the header size.  Do not assume that sizeof(struct
 * debug_alloc_header) is a multiple of the alignment, explicitly
 * calculate the overhead of this header, including the alignment.
 * The rest of this code must not use sizeof() on any header or
 * pointer to a header.
 */
#define dah_align 8
#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)

static u64 debug_alloc_pool_aligned[256*1024/dah_align];	/* 256K pool */
static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
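
/*
 * Illustrative sketch (not part of the original file) of how the pool above
 * is laid out.  Every block, free or allocated, is preceded by a struct
 * debug_alloc_header; free headers are chained through ->next, which holds
 * the byte offset of the next free header from the start of
 * debug_alloc_pool, and dah_first is the offset of the first free header.
 * Assuming a 64-bit build where sizeof(struct debug_alloc_header) is 16,
 * dah_overhead is ALIGN(16, 8) == 16, so debug_kmalloc(10, GFP_ATOMIC)
 * consumes dah_overhead + ALIGN(10, 8) == 32 bytes of the pool.
 */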

/* Locking is awkward.  The debug code is called from all contexts,
 * including non maskable interrupts.  A normal spinlock is not safe
 * in NMI context.  Try to get the debug allocator lock; if it cannot
 * be obtained within a second then give up.  If the lock could not be
 * previously obtained on this cpu then only try once.
 *
 * sparse has no annotation for "this function _sometimes_ acquires a
 * lock", so fudge the acquire/release notation.
 */
static DEFINE_SPINLOCK(dap_lock);
static int get_dap_lock(void)
	__acquires(dap_lock)
{
	static int dap_locked = -1;
	int count;
	if (dap_locked == smp_processor_id())
		count = 1;
	else
		count = 1000;
	while (1) {
		if (spin_trylock(&dap_lock)) {
			dap_locked = -1;
			return 1;
		}
		if (!count--)
			break;
		udelay(1000);
	}
	dap_locked = smp_processor_id();
	__acquire(dap_lock);
	return 0;
}

void *debug_kmalloc(size_t size, gfp_t flags)
{
	unsigned int rem, h_offset;
	struct debug_alloc_header *best, *bestprev, *prev, *h;
	void *p = NULL;
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return NULL;
	}
	h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
	if (dah_first_call) {
		h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
		dah_first_call = 0;
	}
	size = ALIGN(size, dah_align);
	prev = best = bestprev = NULL;
	while (1) {
		if (h->size >= size && (!best || h->size < best->size)) {
			best = h;
			bestprev = prev;
			if (h->size == size)
				break;
		}
		if (!h->next)
			break;
		prev = h;
		h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
	}
	if (!best)
		goto out;
	rem = best->size - size;
	/* The pool must always contain at least one header */
	if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
		goto out;
	if (rem >= dah_overhead) {
		best->size = size;
		h_offset = ((char *)best - debug_alloc_pool) +
			   dah_overhead + best->size;
		h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
		h->size = rem - dah_overhead;
		h->next = best->next;
	} else
		h_offset = best->next;
	best->caller = __builtin_return_address(0);
	dah_used += best->size;
	dah_used_max = max(dah_used, dah_used_max);
	if (bestprev)
		bestprev->next = h_offset;
	else
		dah_first = h_offset;
	p = (char *)best + dah_overhead;
	memset(p, POISON_INUSE, best->size - 1);
	*((char *)p + best->size - 1) = POISON_END;
out:
	spin_unlock(&dap_lock);
	return p;
}

void debug_kfree(void *p)
{
	struct debug_alloc_header *h;
	unsigned int h_offset;
	if (!p)
		return;
	if ((char *)p < debug_alloc_pool ||
	    (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
		kfree(p);
		return;
	}
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return;			/* memory leak, cannot be helped */
	}
	h = (struct debug_alloc_header *)((char *)p - dah_overhead);
	memset(p, POISON_FREE, h->size - 1);
	*((char *)p + h->size - 1) = POISON_END;
	h->caller = NULL;
	dah_used -= h->size;
	h_offset = (char *)h - debug_alloc_pool;
	if (h_offset < dah_first) {
		h->next = dah_first;
		dah_first = h_offset;
	} else {
		struct debug_alloc_header *prev;
		unsigned int prev_offset;
		prev = (struct debug_alloc_header *)(debug_alloc_pool +
						     dah_first);
		while (1) {
			if (!prev->next || prev->next > h_offset)
				break;
			prev = (struct debug_alloc_header *)
				(debug_alloc_pool + prev->next);
		}
		prev_offset = (char *)prev - debug_alloc_pool;
		if (prev_offset + dah_overhead + prev->size == h_offset) {
			prev->size += dah_overhead + h->size;
			memset(h, POISON_FREE, dah_overhead - 1);
			*((char *)h + dah_overhead - 1) = POISON_END;
			h = prev;
			h_offset = prev_offset;
		} else {
			h->next = prev->next;
			prev->next = h_offset;
		}
	}
	if (h_offset + dah_overhead + h->size == h->next) {
		struct debug_alloc_header *next;
		next = (struct debug_alloc_header *)
			(debug_alloc_pool + h->next);
		h->size += dah_overhead + next->size;
		h->next = next->next;
		memset(next, POISON_FREE, dah_overhead - 1);
		*((char *)next + dah_overhead - 1) = POISON_END;
	}
	spin_unlock(&dap_lock);
}
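
/*
 * Illustrative sketch (not part of the original file): this allocator is
 * used like kmalloc()/kfree(), e.g. the way kdbnearsym() above allocates
 * its scratch name buffer:
 *
 *	char *buf = debug_kmalloc(128, GFP_ATOMIC);
 *
 *	if (buf) {
 *		// use buf while kdb has control
 *		debug_kfree(buf);
 *	}
 *
 * debug_kfree() also accepts pointers that lie outside the pool and hands
 * them to kfree(), so callers need not track which allocator satisfied
 * the request.
 */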

void debug_kusage(void)
{
	struct debug_alloc_header *h_free, *h_used;
#ifdef	CONFIG_IA64
	/* FIXME: using dah for ia64 unwind always results in a memory leak.
	 * Fix that memory leak first, then set debug_kusage_one_time = 1 for
	 * all architectures.
	 */
	static int debug_kusage_one_time;
#else
	static int debug_kusage_one_time = 1;
#endif
	if (!get_dap_lock()) {
		__release(dap_lock);	/* we never actually got it */
		return;
	}
	h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
	if (dah_first == 0 &&
	    (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
	     dah_first_call))
		goto out;
	if (!debug_kusage_one_time)
		goto out;
	debug_kusage_one_time = 0;
	kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
		   __func__, dah_first);
	if (dah_first) {
		h_used = (struct debug_alloc_header *)debug_alloc_pool;
		kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
			   h_used->size);
	}
	do {
		h_used = (struct debug_alloc_header *)
			  ((char *)h_free + dah_overhead + h_free->size);
		kdb_printf("%s: h_used %px size %d caller %px\n",
			   __func__, h_used, h_used->size, h_used->caller);
		h_free = (struct debug_alloc_header *)
			  (debug_alloc_pool + h_free->next);
	} while (h_free->next);
	h_used = (struct debug_alloc_header *)
		  ((char *)h_free + dah_overhead + h_free->size);
	if ((char *)h_used - debug_alloc_pool !=
	    sizeof(debug_alloc_pool_aligned))
		kdb_printf("%s: h_used %px size %d caller %px\n",
			   __func__, h_used, h_used->size, h_used->caller);
out:
	spin_unlock(&dap_lock);
}

/* Maintain a small stack of kdb_flags to allow recursion without disturbing
 * the global kdb state.
 */

static int kdb_flags_stack[4], kdb_flags_index;

void kdb_save_flags(void)
{
	BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
	kdb_flags_stack[kdb_flags_index++] = kdb_flags;
}

void kdb_restore_flags(void)
{
	BUG_ON(kdb_flags_index <= 0);
	kdb_flags = kdb_flags_stack[--kdb_flags_index];
}
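
/*
 * Illustrative sketch (not part of the original file): a caller that
 * temporarily changes kdb_flags brackets the change with the helpers
 * above.  The flag manipulation shown is hypothetical.
 *
 *	kdb_save_flags();
 *	kdb_flags |= KDB_FLAG(CMD_INTERRUPT);	// hypothetical change
 *	// recursive kdb work
 *	kdb_restore_flags();
 */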