From 0c75a3ed633419d75d823d5dcb05d42924c6ae61 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 16 Feb 2009 11:21:52 -0500 Subject: [PATCH 01/15] ftrace: state that all functions are enabled in set_ftrace_filter Impact: clean up, make set_ftrace_filter less confusing The set_ftrace_filter shows only the functions that will be traced. But when it is empty, it will trace all functions. This can be a bit confusing. This patch makes set_ftrace_filter show: #### all functions enabled #### When all functions will be traced, and we do not filter only a select few. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 1796e018fbff..369fb78bd4ab 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -773,6 +773,7 @@ enum { FTRACE_ITER_CONT = (1 << 1), FTRACE_ITER_NOTRACE = (1 << 2), FTRACE_ITER_FAILURES = (1 << 3), + FTRACE_ITER_PRINTALL = (1 << 4), }; #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ @@ -794,6 +795,9 @@ t_next(struct seq_file *m, void *v, loff_t *pos) (*pos)++; + if (iter->flags & FTRACE_ITER_PRINTALL) + return NULL; + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); retry: @@ -834,6 +838,19 @@ static void *t_start(struct seq_file *m, loff_t *pos) struct ftrace_iterator *iter = m->private; void *p = NULL; + /* + * For set_ftrace_filter reading, if we have the filter + * off, we can short cut and just print out that all + * functions are enabled. + */ + if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { + if (*pos > 0) + return NULL; + iter->flags |= FTRACE_ITER_PRINTALL; + (*pos)++; + return iter; + } + if (*pos > 0) { if (iter->idx < 0) return p; @@ -852,9 +869,15 @@ static void t_stop(struct seq_file *m, void *p) static int t_show(struct seq_file *m, void *v) { + struct ftrace_iterator *iter = m->private; struct dyn_ftrace *rec = v; char str[KSYM_SYMBOL_LEN]; + if (iter->flags & FTRACE_ITER_PRINTALL) { + seq_printf(m, "#### all functions enabled ####\n"); + return 0; + } + if (!rec) return 0; From 265c831cb03d533cbe159af45798ac9fef534260 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 13 Feb 2009 12:43:56 -0500 Subject: [PATCH 02/15] ftrace: add do_for_each_ftrace_rec and while_for_each_ftrace_rec Impact: clean up To iterate over all the functions that dynamic trace knows about it requires two for loops. One to iterate over the pages and the other to iterate over the records within the page. There are several duplications of these loops in ftrace.c. This patch creates the macros do_for_each_ftrace_rec and while_for_each_ftrace_rec to handle this logic, and removes the duplicate code. While making this change, I also discovered and fixed a small bug that one of the iterations should exit the loop after it found the record it was searching for. This used a break when it should have used a goto, since there were two loops it needed to break out from. No real harm was done by this bug since it would only continue to search the other records, and the code was in a slow path anyway. 
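For illustration, a minimal hypothetical caller of the new macros (target_ip is a made-up placeholder). Because the macros expand to two nested for loops, an early exit must use a goto, just as the ftrace_set_func() change below now does:

	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == target_ip)
			goto found;	/* a 'break' would only leave the inner loop */
	} while_for_each_ftrace_rec();
	return -EINVAL;
 found:
	/* rec now points at the matching record */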
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 202 ++++++++++++++++++++---------------------- 1 file changed, 98 insertions(+), 104 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 369fb78bd4ab..fed1ebc3a133 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -297,6 +297,19 @@ static struct ftrace_page *ftrace_pages; static struct dyn_ftrace *ftrace_free_records; +/* + * This is a double for. Do not use 'break' to break out of the loop, + * you must use a goto. + */ +#define do_for_each_ftrace_rec(pg, rec) \ + for (pg = ftrace_pages_start; pg; pg = pg->next) { \ + int _____i; \ + for (_____i = 0; _____i < pg->index; _____i++) { \ + rec = &pg->records[_____i]; + +#define while_for_each_ftrace_rec() \ + } \ + } #ifdef CONFIG_KPROBES @@ -341,7 +354,6 @@ void ftrace_release(void *start, unsigned long size) struct ftrace_page *pg; unsigned long s = (unsigned long)start; unsigned long e = s + size; - int i; if (ftrace_disabled || !start) return; @@ -349,14 +361,11 @@ void ftrace_release(void *start, unsigned long size) /* should not be called from interrupt context */ spin_lock(&ftrace_lock); - for (pg = ftrace_pages_start; pg; pg = pg->next) { - for (i = 0; i < pg->index; i++) { - rec = &pg->records[i]; + do_for_each_ftrace_rec(pg, rec) { + if ((rec->ip >= s) && (rec->ip < e)) + ftrace_free_rec(rec); + } while_for_each_ftrace_rec(); - if ((rec->ip >= s) && (rec->ip < e)) - ftrace_free_rec(rec); - } - } spin_unlock(&ftrace_lock); } @@ -523,41 +532,37 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) static void ftrace_replace_code(int enable) { - int i, failed; + int failed; struct dyn_ftrace *rec; struct ftrace_page *pg; - for (pg = ftrace_pages_start; pg; pg = pg->next) { - for (i = 0; i < pg->index; i++) { - rec = &pg->records[i]; + do_for_each_ftrace_rec(pg, rec) { + /* + * Skip over free records and records that have + * failed. + */ + if (rec->flags & FTRACE_FL_FREE || + rec->flags & FTRACE_FL_FAILED) + continue; - /* - * Skip over free records and records that have - * failed. - */ - if (rec->flags & FTRACE_FL_FREE || - rec->flags & FTRACE_FL_FAILED) - continue; - - /* ignore updates to this record's mcount site */ - if (get_kprobe((void *)rec->ip)) { - freeze_record(rec); - continue; - } else { - unfreeze_record(rec); - } - - failed = __ftrace_replace_code(rec, enable); - if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { - rec->flags |= FTRACE_FL_FAILED; - if ((system_state == SYSTEM_BOOTING) || - !core_kernel_text(rec->ip)) { - ftrace_free_rec(rec); - } else - ftrace_bug(failed, rec->ip); - } + /* ignore updates to this record's mcount site */ + if (get_kprobe((void *)rec->ip)) { + freeze_record(rec); + continue; + } else { + unfreeze_record(rec); } - } + + failed = __ftrace_replace_code(rec, enable); + if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { + rec->flags |= FTRACE_FL_FAILED; + if ((system_state == SYSTEM_BOOTING) || + !core_kernel_text(rec->ip)) { + ftrace_free_rec(rec); + } else + ftrace_bug(failed, rec->ip); + } + } while_for_each_ftrace_rec(); } static int @@ -956,22 +961,17 @@ static void ftrace_filter_reset(int enable) struct ftrace_page *pg; struct dyn_ftrace *rec; unsigned long type = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - unsigned i; /* should not be called from interrupt context */ spin_lock(&ftrace_lock); if (enable) ftrace_filtered = 0; - pg = ftrace_pages_start; - while (pg) { - for (i = 0; i < pg->index; i++) { - rec = &pg->records[i]; - if (rec->flags & FTRACE_FL_FAILED) - continue; - rec->flags &= ~type; - } - pg = pg->next; - } + do_for_each_ftrace_rec(pg, rec) { + if (rec->flags & FTRACE_FL_FAILED) + continue; + rec->flags &= ~type; + } while_for_each_ftrace_rec(); + spin_unlock(&ftrace_lock); } @@ -1094,44 +1094,39 @@ ftrace_match(unsigned char *buff, int len, int enable) spin_lock(&ftrace_lock); if (enable) ftrace_filtered = 1; - pg = ftrace_pages_start; - while (pg) { - for (i = 0; i < pg->index; i++) { - int matched = 0; - char *ptr; + do_for_each_ftrace_rec(pg, rec) { + int matched = 0; + char *ptr; - rec = &pg->records[i]; - if (rec->flags & FTRACE_FL_FAILED) - continue; - kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); - switch (type) { - case MATCH_FULL: - if (strcmp(str, buff) == 0) - matched = 1; - break; - case MATCH_FRONT_ONLY: - if (memcmp(str, buff, match) == 0) - matched = 1; - break; - case MATCH_MIDDLE_ONLY: - if (strstr(str, search)) - matched = 1; - break; - case MATCH_END_ONLY: - ptr = strstr(str, search); - if (ptr && (ptr[search_len] == 0)) - matched = 1; - break; - } - if (matched) { - if (not) - rec->flags &= ~flag; - else - rec->flags |= flag; - } + if (rec->flags & FTRACE_FL_FAILED) + continue; + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); + switch (type) { + case MATCH_FULL: + if (strcmp(str, buff) == 0) + matched = 1; + break; + case MATCH_FRONT_ONLY: + if (memcmp(str, buff, match) == 0) + matched = 1; + break; + case MATCH_MIDDLE_ONLY: + if (strstr(str, search)) + matched = 1; + break; + case MATCH_END_ONLY: + ptr = strstr(str, search); + if (ptr && (ptr[search_len] == 0)) + matched = 1; + break; } - pg = pg->next; - } + if (matched) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; + } + } while_for_each_ftrace_rec(); spin_unlock(&ftrace_lock); } @@ -1452,7 +1447,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer) struct dyn_ftrace *rec; struct ftrace_page *pg; int found = 0; - int i, j; + int j; if (ftrace_disabled) return -ENODEV; @@ -1460,27 +1455,26 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer) /* should not be called from interrupt context */ spin_lock(&ftrace_lock); - for (pg = ftrace_pages_start; pg; pg = pg->next) { - for (i = 0; i < pg->index; i++) { - rec = &pg->records[i]; + do_for_each_ftrace_rec(pg, rec) { - if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) - continue; + if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) + continue; - kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); - if (strcmp(str, buffer) == 0) { - found = 1; - for (j = 0; j < idx; j++) - if (array[j] == rec->ip) { - found = 0; - break; - } - if (found) - array[idx] = rec->ip; - break; - } + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); + if (strcmp(str, buffer) == 0) { + /* Return 1 if we add it to the array */ + found = 1; + for (j = 0; j < idx; j++) + if (array[j] == rec->ip) { + found = 0; + break; + } + if (found) + array[idx] = rec->ip; + goto out; } - } + } while_for_each_ftrace_rec(); + out: spin_unlock(&ftrace_lock); return found ? 
0 : -EINVAL; From 7f24b31b01a271b62346d9df084b029e48612163 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 13 Feb 2009 14:37:33 -0500 Subject: [PATCH 03/15] ftrace: rename ftrace_match to ftrace_match_records Impact: clean up ftrace_match is too generic of a name. What it really does is search all records and matches the records with the given string, and either sets or unsets the functions to be traced depending on if the parameter 'enable' is set or not. This allows us to make another function called ftrace_match that can be used to test a single record. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fed1ebc3a133..f397d7adb62e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1054,7 +1054,7 @@ enum { }; static void -ftrace_match(unsigned char *buff, int len, int enable) +ftrace_match_records(unsigned char *buff, int len, int enable) { char str[KSYM_SYMBOL_LEN]; char *search = NULL; @@ -1197,7 +1197,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (isspace(ch)) { iter->filtered++; iter->buffer[iter->buffer_idx] = 0; - ftrace_match(iter->buffer, iter->buffer_idx, enable); + ftrace_match_records(iter->buffer, iter->buffer_idx, enable); iter->buffer_idx = 0; } else iter->flags |= FTRACE_ITER_CONT; @@ -1236,7 +1236,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) if (reset) ftrace_filter_reset(enable); if (buf) - ftrace_match(buf, len, enable); + ftrace_match_records(buf, len, enable); mutex_unlock(&ftrace_regex_lock); } @@ -1286,7 +1286,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) if (iter->buffer_idx) { iter->filtered++; iter->buffer[iter->buffer_idx] = 0; - ftrace_match(iter->buffer, iter->buffer_idx, enable); + ftrace_match_records(iter->buffer, iter->buffer_idx, enable); } mutex_lock(&ftrace_sysctl_lock); From 9f4801e30ad291e27284e873696da1ead92d68fa Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 13 Feb 2009 15:56:43 -0500 Subject: [PATCH 04/15] ftrace: break up ftrace_match_records into smaller components Impact: clean up ftrace_match_records does a lot of things that other features can use. This patch breaks up ftrace_match_records and pulls out ftrace_setup_glob and ftrace_match_record. ftrace_setup_glob prepares a simple glob expression for use with ftrace_match_record. ftrace_match_record compares a single record with a glob type. Breaking this up will allow for more features to run on individual records. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 115 +++++++++++++++++++++++++++--------------- 1 file changed, 75 insertions(+), 40 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index f397d7adb62e..fcec31323a10 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1053,79 +1053,114 @@ enum { MATCH_END_ONLY, }; -static void -ftrace_match_records(unsigned char *buff, int len, int enable) +/* + * (static function - no need for kernel doc) + * + * Pass in a buffer containing a glob and this function will + * set search to point to the search part of the buffer and + * return the type of search it is (see enum above). + * This does modify buff. + * + * Returns enum type. + * search returns the pointer to use for comparison. + * not returns 1 if buff started with a '!' + * 0 otherwise. 
+ */ +static int +ftrace_setup_glob(unsigned char *buff, int len, char **search, int *not) { - char str[KSYM_SYMBOL_LEN]; - char *search = NULL; - struct ftrace_page *pg; - struct dyn_ftrace *rec; int type = MATCH_FULL; - unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - unsigned i, match = 0, search_len = 0; - int not = 0; + int i; if (buff[0] == '!') { - not = 1; + *not = 1; buff++; len--; - } + } else + *not = 0; + + *search = buff; for (i = 0; i < len; i++) { if (buff[i] == '*') { if (!i) { - search = buff + i + 1; + *search = buff + 1; type = MATCH_END_ONLY; - search_len = len - (i + 1); } else { - if (type == MATCH_END_ONLY) { + if (type == MATCH_END_ONLY) type = MATCH_MIDDLE_ONLY; - } else { - match = i; + else type = MATCH_FRONT_ONLY; - } buff[i] = 0; break; } } } + return type; +} + +static int +ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) +{ + char str[KSYM_SYMBOL_LEN]; + int matched = 0; + char *ptr; + + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); + switch (type) { + case MATCH_FULL: + if (strcmp(str, regex) == 0) + matched = 1; + break; + case MATCH_FRONT_ONLY: + if (strncmp(str, regex, len) == 0) + matched = 1; + break; + case MATCH_MIDDLE_ONLY: + if (strstr(str, regex)) + matched = 1; + break; + case MATCH_END_ONLY: + ptr = strstr(str, regex); + if (ptr && (ptr[len] == 0)) + matched = 1; + break; + } + + return matched; +} + +static void ftrace_match_records(char *buff, int len, int enable) +{ + char *search; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + int type; + unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; + unsigned search_len; + int not; + + type = ftrace_setup_glob(buff, len, &search, &not); + + search_len = strlen(search); + /* should not be called from interrupt context */ spin_lock(&ftrace_lock); if (enable) ftrace_filtered = 1; do_for_each_ftrace_rec(pg, rec) { - int matched = 0; - char *ptr; if (rec->flags & FTRACE_FL_FAILED) continue; - kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); - switch (type) { - case MATCH_FULL: - if (strcmp(str, buff) == 0) - matched = 1; - break; - case MATCH_FRONT_ONLY: - if (memcmp(str, buff, match) == 0) - matched = 1; - break; - case MATCH_MIDDLE_ONLY: - if (strstr(str, search)) - matched = 1; - break; - case MATCH_END_ONLY: - ptr = strstr(str, search); - if (ptr && (ptr[search_len] == 0)) - matched = 1; - break; - } - if (matched) { + + if (ftrace_match_record(rec, search, search_len, type)) { if (not) rec->flags &= ~flag; else rec->flags |= flag; } + } while_for_each_ftrace_rec(); spin_unlock(&ftrace_lock); } From 64e7c440618998fd69eee6ab490b042d12248021 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 13 Feb 2009 17:08:48 -0500 Subject: [PATCH 05/15] ftrace: add module command function filter selection This patch adds a "command" syntax to the function filtering files: /debugfs/tracing/set_ftrace_filter /debugfs/tracing/set_ftrace_notrace Of the format: <function>:<command>:<parameter> The command is optional; whether a parameter is needed depends on the command. echo do_fork > set_ftrace_filter Will only trace 'do_fork'. echo 'sched_*' > set_ftrace_filter Will only trace functions starting with the letters 'sched_'. echo '*:mod:ext3' > set_ftrace_filter Will trace only the ext3 module functions. echo '*write*:mod:ext3' > set_ftrace_notrace Will prevent the ext3 functions with the letters 'write' in the name from being traced. echo '!*_allocate:mod:ext3' > set_ftrace_filter Will remove the functions in ext3 that end with the letters '_allocate' from the ftrace filter. 
Although this patch implements the 'command' format, only the 'mod' command is supported. More commands to follow. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 115 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 109 insertions(+), 6 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fcec31323a10..9e60ae423af9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1067,7 +1067,7 @@ enum { * 0 otherwise. */ static int -ftrace_setup_glob(unsigned char *buff, int len, char **search, int *not) +ftrace_setup_glob(char *buff, int len, char **search, int *not) { int type = MATCH_FULL; int i; @@ -1100,14 +1100,11 @@ ftrace_setup_glob(unsigned char *buff, int len, char **search, int *not) return type; } -static int -ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) +static int ftrace_match(char *str, char *regex, int len, int type) { - char str[KSYM_SYMBOL_LEN]; int matched = 0; char *ptr; - kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); switch (type) { case MATCH_FULL: if (strcmp(str, regex) == 0) @@ -1131,6 +1128,15 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) return matched; } +static int +ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) +{ + char str[KSYM_SYMBOL_LEN]; + + kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); + return ftrace_match(str, regex, len, type); +} + static void ftrace_match_records(char *buff, int len, int enable) { char *search; @@ -1165,6 +1171,100 @@ static void ftrace_match_records(char *buff, int len, int enable) spin_unlock(&ftrace_lock); } +static int +ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, + char *regex, int len, int type) +{ + char str[KSYM_SYMBOL_LEN]; + char *modname; + + kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); + + if (!modname || strcmp(modname, mod)) + return 0; + + /* blank search means to match all funcs in the mod */ + if (len) + return ftrace_match(str, regex, len, type); + else + return 1; +} + +static void ftrace_match_module_records(char *buff, char *mod, int enable) +{ + char *search = buff; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + int type = MATCH_FULL; + unsigned long flag = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; + unsigned search_len = 0; + int not = 0; + + /* blank or '*' mean the same */ + if (strcmp(buff, "*") == 0) + buff[0] = 0; + + /* handle the case of 'dont filter this module' */ + if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { + buff[0] = 0; + not = 1; + } + + if (strlen(buff)) { + type = ftrace_setup_glob(buff, strlen(buff), &search, &not); + search_len = strlen(search); + } + + /* should not be called from interrupt context */ + spin_lock(&ftrace_lock); + if (enable) + ftrace_filtered = 1; + + do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (ftrace_match_module_record(rec, mod, + search, search_len, type)) { + if (not) + rec->flags &= ~flag; + else + rec->flags |= flag; + } + + } while_for_each_ftrace_rec(); + spin_unlock(&ftrace_lock); +} + +static int ftrace_process_regex(char *buff, int len, int enable) +{ + char *func, *mod, *command, *next = buff; + + func = strsep(&next, ":"); + + if (!next) { + ftrace_match_records(func, len, enable); + return 0; + } + + /* command fonud */ + + command = strsep(&next, ":"); + + if (strcmp(command, "mod") == 0) { + /* only match modules */ + if (!next) + return -EINVAL; + + mod = strsep(&next, ":"); + ftrace_match_module_records(func, mod, enable); + return 0; + } + + return -EINVAL; +} + static ssize_t ftrace_regex_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos, int enable) @@ -1232,7 +1332,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, if (isspace(ch)) { iter->filtered++; iter->buffer[iter->buffer_idx] = 0; - ftrace_match_records(iter->buffer, iter->buffer_idx, enable); + ret = ftrace_process_regex(iter->buffer, + iter->buffer_idx, enable); + if (ret) + goto out; iter->buffer_idx = 0; } else iter->flags |= FTRACE_ITER_CONT; From e68746a271eb3393a2183840be9e903caddf765b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 13 Feb 2009 20:53:42 -0500 Subject: [PATCH 06/15] ftrace: enable filtering only when a function is filtered on Impact: fix to prevent empty set_ftrace_filter and no ftrace output The function filter is used to only trace a given set of functions. The filter is enabled when a function name is echoed into the set_ftrace_filter file. But if the name has a typo and the function is not found, the filter is enabled, but no function is listed. This makes a confusing situation where set_ftrace_filter is empty but no functions ever get enabled for tracing. For example: # cat /debug/tracing/set_ftrace_filter #### all functions enabled #### # echo bad_name > set_ftrace_filter # cat /debug/tracing/set_ftrace_filter # echo function > current_tracer # cat trace # tracer: nop # # TASK-PID CPU# TIMESTAMP FUNCTION # | | | | | This patch changes that to only enable filtering if a function is set to be filtered on. Now, the filter is not enabled if a bad name is echoed into set_ftrace_filter. 
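With this change, the same sequence should behave as follows (an illustrative, expected transcript, not captured output):

 # echo bad_name > set_ftrace_filter
 # cat /debug/tracing/set_ftrace_filter
 #### all functions enabled ####

Since nothing matched, ftrace_filtered stays clear and tracing continues to cover all functions.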
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 14 ++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 9e60ae423af9..340f88b68d9e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1153,8 +1153,6 @@ static void ftrace_match_records(char *buff, int len, int enable) /* should not be called from interrupt context */ spin_lock(&ftrace_lock); - if (enable) - ftrace_filtered = 1; do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) @@ -1166,7 +1164,12 @@ static void ftrace_match_records(char *buff, int len, int enable) else rec->flags |= flag; } - + /* + * Only enable filtering if we have a function that + * is filtered on. + */ + if (enable && (rec->flags & FTRACE_FL_FILTER)) + ftrace_filtered = 1; } while_for_each_ftrace_rec(); spin_unlock(&ftrace_lock); } @@ -1217,9 +1220,6 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) /* should not be called from interrupt context */ spin_lock(&ftrace_lock); - if (enable) - ftrace_filtered = 1; - do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) @@ -1232,6 +1232,8 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) else rec->flags |= flag; } + if (enable && (rec->flags & FTRACE_FL_FILTER)) + ftrace_filtered = 1; } while_for_each_ftrace_rec(); spin_unlock(&ftrace_lock); From f6180773d90595650e11de0118bb112018290915 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 00:40:25 -0500 Subject: [PATCH 07/15] ftrace: add command interface for function selection Allow for other tracers to add their own commands for function selection. This interface gives a tracer the ability to name a command for function selection. Right now it is pretty limited in what it offers, but this is a building step for more features. The :mod: command is converted to this interface and also serves as a template for other implementations. 
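To illustrate the interface, a tracer could register a hypothetical 'foo' command (the name and callback are invented for this sketch) roughly as follows:

static int
ftrace_foo_callback(char *func, char *cmd, char *param, int enable)
{
	/*
	 * func is the glob part, cmd is "foo", param is whatever
	 * follows the second ':' (may be NULL), and enable tells
	 * set_ftrace_filter (1) apart from set_ftrace_notrace (0).
	 */
	return 0;
}

static struct ftrace_func_command ftrace_foo_cmd = {
	.name	= "foo",
	.func	= ftrace_foo_callback,
};

	/* returns -EBUSY if another command already claimed the name */
	ret = register_ftrace_command(&ftrace_foo_cmd);

After registration, 'echo "sched_*:foo:bar" > set_ftrace_filter' would invoke the callback.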
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 16 +++++++ kernel/trace/ftrace.c | 106 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 111 insertions(+), 11 deletions(-) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 106b7909d500..f0a0ecc63b5c 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -95,6 +95,13 @@ stack_trace_sysctl(struct ctl_table *table, int write, loff_t *ppos); #endif +struct ftrace_func_command { + struct list_head list; + char *name; + int (*func)(char *func, char *cmd, + char *params, int enable); +}; + #ifdef CONFIG_DYNAMIC_FTRACE /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ #include <asm/ftrace.h> @@ -119,6 +126,9 @@ struct dyn_ftrace { int ftrace_force_update(void); void ftrace_set_filter(unsigned char *buf, int len, int reset); +int register_ftrace_command(struct ftrace_func_command *cmd); +int unregister_ftrace_command(struct ftrace_func_command *cmd); + /* defined in arch */ extern int ftrace_ip_converted(unsigned long ip); extern int ftrace_dyn_arch_init(void *data); @@ -202,6 +212,12 @@ extern void ftrace_enable_daemon(void); # define ftrace_disable_daemon() do { } while (0) # define ftrace_enable_daemon() do { } while (0) static inline void ftrace_release(void *start, unsigned long size) { } +static inline int register_ftrace_command(struct ftrace_func_command *cmd) +{ +} +static inline int unregister_ftrace_command(char *cmd_name) +{ +} #endif /* CONFIG_DYNAMIC_FTRACE */ /* totally disable ftrace - can not re-enable after this */ diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 340f88b68d9e..45a44c402566 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1239,9 +1239,93 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) spin_unlock(&ftrace_lock); } +/* + * We register the module command as a template to show others how + * to register a command as well. + */ + +static int +ftrace_mod_callback(char *func, char *cmd, char *param, int enable) +{ + char *mod; + + /* + * cmd == 'mod' because we only registered this func + * for the 'mod' ftrace_func_command. + * But if you register one func with multiple commands, + * you can tell which command was used by the cmd + * parameter. 
+ */ + + /* we must have a module name */ + if (!param) + return -EINVAL; + + mod = strsep(&param, ":"); + if (!strlen(mod)) + return -EINVAL; + + ftrace_match_module_records(func, mod, enable); + return 0; +} + +static struct ftrace_func_command ftrace_mod_cmd = { + .name = "mod", + .func = ftrace_mod_callback, +}; + +static int __init ftrace_mod_cmd_init(void) +{ + return register_ftrace_command(&ftrace_mod_cmd); +} +device_initcall(ftrace_mod_cmd_init); + +static LIST_HEAD(ftrace_commands); +static DEFINE_MUTEX(ftrace_cmd_mutex); + +int register_ftrace_command(struct ftrace_func_command *cmd) +{ + struct ftrace_func_command *p; + int ret = 0; + + mutex_lock(&ftrace_cmd_mutex); + list_for_each_entry(p, &ftrace_commands, list) { + if (strcmp(cmd->name, p->name) == 0) { + ret = -EBUSY; + goto out_unlock; + } + } + list_add(&cmd->list, &ftrace_commands); + out_unlock: + mutex_unlock(&ftrace_cmd_mutex); + + return ret; +} + +int unregister_ftrace_command(struct ftrace_func_command *cmd) +{ + struct ftrace_func_command *p, *n; + int ret = -ENODEV; + + mutex_lock(&ftrace_cmd_mutex); + list_for_each_entry_safe(p, n, &ftrace_commands, list) { + if (strcmp(cmd->name, p->name) == 0) { + ret = 0; + list_del_init(&p->list); + goto out_unlock; + } + } + out_unlock: + mutex_unlock(&ftrace_cmd_mutex); + + return ret; +} + static int ftrace_process_regex(char *buff, int len, int enable) { - char *func, *mod, *command, *next = buff; + struct ftrace_func_command *p; + char *func, *command, *next = buff; + int ret = -EINVAL; func = strsep(&next, ":"); @@ -1250,21 +1334,21 @@ static int ftrace_process_regex(char *buff, int len, int enable) return 0; } - /* command fonud */ + /* command found */ command = strsep(&next, ":"); - if (strcmp(command, "mod") == 0) { - /* only match modules */ - if (!next) - return -EINVAL; - - mod = strsep(&next, ":"); - ftrace_match_module_records(func, mod, enable); - return 0; + mutex_lock(&ftrace_cmd_mutex); + list_for_each_entry(p, &ftrace_commands, list) { + if (strcmp(p->name, command) == 0) { + ret = p->func(func, command, next, enable); + goto out_unlock; + } } + out_unlock: + mutex_unlock(&ftrace_cmd_mutex); - return -EINVAL; + return ret; } static ssize_t From 52baf11922db7377b580dd5448a07f71c6a35611 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 01:15:39 -0500 Subject: [PATCH 08/15] ftrace: convert ftrace_lock from a spinlock to mutex Impact: clean up The older versions of ftrace required doing the ftrace list search under atomic context. Now all the calls are in non-atomic context. There is no reason to keep the ftrace_lock as a spinlock. This patch converts it to a mutex. 
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 51 ++++++++++++++++--------------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 45a44c402566..4771732037ee 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -61,7 +61,7 @@ int function_trace_stop; */ static int ftrace_disabled __read_mostly; -static DEFINE_SPINLOCK(ftrace_lock); +static DEFINE_MUTEX(ftrace_lock); static DEFINE_MUTEX(ftrace_sysctl_lock); static DEFINE_MUTEX(ftrace_start_lock); @@ -134,8 +134,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) static int __register_ftrace_function(struct ftrace_ops *ops) { - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); ops->next = ftrace_list; /* @@ -172,7 +171,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops) #endif } - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); return 0; } @@ -182,8 +181,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) struct ftrace_ops **p; int ret = 0; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); /* * If we are removing the last function, then simply point @@ -224,7 +222,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) } out: - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); return ret; } @@ -233,8 +231,7 @@ static void ftrace_update_pid_func(void) { ftrace_func_t func; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); if (ftrace_trace_function == ftrace_stub) goto out; @@ -256,7 +253,7 @@ static void ftrace_update_pid_func(void) #endif out: - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); } #ifdef CONFIG_DYNAMIC_FTRACE @@ -358,15 +355,12 @@ void ftrace_release(void *start, unsigned long size) if (ftrace_disabled || !start) return; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); - + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if ((rec->ip >= s) && (rec->ip < e)) ftrace_free_rec(rec); } while_for_each_ftrace_rec(); - - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); } static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) @@ -803,8 +797,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) if (iter->flags & FTRACE_ITER_PRINTALL) return NULL; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); retry: if (iter->idx >= iter->pg->index) { if (iter->pg->next) { @@ -833,7 +826,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) goto retry; } } - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); return rec; } @@ -962,8 +955,7 @@ static void ftrace_filter_reset(int enable) struct dyn_ftrace *rec; unsigned long type = enable ? 
FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); if (enable) ftrace_filtered = 0; do_for_each_ftrace_rec(pg, rec) { @@ -971,8 +963,7 @@ static void ftrace_filter_reset(int enable) continue; rec->flags &= ~type; } while_for_each_ftrace_rec(); - - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); } static int @@ -1151,8 +1142,7 @@ static void ftrace_match_records(char *buff, int len, int enable) search_len = strlen(search); - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) @@ -1171,7 +1161,7 @@ static void ftrace_match_records(char *buff, int len, int enable) if (enable && (rec->flags & FTRACE_FL_FILTER)) ftrace_filtered = 1; } while_for_each_ftrace_rec(); - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); } static int @@ -1218,8 +1208,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) search_len = strlen(search); } - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & FTRACE_FL_FAILED) @@ -1236,7 +1225,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) ftrace_filtered = 1; } while_for_each_ftrace_rec(); - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); } /* @@ -1676,9 +1665,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer) if (ftrace_disabled) return -ENODEV; - /* should not be called from interrupt context */ - spin_lock(&ftrace_lock); - + mutex_lock(&ftrace_lock); do_for_each_ftrace_rec(pg, rec) { if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) @@ -1699,7 +1686,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer) } } while_for_each_ftrace_rec(); out: - spin_unlock(&ftrace_lock); + mutex_unlock(&ftrace_lock); return found ? 0 : -EINVAL; } From e6ea44e9b4c12325337cd1c06103cd515a1c02b2 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 01:42:44 -0500 Subject: [PATCH 09/15] ftrace: consolidate mutexes Impact: clean up Now that ftrace_lock is a mutex, there is no reason to have three different mutexes protecting similar data. All the mutex paths are not in hot paths, so having a mutex to cover more data is not a problem. This patch removes the ftrace_sysctl_lock and ftrace_start_lock and uses the ftrace_lock to protect the locations that were protected by these locks. By doing so, this change also removes some of the lock nesting that was taking place. There are still more mutexes in ftrace.c that can probably be consolidated, but they can be dealt with later. We need to be careful about the way the locks are nested, and by consolidating, we can cause a recursive deadlock. 
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 68 +++++++++++++------------------------------ 1 file changed, 21 insertions(+), 47 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4771732037ee..157d4f68b0e0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -62,8 +62,6 @@ int function_trace_stop; static int ftrace_disabled __read_mostly; static DEFINE_MUTEX(ftrace_lock); -static DEFINE_MUTEX(ftrace_sysctl_lock); -static DEFINE_MUTEX(ftrace_start_lock); static struct ftrace_ops ftrace_list_end __read_mostly = { @@ -134,8 +132,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) static int __register_ftrace_function(struct ftrace_ops *ops) { - mutex_lock(&ftrace_lock); - ops->next = ftrace_list; /* * We are entering ops into the ftrace_list but another @@ -171,17 +167,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) #endif } - mutex_unlock(&ftrace_lock); - return 0; } static int __unregister_ftrace_function(struct ftrace_ops *ops) { struct ftrace_ops **p; - int ret = 0; - - mutex_lock(&ftrace_lock); /* * If we are removing the last function, then simply point @@ -190,17 +181,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_list == ops && ops->next == &ftrace_list_end) { ftrace_trace_function = ftrace_stub; ftrace_list = &ftrace_list_end; - goto out; + return 0; } for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) if (*p == ops) break; - if (*p != ops) { - ret = -1; - goto out; - } + if (*p != ops) + return -1; *p = (*p)->next; @@ -221,10 +210,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) } } - out: - mutex_unlock(&ftrace_lock); - - return ret; + return 0; } static void ftrace_update_pid_func(void) @@ -622,13 +608,10 @@ static void ftrace_startup(int command) if (unlikely(ftrace_disabled)) return; - mutex_lock(&ftrace_start_lock); ftrace_start_up++; command |= FTRACE_ENABLE_CALLS; ftrace_startup_enable(command); - - mutex_unlock(&ftrace_start_lock); } static void ftrace_shutdown(int command) @@ -636,7 +619,6 @@ static void ftrace_shutdown(int command) if (unlikely(ftrace_disabled)) return; - mutex_lock(&ftrace_start_lock); ftrace_start_up--; if (!ftrace_start_up) command |= FTRACE_DISABLE_CALLS; @@ -647,11 +629,9 @@ static void ftrace_shutdown(int command) } if (!command || !ftrace_enabled) - goto out; + return; ftrace_run_update_code(command); - out: - mutex_unlock(&ftrace_start_lock); } static void ftrace_startup_sysctl(void) @@ -661,7 +641,6 @@ static void ftrace_startup_sysctl(void) if (unlikely(ftrace_disabled)) return; - mutex_lock(&ftrace_start_lock); /* Force update next time */ saved_ftrace_func = NULL; /* ftrace_start_up is true if we want ftrace running */ @@ -669,7 +648,6 @@ static void ftrace_startup_sysctl(void) command |= FTRACE_ENABLE_CALLS; ftrace_run_update_code(command); - mutex_unlock(&ftrace_start_lock); } static void ftrace_shutdown_sysctl(void) @@ -679,13 +657,11 @@ static void ftrace_shutdown_sysctl(void) if (unlikely(ftrace_disabled)) return; - mutex_lock(&ftrace_start_lock); /* ftrace_start_up is true if ftrace is running */ if (ftrace_start_up) command |= FTRACE_DISABLE_CALLS; ftrace_run_update_code(command); - mutex_unlock(&ftrace_start_lock); } static cycle_t ftrace_update_time; @@ -1502,12 +1478,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) ftrace_match_records(iter->buffer, iter->buffer_idx, enable); } - mutex_lock(&ftrace_sysctl_lock); - 
mutex_lock(&ftrace_start_lock); + mutex_lock(&ftrace_lock); if (ftrace_start_up && ftrace_enabled) ftrace_run_update_code(FTRACE_ENABLE_CALLS); - mutex_unlock(&ftrace_start_lock); - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); kfree(iter); mutex_unlock(&ftrace_regex_lock); @@ -1824,7 +1798,7 @@ static int ftrace_convert_nops(struct module *mod, unsigned long addr; unsigned long flags; - mutex_lock(&ftrace_start_lock); + mutex_lock(&ftrace_lock); p = start; while (p < end) { addr = ftrace_call_adjust(*p++); @@ -1843,7 +1817,7 @@ static int ftrace_convert_nops(struct module *mod, local_irq_save(flags); ftrace_update_code(mod); local_irq_restore(flags); - mutex_unlock(&ftrace_start_lock); + mutex_unlock(&ftrace_lock); return 0; } @@ -2016,7 +1990,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, if (ret < 0) return ret; - mutex_lock(&ftrace_start_lock); + mutex_lock(&ftrace_lock); if (val < 0) { /* disable pid tracing */ if (!ftrace_pid_trace) @@ -2055,7 +2029,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, ftrace_startup_enable(0); out: - mutex_unlock(&ftrace_start_lock); + mutex_unlock(&ftrace_lock); return cnt; } @@ -2118,12 +2092,12 @@ int register_ftrace_function(struct ftrace_ops *ops) if (unlikely(ftrace_disabled)) return -1; - mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftrace_lock); ret = __register_ftrace_function(ops); ftrace_startup(0); - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); return ret; } @@ -2137,10 +2111,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops) { int ret; - mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftrace_lock); ret = __unregister_ftrace_function(ops); ftrace_shutdown(0); - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); return ret; } @@ -2155,7 +2129,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, if (unlikely(ftrace_disabled)) return -ENODEV; - mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftrace_lock); ret = proc_dointvec(table, write, file, buffer, lenp, ppos); @@ -2184,7 +2158,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, } out: - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); return ret; } @@ -2296,7 +2270,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, { int ret = 0; - mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftrace_lock); ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; register_pm_notifier(&ftrace_suspend_notifier); @@ -2314,13 +2288,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, ftrace_startup(FTRACE_START_FUNC_RET); out: - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); return ret; } void unregister_ftrace_graph(void) { - mutex_lock(&ftrace_sysctl_lock); + mutex_lock(&ftrace_lock); atomic_dec(&ftrace_graph_active); ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; @@ -2328,7 +2302,7 @@ void unregister_ftrace_graph(void) ftrace_shutdown(FTRACE_STOP_FUNC_RET); unregister_pm_notifier(&ftrace_suspend_notifier); - mutex_unlock(&ftrace_sysctl_lock); + mutex_unlock(&ftrace_lock); } /* Allocate a return stack for newly created task */ From 59df055f1991c9fc0c71a9230663c39188f6972f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 15:29:06 -0500 Subject: [PATCH 10/15] ftrace: trace different functions with a different tracer Impact: new feature Currently, the function tracer only gives you an ability to hook a tracer to all functions being traced. 
The dynamic function tracer allows you to pick and choose which of those functions will be traced, but all functions being traced will call all tracers that have registered with the function tracer. This patch adds a new feature that allows a tracer to hook to specific functions, even when all functions are being traced. It allows for different functions to call different tracer hooks. The way this is accomplished is by a special function that will hook to the function tracer and will set up a hash table knowing which tracer hook to call with which function. This is the most general and easiest method to accomplish this. Later, an arch may choose to supply its own method of changing the mcount call of a function to call a different tracer. But that will be an exercise for the future. To register a function: struct ftrace_hook_ops { void (*func)(unsigned long ip, unsigned long parent_ip, void **data); int (*callback)(unsigned long ip, void **data); void (*free)(void **data); }; int register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, void *data); glob is a simple glob to search for the functions to hook. ops is a pointer to the operations (listed below). data is the default data to be passed to the hook functions when traced. ops: func is the hook function to call when the functions are traced. callback is a callback function that is called when setting up the hash; it is used if the tracer needs to do something special for each function that is being traced and wants to give each function its own data. The address of the entry data is passed to this callback, so that the callback may update the entry to whatever it would like. free is a callback for when the entry is freed. In case the tracer allocated any data, it is given the chance to free it. To unregister we have three functions: void unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, void *data) This will unregister all hooks that match glob, point to ops, and have their data matching data. (Note: if glob is NULL, blank or '*', all functions will be tested.) void unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops) This will unregister all functions matching glob that have an entry pointing to ops. void unregister_ftrace_function_hook_all(char *glob) This simply unregisters all hooks that match glob. 
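To make the API concrete, here is a minimal hypothetical user that counts calls to matching functions (the my_count_* names are invented; note that register_ftrace_function_hook() modifies the glob in place via ftrace_setup_glob(), so the glob must live in writable memory, and the entry's data slot itself is used as the counter, just as the traceon/traceoff patch below does):

static void
my_count_func(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;	/* the data slot doubles as the counter */

	(*count)++;
}

static struct ftrace_hook_ops my_count_ops = {
	.func = my_count_func,
};

	char glob[] = "sys_*";	/* writable buffer; it is edited in place */
	int ret;

	/* ret is the number of functions hooked, or a negative errno */
	ret = register_ftrace_function_hook(glob, &my_count_ops, (void *)0);
	...
	/* a fresh copy, since the first buffer was modified above */
	char glob2[] = "sys_*";
	unregister_ftrace_function_hook_func(glob2, &my_count_ops);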
Signed-off-by: Steven Rostedt --- include/linux/ftrace.h | 18 +++ kernel/trace/ftrace.c | 247 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 265 insertions(+) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index f0a0ecc63b5c..13918c4400ad 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -106,6 +106,24 @@ struct ftrace_func_command { /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ #include <asm/ftrace.h> +struct ftrace_hook_ops { + void (*func)(unsigned long ip, + unsigned long parent_ip, + void **data); + int (*callback)(unsigned long ip, void **data); + void (*free)(void **data); +}; + +extern int +register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, + void *data); +extern void +unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, + void *data); +extern void +unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops); +extern void unregister_ftrace_function_hook_all(char *glob); + enum { FTRACE_FL_FREE = (1 << 0), FTRACE_FL_FAILED = (1 << 1), diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 157d4f68b0e0..0b80e325f296 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -27,6 +27,7 @@ #include #include #include +#include <linux/hash.h> #include @@ -1245,6 +1246,252 @@ static int __init ftrace_mod_cmd_init(void) } device_initcall(ftrace_mod_cmd_init); +#define FTRACE_HASH_BITS 7 +#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) +static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; + +struct ftrace_func_hook { + struct hlist_node node; + struct ftrace_hook_ops *ops; + unsigned long flags; + unsigned long ip; + void *data; + struct rcu_head rcu; +}; + +static void +function_trace_hook_call(unsigned long ip, unsigned long parent_ip) +{ + struct ftrace_func_hook *entry; + struct hlist_head *hhd; + struct hlist_node *n; + unsigned long key; + int resched; + + key = hash_long(ip, FTRACE_HASH_BITS); + + hhd = &ftrace_func_hash[key]; + + if (hlist_empty(hhd)) + return; + + /* + * Disable preemption for these calls to prevent a RCU grace + * period. This syncs the hash iteration and freeing of items + * on the hash. rcu_read_lock is too dangerous here. + */ + resched = ftrace_preempt_disable(); + hlist_for_each_entry_rcu(entry, n, hhd, node) { + if (entry->ip == ip) + entry->ops->func(ip, parent_ip, &entry->data); + } + ftrace_preempt_enable(resched); +} + +static struct ftrace_ops trace_hook_ops __read_mostly = +{ + .func = function_trace_hook_call, +}; + +static int ftrace_hook_registered; + +static void __enable_ftrace_function_hook(void) +{ + int i; + + if (ftrace_hook_registered) + return; + + for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { + struct hlist_head *hhd = &ftrace_func_hash[i]; + if (hhd->first) + break; + } + /* Nothing registered? 
*/ + if (i == FTRACE_FUNC_HASHSIZE) + return; + + __register_ftrace_function(&trace_hook_ops); + ftrace_startup(0); + ftrace_hook_registered = 1; +} + +static void __disable_ftrace_function_hook(void) +{ + int i; + + if (!ftrace_hook_registered) + return; + + for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { + struct hlist_head *hhd = &ftrace_func_hash[i]; + if (hhd->first) + return; + } + + /* no more funcs left */ + __unregister_ftrace_function(&trace_hook_ops); + ftrace_shutdown(0); + ftrace_hook_registered = 0; +} + + +static void ftrace_free_entry_rcu(struct rcu_head *rhp) +{ + struct ftrace_func_hook *entry = + container_of(rhp, struct ftrace_func_hook, rcu); + + if (entry->ops->free) + entry->ops->free(&entry->data); + kfree(entry); +} + + +int +register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, + void *data) +{ + struct ftrace_func_hook *entry; + struct ftrace_page *pg; + struct dyn_ftrace *rec; + unsigned long key; + int type, len, not; + int count = 0; + char *search; + + type = ftrace_setup_glob(glob, strlen(glob), &search, &not); + len = strlen(search); + + /* we do not support '!' for function hooks */ + if (WARN_ON(not)) + return -EINVAL; + + mutex_lock(&ftrace_lock); + do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_FAILED) + continue; + + if (!ftrace_match_record(rec, search, len, type)) + continue; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + /* If we did not hook to any, then return error */ + if (!count) + count = -ENOMEM; + goto out_unlock; + } + + count++; + + entry->data = data; + + /* + * The caller might want to do something special + * for each function we find. We call the callback + * to give the caller an opportunity to do so. + */ + if (ops->callback) { + if (ops->callback(rec->ip, &entry->data) < 0) { + /* caller does not like this func */ + kfree(entry); + continue; + } + } + + entry->ops = ops; + entry->ip = rec->ip; + + key = hash_long(entry->ip, FTRACE_HASH_BITS); + hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); + + } while_for_each_ftrace_rec(); + __enable_ftrace_function_hook(); + + out_unlock: + mutex_unlock(&ftrace_lock); + + return count; +} + +enum { + HOOK_TEST_FUNC = 1, + HOOK_TEST_DATA = 2 +}; + +static void +__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, + void *data, int flags) +{ + struct ftrace_func_hook *entry; + struct hlist_node *n, *tmp; + char str[KSYM_SYMBOL_LEN]; + int type = MATCH_FULL; + int i, len = 0; + char *search; + + if (glob && (strcmp(glob, "*") || !strlen(glob))) + glob = NULL; + else { + int not; + + type = ftrace_setup_glob(glob, strlen(glob), &search, &not); + len = strlen(search); + + /* we do not support '!' 
for function hooks */ + if (WARN_ON(not)) + return; + } + + mutex_lock(&ftrace_lock); + for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { + struct hlist_head *hhd = &ftrace_func_hash[i]; + + hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { + + /* break up if statements for readability */ + if ((flags & HOOK_TEST_FUNC) && entry->ops != ops) + continue; + + if ((flags & HOOK_TEST_DATA) && entry->data != data) + continue; + + /* do this last, since it is the most expensive */ + if (glob) { + kallsyms_lookup(entry->ip, NULL, NULL, + NULL, str); + if (!ftrace_match(str, glob, len, type)) + continue; + } + + hlist_del(&entry->node); + call_rcu(&entry->rcu, ftrace_free_entry_rcu); + } + } + __disable_ftrace_function_hook(); + mutex_unlock(&ftrace_lock); +} + +void +unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops, + void *data) +{ + __unregister_ftrace_function_hook(glob, ops, data, + HOOK_TEST_FUNC | HOOK_TEST_DATA); +} + +void +unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops) +{ + __unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC); +} + +void unregister_ftrace_function_hook_all(char *glob) +{ + __unregister_ftrace_function_hook(glob, NULL, NULL, 0); +} + static LIST_HEAD(ftrace_commands); static DEFINE_MUTEX(ftrace_cmd_mutex); From 988ae9d6b2bc3ebdc1a488490250a6812f85e9d4 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 19:17:02 -0500 Subject: [PATCH 11/15] ring-buffer: add tracing_is_on to test if ring buffer is enabled This patch adds the tracing_is_on() interface to tell if the ring buffer is turned on or not. Signed-off-by: Steven Rostedt --- include/linux/ring_buffer.h | 2 ++ kernel/trace/ring_buffer.c | 9 +++++++++ 2 files changed, 11 insertions(+) diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 8e6646a54acf..f5e793d69bd3 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -128,10 +128,12 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); void tracing_on(void); void tracing_off(void); void tracing_off_permanent(void); +int tracing_is_on(void); #else static inline void tracing_on(void) { } static inline void tracing_off(void) { } static inline void tracing_off_permanent(void) { } +static inline int tracing_is_on(void) { return 0; } #endif void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2b4626ce95d6..8f19f1aa42b0 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -98,6 +98,15 @@ void tracing_off_permanent(void) set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); } +/** + * tracing_is_on - show state of ring buffers enabled + */ +int tracing_is_on(void) +{ + return ring_buffer_flags == RB_BUFFERS_ON; +} +EXPORT_SYMBOL_GPL(tracing_is_on); + #include "trace.h" /* Up this if you want to test the TIME_EXTENTS and normalization */ From 23b4ff3aa479c9e3bb23cb6b2d0a97878399784a Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sat, 14 Feb 2009 19:04:24 -0500 Subject: [PATCH 12/15] ftrace: add traceon traceoff commands to enable/disable the buffers This patch adds the new function selection commands traceon and traceoff. traceon sets the function to enable the ring buffers while traceoff disables the ring buffers. You can pass in the number of times you want the command to be executed when the function is hit. It will only execute if the state of the buffers are not already in that state. 
Example: # echo do_fork:traceon:4 > set_ftrace_filter This will enable the ring buffers if they are disabled every time it hits do_fork, up to 4 times. # echo sys_close:traceoff > set_ftrace_filter This will disable the ring buffers every time (unlimited) when sys_close is called. Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions.c | 135 +++++++++++++++++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 36bf9568ccd9..5c95708b9dc3 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -9,6 +9,7 @@ * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 William Lee Irwin III */ +#include <linux/ring_buffer.h> #include #include #include @@ -231,9 +232,143 @@ static struct tracer function_trace __read_mostly = #endif }; +#ifdef CONFIG_DYNAMIC_FTRACE +static void +ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data) +{ + long *count = (long *)data; + + if (tracing_is_on()) + return; + + if (!*count) + return; + + if (*count != -1) + (*count)--; + + tracing_on(); +} + +static void +ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) +{ + long *count = (long *)data; + + if (!tracing_is_on()) + return; + + if (!*count) + return; + + if (*count != -1) + (*count)--; + + tracing_off(); +} + +static struct ftrace_hook_ops traceon_hook_ops = { + .func = ftrace_traceon, +}; + +static struct ftrace_hook_ops traceoff_hook_ops = { + .func = ftrace_traceoff, +}; + +static int +ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param) +{ + struct ftrace_hook_ops *ops; + + /* we register both traceon and traceoff to this callback */ + if (strcmp(cmd, "traceon") == 0) + ops = &traceon_hook_ops; + else + ops = &traceoff_hook_ops; + + unregister_ftrace_function_hook_func(glob, ops); + + return 0; +} + +static int +ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable) +{ + struct ftrace_hook_ops *ops; + void *count = (void *)-1; + char *number; + int ret; + + /* hash funcs only work with set_ftrace_filter */ + if (!enable) + return -EINVAL; + + if (glob[0] == '!') + return ftrace_trace_onoff_unreg(glob+1, cmd, param); + + /* we register both traceon and traceoff to this callback */ + if (strcmp(cmd, "traceon") == 0) + ops = &traceon_hook_ops; + else + ops = &traceoff_hook_ops; + + if (!param) + goto out_reg; + + number = strsep(&param, ":"); + + if (!strlen(number)) + goto out_reg; + + /* + * We use the callback data field (which is a pointer) + * as our counter. 
+ */ + ret = strict_strtoul(number, 0, (unsigned long *)&count); + if (ret) + return ret; + + out_reg: + ret = register_ftrace_function_hook(glob, ops, count); + + return ret; +} + +static struct ftrace_func_command ftrace_traceon_cmd = { + .name = "traceon", + .func = ftrace_trace_onoff_callback, +}; + +static struct ftrace_func_command ftrace_traceoff_cmd = { + .name = "traceoff", + .func = ftrace_trace_onoff_callback, +}; + +static int __init init_func_cmd_traceon(void) +{ + int ret; + + ret = register_ftrace_command(&ftrace_traceoff_cmd); + if (ret) + return ret; + + ret = register_ftrace_command(&ftrace_traceon_cmd); + if (ret) + unregister_ftrace_command(&ftrace_traceoff_cmd); + return ret; +} +#else +static inline int init_func_cmd_traceon(void) +{ + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + static __init int init_function_trace(void) { + init_func_cmd_traceon(); return register_tracer(&function_trace); } device_initcall(init_function_trace); + From 8fc0c701c5b6c0c3e242758c3acef6f9047940a9 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 16 Feb 2009 15:28:00 -0500 Subject: [PATCH 13/15] ftrace: show selected functions in set_ftrace_filter This patch adds output to show what functions have tracer hooks attached to them. # echo 'sys_open:traceon:4' > /debug/tracing/set_ftrace_filter # cat set_ftrace_filter #### all functions enabled #### sys_open:ftrace_traceon:0000000000000004 # echo 'do_fork:traceoff:' > set_ftrace_filter # cat set_ftrace_filter #### all functions enabled #### sys_open:ftrace_traceon:0000000000000002 do_fork:ftrace_traceoff:ffffffffffffffff Note the 4 changed to a 2. This is because The code was executed twice since the traceoff was added. If a cat is done again: #### all functions enabled #### sys_open:ftrace_traceon do_fork:ftrace_traceoff:ffffffffffffffff The number disappears. That is because it will not print a NULL. Callbacks to allow the tracer to pretty print will be implemented soon. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 123 +++++++++++++++++++++++++++++++++++------- 1 file changed, 103 insertions(+), 20 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0b80e325f296..1e058848cddb 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -45,14 +45,14 @@ ftrace_kill(); \ } while (0) +/* hash bits for specific function selection */ +#define FTRACE_HASH_BITS 7 +#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) + /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; -/* set when tracing only a pid */ -struct pid *ftrace_pid_trace; -static struct pid * const ftrace_swapper_pid = &init_struct_pid; - /* Quick disabling of function tracer. 
From 8fc0c701c5b6c0c3e242758c3acef6f9047940a9 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 16 Feb 2009 15:28:00 -0500
Subject: [PATCH 13/15] ftrace: show selected functions in set_ftrace_filter

This patch adds output to show what functions have tracer hooks
attached to them.

 # echo 'sys_open:traceon:4' > /debug/tracing/set_ftrace_filter
 # cat set_ftrace_filter

 #### all functions enabled ####
 sys_open:ftrace_traceon:0000000000000004

 # echo 'do_fork:traceoff:' > set_ftrace_filter
 # cat set_ftrace_filter

 #### all functions enabled ####
 sys_open:ftrace_traceon:0000000000000002
 do_fork:ftrace_traceoff:ffffffffffffffff

Note that the 4 changed to a 2. This is because the code was executed
twice since the traceoff hook was added. If a cat is done again:

 #### all functions enabled ####
 sys_open:ftrace_traceon
 do_fork:ftrace_traceoff:ffffffffffffffff

The number disappears. That is because the count has reached zero, and
a NULL data pointer is not printed.

Callbacks to allow the tracer to pretty print will be implemented soon.
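The heart of the iterator change below is a two-level cursor: when the
plain record walk is exhausted, t_start()/t_next() hand off to a walk
over the hook hash table, keeping the current bucket in iter->hidx and
the current node in the seq_file position. Stripped of the seq_file
and hlist plumbing, the bucket walk looks roughly like this (a
standalone sketch, not the kernel code):

  #include <stdio.h>

  #define HASHSIZE 8

  struct node { struct node *next; int val; };
  static struct node *hash[HASHSIZE];

  /* Advance to the next entry, skipping empty buckets (cf. t_hash_next()). */
  static struct node *hash_next(int *hidx, struct node *n)
  {
      if (n && n->next)
          return n->next;        /* more entries in this bucket */
      if (n)
          (*hidx)++;             /* current bucket exhausted */
      for (; *hidx < HASHSIZE; (*hidx)++)
          if (hash[*hidx])
              return hash[*hidx];
      return NULL;               /* no buckets left */
  }

  int main(void)
  {
      struct node a = { NULL, 1 }, b = { NULL, 2 };
      struct node *n;
      int hidx = 0;

      hash[2] = &a;
      hash[5] = &b;

      for (n = hash_next(&hidx, NULL); n; n = hash_next(&hidx, n))
          printf("bucket %d: %d\n", hidx, n->val);
      return 0;
  }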
Signed-off-by: Steven Rostedt
---
 kernel/trace/ftrace.c | 123 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 103 insertions(+), 20 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0b80e325f296..1e058848cddb 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -45,14 +45,14 @@
 		ftrace_kill();		\
 	} while (0)
 
+/* hash bits for specific function selection */
+#define FTRACE_HASH_BITS 7
+#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* set when tracing only a pid */
-struct pid *ftrace_pid_trace;
-static struct pid * const ftrace_swapper_pid = &init_struct_pid;
-
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
@@ -248,6 +248,21 @@ static void ftrace_update_pid_func(void)
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
+/* set when tracing only a pid */
+struct pid *ftrace_pid_trace;
+static struct pid * const ftrace_swapper_pid = &init_struct_pid;
+static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
+
+struct ftrace_func_hook {
+	struct hlist_node node;
+	struct ftrace_hook_ops *ops;
+	unsigned long flags;
+	unsigned long ip;
+	void *data;
+	struct rcu_head rcu;
+};
+
+
 enum {
 	FTRACE_ENABLE_CALLS = (1 << 0),
 	FTRACE_DISABLE_CALLS = (1 << 1),
@@ -750,12 +765,14 @@ enum {
 	FTRACE_ITER_NOTRACE = (1 << 2),
 	FTRACE_ITER_FAILURES = (1 << 3),
 	FTRACE_ITER_PRINTALL = (1 << 4),
+	FTRACE_ITER_HASH = (1 << 5),
 };
 
 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
 
 struct ftrace_iterator {
 	struct ftrace_page *pg;
+	int hidx;
 	int idx;
 	unsigned flags;
 	unsigned char buffer[FTRACE_BUFF_MAX+1];
@@ -763,18 +780,87 @@ struct ftrace_iterator {
 	unsigned filtered;
 };
 
+static void *
+t_hash_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_iterator *iter = m->private;
+	struct hlist_node *hnd = v;
+	struct hlist_head *hhd;
+
+	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));
+
+	(*pos)++;
+
+ retry:
+	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
+		return NULL;
+
+	hhd = &ftrace_func_hash[iter->hidx];
+
+	if (hlist_empty(hhd)) {
+		iter->hidx++;
+		hnd = NULL;
+		goto retry;
+	}
+
+	if (!hnd)
+		hnd = hhd->first;
+	else {
+		hnd = hnd->next;
+		if (!hnd) {
+			iter->hidx++;
+			goto retry;
+		}
+	}
+
+	return hnd;
+}
+
+static void *t_hash_start(struct seq_file *m, loff_t *pos)
+{
+	struct ftrace_iterator *iter = m->private;
+	void *p = NULL;
+
+	iter->flags |= FTRACE_ITER_HASH;
+
+	return t_hash_next(m, p, pos);
+}
+
+static int t_hash_show(struct seq_file *m, void *v)
+{
+	struct ftrace_func_hook *rec;
+	struct hlist_node *hnd = v;
+	char str[KSYM_SYMBOL_LEN];
+
+	rec = hlist_entry(hnd, struct ftrace_func_hook, node);
+
+	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+	seq_printf(m, "%s:", str);
+
+	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
+	seq_printf(m, "%s", str);
+
+	if (rec->data)
+		seq_printf(m, ":%p", rec->data);
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = NULL;
 
+	if (iter->flags & FTRACE_ITER_HASH)
+		return t_hash_next(m, v, pos);
+
 	(*pos)++;
 
 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return NULL;
 
-	mutex_lock(&ftrace_lock);
 retry:
 	if (iter->idx >= iter->pg->index) {
 		if (iter->pg->next) {
@@ -803,7 +889,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			goto retry;
 		}
 	}
-	mutex_unlock(&ftrace_lock);
 
 	return rec;
 }
@@ -813,6 +898,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	struct ftrace_iterator *iter = m->private;
 	void *p = NULL;
 
+	mutex_lock(&ftrace_lock);
 	/*
 	 * For set_ftrace_filter reading, if we have the filter
 	 * off, we can short cut and just print out that all
@@ -820,12 +906,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	 */
 	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
 		if (*pos > 0)
-			return NULL;
+			return t_hash_start(m, pos);
 		iter->flags |= FTRACE_ITER_PRINTALL;
 		(*pos)++;
 		return iter;
 	}
 
+	if (iter->flags & FTRACE_ITER_HASH)
+		return t_hash_start(m, pos);
+
 	if (*pos > 0) {
 		if (iter->idx < 0)
 			return p;
@@ -835,11 +924,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 	p = t_next(m, p, pos);
 
+	if (!p)
+		return t_hash_start(m, pos);
+
 	return p;
 }
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	mutex_unlock(&ftrace_lock);
 }
 
 static int t_show(struct seq_file *m, void *v)
@@ -848,6 +941,9 @@ static int t_show(struct seq_file *m, void *v)
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
 
+	if (iter->flags & FTRACE_ITER_HASH)
+		return t_hash_show(m, v);
+
 	if (iter->flags & FTRACE_ITER_PRINTALL) {
 		seq_printf(m, "#### all functions enabled ####\n");
 		return 0;
@@ -1246,19 +1342,6 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-#define FTRACE_HASH_BITS 7
-#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
-static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
-
-struct ftrace_func_hook {
-	struct hlist_node node;
-	struct ftrace_hook_ops *ops;
-	unsigned long flags;
-	unsigned long ip;
-	void *data;
-	struct rcu_head rcu;
-};
-
 static void
 function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
 {

From 809dcf29ce4e1723709910878e050bd187617e0e Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 16 Feb 2009 23:06:01 -0500
Subject: [PATCH 14/15] ftrace: add pretty print to selected function traces

This patch adds a callback for the tracers that have hooks attached to
selected functions. This allows the tracer to show better output in the
set_ftrace_filter file.
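The new member below turns ftrace_hook_ops into the usual
optional-callback pattern: a NULL ->print means "fall back to the
generic kallsyms output", and anything else takes over the formatting
entirely. In miniature (illustrative types and names, not the kernel
API):

  #include <stdio.h>

  struct ops {
      /* optional pretty-printer; NULL selects the generic output */
      int (*print)(unsigned long ip, void *data);
  };

  static int show(struct ops *ops, unsigned long ip, void *data)
  {
      if (ops->print)
          return ops->print(ip, data);  /* tracer-specific output */
      printf("%#lx:%p\n", ip, data);    /* generic fallback */
      return 0;
  }

  static int my_print(unsigned long ip, void *data)
  {
      printf("%#lx:count=%ld\n", ip, (long)data);
      return 0;
  }

  int main(void)
  {
      struct ops plain = { NULL };
      struct ops fancy = { my_print };

      show(&plain, 0xc0ffeeUL, (void *)4);
      show(&fancy, 0xc0ffeeUL, (void *)4);
      return 0;
  }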
Signed-off-by: Steven Rostedt
---
 include/linux/ftrace.h | 6 ++++++
 kernel/trace/ftrace.c  | 3 +++
 2 files changed, 9 insertions(+)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 13918c4400ad..b331e216d8a1 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -106,12 +106,18 @@ struct ftrace_func_command {
 /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
 #include <asm/ftrace.h>
 
+struct seq_file;
+
 struct ftrace_hook_ops {
 	void (*func)(unsigned long ip,
 		     unsigned long parent_ip,
 		     void **data);
 	int (*callback)(unsigned long ip, void **data);
 	void (*free)(void **data);
+	int (*print)(struct seq_file *m,
+		     unsigned long ip,
+		     struct ftrace_hook_ops *ops,
+		     void *data);
 };
 
 extern int
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e058848cddb..6533c1d20155 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -834,6 +834,9 @@ static int t_hash_show(struct seq_file *m, void *v)
 
 	rec = hlist_entry(hnd, struct ftrace_func_hook, node);
 
+	if (rec->ops->print)
+		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
+
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 	seq_printf(m, "%s:", str);
 

From e110e3d1eaa0f9628918be67ddd32e8ad65a2871 Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Mon, 16 Feb 2009 23:38:13 -0500
Subject: [PATCH 15/15] ftrace: add pretty print function for traceon and traceoff hooks

This patch adds a pretty print version of traceon and traceoff
output for set_ftrace_filter.

 # echo 'sys_open:traceon:4' > set_ftrace_filter
 # cat set_ftrace_filter

 #### all functions enabled ####
 sys_open:traceon:count=4

Signed-off-by: Steven Rostedt
---
 kernel/trace/trace_functions.c | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5c95708b9dc3..f520aa419dff 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -267,14 +267,42 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
 	tracing_off();
 }
 
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_hook_ops *ops, void *data);
+
 static struct ftrace_hook_ops traceon_hook_ops = {
 	.func = ftrace_traceon,
+	.print = ftrace_trace_onoff_print,
 };
 
 static struct ftrace_hook_ops traceoff_hook_ops = {
 	.func = ftrace_traceoff,
+	.print = ftrace_trace_onoff_print,
 };
 
+static int
+ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
+			 struct ftrace_hook_ops *ops, void *data)
+{
+	char str[KSYM_SYMBOL_LEN];
+	long count = (long)data;
+
+	kallsyms_lookup(ip, NULL, NULL, NULL, str);
+	seq_printf(m, "%s:", str);
+
+	if (ops == &traceon_hook_ops)
+		seq_printf(m, "traceon");
+	else
+		seq_printf(m, "traceoff");
+
+	if (count != -1)
+		seq_printf(m, ":count=%ld", count);
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
 static int
 ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
 {
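One idiom in this last patch deserves a note: ftrace_trace_onoff_print()
is forward-declared so that both static ops structures can take its
address before its body appears, and the single printer then tells
traceon and traceoff apart by comparing the ops pointer it is handed.
The same shape in miniature (standalone C with invented names):

  #include <stdio.h>

  struct ops { int (*print)(const struct ops *ops, long count); };

  /* Forward declaration: the ops tables below need the symbol early. */
  static int onoff_print(const struct ops *ops, long count);

  static const struct ops on_ops  = { onoff_print };
  static const struct ops off_ops = { onoff_print };

  /* One printer serves both commands; the ops pointer tells them apart. */
  static int onoff_print(const struct ops *ops, long count)
  {
      printf("%s", ops == &on_ops ? "traceon" : "traceoff");
      if (count != -1)
          printf(":count=%ld", count);
      putchar('\n');
      return 0;
  }

  int main(void)
  {
      on_ops.print(&on_ops, 4);    /* prints "traceon:count=4" */
      off_ops.print(&off_ops, -1); /* prints "traceoff" */
      return 0;
  }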