author		Jens Axboe <jens.axboe@oracle.com>	2008-05-28 14:45:33 +0200
committer	Jens Axboe <jens.axboe@oracle.com>	2008-05-28 14:49:27 +0200
commit		64565911cdb57c2f512a9715b985b5617402cc67
tree		1c8a3d03fcb0e620c8f2244962fb249cff51fec4
parent		4722dc52a891ab6cb2d637ddb87233e0ce277827
block: make blktrace use per-cpu buffers for message notes
Currently blktrace uses a single static char array for message notes, which
risks corruption when multiple users issue notes at the same time. Allocate
the buffers dynamically when the trace is set up, and make them per-cpu
instead.

The default maximum message size of 1k is also much larger than needed; the
interface is mainly intended for small text notes. Shrink it to 128 bytes.
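For context, a minimal sketch (not part of the patch) of the per-cpu buffer
pattern the change adopts. The names example_setup()/example_note() are
hypothetical; the calls mirror the ones visible in the diff below
(__alloc_percpu(), per_cpu_ptr(), preempt_disable()/preempt_enable()):

#include <linux/percpu.h>	/* __alloc_percpu(), per_cpu_ptr(), free_percpu() */
#include <linux/smp.h>		/* smp_processor_id() */
#include <linux/kernel.h>	/* vscnprintf() */

static unsigned char *example_msg_data;		/* one buffer per CPU */

static int example_setup(void)
{
	/* allocate a BLK_TN_MAX_MSG-sized buffer for every CPU */
	example_msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!example_msg_data)
		return -ENOMEM;
	return 0;
}

static void example_note(const char *fmt, ...)
{
	va_list args;
	char *buf;

	preempt_disable();	/* stay on this CPU while the buffer is in use */
	buf = per_cpu_ptr(example_msg_data, smp_processor_id());
	va_start(args, fmt);
	/* safe without a lock: the buffer is private to this CPU */
	vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);
	preempt_enable();
}

Disabling preemption is what keeps the per-cpu buffer exclusive: the task
cannot migrate to another CPU between looking the buffer up and finishing
the formatted write into it.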
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--	block/blktrace.c		| 15
-rw-r--r--	include/linux/blktrace_api.h	|  3
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/block/blktrace.c b/block/blktrace.c
index 20e11f3..7ae87cc 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -79,13 +79,16 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
 {
 	int n;
 	va_list args;
-	static char bt_msg_buf[BLK_TN_MAX_MSG];
+	char *buf;
 
+	preempt_disable();
+	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 	va_start(args, fmt);
-	n = vscnprintf(bt_msg_buf, BLK_TN_MAX_MSG, fmt, args);
+	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
 	va_end(args);
 
-	trace_note(bt, 0, BLK_TN_MESSAGE, bt_msg_buf, n);
+	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
+	preempt_enable();
 }
 
 EXPORT_SYMBOL_GPL(__trace_note_message);
@@ -246,6 +249,7 @@ static void blk_trace_cleanup(struct blk_trace *bt)
 	debugfs_remove(bt->dropped_file);
 	blk_remove_tree(bt->dir);
 	free_percpu(bt->sequence);
+	free_percpu(bt->msg_data);
 	kfree(bt);
 }
 
@@ -360,6 +364,10 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->sequence)
 		goto err;
 
+	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+	if (!bt->msg_data)
+		goto err;
+
 	ret = -ENOENT;
 	dir = blk_create_tree(buts->name);
 	if (!dir)
@@ -406,6 +414,7 @@ err:
 	if (bt->dropped_file)
 		debugfs_remove(bt->dropped_file);
 	free_percpu(bt->sequence);
+	free_percpu(bt->msg_data);
 	if (bt->rchan)
 		relay_close(bt->rchan);
 	kfree(bt);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index b7cd8f1..e3ef903 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -121,6 +121,7 @@ struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
 	unsigned long *sequence;
+	unsigned char *msg_data;
 	u16 act_mask;
 	u64 start_lba;
 	u64 end_lba;
@@ -172,7 +173,7 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 		if (unlikely(bt))				\
 			__trace_note_message(bt, fmt, ##__VA_ARGS__); \
 	} while (0)
-#define BLK_TN_MAX_MSG		1024
+#define BLK_TN_MAX_MSG		128
 
 /**
  * blk_add_trace_rq - Add a trace for a request oriented action
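Callers reach __trace_note_message() through the blk_add_trace_msg() macro
visible in the header hunk above. A hypothetical caller (not in this patch),
just to illustrate the short printf-style notes the new 128-byte limit is
sized for:

/* Hypothetical caller, for illustration only. */
static void example_mark_unplug(struct request_queue *q, unsigned int depth)
{
	blk_add_trace_msg(q, "unplug: %u requests queued", depth);
}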