author		Andrew Morton <akpm@osdl.org>	2006-03-24 03:18:10 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-24 07:33:26 -0800
commit		fa5a734e406b53761fcc5ee22366006f71112c2d
tree		003a238b9207e38f747bfb119a30fb52f1cd5ae9
parent		8a14342683b1e3adcf5f78660a42fcbd95b44a35
[PATCH] balance_dirty_pages_ratelimited: take nr_pages arg
Modify balance_dirty_pages_ratelimited() so that it can take an argument giving
the number of pages the caller has just dirtied.  This is for use by msync().
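
A minimal sketch of how a caller might use the new interface (the helper names
example_dirty_range and example_dirty_one are made up for illustration and are
not part of this patch); existing single-page dirtiers keep calling the old
name, which becomes an inline wrapper passing nr_pages_dirtied == 1:

	#include <linux/writeback.h>

	/*
	 * Illustrative only: a hypothetical write path that has just
	 * dirtied nr_pages pages of 'mapping' and reports the whole
	 * batch to the ratelimiter in one call instead of once per page.
	 */
	static void example_dirty_range(struct address_space *mapping,
					unsigned long nr_pages)
	{
		/* ... pages have been marked dirty here ... */
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	/* Single-page callers are unchanged: this compiles to
	 * balance_dirty_pages_ratelimited_nr(mapping, 1). */
	static void example_dirty_one(struct address_space *mapping)
	{
		balance_dirty_pages_ratelimited(mapping);
	}
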
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/writeback.h	10
-rw-r--r--	mm/page-writeback.c		24
2 files changed, 24 insertions, 10 deletions
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 6095659..56f92fc 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -99,7 +99,15 @@ int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
 				      void __user *, size_t *, loff_t *);
 
 void page_writeback_init(void);
-void balance_dirty_pages_ratelimited(struct address_space *mapping);
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied);
+
+static inline void
+balance_dirty_pages_ratelimited(struct address_space *mapping)
+{
+	balance_dirty_pages_ratelimited_nr(mapping, 1);
+}
+
 int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 int sync_page_range(struct inode *inode, struct address_space *mapping,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee..c67ddc4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -256,8 +256,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
@@ -268,10 +269,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(int, ratelimits) = 0;
-	long ratelimit;
+	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+	unsigned long ratelimit;
+	unsigned long *p;
 
 	ratelimit = ratelimit_pages;
 	if (dirty_exceeded)
@@ -281,15 +284,18 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * Check the rate limiting.  Also, we do not want to throttle real-time
 	 * tasks in balance_dirty_pages().  Period.
 	 */
-	if (get_cpu_var(ratelimits)++ >= ratelimit) {
-		__get_cpu_var(ratelimits) = 0;
-		put_cpu_var(ratelimits);
+	preempt_disable();
+	p = &__get_cpu_var(ratelimits);
+	*p += nr_pages_dirtied;
+	if (unlikely(*p >= ratelimit)) {
+		*p = 0;
+		preempt_enable();
 		balance_dirty_pages(mapping);
 		return;
 	}
-	put_cpu_var(ratelimits);
+	preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
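
The mm/page-writeback.c side replaces the get_cpu_var()/put_cpu_var() pair with
an explicit preempt_disable()/preempt_enable() section, so the per-CPU counter
can be fetched once, bumped by nr_pages_dirtied, tested, and possibly reset
without re-deriving the per-CPU address.  A stand-alone sketch of that access
pattern using the same era's per-CPU API (example_counter, example_charge and
the threshold parameter are made-up names, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(unsigned long, example_counter);

	/*
	 * Hypothetical helper mirroring the structure of
	 * balance_dirty_pages_ratelimited_nr(): add 'n' to this CPU's
	 * counter and return 1 when it crosses 'threshold' (resetting
	 * it), 0 otherwise.  The caller would do the expensive work
	 * (balance_dirty_pages() in the patch) only on a return of 1.
	 */
	static int example_charge(unsigned long n, unsigned long threshold)
	{
		unsigned long *p;
		int crossed = 0;

		preempt_disable();	/* stay on this CPU while touching the counter */
		p = &__get_cpu_var(example_counter);
		*p += n;		/* batched increment, no atomics needed */
		if (unlikely(*p >= threshold)) {
			*p = 0;		/* reset before the slow path runs */
			crossed = 1;
		}
		preempt_enable();
		return crossed;
	}

Because the counter only has to be approximately accurate, a plain per-CPU
add under disabled preemption is enough; no lock or atomic operation is taken
on the fast path.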