author	Chris Mason <chris.mason@oracle.com>	2009-02-09 16:22:03 -0500
committer	Chris Mason <chris.mason@oracle.com>	2009-02-09 16:22:03 -0500
commit	284b066af41579f62649048fdec5c5e7091703e6 (patch)
tree	e34185c911cb50b0ade04f804056ffbe2a6e04ae /fs
parent	42f15d77df8a7e8a2feb15041d5d30710ee7f951 (diff)
Btrfs: don't use spin_is_contended
Btrfs was using spin_is_contended to see if it should drop locks before
doing extent allocations during btrfs_search_slot. The idea was to avoid
expensive searches in the tree unless the lock was actually contended.
But, spin_is_contended is specific to the ticket spinlocks on x86, so
this is causing compile errors everywhere else.

In practice, the contention could easily appear some time after we
started doing the extent allocation, and it makes more sense to always
drop the lock instead.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
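As background for the portability point above: a ticket spinlock keeps a
"next ticket" counter and an "owner ticket" counter, so asking whether
anyone is waiting is just a comparison of the two, while a plain
test-and-set spinlock keeps no such bookkeeping. The sketch below is a
minimal user-space illustration of that idea only; it is not kernel code,
and the names (struct ticket_lock, ticket_is_contended, etc.) are made up
for this example.

/* Minimal user-space ticket spinlock sketch (hypothetical names).
 * Build: cc -std=c11 -O2 ticket.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ticket_lock {
	atomic_uint next;   /* next ticket handed to a new locker */
	atomic_uint owner;  /* ticket currently holding the lock */
};

static void ticket_lock(struct ticket_lock *l)
{
	unsigned int me = atomic_fetch_add(&l->next, 1);

	while (atomic_load(&l->owner) != me)
		;	/* spin until our ticket is called */
}

static void ticket_unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);
}

/*
 * Waiters show up as a gap between the two counters, so a contention
 * check is just two loads.  That property belongs to ticket locks, not
 * to spinlocks in general, which is why a spin_is_contended() helper
 * could not be assumed to exist everywhere at the time.
 */
static bool ticket_is_contended(struct ticket_lock *l)
{
	return atomic_load(&l->next) - atomic_load(&l->owner) > 1;
}

int main(void)
{
	struct ticket_lock l = { 0, 0 };

	ticket_lock(&l);
	printf("contended: %d\n", ticket_is_contended(&l)); /* 0: no waiters */
	ticket_unlock(&l);
	return 0;
}

The patch sidesteps the question entirely: rather than asking whether the
lock is contended, btrfs_search_slot now always drops its locks before the
extent allocation, since contention can appear after the check anyway.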
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ctree.c	3
-rw-r--r--	fs/btrfs/locking.c	22
-rw-r--r--	fs/btrfs/locking.h	2
3 files changed, 1 insertion, 26 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 551177c..35443cc 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1530,8 +1530,7 @@ again:
 			 * for higher level blocks, try not to allocate blocks
 			 * with the block and the parent locks held.
 			 */
-			if (level > 0 && !prealloc_block.objectid &&
-			    btrfs_path_lock_waiting(p, level)) {
+			if (level > 0 && !prealloc_block.objectid) {
 				u32 size = b->len;
 				u64 hint = b->start;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 68fd9cc..9ebe938 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
 	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
 		spin_is_locked(&eb->lock);
 }
-
-/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
- */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
-{
-	int i;
-	struct extent_buffer *eb;
-
-	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
-		eb = path->nodes[i];
-		if (!eb)
-			break;
-		smp_mb();
-		if (spin_is_contended(&eb->lock) ||
-		    waitqueue_active(&eb->lock_wq))
-			return 1;
-	}
-	return 0;
-}
-
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index d92e707..6bb0afb 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
-
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
 void btrfs_clear_lock_blocking(struct extent_buffer *eb);
 #endif