author		Chris Mason <chris.mason@oracle.com>	2008-06-25 16:01:30 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:03 -0400
commit		5cd57b2cbbb06a350df2698314e4e6a80805fc2f (patch)
tree		cd20c904dd016ab031af582dadfbd6e04bf4df9e /fs/btrfs/ctree.c
parent		168fd7d271d9d8e81ff0b03eb08c36d82670c8a9 (diff)
Btrfs: Add a skip_locking parameter to struct path, and make various funcs honor it
Allocations may need to read in block groups from the extent allocation tree, which will require a tree search and take locks on the extent allocation tree. But, those locks might already be held in other places, leading to deadlocks.

Since the alloc_mutex serializes everything right now, it is safe to skip the btree locking while caching block groups. A better fix will be to either create a recursive lock or find a way to back off existing locks while caching block groups.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
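The flag is meant for callers that walk a btree read-only while the alloc_mutex already serializes them. A minimal sketch of that usage pattern follows (the function name and key setup are hypothetical; the real caller is the block group caching code, which this diff does not touch):

static int cache_walk_sketch(struct btrfs_root *extent_root,
			     struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* the new flag: take no tree locks during the search */
	path->skip_locking = 1;

	/* read-only search: no transaction, ins_len = 0, cow = 0 */
	ret = btrfs_search_slot(NULL, extent_root, first_key, path, 0, 0);
	if (ret >= 0) {
		/* ... read items; btrfs_next_leaf() honors the flag too ... */
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}

This is only safe under the serialization described above; without the alloc_mutex, reading tree nodes unlocked would race with writers.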
Diffstat (limited to 'fs/btrfs/ctree.c')
-rw-r--r--	fs/btrfs/ctree.c	32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b8f7aec..fd68601 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -64,6 +64,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 {
 	int i;
 	int keep = p->keep_locks;
+	int skip = p->skip_locking;
 
 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 		if (!p->nodes[i])
@@ -76,6 +77,7 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 	}
 	memset(p, 0, sizeof(*p));
 	p->keep_locks = keep;
+	p->skip_locking = skip;
 }
 
 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
@@ -1262,7 +1264,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 	if (ins_len < 0)
 		lowest_unlock = 2;
 again:
-	b = btrfs_lock_root_node(root);
+	if (p->skip_locking)
+		b = btrfs_root_node(root);
+	else
+		b = btrfs_lock_root_node(root);
 
 	while (b) {
 		level = btrfs_header_level(b);
@@ -1282,7 +1287,8 @@ again:
 			WARN_ON(1);
 		level = btrfs_header_level(b);
 		p->nodes[level] = b;
-		p->locks[level] = 1;
+		if (!p->skip_locking)
+			p->locks[level] = 1;
 		ret = check_block(root, p, level);
 		if (ret)
 			return -1;
@@ -1349,7 +1355,8 @@ again:
 				b = read_node_slot(root, b, slot);
 			}
 		}
-		btrfs_tree_lock(b);
+		if (!p->skip_locking)
+			btrfs_tree_lock(b);
 		unlock_up(p, level, lowest_unlock);
 	} else {
 		p->slots[level] = slot;
@@ -1392,13 +1399,6 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans,
 			break;
 		t = path->nodes[i];
 		btrfs_set_node_key(t, key, tslot);
-		if (!btrfs_tree_locked(path->nodes[i])) {
-			int ii;
-printk("fixup without lock on level %d\n", btrfs_header_level(path->nodes[i]));
-			for (ii = 0; ii < BTRFS_MAX_LEVEL; ii++) {
-printk("level %d slot %d\n", ii, path->slots[ii]);
-			}
-		}
 		btrfs_mark_buffer_dirty(path->nodes[i]);
 		if (tslot != 0)
 			break;
@@ -3033,8 +3033,10 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 			reada_for_search(root, path, level, slot, 0);
 
 		next = read_node_slot(root, c, slot);
-		WARN_ON(!btrfs_tree_locked(c));
-		btrfs_tree_lock(next);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(c));
+			btrfs_tree_lock(next);
+		}
 		break;
 	}
 	path->slots[level] = slot;
@@ -3052,8 +3054,10 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 		if (level == 1 && path->locks[1] && path->reada)
 			reada_for_search(root, path, level, slot, 0);
 		next = read_node_slot(root, next, 0);
-		WARN_ON(!btrfs_tree_locked(path->nodes[level]));
-		btrfs_tree_lock(next);
+		if (!path->skip_locking) {
+			WARN_ON(!btrfs_tree_locked(path->nodes[level]));
+			btrfs_tree_lock(next);
+		}
 	}
done:
 	unlock_up(path, 0, 1);
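
For reference, the btrfs_release_path() hunk at the top matters to exactly this kind of caller: the path gets reused across searches, and the memset() in btrfs_release_path() would wipe the new flag between searches if that hunk did not save and restore it. A hypothetical restartable walk, assuming the same read-only setup as the sketch above:

static void restartable_walk_sketch(struct btrfs_root *extent_root)
{
	struct btrfs_key key = { .objectid = 0 };
	struct btrfs_path *path = btrfs_alloc_path();

	if (!path)
		return;
	path->skip_locking = 1;

	for (;;) {
		if (btrfs_search_slot(NULL, extent_root, &key, path, 0, 0) < 0)
			break;
		/* ... consume the leaf, advance key past its last item ... */
		btrfs_release_path(extent_root, path);
		/* skip_locking (like keep_locks) survives the release */
		if (key.objectid == (u64)-1)
			break;
		key.objectid++;
	}
	btrfs_free_path(path);
}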