Diffstat (limited to 'fs/ocfs2/xattr.c')
-rw-r--r-- | fs/ocfs2/xattr.c | 2182
1 file changed, 1179 insertions(+), 1003 deletions(-)
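The heart of this patch is the new struct ocfs2_xa_loc / struct ocfs2_xa_loc_operations pair: the xattr storage formats (inode-inline, unindexed xattr block, and indexed bucket) are hidden behind a small ops table so that one generic path, ocfs2_xa_set(), can prepare an entry, wipe or reuse its name+value region, and store the value without caring where the bytes live. The sketch below is a minimal, self-contained userspace illustration of that dispatch pattern only; every name in it (xa_loc, xa_loc_operations, flat_ops, xa_set) is made up for the example and is not the kernel API, and the "storage" is just a flat character buffer standing in for the real block/bucket backends.

/*
 * Illustrative sketch only -- not part of the patch. Simplified,
 * hypothetical types mimic the shape of the ocfs2_xa_loc abstraction
 * so the dispatch pattern can be compiled and run outside the kernel.
 */
#include <stdio.h>
#include <string.h>

struct xa_loc;

/* Per-storage-format operations, analogous to ocfs2_xa_loc_operations. */
struct xa_loc_operations {
        int (*check_space)(struct xa_loc *loc, size_t needed);
        void (*add_namevalue)(struct xa_loc *loc, const char *name,
                              const char *value);
};

/* A location inside some xattr storage, analogous to ocfs2_xa_loc. */
struct xa_loc {
        const struct xa_loc_operations *ops;
        char buf[64];   /* stand-in for the on-disk storage */
        size_t used;
};

/* A trivial "flat buffer" backend; the kernel has block and bucket ops. */
static int flat_check_space(struct xa_loc *loc, size_t needed)
{
        /* +1 for the terminating NUL that snprintf() writes */
        return (loc->used + needed + 1 <= sizeof(loc->buf)) ? 0 : -1;
}

static void flat_add_namevalue(struct xa_loc *loc, const char *name,
                               const char *value)
{
        loc->used += snprintf(loc->buf + loc->used,
                              sizeof(loc->buf) - loc->used,
                              "%s=%s;", name, value);
}

static const struct xa_loc_operations flat_ops = {
        .check_space    = flat_check_space,
        .add_namevalue  = flat_add_namevalue,
};

/* Generic setter written once against the ops table, like ocfs2_xa_set(). */
static int xa_set(struct xa_loc *loc, const char *name, const char *value)
{
        size_t needed = strlen(name) + strlen(value) + 2; /* '=' and ';' */

        if (loc->ops->check_space(loc, needed))
                return -1;      /* the kernel would return -ENOSPC */
        loc->ops->add_namevalue(loc, name, value);
        return 0;
}

int main(void)
{
        struct xa_loc loc = { .ops = &flat_ops };

        if (!xa_set(&loc, "user.foo", "bar"))
                printf("stored: %s\n", loc.buf);
        return 0;
}

In the patch itself the same role is played by ocfs2_xa_block_loc_ops and ocfs2_xa_bucket_loc_ops, with ocfs2_xa_set() dispatching through loc->xl_ops.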
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 8fc6fb0..d1b0d38 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -116,10 +116,11 @@ static struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
 };
 
 struct ocfs2_xattr_info {
-        int name_index;
-        const char *name;
-        const void *value;
-        size_t value_len;
+        int xi_name_index;
+        const char *xi_name;
+        int xi_name_len;
+        const void *xi_value;
+        size_t xi_value_len;
 };
 
 struct ocfs2_xattr_search {
@@ -137,6 +138,115 @@ struct ocfs2_xattr_search {
         int not_found;
 };
 
+/* Operations on struct ocfs2_xa_entry */
+struct ocfs2_xa_loc;
+struct ocfs2_xa_loc_operations {
+        /*
+         * Journal functions
+         */
+        int (*xlo_journal_access)(handle_t *handle, struct ocfs2_xa_loc *loc,
+                                  int type);
+        void (*xlo_journal_dirty)(handle_t *handle, struct ocfs2_xa_loc *loc);
+
+        /*
+         * Return a pointer to the appropriate buffer in loc->xl_storage
+         * at the given offset from loc->xl_header.
+         */
+        void *(*xlo_offset_pointer)(struct ocfs2_xa_loc *loc, int offset);
+
+        /* Can we reuse the existing entry for the new value? */
+        int (*xlo_can_reuse)(struct ocfs2_xa_loc *loc,
+                             struct ocfs2_xattr_info *xi);
+
+        /* How much space is needed for the new value? */
+        int (*xlo_check_space)(struct ocfs2_xa_loc *loc,
+                               struct ocfs2_xattr_info *xi);
+
+        /*
+         * Return the offset of the first name+value pair. This is
+         * the start of our downward-filling free space.
+         */
+        int (*xlo_get_free_start)(struct ocfs2_xa_loc *loc);
+
+        /*
+         * Remove the name+value at this location. Do whatever is
+         * appropriate with the remaining name+value pairs.
+         */
+        void (*xlo_wipe_namevalue)(struct ocfs2_xa_loc *loc);
+
+        /* Fill xl_entry with a new entry */
+        void (*xlo_add_entry)(struct ocfs2_xa_loc *loc, u32 name_hash);
+
+        /* Add name+value storage to an entry */
+        void (*xlo_add_namevalue)(struct ocfs2_xa_loc *loc, int size);
+
+        /*
+         * Initialize the value buf's access and bh fields for this entry.
+         * ocfs2_xa_fill_value_buf() will handle the xv pointer.
+         */
+        void (*xlo_fill_value_buf)(struct ocfs2_xa_loc *loc,
+                                   struct ocfs2_xattr_value_buf *vb);
+};
+
+/*
+ * Describes an xattr entry location. This is a memory structure
+ * tracking the on-disk structure.
+ */
+struct ocfs2_xa_loc {
+        /* This xattr belongs to this inode */
+        struct inode *xl_inode;
+
+        /* The ocfs2_xattr_header inside the on-disk storage. Not NULL. */
+        struct ocfs2_xattr_header *xl_header;
+
+        /* Bytes from xl_header to the end of the storage */
+        int xl_size;
+
+        /*
+         * The ocfs2_xattr_entry this location describes. If this is
+         * NULL, this location describes the on-disk structure where it
+         * would have been.
+ */ + struct ocfs2_xattr_entry *xl_entry; + + /* + * Internal housekeeping + */ + + /* Buffer(s) containing this entry */ + void *xl_storage; + + /* Operations on the storage backing this location */ + const struct ocfs2_xa_loc_operations *xl_ops; +}; + +/* + * Convenience functions to calculate how much space is needed for a + * given name+value pair + */ +static int namevalue_size(int name_len, uint64_t value_len) +{ + if (value_len > OCFS2_XATTR_INLINE_SIZE) + return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + else + return OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); +} + +static int namevalue_size_xi(struct ocfs2_xattr_info *xi) +{ + return namevalue_size(xi->xi_name_len, xi->xi_value_len); +} + +static int namevalue_size_xe(struct ocfs2_xattr_entry *xe) +{ + u64 value_len = le64_to_cpu(xe->xe_value_size); + + BUG_ON((value_len > OCFS2_XATTR_INLINE_SIZE) && + ocfs2_xattr_is_local(xe)); + return namevalue_size(xe->xe_name_len, value_len); +} + + static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, @@ -212,14 +322,6 @@ static inline u16 ocfs2_blocks_per_xattr_bucket(struct super_block *sb) return OCFS2_XATTR_BUCKET_SIZE / (1 << sb->s_blocksize_bits); } -static inline u16 ocfs2_xattr_max_xe_in_bucket(struct super_block *sb) -{ - u16 len = sb->s_blocksize - - offsetof(struct ocfs2_xattr_header, xh_entries); - - return len / sizeof(struct ocfs2_xattr_entry); -} - #define bucket_blkno(_b) ((_b)->bu_bhs[0]->b_blocknr) #define bucket_block(_b, _n) ((_b)->bu_bhs[(_n)]->b_data) #define bucket_xh(_b) ((struct ocfs2_xattr_header *)bucket_block((_b), 0)) @@ -463,35 +565,22 @@ static u32 ocfs2_xattr_name_hash(struct inode *inode, return hash; } -/* - * ocfs2_xattr_hash_entry() - * - * Compute the hash of an extended attribute. 
- */ -static void ocfs2_xattr_hash_entry(struct inode *inode, - struct ocfs2_xattr_header *header, - struct ocfs2_xattr_entry *entry) +static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) { - u32 hash = 0; - char *name = (char *)header + le16_to_cpu(entry->xe_name_offset); - - hash = ocfs2_xattr_name_hash(inode, name, entry->xe_name_len); - entry->xe_name_hash = cpu_to_le32(hash); - - return; + return namevalue_size(name_len, value_len) + + sizeof(struct ocfs2_xattr_entry); } -static int ocfs2_xattr_entry_real_size(int name_len, size_t value_len) +static int ocfs2_xi_entry_usage(struct ocfs2_xattr_info *xi) { - int size = 0; - - if (value_len <= OCFS2_XATTR_INLINE_SIZE) - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(value_len); - else - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - size += sizeof(struct ocfs2_xattr_entry); + return namevalue_size_xi(xi) + + sizeof(struct ocfs2_xattr_entry); +} - return size; +static int ocfs2_xe_entry_usage(struct ocfs2_xattr_entry *xe) +{ + return namevalue_size_xe(xe) + + sizeof(struct ocfs2_xattr_entry); } int ocfs2_calc_security_init(struct inode *dir, @@ -1308,452 +1397,897 @@ out: return ret; } -static int ocfs2_xattr_cleanup(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_check_space_helper(int needed_space, int free_start, + int num_entries) { - int ret = 0; - size_t name_len = strlen(xi->name); - void *val = xs->base + offs; - size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; + int free_space; - ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - /* Decrease xattr count */ - le16_add_cpu(&xs->header->xh_count, -1); - /* Remove the xattr entry and tree root which has already be set*/ - memset((void *)xs->here, 0, sizeof(struct ocfs2_xattr_entry)); - memset(val, 0, size); + if (!needed_space) + return 0; - ret = ocfs2_journal_dirty(handle, vb->vb_bh); - if (ret < 0) - mlog_errno(ret); -out: - return ret; + free_space = free_start - + sizeof(struct ocfs2_xattr_header) - + (num_entries * sizeof(struct ocfs2_xattr_entry)) - + OCFS2_XATTR_HEADER_GAP; + if (free_space < 0) + return -EIO; + if (free_space < needed_space) + return -ENOSPC; + + return 0; } -static int ocfs2_xattr_update_entry(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_journal_access(handle_t *handle, struct ocfs2_xa_loc *loc, + int type) { - int ret; + return loc->xl_ops->xlo_journal_access(handle, loc, type); +} - ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } +static void ocfs2_xa_journal_dirty(handle_t *handle, struct ocfs2_xa_loc *loc) +{ + loc->xl_ops->xlo_journal_dirty(handle, loc); +} - xs->here->xe_name_offset = cpu_to_le16(offs); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - if (xi->value_len <= OCFS2_XATTR_INLINE_SIZE) - ocfs2_xattr_set_local(xs->here, 1); - else - ocfs2_xattr_set_local(xs->here, 0); - ocfs2_xattr_hash_entry(inode, xs->header, xs->here); +/* Give a pointer into the storage for the given offset */ +static void *ocfs2_xa_offset_pointer(struct ocfs2_xa_loc *loc, int offset) +{ + BUG_ON(offset >= loc->xl_size); + return loc->xl_ops->xlo_offset_pointer(loc, offset); 
+} - ret = ocfs2_journal_dirty(handle, vb->vb_bh); - if (ret < 0) - mlog_errno(ret); -out: - return ret; +/* + * Wipe the name+value pair and allow the storage to reclaim it. This + * must be followed by either removal of the entry or a call to + * ocfs2_xa_add_namevalue(). + */ +static void ocfs2_xa_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + loc->xl_ops->xlo_wipe_namevalue(loc); } /* - * ocfs2_xattr_set_value_outside() - * - * Set large size value in B tree. + * Find lowest offset to a name+value pair. This is the start of our + * downward-growing free space. */ -static int ocfs2_xattr_set_value_outside(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt, - struct ocfs2_xattr_value_buf *vb, - size_t offs) +static int ocfs2_xa_get_free_start(struct ocfs2_xa_loc *loc) { - size_t name_len = strlen(xi->name); - void *val = xs->base + offs; - struct ocfs2_xattr_value_root *xv = NULL; - size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - int ret = 0; + return loc->xl_ops->xlo_get_free_start(loc); +} - memset(val, 0, size); - memcpy(val, xi->name, name_len); - xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(name_len)); - xv->xr_clusters = 0; - xv->xr_last_eb_blk = 0; - xv->xr_list.l_tree_depth = 0; - xv->xr_list.l_count = cpu_to_le16(1); - xv->xr_list.l_next_free_rec = 0; - vb->vb_xv = xv; - - ret = ocfs2_xattr_value_truncate(inode, vb, xi->value_len, ctxt); - if (ret < 0) { - mlog_errno(ret); - return ret; +/* Can we reuse loc->xl_entry for xi? */ +static int ocfs2_xa_can_reuse_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return loc->xl_ops->xlo_can_reuse(loc, xi); +} + +/* How much free space is needed to set the new value */ +static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return loc->xl_ops->xlo_check_space(loc, xi); +} + +static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + loc->xl_ops->xlo_add_entry(loc, name_hash); + loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); + /* + * We can't leave the new entry's xe_name_offset at zero or + * add_namevalue() will go nuts. We set it to the size of our + * storage so that it can never be less than any other entry. 
+ */ + loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); +} + +static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int size = namevalue_size_xi(xi); + int nameval_offset; + char *nameval_buf; + + loc->xl_ops->xlo_add_namevalue(loc, size); + loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); + loc->xl_entry->xe_name_len = xi->xi_name_len; + ocfs2_xattr_set_type(loc->xl_entry, xi->xi_name_index); + ocfs2_xattr_set_local(loc->xl_entry, + xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE); + + nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); + memset(nameval_buf, 0, size); + memcpy(nameval_buf, xi->xi_name, xi->xi_name_len); +} + +static void ocfs2_xa_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); + + /* Value bufs are for value trees */ + BUG_ON(ocfs2_xattr_is_local(loc->xl_entry)); + BUG_ON(namevalue_size_xe(loc->xl_entry) != + (name_size + OCFS2_XATTR_ROOT_SIZE)); + + loc->xl_ops->xlo_fill_value_buf(loc, vb); + vb->vb_xv = + (struct ocfs2_xattr_value_root *)ocfs2_xa_offset_pointer(loc, + nameval_offset + + name_size); +} + +static int ocfs2_xa_block_journal_access(handle_t *handle, + struct ocfs2_xa_loc *loc, int type) +{ + struct buffer_head *bh = loc->xl_storage; + ocfs2_journal_access_func access; + + if (loc->xl_size == (bh->b_size - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header))) + access = ocfs2_journal_access_xb; + else + access = ocfs2_journal_access_di; + return access(handle, INODE_CACHE(loc->xl_inode), bh, type); +} + +static void ocfs2_xa_block_journal_dirty(handle_t *handle, + struct ocfs2_xa_loc *loc) +{ + struct buffer_head *bh = loc->xl_storage; + + ocfs2_journal_dirty(handle, bh); +} + +static void *ocfs2_xa_block_offset_pointer(struct ocfs2_xa_loc *loc, + int offset) +{ + return (char *)loc->xl_header + offset; +} + +static int ocfs2_xa_block_can_reuse(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + /* + * Block storage is strict. If the sizes aren't exact, we will + * remove the old one and reinsert the new. + */ + return namevalue_size_xe(loc->xl_entry) == + namevalue_size_xi(xi); +} + +static int ocfs2_xa_block_get_free_start(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_header *xh = loc->xl_header; + int i, count = le16_to_cpu(xh->xh_count); + int offset, free_start = loc->xl_size; + + for (i = 0; i < count; i++) { + offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); + if (offset < free_start) + free_start = offset; } - ret = ocfs2_xattr_update_entry(inode, ctxt->handle, xi, xs, vb, offs); - if (ret < 0) { - mlog_errno(ret); - return ret; + + return free_start; +} + +static int ocfs2_xa_block_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int count = le16_to_cpu(loc->xl_header->xh_count); + int free_start = ocfs2_xa_get_free_start(loc); + int needed_space = ocfs2_xi_entry_usage(xi); + + /* + * Block storage will reclaim the original entry before inserting + * the new value, so we only need the difference. If the new + * entry is smaller than the old one, we don't need anything. + */ + if (loc->xl_entry) { + /* Don't need space if we're reusing! 
*/ + if (ocfs2_xa_can_reuse_entry(loc, xi)) + needed_space = 0; + else + needed_space -= ocfs2_xe_entry_usage(loc->xl_entry); } - ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb, - xi->value, xi->value_len); - if (ret < 0) - mlog_errno(ret); + if (needed_space < 0) + needed_space = 0; + return ocfs2_xa_check_space_helper(needed_space, free_start, count); +} - return ret; +/* + * Block storage for xattrs keeps the name+value pairs compacted. When + * we remove one, we have to shift any that preceded it towards the end. + */ +static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + int i, offset; + int namevalue_offset, first_namevalue_offset, namevalue_size; + struct ocfs2_xattr_entry *entry = loc->xl_entry; + struct ocfs2_xattr_header *xh = loc->xl_header; + int count = le16_to_cpu(xh->xh_count); + + namevalue_offset = le16_to_cpu(entry->xe_name_offset); + namevalue_size = namevalue_size_xe(entry); + first_namevalue_offset = ocfs2_xa_get_free_start(loc); + + /* Shift the name+value pairs */ + memmove((char *)xh + first_namevalue_offset + namevalue_size, + (char *)xh + first_namevalue_offset, + namevalue_offset - first_namevalue_offset); + memset((char *)xh + first_namevalue_offset, 0, namevalue_size); + + /* Now tell xh->xh_entries about it */ + for (i = 0; i < count; i++) { + offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset); + if (offset < namevalue_offset) + le16_add_cpu(&xh->xh_entries[i].xe_name_offset, + namevalue_size); + } + + /* + * Note that we don't update xh_free_start or xh_name_value_len + * because they're not used in block-stored xattrs. + */ +} + +static void ocfs2_xa_block_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + int count = le16_to_cpu(loc->xl_header->xh_count); + loc->xl_entry = &(loc->xl_header->xh_entries[count]); + le16_add_cpu(&loc->xl_header->xh_count, 1); + memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); +} + +static void ocfs2_xa_block_add_namevalue(struct ocfs2_xa_loc *loc, int size) +{ + int free_start = ocfs2_xa_get_free_start(loc); + + loc->xl_entry->xe_name_offset = cpu_to_le16(free_start - size); +} + +static void ocfs2_xa_block_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + struct buffer_head *bh = loc->xl_storage; + + if (loc->xl_size == (bh->b_size - + offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header))) + vb->vb_access = ocfs2_journal_access_xb; + else + vb->vb_access = ocfs2_journal_access_di; + vb->vb_bh = bh; } /* - * ocfs2_xattr_set_entry_local() - * - * Set, replace or remove extended attribute in local. + * Operations for xattrs stored in blocks. This includes inline inode + * storage and unindexed ocfs2_xattr_blocks. 
*/ -static void ocfs2_xattr_set_entry_local(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_entry *last, - size_t min_offs) +static const struct ocfs2_xa_loc_operations ocfs2_xa_block_loc_ops = { + .xlo_journal_access = ocfs2_xa_block_journal_access, + .xlo_journal_dirty = ocfs2_xa_block_journal_dirty, + .xlo_offset_pointer = ocfs2_xa_block_offset_pointer, + .xlo_check_space = ocfs2_xa_block_check_space, + .xlo_can_reuse = ocfs2_xa_block_can_reuse, + .xlo_get_free_start = ocfs2_xa_block_get_free_start, + .xlo_wipe_namevalue = ocfs2_xa_block_wipe_namevalue, + .xlo_add_entry = ocfs2_xa_block_add_entry, + .xlo_add_namevalue = ocfs2_xa_block_add_namevalue, + .xlo_fill_value_buf = ocfs2_xa_block_fill_value_buf, +}; + +static int ocfs2_xa_bucket_journal_access(handle_t *handle, + struct ocfs2_xa_loc *loc, int type) { - size_t name_len = strlen(xi->name); - int i; + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; - if (xi->value && xs->not_found) { - /* Insert the new xattr entry. */ - le16_add_cpu(&xs->header->xh_count, 1); - ocfs2_xattr_set_type(last, xi->name_index); - ocfs2_xattr_set_local(last, 1); - last->xe_name_len = name_len; - } else { - void *first_val; - void *val; - size_t offs, size; - - first_val = xs->base + min_offs; - offs = le16_to_cpu(xs->here->xe_name_offset); - val = xs->base + offs; - - if (le64_to_cpu(xs->here->xe_value_size) > - OCFS2_XATTR_INLINE_SIZE) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE; + return ocfs2_xattr_bucket_journal_access(handle, bucket, type); +} + +static void ocfs2_xa_bucket_journal_dirty(handle_t *handle, + struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + + ocfs2_xattr_bucket_journal_dirty(handle, bucket); +} + +static void *ocfs2_xa_bucket_offset_pointer(struct ocfs2_xa_loc *loc, + int offset) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + int block, block_offset; + + /* The header is at the front of the bucket */ + block = offset >> loc->xl_inode->i_sb->s_blocksize_bits; + block_offset = offset % loc->xl_inode->i_sb->s_blocksize; + + return bucket_block(bucket, block) + block_offset; +} + +static int ocfs2_xa_bucket_can_reuse(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + return namevalue_size_xe(loc->xl_entry) >= + namevalue_size_xi(xi); +} + +static int ocfs2_xa_bucket_get_free_start(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + return le16_to_cpu(bucket_xh(bucket)->xh_free_start); +} + +static int ocfs2_bucket_align_free_start(struct super_block *sb, + int free_start, int size) +{ + /* + * We need to make sure that the name+value pair fits within + * one block. + */ + if (((free_start - size) >> sb->s_blocksize_bits) != + ((free_start - 1) >> sb->s_blocksize_bits)) + free_start -= free_start % sb->s_blocksize; + + return free_start; +} + +static int ocfs2_xa_bucket_check_space(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) +{ + int rc; + int count = le16_to_cpu(loc->xl_header->xh_count); + int free_start = ocfs2_xa_get_free_start(loc); + int needed_space = ocfs2_xi_entry_usage(xi); + int size = namevalue_size_xi(xi); + struct super_block *sb = loc->xl_inode->i_sb; + + /* + * Bucket storage does not reclaim name+value pairs it cannot + * reuse. They live as holes until the bucket fills, and then + * the bucket is defragmented. However, the bucket can reclaim + * the ocfs2_xattr_entry. + */ + if (loc->xl_entry) { + /* Don't need space if we're reusing! 
*/ + if (ocfs2_xa_can_reuse_entry(loc, xi)) + needed_space = 0; else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - - if (xi->value && size == OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len)) { - /* The old and the new value have the - same size. Just replace the value. */ - ocfs2_xattr_set_local(xs->here, 1); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - /* Clear value bytes. */ - memset(val + OCFS2_XATTR_SIZE(name_len), - 0, - OCFS2_XATTR_SIZE(xi->value_len)); - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, - xi->value_len); - return; - } - /* Remove the old name+value. */ - memmove(first_val + size, first_val, val - first_val); - memset(first_val, 0, size); - xs->here->xe_name_hash = 0; - xs->here->xe_name_offset = 0; - ocfs2_xattr_set_local(xs->here, 1); - xs->here->xe_value_size = 0; - - min_offs += size; - - /* Adjust all value offsets. */ - last = xs->header->xh_entries; - for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { - size_t o = le16_to_cpu(last->xe_name_offset); - - if (o < offs) - last->xe_name_offset = cpu_to_le16(o + size); - last += 1; - } + needed_space -= sizeof(struct ocfs2_xattr_entry); + } + BUG_ON(needed_space < 0); - if (!xi->value) { - /* Remove the old entry. */ - last -= 1; - memmove(xs->here, xs->here + 1, - (void *)last - (void *)xs->here); - memset(last, 0, sizeof(struct ocfs2_xattr_entry)); - le16_add_cpu(&xs->header->xh_count, -1); - } + if (free_start < size) { + if (needed_space) + return -ENOSPC; + } else { + /* + * First we check if it would fit in the first place. + * Below, we align the free start to a block. This may + * slide us below the minimum gap. By checking unaligned + * first, we avoid that error. + */ + rc = ocfs2_xa_check_space_helper(needed_space, free_start, + count); + if (rc) + return rc; + free_start = ocfs2_bucket_align_free_start(sb, free_start, + size); } - if (xi->value) { - /* Insert the new name+value. */ - size_t size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len); - void *val = xs->base + min_offs - size; + return ocfs2_xa_check_space_helper(needed_space, free_start, count); +} + +static void ocfs2_xa_bucket_wipe_namevalue(struct ocfs2_xa_loc *loc) +{ + le16_add_cpu(&loc->xl_header->xh_name_value_len, + -namevalue_size_xe(loc->xl_entry)); +} - xs->here->xe_name_offset = cpu_to_le16(min_offs - size); - memset(val, 0, size); - memcpy(val, xi->name, name_len); - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, - xi->value_len); - xs->here->xe_value_size = cpu_to_le64(xi->value_len); - ocfs2_xattr_set_local(xs->here, 1); - ocfs2_xattr_hash_entry(inode, xs->header, xs->here); +static void ocfs2_xa_bucket_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +{ + struct ocfs2_xattr_header *xh = loc->xl_header; + int count = le16_to_cpu(xh->xh_count); + int low = 0, high = count - 1, tmp; + struct ocfs2_xattr_entry *tmp_xe; + + /* + * We keep buckets sorted by name_hash, so we need to find + * our insert place. 
+ */ + while (low <= high && count) { + tmp = (low + high) / 2; + tmp_xe = &xh->xh_entries[tmp]; + + if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash)) + low = tmp + 1; + else if (name_hash < le32_to_cpu(tmp_xe->xe_name_hash)) + high = tmp - 1; + else { + low = tmp; + break; + } } - return; + if (low != count) + memmove(&xh->xh_entries[low + 1], + &xh->xh_entries[low], + ((count - low) * sizeof(struct ocfs2_xattr_entry))); + + le16_add_cpu(&xh->xh_count, 1); + loc->xl_entry = &xh->xh_entries[low]; + memset(loc->xl_entry, 0, sizeof(struct ocfs2_xattr_entry)); +} + +static void ocfs2_xa_bucket_add_namevalue(struct ocfs2_xa_loc *loc, int size) +{ + int free_start = ocfs2_xa_get_free_start(loc); + struct ocfs2_xattr_header *xh = loc->xl_header; + struct super_block *sb = loc->xl_inode->i_sb; + int nameval_offset; + + free_start = ocfs2_bucket_align_free_start(sb, free_start, size); + nameval_offset = free_start - size; + loc->xl_entry->xe_name_offset = cpu_to_le16(nameval_offset); + xh->xh_free_start = cpu_to_le16(nameval_offset); + le16_add_cpu(&xh->xh_name_value_len, size); + +} + +static void ocfs2_xa_bucket_fill_value_buf(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_value_buf *vb) +{ + struct ocfs2_xattr_bucket *bucket = loc->xl_storage; + struct super_block *sb = loc->xl_inode->i_sb; + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int size = namevalue_size_xe(loc->xl_entry); + int block_offset = nameval_offset >> sb->s_blocksize_bits; + + /* Values are not allowed to straddle block boundaries */ + BUG_ON(block_offset != + ((nameval_offset + size - 1) >> sb->s_blocksize_bits)); + /* We expect the bucket to be filled in */ + BUG_ON(!bucket->bu_bhs[block_offset]); + + vb->vb_access = ocfs2_journal_access; + vb->vb_bh = bucket->bu_bhs[block_offset]; +} + +/* Operations for xattrs stored in buckets. */ +static const struct ocfs2_xa_loc_operations ocfs2_xa_bucket_loc_ops = { + .xlo_journal_access = ocfs2_xa_bucket_journal_access, + .xlo_journal_dirty = ocfs2_xa_bucket_journal_dirty, + .xlo_offset_pointer = ocfs2_xa_bucket_offset_pointer, + .xlo_check_space = ocfs2_xa_bucket_check_space, + .xlo_can_reuse = ocfs2_xa_bucket_can_reuse, + .xlo_get_free_start = ocfs2_xa_bucket_get_free_start, + .xlo_wipe_namevalue = ocfs2_xa_bucket_wipe_namevalue, + .xlo_add_entry = ocfs2_xa_bucket_add_entry, + .xlo_add_namevalue = ocfs2_xa_bucket_add_namevalue, + .xlo_fill_value_buf = ocfs2_xa_bucket_fill_value_buf, +}; + +static unsigned int ocfs2_xa_value_clusters(struct ocfs2_xa_loc *loc) +{ + struct ocfs2_xattr_value_buf vb; + + if (ocfs2_xattr_is_local(loc->xl_entry)) + return 0; + + ocfs2_xa_fill_value_buf(loc, &vb); + return le32_to_cpu(vb.vb_xv->xr_clusters); +} + +static int ocfs2_xa_value_truncate(struct ocfs2_xa_loc *loc, u64 bytes, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int trunc_rc, access_rc; + struct ocfs2_xattr_value_buf vb; + + ocfs2_xa_fill_value_buf(loc, &vb); + trunc_rc = ocfs2_xattr_value_truncate(loc->xl_inode, &vb, bytes, + ctxt); + + /* + * The caller of ocfs2_xa_value_truncate() has already called + * ocfs2_xa_journal_access on the loc. However, The truncate code + * calls ocfs2_extend_trans(). This may commit the previous + * transaction and open a new one. If this is a bucket, truncate + * could leave only vb->vb_bh set up for journaling. Meanwhile, + * the caller is expecting to dirty the entire bucket. So we must + * reset the journal work. We do this even if truncate has failed, + * as it could have failed after committing the extend. 
+ */ + access_rc = ocfs2_xa_journal_access(ctxt->handle, loc, + OCFS2_JOURNAL_ACCESS_WRITE); + + /* Errors in truncate take precedence */ + return trunc_rc ? trunc_rc : access_rc; +} + +static void ocfs2_xa_remove_entry(struct ocfs2_xa_loc *loc) +{ + int index, count; + struct ocfs2_xattr_header *xh = loc->xl_header; + struct ocfs2_xattr_entry *entry = loc->xl_entry; + + ocfs2_xa_wipe_namevalue(loc); + loc->xl_entry = NULL; + + le16_add_cpu(&xh->xh_count, -1); + count = le16_to_cpu(xh->xh_count); + + /* + * Only zero out the entry if there are more remaining. This is + * important for an empty bucket, as it keeps track of the + * bucket's hash value. It doesn't hurt empty block storage. + */ + if (count) { + index = ((char *)entry - (char *)&xh->xh_entries) / + sizeof(struct ocfs2_xattr_entry); + memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1], + (count - index) * sizeof(struct ocfs2_xattr_entry)); + memset(&xh->xh_entries[count], 0, + sizeof(struct ocfs2_xattr_entry)); + } } /* - * ocfs2_xattr_set_entry() + * If we have a problem adjusting the size of an external value during + * ocfs2_xa_prepare_entry() or ocfs2_xa_remove(), we may have an xattr + * in an intermediate state. For example, the value may be partially + * truncated. + * + * If the value tree hasn't changed, the extend/truncate went nowhere. + * We have nothing to do. The caller can treat it as a straight error. * - * Set extended attribute entry into inode or block. + * If the value tree got partially truncated, we now have a corrupted + * extended attribute. We're going to wipe its entry and leak the + * clusters. Better to leak some storage than leave a corrupt entry. * - * If extended attribute value size > OCFS2_XATTR_INLINE_SIZE, - * We first insert tree root(ocfs2_xattr_value_root) with set_entry_local(), - * then set value in B tree with set_value_outside(). + * If the value tree grew, it obviously didn't grow enough for the + * new entry. We're not going to try and reclaim those clusters either. + * If there was already an external value there (orig_clusters != 0), + * the new clusters are attached safely and we can just leave the old + * value in place. If there was no external value there, we remove + * the entry. + * + * This way, the xattr block we store in the journal will be consistent. + * If the size change broke because of the journal, no changes will hit + * disk anyway. */ -static int ocfs2_xattr_set_entry(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt, - int flag) -{ - struct ocfs2_xattr_entry *last; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - size_t min_offs = xs->end - xs->base, name_len = strlen(xi->name); - size_t size_l = 0; - handle_t *handle = ctxt->handle; - int free, i, ret; - struct ocfs2_xattr_info xi_l = { - .name_index = xi->name_index, - .name = xi->name, - .value = xi->value, - .value_len = xi->value_len, - }; - struct ocfs2_xattr_value_buf vb = { - .vb_bh = xs->xattr_bh, - .vb_access = ocfs2_journal_access_di, - }; +static void ocfs2_xa_cleanup_value_truncate(struct ocfs2_xa_loc *loc, + const char *what, + unsigned int orig_clusters) +{ + unsigned int new_clusters = ocfs2_xa_value_clusters(loc); + char *nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + + if (new_clusters < orig_clusters) { + mlog(ML_ERROR, + "Partial truncate while %s xattr %.*s. 
Leaking " + "%u clusters and removing the entry\n", + what, loc->xl_entry->xe_name_len, nameval_buf, + orig_clusters - new_clusters); + ocfs2_xa_remove_entry(loc); + } else if (!orig_clusters) { + mlog(ML_ERROR, + "Unable to allocate an external value for xattr " + "%.*s safely. Leaking %u clusters and removing the " + "entry\n", + loc->xl_entry->xe_name_len, nameval_buf, + new_clusters - orig_clusters); + ocfs2_xa_remove_entry(loc); + } else if (new_clusters > orig_clusters) + mlog(ML_ERROR, + "Unable to grow xattr %.*s safely. %u new clusters " + "have been added, but the value will not be " + "modified\n", + loc->xl_entry->xe_name_len, nameval_buf, + new_clusters - orig_clusters); +} + +static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + unsigned int orig_clusters; + + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + /* + * Since this is remove, we can return 0 if + * ocfs2_xa_cleanup_value_truncate() is going to + * wipe the entry anyway. So we check the + * cluster count as well. + */ + if (orig_clusters != ocfs2_xa_value_clusters(loc)) + rc = 0; + ocfs2_xa_cleanup_value_truncate(loc, "removing", + orig_clusters); + if (rc) + goto out; + } + } - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - BUG_ON(xs->xattr_bh == xs->inode_bh); - vb.vb_access = ocfs2_journal_access_xb; - } else - BUG_ON(xs->xattr_bh != xs->inode_bh); + ocfs2_xa_remove_entry(loc); - /* Compute min_offs, last and free space. */ - last = xs->header->xh_entries; +out: + return rc; +} - for (i = 0 ; i < le16_to_cpu(xs->header->xh_count); i++) { - size_t offs = le16_to_cpu(last->xe_name_offset); - if (offs < min_offs) - min_offs = offs; - last += 1; - } +static void ocfs2_xa_install_value_root(struct ocfs2_xa_loc *loc) +{ + int name_size = OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len); + char *nameval_buf; - free = min_offs - ((void *)last - xs->base) - OCFS2_XATTR_HEADER_GAP; - if (free < 0) - return -EIO; + nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + memcpy(nameval_buf + name_size, &def_xv, OCFS2_XATTR_ROOT_SIZE); +} - if (!xs->not_found) { - size_t size = 0; - if (ocfs2_xattr_is_local(xs->here)) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE; - free += (size + sizeof(struct ocfs2_xattr_entry)); - } - /* Check free space in inode or block */ - if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - if (free < sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_ROOT_SIZE) { - ret = -ENOSPC; - goto out; +/* + * Take an existing entry and make it ready for the new value. This + * won't allocate space, but it may free space. It should be ready for + * ocfs2_xa_prepare_entry() to finish the work. 
+ */ +static int ocfs2_xa_reuse_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); + unsigned int orig_clusters; + char *nameval_buf; + int xe_local = ocfs2_xattr_is_local(loc->xl_entry); + int xi_local = xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE; + + BUG_ON(OCFS2_XATTR_SIZE(loc->xl_entry->xe_name_len) != + name_size); + + nameval_buf = ocfs2_xa_offset_pointer(loc, + le16_to_cpu(loc->xl_entry->xe_name_offset)); + if (xe_local) { + memset(nameval_buf + name_size, 0, + namevalue_size_xe(loc->xl_entry) - name_size); + if (!xi_local) + ocfs2_xa_install_value_root(loc); + } else { + orig_clusters = ocfs2_xa_value_clusters(loc); + if (xi_local) { + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc < 0) + mlog_errno(rc); + else + memset(nameval_buf + name_size, 0, + namevalue_size_xe(loc->xl_entry) - + name_size); + } else if (le64_to_cpu(loc->xl_entry->xe_value_size) > + xi->xi_value_len) { + rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, + ctxt); + if (rc < 0) + mlog_errno(rc); } - size_l = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - xi_l.value = (void *)&def_xv; - xi_l.value_len = OCFS2_XATTR_ROOT_SIZE; - } else if (xi->value) { - if (free < sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len)) { - ret = -ENOSPC; + + if (rc) { + ocfs2_xa_cleanup_value_truncate(loc, "reusing", + orig_clusters); goto out; } } - if (!xs->not_found) { - /* For existing extended attribute */ - size_t size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xs->here->xe_value_size)); - size_t offs = le16_to_cpu(xs->here->xe_name_offset); - void *val = xs->base + offs; + loc->xl_entry->xe_value_size = cpu_to_le64(xi->xi_value_len); + ocfs2_xattr_set_local(loc->xl_entry, xi_local); - if (ocfs2_xattr_is_local(xs->here) && size == size_l) { - /* Replace existing local xattr with tree root */ - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, - ctxt, &vb, offs); - if (ret < 0) - mlog_errno(ret); - goto out; - } else if (!ocfs2_xattr_is_local(xs->here)) { - /* For existing xattr which has value outside */ - vb.vb_xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(name_len)); +out: + return rc; +} - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * If new value need set outside also, - * first truncate old value to new value, - * then set new value with set_value_outside(). - */ - ret = ocfs2_xattr_value_truncate(inode, - &vb, - xi->value_len, - ctxt); - if (ret < 0) { - mlog_errno(ret); - goto out; - } +/* + * Prepares loc->xl_entry to receive the new xattr. This includes + * properly setting up the name+value pair region. If loc->xl_entry + * already exists, it will take care of modifying it appropriately. + * + * Note that this modifies the data. You did journal_access already, + * right? 
+ */ +static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + u32 name_hash, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + unsigned int orig_clusters; + __le64 orig_value_size = 0; - ret = ocfs2_xattr_update_entry(inode, - handle, - xi, - xs, - &vb, - offs); - if (ret < 0) { - mlog_errno(ret); - goto out; - } + rc = ocfs2_xa_check_space(loc, xi); + if (rc) + goto out; - ret = __ocfs2_xattr_set_value_outside(inode, - handle, - &vb, - xi->value, - xi->value_len); - if (ret < 0) - mlog_errno(ret); + if (loc->xl_entry) { + if (ocfs2_xa_can_reuse_entry(loc, xi)) { + orig_value_size = loc->xl_entry->xe_value_size; + rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); + if (rc) + goto out; + goto alloc_value; + } + + if (!ocfs2_xattr_is_local(loc->xl_entry)) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, 0, ctxt); + if (rc) { + mlog_errno(rc); + ocfs2_xa_cleanup_value_truncate(loc, + "overwriting", + orig_clusters); goto out; - } else { - /* - * If new value need set in local, - * just trucate old value to zero. - */ - ret = ocfs2_xattr_value_truncate(inode, - &vb, - 0, - ctxt); - if (ret < 0) - mlog_errno(ret); } } + ocfs2_xa_wipe_namevalue(loc); + } else + ocfs2_xa_add_entry(loc, name_hash); + + /* + * If we get here, we have a blank entry. Fill it. We grow our + * name+value pair back from the end. + */ + ocfs2_xa_add_namevalue(loc, xi); + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) + ocfs2_xa_install_value_root(loc); + +alloc_value: + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { + orig_clusters = ocfs2_xa_value_clusters(loc); + rc = ocfs2_xa_value_truncate(loc, xi->xi_value_len, ctxt); + if (rc < 0) { + /* + * If we tried to grow an existing external value, + * ocfs2_xa_cleanuP-value_truncate() is going to + * let it stand. We have to restore its original + * value size. + */ + loc->xl_entry->xe_value_size = orig_value_size; + ocfs2_xa_cleanup_value_truncate(loc, "growing", + orig_clusters); + mlog_errno(rc); + } } - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh, +out: + return rc; +} + +/* + * Store the value portion of the name+value pair. This will skip + * values that are stored externally. Their tree roots were set up + * by ocfs2_xa_prepare_entry(). 
+ */ +static int ocfs2_xa_store_value(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int rc = 0; + int nameval_offset = le16_to_cpu(loc->xl_entry->xe_name_offset); + int name_size = OCFS2_XATTR_SIZE(xi->xi_name_len); + char *nameval_buf; + struct ocfs2_xattr_value_buf vb; + + nameval_buf = ocfs2_xa_offset_pointer(loc, nameval_offset); + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { + ocfs2_xa_fill_value_buf(loc, &vb); + rc = __ocfs2_xattr_set_value_outside(loc->xl_inode, + ctxt->handle, &vb, + xi->xi_value, + xi->xi_value_len); + } else + memcpy(nameval_buf + name_size, xi->xi_value, xi->xi_value_len); + + return rc; +} + +static int ocfs2_xa_set(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; + u32 name_hash = ocfs2_xattr_name_hash(loc->xl_inode, xi->xi_name, + xi->xi_name_len); + + ret = ocfs2_xa_journal_access(ctxt->handle, loc, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - goto out; - } - } - /* - * Set value in local, include set tree root in local. - * This is the first step for value size >INLINE_SIZE. + * From here on out, everything is going to modify the buffer a + * little. Errors are going to leave the xattr header in a + * sane state. Thus, even with errors we dirty the sucker. */ - ocfs2_xattr_set_entry_local(inode, &xi_l, xs, last, min_offs); - if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = ocfs2_journal_dirty(handle, xs->xattr_bh); - if (ret < 0) { - mlog_errno(ret); - goto out; - } + /* Don't worry, we are never called with !xi_value and !xl_entry */ + if (!xi->xi_value) { + ret = ocfs2_xa_remove(loc, ctxt); + goto out_dirty; } - if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) && - (flag & OCFS2_INLINE_XATTR_FL)) { - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - unsigned int xattrsize = osb->s_xattr_inline_size; - - /* - * Adjust extent record count or inline data size - * to reserve space for extended attribute. - */ - if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - struct ocfs2_inline_data *idata = &di->id2.i_data; - le16_add_cpu(&idata->id_count, -xattrsize); - } else if (!(ocfs2_inode_is_fast_symlink(inode))) { - struct ocfs2_extent_list *el = &di->id2.i_list; - le16_add_cpu(&el->l_count, -(xattrsize / - sizeof(struct ocfs2_extent_rec))); - } - di->i_xattr_inline_size = cpu_to_le16(xattrsize); + ret = ocfs2_xa_prepare_entry(loc, xi, name_hash, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out_dirty; } - /* Update xattr flag */ - spin_lock(&oi->ip_lock); - oi->ip_dyn_features |= flag; - di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); - spin_unlock(&oi->ip_lock); - ret = ocfs2_journal_dirty(handle, xs->inode_bh); - if (ret < 0) + ret = ocfs2_xa_store_value(loc, xi, ctxt); + if (ret) mlog_errno(ret); - if (!ret && xi->value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * Set value outside in B tree. - * This is the second step for value size > INLINE_SIZE. - */ - size_t offs = le16_to_cpu(xs->here->xe_name_offset); - ret = ocfs2_xattr_set_value_outside(inode, xi, xs, ctxt, - &vb, offs); - if (ret < 0) { - int ret2; +out_dirty: + ocfs2_xa_journal_dirty(ctxt->handle, loc); - mlog_errno(ret); - /* - * If set value outside failed, we have to clean - * the junk tree root we have already set in local. 
- */ - ret2 = ocfs2_xattr_cleanup(inode, ctxt->handle, - xi, xs, &vb, offs); - if (ret2 < 0) - mlog_errno(ret2); - } - } out: return ret; } +static void ocfs2_init_dinode_xa_loc(struct ocfs2_xa_loc *loc, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_xattr_entry *entry) +{ + struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data; + + BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_XATTR_FL)); + + loc->xl_inode = inode; + loc->xl_ops = &ocfs2_xa_block_loc_ops; + loc->xl_storage = bh; + loc->xl_entry = entry; + loc->xl_size = le16_to_cpu(di->i_xattr_inline_size); + loc->xl_header = + (struct ocfs2_xattr_header *)(bh->b_data + bh->b_size - + loc->xl_size); +} + +static void ocfs2_init_xattr_block_xa_loc(struct ocfs2_xa_loc *loc, + struct inode *inode, + struct buffer_head *bh, + struct ocfs2_xattr_entry *entry) +{ + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)bh->b_data; + + BUG_ON(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED); + + loc->xl_inode = inode; + loc->xl_ops = &ocfs2_xa_block_loc_ops; + loc->xl_storage = bh; + loc->xl_header = &(xb->xb_attrs.xb_header); + loc->xl_entry = entry; + loc->xl_size = bh->b_size - offsetof(struct ocfs2_xattr_block, + xb_attrs.xb_header); +} + +static void ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_bucket *bucket, + struct ocfs2_xattr_entry *entry) +{ + loc->xl_inode = bucket->bu_inode; + loc->xl_ops = &ocfs2_xa_bucket_loc_ops; + loc->xl_storage = bucket; + loc->xl_header = bucket_xh(bucket); + loc->xl_entry = entry; + loc->xl_size = OCFS2_XATTR_BUCKET_SIZE; +} + /* * In xattr remove, if it is stored outside and refcounted, we may have * the chance to split the refcount tree. So need the allocators. @@ -2149,6 +2683,55 @@ static int ocfs2_xattr_ibody_find(struct inode *inode, return 0; } +static int ocfs2_xattr_ibody_init(struct inode *inode, + struct buffer_head *di_bh, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + unsigned int xattrsize = osb->s_xattr_inline_size; + + if (!ocfs2_xattr_has_space_inline(inode, di)) { + ret = -ENOSPC; + goto out; + } + + ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Adjust extent record count or inline data size + * to reserve space for extended attribute. 
+ */ + if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + struct ocfs2_inline_data *idata = &di->id2.i_data; + le16_add_cpu(&idata->id_count, -xattrsize); + } else if (!(ocfs2_inode_is_fast_symlink(inode))) { + struct ocfs2_extent_list *el = &di->id2.i_list; + le16_add_cpu(&el->l_count, -(xattrsize / + sizeof(struct ocfs2_extent_rec))); + } + di->i_xattr_inline_size = cpu_to_le16(xattrsize); + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= OCFS2_INLINE_XATTR_FL|OCFS2_HAS_XATTR_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + spin_unlock(&oi->ip_lock); + + ret = ocfs2_journal_dirty(ctxt->handle, di_bh); + if (ret < 0) + mlog_errno(ret); + +out: + return ret; +} + /* * ocfs2_xattr_ibody_set() * @@ -2160,9 +2743,10 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt) { + int ret; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; - int ret; + struct ocfs2_xa_loc loc; if (inode->i_sb->s_blocksize == OCFS2_MIN_BLOCKSIZE) return -ENOSPC; @@ -2175,8 +2759,25 @@ static int ocfs2_xattr_ibody_set(struct inode *inode, } } - ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, - (OCFS2_INLINE_XATTR_FL | OCFS2_HAS_XATTR_FL)); + if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL)) { + ret = ocfs2_xattr_ibody_init(inode, xs->inode_bh, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + } + + ocfs2_init_dinode_xa_loc(&loc, inode, xs->inode_bh, + xs->not_found ? NULL : xs->here); + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (ret) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto out; + } + xs->here = loc.xl_entry; + out: up_write(&oi->ip_alloc_sem); @@ -2236,12 +2837,11 @@ cleanup: return ret; } -static int ocfs2_create_xattr_block(handle_t *handle, - struct inode *inode, +static int ocfs2_create_xattr_block(struct inode *inode, struct buffer_head *inode_bh, - struct ocfs2_alloc_context *meta_ac, - struct buffer_head **ret_bh, - int indexed) + struct ocfs2_xattr_set_ctxt *ctxt, + int indexed, + struct buffer_head **ret_bh) { int ret; u16 suballoc_bit_start; @@ -2252,14 +2852,14 @@ static int ocfs2_create_xattr_block(handle_t *handle, struct buffer_head *new_bh = NULL; struct ocfs2_xattr_block *xblk; - ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh, - OCFS2_JOURNAL_ACCESS_CREATE); + ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), + inode_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); goto end; } - ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + ret = ocfs2_claim_metadata(osb, ctxt->handle, ctxt->meta_ac, 1, &suballoc_bit_start, &num_got, &first_blkno); if (ret < 0) { @@ -2270,7 +2870,7 @@ static int ocfs2_create_xattr_block(handle_t *handle, new_bh = sb_getblk(inode->i_sb, first_blkno); ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); - ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), + ret = ocfs2_journal_access_xb(ctxt->handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { @@ -2282,11 +2882,10 @@ static int ocfs2_create_xattr_block(handle_t *handle, xblk = (struct ocfs2_xattr_block *)new_bh->b_data; memset(xblk, 0, inode->i_sb->s_blocksize); strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); - xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); + xblk->xb_suballoc_slot = cpu_to_le16(ctxt->meta_ac->ac_alloc_slot); xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); 
xblk->xb_blkno = cpu_to_le64(first_blkno); - if (indexed) { struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root; xr->xt_clusters = cpu_to_le32(1); @@ -2297,14 +2896,17 @@ static int ocfs2_create_xattr_block(handle_t *handle, xr->xt_list.l_next_free_rec = cpu_to_le16(1); xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED); } + ocfs2_journal_dirty(ctxt->handle, new_bh); - ret = ocfs2_journal_dirty(handle, new_bh); - if (ret < 0) { - mlog_errno(ret); - goto end; - } + /* Add it to the inode */ di->i_xattr_loc = cpu_to_le64(first_blkno); - ocfs2_journal_dirty(handle, inode_bh); + + spin_lock(&OCFS2_I(inode)->ip_lock); + OCFS2_I(inode)->ip_dyn_features |= OCFS2_HAS_XATTR_FL; + di->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features); + spin_unlock(&OCFS2_I(inode)->ip_lock); + + ocfs2_journal_dirty(ctxt->handle, inode_bh); *ret_bh = new_bh; new_bh = NULL; @@ -2326,13 +2928,13 @@ static int ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_set_ctxt *ctxt) { struct buffer_head *new_bh = NULL; - handle_t *handle = ctxt->handle; struct ocfs2_xattr_block *xblk = NULL; int ret; + struct ocfs2_xa_loc loc; if (!xs->xattr_bh) { - ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh, - ctxt->meta_ac, &new_bh, 0); + ret = ocfs2_create_xattr_block(inode, xs->inode_bh, ctxt, + 0, &new_bh); if (ret) { mlog_errno(ret); goto end; @@ -2348,21 +2950,25 @@ static int ocfs2_xattr_block_set(struct inode *inode, xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; if (!(le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED)) { - /* Set extended attribute into external block */ - ret = ocfs2_xattr_set_entry(inode, xi, xs, ctxt, - OCFS2_HAS_XATTR_FL); - if (!ret || ret != -ENOSPC) - goto end; + ocfs2_init_xattr_block_xa_loc(&loc, inode, xs->xattr_bh, + xs->not_found ? NULL : xs->here); - ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); - if (ret) + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) + xs->here = loc.xl_entry; + else if (ret != -ENOSPC) goto end; + else { + ret = ocfs2_xattr_create_index_block(inode, xs, ctxt); + if (ret) + goto end; + } } - ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); + if (le16_to_cpu(xblk->xb_flags) & OCFS2_XATTR_INDEXED) + ret = ocfs2_xattr_set_entry_index_block(inode, xi, xs, ctxt); end: - return ret; } @@ -2371,7 +2977,6 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode, struct ocfs2_xattr_info *xi, struct ocfs2_xattr_search *xs) { - u64 value_size; struct ocfs2_xattr_entry *last; int free, i; size_t min_offs = xs->end - xs->base; @@ -2394,13 +2999,7 @@ static int ocfs2_xattr_can_be_in_inode(struct inode *inode, BUG_ON(!xs->not_found); - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_size = OCFS2_XATTR_ROOT_SIZE; - else - value_size = OCFS2_XATTR_SIZE(xi->value_len); - - if (free >= sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(strlen(xi->name)) + value_size) + if (free >= (sizeof(struct ocfs2_xattr_entry) + namevalue_size_xi(xi))) return 1; return 0; @@ -2424,7 +3023,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, char *base = NULL; int name_offset, name_len = 0; u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, - xi->value_len); + xi->xi_value_len); u64 value_size; /* @@ -2432,14 +3031,14 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * No matter whether we replace an old one or add a new one, * we need this for writing. 
*/ - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) credits += new_clusters * ocfs2_clusters_to_blocks(inode->i_sb, 1); if (xis->not_found && xbs->not_found) { credits += ocfs2_blocks_per_xattr_bucket(inode->i_sb); - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { clusters_add += new_clusters; credits += ocfs2_calc_extend_credits(inode->i_sb, &def_xv.xv.xr_list, @@ -2484,7 +3083,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * The credits for removing the value tree will be extended * by ocfs2_remove_extent itself. */ - if (!xi->value) { + if (!xi->xi_value) { if (!ocfs2_xattr_is_local(xe)) credits += ocfs2_remove_extent_credits(inode->i_sb); @@ -2514,7 +3113,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, } } - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) { + if (xi->xi_value_len > OCFS2_XATTR_INLINE_SIZE) { /* the new values will be stored outside. */ u32 old_clusters = 0; @@ -2547,9 +3146,10 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, * value, we don't need any allocation, otherwise we have * to guess metadata allocation. */ - if ((ocfs2_xattr_is_local(xe) && value_size >= xi->value_len) || + if ((ocfs2_xattr_is_local(xe) && + (value_size >= xi->xi_value_len)) || (!ocfs2_xattr_is_local(xe) && - OCFS2_XATTR_ROOT_SIZE >= xi->value_len)) + OCFS2_XATTR_ROOT_SIZE >= xi->xi_value_len)) goto out; } @@ -2639,7 +3239,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, meta_add += extra_meta; mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " - "credits = %d\n", xi->name, meta_add, clusters_add, *credits); + "credits = %d\n", xi->xi_name, meta_add, clusters_add, *credits); if (meta_add) { ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, @@ -2679,7 +3279,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, { int ret = 0, credits, old_found; - if (!xi->value) { + if (!xi->xi_value) { /* Remove existing extended attribute */ if (!xis->not_found) ret = ocfs2_xattr_ibody_set(inode, xi, xis, ctxt); @@ -2693,8 +3293,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, * If succeed and that extended attribute existing in * external block, then we will remove it. */ - xi->value = NULL; - xi->value_len = 0; + xi->xi_value = NULL; + xi->xi_value_len = 0; old_found = xis->not_found; xis->not_found = -ENODATA; @@ -2722,8 +3322,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, } else if (ret == -ENOSPC) { if (di->i_xattr_loc && !xbs->xattr_bh) { ret = ocfs2_xattr_block_find(inode, - xi->name_index, - xi->name, xbs); + xi->xi_name_index, + xi->xi_name, xbs); if (ret) goto out; @@ -2762,8 +3362,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, * If succeed and that extended attribute * existing in inode, we will remove it. 
*/ - xi->value = NULL; - xi->value_len = 0; + xi->xi_value = NULL; + xi->xi_value_len = 0; xbs->not_found = -ENODATA; ret = ocfs2_calc_xattr_set_need(inode, di, @@ -2829,10 +3429,11 @@ int ocfs2_xattr_set_handle(handle_t *handle, int ret; struct ocfs2_xattr_info xi = { - .name_index = name_index, - .name = name, - .value = value, - .value_len = value_len, + .xi_name_index = name_index, + .xi_name = name, + .xi_name_len = strlen(name), + .xi_value = value, + .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { @@ -2912,10 +3513,11 @@ int ocfs2_xattr_set(struct inode *inode, struct ocfs2_refcount_tree *ref_tree = NULL; struct ocfs2_xattr_info xi = { - .name_index = name_index, - .name = name, - .value = value, - .value_len = value_len, + .xi_name_index = name_index, + .xi_name = name, + .xi_name_len = strlen(name), + .xi_value = value, + .xi_value_len = value_len, }; struct ocfs2_xattr_search xis = { @@ -3759,7 +4361,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket) { int ret, i; - size_t end, offset, len, value_len; + size_t end, offset, len; struct ocfs2_xattr_header *xh; char *entries, *buf, *bucket_buf = NULL; u64 blkno = bucket_blkno(bucket); @@ -3813,12 +4415,7 @@ static int ocfs2_defrag_xattr_bucket(struct inode *inode, end = OCFS2_XATTR_BUCKET_SIZE; for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) { offset = le16_to_cpu(xe->xe_name_offset); - if (ocfs2_xattr_is_local(xe)) - value_len = OCFS2_XATTR_SIZE( - le64_to_cpu(xe->xe_value_size)); - else - value_len = OCFS2_XATTR_ROOT_SIZE; - len = OCFS2_XATTR_SIZE(xe->xe_name_len) + value_len; + len = namevalue_size_xe(xe); /* * We must make sure that the name/value pair @@ -4007,7 +4604,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, int new_bucket_head) { int ret, i; - int count, start, len, name_value_len = 0, xe_len, name_offset = 0; + int count, start, len, name_value_len = 0, name_offset = 0; struct ocfs2_xattr_bucket *s_bucket = NULL, *t_bucket = NULL; struct ocfs2_xattr_header *xh; struct ocfs2_xattr_entry *xe; @@ -4098,13 +4695,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, name_value_len = 0; for (i = 0; i < start; i++) { xe = &xh->xh_entries[i]; - xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); - if (ocfs2_xattr_is_local(xe)) - xe_len += - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - xe_len += OCFS2_XATTR_ROOT_SIZE; - name_value_len += xe_len; + name_value_len += namevalue_size_xe(xe); if (le16_to_cpu(xe->xe_name_offset) < name_offset) name_offset = le16_to_cpu(xe->xe_name_offset); } @@ -4134,12 +4725,6 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode, xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { xe = &xh->xh_entries[i]; - xe_len = OCFS2_XATTR_SIZE(xe->xe_name_len); - if (ocfs2_xattr_is_local(xe)) - xe_len += - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - xe_len += OCFS2_XATTR_ROOT_SIZE; if (le16_to_cpu(xe->xe_name_offset) < le16_to_cpu(xh->xh_free_start)) xh->xh_free_start = xe->xe_name_offset; @@ -4751,195 +5336,6 @@ static inline char *ocfs2_xattr_bucket_get_val(struct inode *inode, } /* - * Handle the normal xattr set, including replace, delete and new. - * - * Note: "local" indicates the real data's locality. So we can't - * just its bucket locality by its length. 
- */ -static void ocfs2_xattr_set_entry_normal(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - u32 name_hash, - int local) -{ - struct ocfs2_xattr_entry *last, *xe; - int name_len = strlen(xi->name); - struct ocfs2_xattr_header *xh = xs->header; - u16 count = le16_to_cpu(xh->xh_count), start; - size_t blocksize = inode->i_sb->s_blocksize; - char *val; - size_t offs, size, new_size; - - last = &xh->xh_entries[count]; - if (!xs->not_found) { - xe = xs->here; - offs = le16_to_cpu(xe->xe_name_offset); - if (ocfs2_xattr_is_local(xe)) - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); - - /* - * If the new value will be stored outside, xi->value has been - * initalized as an empty ocfs2_xattr_value_root, and the same - * goes with xi->value_len, so we can set new_size safely here. - * See ocfs2_xattr_set_in_bucket. - */ - new_size = OCFS2_XATTR_SIZE(name_len) + - OCFS2_XATTR_SIZE(xi->value_len); - - le16_add_cpu(&xh->xh_name_value_len, -size); - if (xi->value) { - if (new_size > size) - goto set_new_name_value; - - /* Now replace the old value with new one. */ - if (local) - xe->xe_value_size = cpu_to_le64(xi->value_len); - else - xe->xe_value_size = 0; - - val = ocfs2_xattr_bucket_get_val(inode, - xs->bucket, offs); - memset(val + OCFS2_XATTR_SIZE(name_len), 0, - size - OCFS2_XATTR_SIZE(name_len)); - if (OCFS2_XATTR_SIZE(xi->value_len) > 0) - memcpy(val + OCFS2_XATTR_SIZE(name_len), - xi->value, xi->value_len); - - le16_add_cpu(&xh->xh_name_value_len, new_size); - ocfs2_xattr_set_local(xe, local); - return; - } else { - /* - * Remove the old entry if there is more than one. - * We don't remove the last entry so that we can - * use it to indicate the hash value of the empty - * bucket. - */ - last -= 1; - le16_add_cpu(&xh->xh_count, -1); - if (xh->xh_count) { - memmove(xe, xe + 1, - (void *)last - (void *)xe); - memset(last, 0, - sizeof(struct ocfs2_xattr_entry)); - } else - xh->xh_free_start = - cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE); - - return; - } - } else { - /* find a new entry for insert. */ - int low = 0, high = count - 1, tmp; - struct ocfs2_xattr_entry *tmp_xe; - - while (low <= high && count) { - tmp = (low + high) / 2; - tmp_xe = &xh->xh_entries[tmp]; - - if (name_hash > le32_to_cpu(tmp_xe->xe_name_hash)) - low = tmp + 1; - else if (name_hash < - le32_to_cpu(tmp_xe->xe_name_hash)) - high = tmp - 1; - else { - low = tmp; - break; - } - } - - xe = &xh->xh_entries[low]; - if (low != count) - memmove(xe + 1, xe, (void *)last - (void *)xe); - - le16_add_cpu(&xh->xh_count, 1); - memset(xe, 0, sizeof(struct ocfs2_xattr_entry)); - xe->xe_name_hash = cpu_to_le32(name_hash); - xe->xe_name_len = name_len; - ocfs2_xattr_set_type(xe, xi->name_index); - } - -set_new_name_value: - /* Insert the new name+value. */ - size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_SIZE(xi->value_len); - - /* - * We must make sure that the name/value pair - * exists in the same block. 
- */ - offs = le16_to_cpu(xh->xh_free_start); - start = offs - size; - - if (start >> inode->i_sb->s_blocksize_bits != - (offs - 1) >> inode->i_sb->s_blocksize_bits) { - offs = offs - offs % blocksize; - xh->xh_free_start = cpu_to_le16(offs); - } - - val = ocfs2_xattr_bucket_get_val(inode, xs->bucket, offs - size); - xe->xe_name_offset = cpu_to_le16(offs - size); - - memset(val, 0, size); - memcpy(val, xi->name, name_len); - memcpy(val + OCFS2_XATTR_SIZE(name_len), xi->value, xi->value_len); - - xe->xe_value_size = cpu_to_le64(xi->value_len); - ocfs2_xattr_set_local(xe, local); - xs->here = xe; - le16_add_cpu(&xh->xh_free_start, -size); - le16_add_cpu(&xh->xh_name_value_len, size); - - return; -} - -/* - * Set the xattr entry in the specified bucket. - * The bucket is indicated by xs->bucket and it should have the enough - * space for the xattr insertion. - */ -static int ocfs2_xattr_set_entry_in_bucket(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - u32 name_hash, - int local) -{ - int ret; - u64 blkno; - - mlog(0, "Set xattr entry len = %lu index = %d in bucket %llu\n", - (unsigned long)xi->value_len, xi->name_index, - (unsigned long long)bucket_blkno(xs->bucket)); - - if (!xs->bucket->bu_bhs[1]) { - blkno = bucket_blkno(xs->bucket); - ocfs2_xattr_bucket_relse(xs->bucket); - ret = ocfs2_read_xattr_bucket(xs->bucket, blkno); - if (ret) { - mlog_errno(ret); - goto out; - } - } - - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out; - } - - ocfs2_xattr_set_entry_normal(inode, xi, xs, name_hash, local); - ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); - -out: - return ret; -} - -/* * Truncate the specified xe_off entry in xattr bucket. * bucket is indicated by header_bh and len is the new length. * Both the ocfs2_xattr_value_root and the entry will be updated here. 
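The removed ocfs2_xattr_set_entry_normal() above packed each name+value pair by hand into the bucket's downward-growing free space, taking care that a pair never straddles a filesystem-block boundary inside the bucket (OCFS2_XATTR_BUCKET_SIZE bytes); that placement now lives behind the ocfs2_xa_set() path introduced later in this patch. Below is a minimal standalone sketch of the same rule; the XA_* macros, values and helper names are illustrative stand-ins, not the ocfs2 definitions.

#include <stddef.h>
#include <stdint.h>

/*
 * Illustrative stand-ins; the real macros live in the ocfs2 headers and
 * the exact values here are assumptions for this sketch only.
 */
#define XA_ALIGN(sz)	(((sz) + 3) & ~(size_t)3)	/* round up to 4 bytes */
#define XA_INLINE_MAX	80				/* assumed inline value limit */
#define XA_ROOT_SIZE	64				/* assumed size of a value root */

/*
 * Space one name+value pair consumes in the bucket: an aligned name plus
 * either the aligned inline value or a fixed-size value root when the
 * value is stored outside (mirrors the size computation in the removed
 * code above).
 */
static size_t pair_size(size_t name_len, size_t value_len)
{
	if (value_len > XA_INLINE_MAX)
		return XA_ALIGN(name_len) + XA_ROOT_SIZE;
	return XA_ALIGN(name_len) + XA_ALIGN(value_len);
}

/*
 * Pick the offset for a new pair of 'size' bytes.  free_start is the
 * current bottom of the name+value area (xh_free_start); if placing the
 * pair there would cross a filesystem-block boundary inside the bucket,
 * free space is first pulled back to the start of that block, exactly as
 * the "offs - offs % blocksize" adjustment above did.
 */
static size_t place_pair(size_t free_start, size_t size, size_t blocksize)
{
	size_t start = free_start - size;

	if (start / blocksize != (free_start - 1) / blocksize)
		free_start -= free_start % blocksize;

	return free_start - size;	/* becomes the entry's xe_name_offset */
}

After placement the removed code also grew xh_name_value_len by the same size and lowered xh_free_start accordingly; the sketch leaves those header updates out.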
@@ -5009,66 +5405,6 @@ out: return ret; } -static int ocfs2_xattr_bucket_value_truncate_xs(struct inode *inode, - struct ocfs2_xattr_search *xs, - int len, - struct ocfs2_xattr_set_ctxt *ctxt) -{ - int ret, offset; - struct ocfs2_xattr_entry *xe = xs->here; - struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)xs->base; - - BUG_ON(!xs->bucket->bu_bhs[0] || !xe || ocfs2_xattr_is_local(xe)); - - offset = xe - xh->xh_entries; - ret = ocfs2_xattr_bucket_value_truncate(inode, xs->bucket, - offset, len, ctxt); - if (ret) - mlog_errno(ret); - - return ret; -} - -static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_search *xs, - char *val, - int value_len) -{ - int ret, offset, block_off; - struct ocfs2_xattr_value_root *xv; - struct ocfs2_xattr_entry *xe = xs->here; - struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); - void *base; - struct ocfs2_xattr_value_buf vb = { - .vb_access = ocfs2_journal_access, - }; - - BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe)); - - ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh, - xe - xh->xh_entries, - &block_off, - &offset); - if (ret) { - mlog_errno(ret); - goto out; - } - - base = bucket_block(xs->bucket, block_off); - xv = (struct ocfs2_xattr_value_root *)(base + offset + - OCFS2_XATTR_SIZE(xe->xe_name_len)); - - vb.vb_xv = xv; - vb.vb_bh = xs->bucket->bu_bhs[block_off]; - ret = __ocfs2_xattr_set_value_outside(inode, handle, - &vb, val, value_len); - if (ret) - mlog_errno(ret); -out: - return ret; -} - static int ocfs2_rm_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, u64 blkno, @@ -5167,128 +5503,6 @@ out: return ret; } -static void ocfs2_xattr_bucket_remove_xs(struct inode *inode, - handle_t *handle, - struct ocfs2_xattr_search *xs) -{ - struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); - struct ocfs2_xattr_entry *last = &xh->xh_entries[ - le16_to_cpu(xh->xh_count) - 1]; - int ret = 0; - - ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret) { - mlog_errno(ret); - return; - } - - /* Remove the old entry. */ - memmove(xs->here, xs->here + 1, - (void *)last - (void *)xs->here); - memset(last, 0, sizeof(struct ocfs2_xattr_entry)); - le16_add_cpu(&xh->xh_count, -1); - - ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); -} - -/* - * Set the xattr name/value in the bucket specified in xs. - * - * As the new value in xi may be stored in the bucket or in an outside cluster, - * we divide the whole process into 3 steps: - * 1. insert name/value in the bucket(ocfs2_xattr_set_entry_in_bucket) - * 2. truncate of the outside cluster(ocfs2_xattr_bucket_value_truncate_xs) - * 3. Set the value to the outside cluster(ocfs2_xattr_bucket_set_value_outside) - * 4. If the clusters for the new outside value can't be allocated, we need - * to free the xattr we allocated in set. - */ -static int ocfs2_xattr_set_in_bucket(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt) -{ - int ret, local = 1; - size_t value_len; - char *val = (char *)xi->value; - struct ocfs2_xattr_entry *xe = xs->here; - u32 name_hash = ocfs2_xattr_name_hash(inode, xi->name, - strlen(xi->name)); - - if (!xs->not_found && !ocfs2_xattr_is_local(xe)) { - /* - * We need to truncate the xattr storage first. - * - * If both the old and new value are stored to - * outside block, we only need to truncate - * the storage and then set the value outside. 
- * - * If the new value should be stored within block, - * we should free all the outside block first and - * the modification to the xattr block will be done - * by following steps. - */ - if (xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_len = xi->value_len; - else - value_len = 0; - - ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len, - ctxt); - if (ret) - goto out; - - if (value_len) - goto set_value_outside; - } - - value_len = xi->value_len; - /* So we have to handle the inside block change now. */ - if (value_len > OCFS2_XATTR_INLINE_SIZE) { - /* - * If the new value will be stored outside of block, - * initalize a new empty value root and insert it first. - */ - local = 0; - xi->value = &def_xv; - xi->value_len = OCFS2_XATTR_ROOT_SIZE; - } - - ret = ocfs2_xattr_set_entry_in_bucket(inode, ctxt->handle, xi, xs, - name_hash, local); - if (ret) { - mlog_errno(ret); - goto out; - } - - if (value_len <= OCFS2_XATTR_INLINE_SIZE) - goto out; - - /* allocate the space now for the outside block storage. */ - ret = ocfs2_xattr_bucket_value_truncate_xs(inode, xs, - value_len, ctxt); - if (ret) { - mlog_errno(ret); - - if (xs->not_found) { - /* - * We can't allocate enough clusters for outside - * storage and we have allocated xattr already, - * so need to remove it. - */ - ocfs2_xattr_bucket_remove_xs(inode, ctxt->handle, xs); - } - goto out; - } - -set_value_outside: - ret = ocfs2_xattr_bucket_set_value_outside(inode, ctxt->handle, - xs, val, value_len); -out: - return ret; -} - /* * check whether the xattr bucket is filled up with the same hash value. * If we want to insert the xattr with the same hash, return -ENOSPC. @@ -5317,156 +5531,116 @@ static int ocfs2_check_xattr_bucket_collision(struct inode *inode, return 0; } -static int ocfs2_xattr_set_entry_index_block(struct inode *inode, - struct ocfs2_xattr_info *xi, - struct ocfs2_xattr_search *xs, - struct ocfs2_xattr_set_ctxt *ctxt) +/* + * Try to set the entry in the current bucket. If we fail, the caller + * will handle getting us another bucket. 
+ */ +static int ocfs2_xattr_set_entry_bucket(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) { - struct ocfs2_xattr_header *xh; - struct ocfs2_xattr_entry *xe; - u16 count, header_size, xh_free_start; - int free, max_free, need, old; - size_t value_size = 0, name_len = strlen(xi->name); - size_t blocksize = inode->i_sb->s_blocksize; - int ret, allocation = 0; - - mlog_entry("Set xattr %s in xattr index block\n", xi->name); - -try_again: - xh = xs->header; - count = le16_to_cpu(xh->xh_count); - xh_free_start = le16_to_cpu(xh->xh_free_start); - header_size = sizeof(struct ocfs2_xattr_header) + - count * sizeof(struct ocfs2_xattr_entry); - max_free = OCFS2_XATTR_BUCKET_SIZE - header_size - - le16_to_cpu(xh->xh_name_value_len) - OCFS2_XATTR_HEADER_GAP; - - mlog_bug_on_msg(header_size > blocksize, "bucket %llu has header size " - "of %u which exceed block size\n", - (unsigned long long)bucket_blkno(xs->bucket), - header_size); + int ret; + struct ocfs2_xa_loc loc; - if (xi->value && xi->value_len > OCFS2_XATTR_INLINE_SIZE) - value_size = OCFS2_XATTR_ROOT_SIZE; - else if (xi->value) - value_size = OCFS2_XATTR_SIZE(xi->value_len); + mlog_entry("Set xattr %s in xattr bucket\n", xi->xi_name); - if (xs->not_found) - need = sizeof(struct ocfs2_xattr_entry) + - OCFS2_XATTR_SIZE(name_len) + value_size; - else { - need = value_size + OCFS2_XATTR_SIZE(name_len); + ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, + xs->not_found ? NULL : xs->here); + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) { + xs->here = loc.xl_entry; + goto out; + } + if (ret != -ENOSPC) { + mlog_errno(ret); + goto out; + } - /* - * We only replace the old value if the new length is smaller - * than the old one. Otherwise we will allocate new space in the - * bucket to store it. - */ - xe = xs->here; - if (ocfs2_xattr_is_local(xe)) - old = OCFS2_XATTR_SIZE(le64_to_cpu(xe->xe_value_size)); - else - old = OCFS2_XATTR_SIZE(OCFS2_XATTR_ROOT_SIZE); + /* Ok, we need space. Let's try defragmenting the bucket. */ + ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, + xs->bucket); + if (ret) { + mlog_errno(ret); + goto out; + } - if (old >= value_size) - need = 0; + ret = ocfs2_xa_set(&loc, xi, ctxt); + if (!ret) { + xs->here = loc.xl_entry; + goto out; } + if (ret != -ENOSPC) + mlog_errno(ret); - free = xh_free_start - header_size - OCFS2_XATTR_HEADER_GAP; - /* - * We need to make sure the new name/value pair - * can exist in the same block. - */ - if (xh_free_start % blocksize < need) - free -= xh_free_start % blocksize; - - mlog(0, "xs->not_found = %d, in xattr bucket %llu: free = %d, " - "need = %d, max_free = %d, xh_free_start = %u, xh_name_value_len =" - " %u\n", xs->not_found, - (unsigned long long)bucket_blkno(xs->bucket), - free, need, max_free, le16_to_cpu(xh->xh_free_start), - le16_to_cpu(xh->xh_name_value_len)); - - if (free < need || - (xs->not_found && - count == ocfs2_xattr_max_xe_in_bucket(inode->i_sb))) { - if (need <= max_free && - count < ocfs2_xattr_max_xe_in_bucket(inode->i_sb)) { - /* - * We can create the space by defragment. Since only the - * name/value will be moved, the xe shouldn't be changed - * in xs. 
- */ - ret = ocfs2_defrag_xattr_bucket(inode, ctxt->handle, - xs->bucket); - if (ret) { - mlog_errno(ret); - goto out; - } - xh_free_start = le16_to_cpu(xh->xh_free_start); - free = xh_free_start - header_size - - OCFS2_XATTR_HEADER_GAP; - if (xh_free_start % blocksize < need) - free -= xh_free_start % blocksize; +out: + mlog_exit(ret); + return ret; +} - if (free >= need) - goto xattr_set; +static int ocfs2_xattr_set_entry_index_block(struct inode *inode, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xs, + struct ocfs2_xattr_set_ctxt *ctxt) +{ + int ret; - mlog(0, "Can't get enough space for xattr insert by " - "defragment. Need %u bytes, but we have %d, so " - "allocate new bucket for it.\n", need, free); - } + mlog_entry("Set xattr %s in xattr index block\n", xi->xi_name); - /* - * We have to add new buckets or clusters and one - * allocation should leave us enough space for insert. - */ - BUG_ON(allocation); + ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); + if (!ret) + goto out; + if (ret != -ENOSPC) { + mlog_errno(ret); + goto out; + } - /* - * We do not allow for overlapping ranges between buckets. And - * the maximum number of collisions we will allow for then is - * one bucket's worth, so check it here whether we need to - * add a new bucket for the insert. - */ - ret = ocfs2_check_xattr_bucket_collision(inode, - xs->bucket, - xi->name); - if (ret) { - mlog_errno(ret); - goto out; - } + /* Ack, need more space. Let's try to get another bucket! */ - ret = ocfs2_add_new_xattr_bucket(inode, - xs->xattr_bh, + /* + * We do not allow for overlapping ranges between buckets. And + * the maximum number of collisions we will allow for then is + * one bucket's worth, so check it here whether we need to + * add a new bucket for the insert. + */ + ret = ocfs2_check_xattr_bucket_collision(inode, xs->bucket, - ctxt); - if (ret) { - mlog_errno(ret); - goto out; - } + xi->xi_name); + if (ret) { + mlog_errno(ret); + goto out; + } - /* - * ocfs2_add_new_xattr_bucket() will have updated - * xs->bucket if it moved, but it will not have updated - * any of the other search fields. Thus, we drop it and - * re-search. Everything should be cached, so it'll be - * quick. - */ - ocfs2_xattr_bucket_relse(xs->bucket); - ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, - xi->name_index, - xi->name, xs); - if (ret && ret != -ENODATA) - goto out; - xs->not_found = ret; - allocation = 1; - goto try_again; + ret = ocfs2_add_new_xattr_bucket(inode, + xs->xattr_bh, + xs->bucket, + ctxt); + if (ret) { + mlog_errno(ret); + goto out; } -xattr_set: - ret = ocfs2_xattr_set_in_bucket(inode, xi, xs, ctxt); + /* + * ocfs2_add_new_xattr_bucket() will have updated + * xs->bucket if it moved, but it will not have updated + * any of the other search fields. Thus, we drop it and + * re-search. Everything should be cached, so it'll be + * quick. + */ + ocfs2_xattr_bucket_relse(xs->bucket); + ret = ocfs2_xattr_index_block_find(inode, xs->xattr_bh, + xi->xi_name_index, + xi->xi_name, xs); + if (ret && ret != -ENODATA) + goto out; + xs->not_found = ret; + + /* Ok, we have a new bucket, let's try again */ + ret = ocfs2_xattr_set_entry_bucket(inode, xi, xs, ctxt); + if (ret && (ret != -ENOSPC)) + mlog_errno(ret); + out: mlog_exit(ret); return ret; @@ -5678,7 +5852,7 @@ static int ocfs2_prepare_refcount_xattr(struct inode *inode, * refcount tree, and make the original extent become 3. So we will need * 2 * cluster more extent recs at most. 
  */
-	if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) {
+	if (!xi->xi_value || xi->xi_value_len <= OCFS2_XATTR_INLINE_SIZE) {
 		ret = ocfs2_refcounted_xattr_delete_need(inode,
 							 &(*ref_tree)->rf_ci,
@@ -6354,9 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
 				     int indexed)
 {
 	int ret;
-	handle_t *handle;
 	struct ocfs2_alloc_context *meta_ac;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_xattr_set_ctxt ctxt = {
+		.meta_ac = meta_ac,
+	};
 
 	ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
 	if (ret < 0) {
@@ -6364,21 +6540,21 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
 		return ret;
 	}
 
-	handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
+	ctxt.handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS);
+	if (IS_ERR(ctxt.handle)) {
+		ret = PTR_ERR(ctxt.handle);
 		mlog_errno(ret);
 		goto out;
 	}
 
 	mlog(0, "create new xattr block for inode %llu, index = %d\n",
 	     (unsigned long long)fe_bh->b_blocknr, indexed);
-	ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
-				       meta_ac, ret_bh, indexed);
+	ret = ocfs2_create_xattr_block(inode, fe_bh, &ctxt, indexed,
+				       ret_bh);
 	if (ret)
 		mlog_errno(ret);
 
-	ocfs2_commit_trans(osb, handle);
+	ocfs2_commit_trans(osb, ctxt.handle);
 out:
 	ocfs2_free_alloc_context(meta_ac);
 	return ret;
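The rewritten bucket-set path above replaces the old try_again loop with two bounded retries: ocfs2_xattr_set_entry_bucket() calls ocfs2_xa_set() and, on -ENOSPC, defragments the bucket and tries once more; ocfs2_xattr_set_entry_index_block() then handles a second -ENOSPC by checking for a hash collision, adding a bucket, re-running the search, and retrying. Below is a condensed sketch of that control flow; every demo_* name is a hypothetical stub, not the ocfs2 API.

#include <errno.h>

/* Hypothetical stand-ins for the real search/bucket state and helpers. */
struct demo_bucket { int dummy; };
struct demo_request { int dummy; };

static int demo_xa_set(struct demo_bucket *b, struct demo_request *r)
{
	(void)b; (void)r;
	return -ENOSPC;			/* stub: pretend the bucket is full */
}

static int demo_defrag(struct demo_bucket *b)	/* like defragmenting the bucket */
{
	(void)b;
	return 0;
}

static int demo_hash_collision(struct demo_bucket *b, struct demo_request *r)
{
	(void)b; (void)r;
	return 0;			/* stub: no overflow of same-hash entries */
}

static int demo_add_bucket_and_research(struct demo_bucket **b, struct demo_request *r)
{
	(void)b; (void)r;
	return 0;			/* stub: *b would track the re-run search */
}

/* First level: try the current bucket; defragment once on -ENOSPC. */
static int demo_set_entry_bucket(struct demo_bucket *b, struct demo_request *r)
{
	int ret = demo_xa_set(b, r);

	if (ret != -ENOSPC)
		return ret;		/* success or a hard error */

	ret = demo_defrag(b);
	if (ret)
		return ret;

	return demo_xa_set(b, r);	/* may still be -ENOSPC */
}

/* Second level: on -ENOSPC, grow the index by one bucket and retry once. */
static int demo_set_entry_index_block(struct demo_bucket *b, struct demo_request *r)
{
	int ret = demo_set_entry_bucket(b, r);

	if (ret != -ENOSPC)
		return ret;

	ret = demo_hash_collision(b, r);	/* a bucket full of one hash cannot grow */
	if (ret)
		return ret;

	ret = demo_add_bucket_and_research(&b, r);
	if (ret)
		return ret;

	return demo_set_entry_bucket(b, r);
}

The real code additionally keeps xs->here pointing at loc.xl_entry after a successful set and drops/re-reads the bucket around the re-search; the sketch leaves that bookkeeping out.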
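Further up, ocfs2_calc_xattr_set_need() short-circuits (the "goto out") when the existing slot can absorb the replacement value, so no allocation has to be reserved: a local (inline) value whose current size is at least xi_value_len, or an external value being replaced by one no larger than the value root already in place. A small predicate mirroring that condition follows; the demo_* type is illustrative, not the on-disk ocfs2_xattr_entry.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative view of the existing entry; not the on-disk ocfs2 struct. */
struct demo_old_value {
	bool	is_local;	/* value stored inline next to the name */
	size_t	value_size;	/* current inline value size */
	size_t	root_size;	/* size of the value root kept for external values */
};

/*
 * Mirrors the "goto out" test in ocfs2_calc_xattr_set_need(): replacing
 * the value needs no new space when it fits where the old one already is.
 */
static bool demo_can_reuse_in_place(const struct demo_old_value *old,
				    size_t new_value_len)
{
	if (old->is_local)
		return old->value_size >= new_value_len;

	return old->root_size >= new_value_len;
}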