Diffstat (limited to 'fs'): 286 files changed, 13782 insertions(+), 4810 deletions(-)
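The bulk of this series adds optional, read-only local caching to the 9p client via FS-Cache (fs/9p/cache.c, fs/9p/cache.h, plus hooks throughout fs/9p). With CONFIG_9P_FSCACHE enabled, caching is chosen per mount through the new cache=fscache option parsed in v9fs_parse_options(), and an optional cachetag= names the cache session in place of the jiffies-based tag generated by v9fs_random_cachetag(). A hypothetical mount invocation — the transport, server address, and mount point are illustrative placeholders, not taken from the patch:

    mount -t 9p -o trans=tcp,cache=fscache,cachetag=mycache 192.168.0.2 /mnt/9p

The tags of live cache sessions are then readable from the caches attribute the series registers under fs_kobj, i.e. /sys/fs/9p/caches.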
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig index 74e0723..7952337 100644 --- a/fs/9p/Kconfig +++ b/fs/9p/Kconfig @@ -8,3 +8,12 @@ config 9P_FS See <http://v9fs.sf.net> for more information. If unsure, say N. + +config 9P_FSCACHE + bool "Enable 9P client caching support (EXPERIMENTAL)" + depends on EXPERIMENTAL + depends on 9P_FS=m && FSCACHE || 9P_FS=y && FSCACHE=y + help + Choose Y here to enable persistent, read-only local + caching support for 9p clients using FS-Cache + diff --git a/fs/9p/Makefile b/fs/9p/Makefile index bc7f0d1..1a940ec 100644 --- a/fs/9p/Makefile +++ b/fs/9p/Makefile @@ -8,5 +8,6 @@ obj-$(CONFIG_9P_FS) := 9p.o vfs_dir.o \ vfs_dentry.o \ v9fs.o \ - fid.o \ + fid.o +9p-$(CONFIG_9P_FSCACHE) += cache.o diff --git a/fs/9p/cache.c b/fs/9p/cache.c new file mode 100644 index 0000000..51c94e2 --- /dev/null +++ b/fs/9p/cache.c @@ -0,0 +1,474 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include <linux/jiffies.h> +#include <linux/file.h> +#include <linux/stat.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <net/9p/9p.h> + +#include "v9fs.h" +#include "cache.h" + +#define CACHETAG_LEN 11 + +struct kmem_cache *vcookie_cache; + +struct fscache_netfs v9fs_cache_netfs = { + .name = "9p", + .version = 0, +}; + +static void init_once(void *foo) +{ + struct v9fs_cookie *vcookie = (struct v9fs_cookie *) foo; + vcookie->fscache = NULL; + vcookie->qid = NULL; + inode_init_once(&vcookie->inode); +} + +/** + * v9fs_init_vcookiecache - initialize a cache for vcookies to maintain + * vcookie to inode mapping + * + * Returns 0 on success. + */ + +static int v9fs_init_vcookiecache(void) +{ + vcookie_cache = kmem_cache_create("vcookie_cache", + sizeof(struct v9fs_cookie), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + init_once); + if (!vcookie_cache) + return -ENOMEM; + + return 0; +} + +/** + * v9fs_destroy_vcookiecache - destroy the cache of vcookies + * + */ + +static void v9fs_destroy_vcookiecache(void) +{ + kmem_cache_destroy(vcookie_cache); +} + +int __v9fs_cache_register(void) +{ + int ret; + ret = v9fs_init_vcookiecache(); + if (ret < 0) + return ret; + + return fscache_register_netfs(&v9fs_cache_netfs); +} + +void __v9fs_cache_unregister(void) +{ + v9fs_destroy_vcookiecache(); + fscache_unregister_netfs(&v9fs_cache_netfs); +} + +/** + * v9fs_random_cachetag - Generate a random tag to be associated + * with a new cache session. + * + * The value of jiffies is used for a fairly randomly cache tag. 
+ */ + +static +int v9fs_random_cachetag(struct v9fs_session_info *v9ses) +{ + v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL); + if (!v9ses->cachetag) + return -ENOMEM; + + return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies); +} + +static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + struct v9fs_session_info *v9ses; + uint16_t klen = 0; + + v9ses = (struct v9fs_session_info *)cookie_netfs_data; + P9_DPRINTK(P9_DEBUG_FSC, "session %p buf %p size %u", v9ses, + buffer, bufmax); + + if (v9ses->cachetag) + klen = strlen(v9ses->cachetag); + + if (klen > bufmax) + return 0; + + memcpy(buffer, v9ses->cachetag, klen); + P9_DPRINTK(P9_DEBUG_FSC, "cache session tag %s", v9ses->cachetag); + return klen; +} + +const struct fscache_cookie_def v9fs_cache_session_index_def = { + .name = "9P.session", + .type = FSCACHE_COOKIE_TYPE_INDEX, + .get_key = v9fs_cache_session_get_key, +}; + +void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses) +{ + /* If no cache session tag was specified, we generate a random one. */ + if (!v9ses->cachetag) + v9fs_random_cachetag(v9ses); + + v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index, + &v9fs_cache_session_index_def, + v9ses); + P9_DPRINTK(P9_DEBUG_FSC, "session %p get cookie %p", v9ses, + v9ses->fscache); +} + +void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses) +{ + P9_DPRINTK(P9_DEBUG_FSC, "session %p put cookie %p", v9ses, + v9ses->fscache); + fscache_relinquish_cookie(v9ses->fscache, 0); + v9ses->fscache = NULL; +} + + +static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + const struct v9fs_cookie *vcookie = cookie_netfs_data; + memcpy(buffer, &vcookie->qid->path, sizeof(vcookie->qid->path)); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &vcookie->inode, + vcookie->qid->path); + return sizeof(vcookie->qid->path); +} + +static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data, + uint64_t *size) +{ + const struct v9fs_cookie *vcookie = cookie_netfs_data; + *size = i_size_read(&vcookie->inode); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p get attr %llu", &vcookie->inode, + *size); +} + +static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data, + void *buffer, uint16_t buflen) +{ + const struct v9fs_cookie *vcookie = cookie_netfs_data; + memcpy(buffer, &vcookie->qid->version, sizeof(vcookie->qid->version)); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &vcookie->inode, + vcookie->qid->version); + return sizeof(vcookie->qid->version); +} + +static enum +fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data, + const void *buffer, + uint16_t buflen) +{ + const struct v9fs_cookie *vcookie = cookie_netfs_data; + + if (buflen != sizeof(vcookie->qid->version)) + return FSCACHE_CHECKAUX_OBSOLETE; + + if (memcmp(buffer, &vcookie->qid->version, + sizeof(vcookie->qid->version))) + return FSCACHE_CHECKAUX_OBSOLETE; + + return FSCACHE_CHECKAUX_OKAY; +} + +static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data) +{ + struct v9fs_cookie *vcookie = cookie_netfs_data; + struct pagevec pvec; + pgoff_t first; + int loop, nr_pages; + + pagevec_init(&pvec, 0); + first = 0; + + for (;;) { + nr_pages = pagevec_lookup(&pvec, vcookie->inode.i_mapping, + first, + PAGEVEC_SIZE - pagevec_count(&pvec)); + if (!nr_pages) + break; + + for (loop = 0; loop < nr_pages; loop++) + ClearPageFsCache(pvec.pages[loop]); + + first = pvec.pages[nr_pages - 1]->index + 1; + + 
pvec.nr = nr_pages; + pagevec_release(&pvec); + cond_resched(); + } +} + +const struct fscache_cookie_def v9fs_cache_inode_index_def = { + .name = "9p.inode", + .type = FSCACHE_COOKIE_TYPE_DATAFILE, + .get_key = v9fs_cache_inode_get_key, + .get_attr = v9fs_cache_inode_get_attr, + .get_aux = v9fs_cache_inode_get_aux, + .check_aux = v9fs_cache_inode_check_aux, + .now_uncached = v9fs_cache_inode_now_uncached, +}; + +void v9fs_cache_inode_get_cookie(struct inode *inode) +{ + struct v9fs_cookie *vcookie; + struct v9fs_session_info *v9ses; + + if (!S_ISREG(inode->i_mode)) + return; + + vcookie = v9fs_inode2cookie(inode); + if (vcookie->fscache) + return; + + v9ses = v9fs_inode2v9ses(inode); + vcookie->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + vcookie); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p get cookie %p", inode, + vcookie->fscache); +} + +void v9fs_cache_inode_put_cookie(struct inode *inode) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + if (!vcookie->fscache) + return; + P9_DPRINTK(P9_DEBUG_FSC, "inode %p put cookie %p", inode, + vcookie->fscache); + + fscache_relinquish_cookie(vcookie->fscache, 0); + vcookie->fscache = NULL; +} + +void v9fs_cache_inode_flush_cookie(struct inode *inode) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + if (!vcookie->fscache) + return; + P9_DPRINTK(P9_DEBUG_FSC, "inode %p flush cookie %p", inode, + vcookie->fscache); + + fscache_relinquish_cookie(vcookie->fscache, 1); + vcookie->fscache = NULL; +} + +void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + struct p9_fid *fid; + + if (!vcookie->fscache) + return; + + spin_lock(&vcookie->lock); + fid = filp->private_data; + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + v9fs_cache_inode_flush_cookie(inode); + else + v9fs_cache_inode_get_cookie(inode); + + spin_unlock(&vcookie->lock); +} + +void v9fs_cache_inode_reset_cookie(struct inode *inode) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + struct v9fs_session_info *v9ses; + struct fscache_cookie *old; + + if (!vcookie->fscache) + return; + + old = vcookie->fscache; + + spin_lock(&vcookie->lock); + fscache_relinquish_cookie(vcookie->fscache, 1); + + v9ses = v9fs_inode2v9ses(inode); + vcookie->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + vcookie); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p", + inode, old, vcookie->fscache); + + spin_unlock(&vcookie->lock); +} + +int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) +{ + struct inode *inode = page->mapping->host; + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + BUG_ON(!vcookie->fscache); + + if (PageFsCache(page)) { + if (fscache_check_page_write(vcookie->fscache, page)) { + if (!(gfp & __GFP_WAIT)) + return 0; + fscache_wait_on_page_write(vcookie->fscache, page); + } + + fscache_uncache_page(vcookie->fscache, page); + ClearPageFsCache(page); + } + + return 1; +} + +void __v9fs_fscache_invalidate_page(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + BUG_ON(!vcookie->fscache); + + if (PageFsCache(page)) { + fscache_wait_on_page_write(vcookie->fscache, page); + BUG_ON(!PageLocked(page)); + fscache_uncache_page(vcookie->fscache, page); + ClearPageFsCache(page); + } +} + +static void v9fs_vfs_readpage_complete(struct page *page, void *data, + int error) +{ + if (!error) + 
SetPageUptodate(page); + + unlock_page(page); +} + +/** + * __v9fs_readpage_from_fscache - read a page from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. + */ + +int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page); + if (!vcookie->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(vcookie->fscache, + page, + v9fs_vfs_readpage_complete, + NULL, + GFP_KERNEL); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + P9_DPRINTK(P9_DEBUG_FSC, "page/inode not in cache %d", ret); + return 1; + case 0: + P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted"); + return ret; + default: + P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret); + return ret; + } +} + +/** + * __v9fs_readpages_from_fscache - read multiple pages from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. + */ + +int __v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + int ret; + const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p pages %u", inode, *nr_pages); + if (!vcookie->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(vcookie->fscache, + mapping, pages, nr_pages, + v9fs_vfs_readpage_complete, + NULL, + mapping_gfp_mask(mapping)); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + P9_DPRINTK(P9_DEBUG_FSC, "pages/inodes not in cache %d", ret); + return 1; + case 0: + BUG_ON(!list_empty(pages)); + BUG_ON(*nr_pages != 0); + P9_DPRINTK(P9_DEBUG_FSC, "BIO submitted"); + return ret; + default: + P9_DPRINTK(P9_DEBUG_FSC, "ret %d", ret); + return ret; + } +} + +/** + * __v9fs_readpage_to_fscache - write a page to the cache + * + */ + +void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + + P9_DPRINTK(P9_DEBUG_FSC, "inode %p page %p", inode, page); + ret = fscache_write_page(vcookie->fscache, page, GFP_KERNEL); + P9_DPRINTK(P9_DEBUG_FSC, "ret = %d", ret); + if (ret != 0) + v9fs_uncache_page(inode, page); +} diff --git a/fs/9p/cache.h b/fs/9p/cache.h new file mode 100644 index 0000000..a94192b --- /dev/null +++ b/fs/9p/cache.h @@ -0,0 +1,176 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#ifndef _9P_CACHE_H +#ifdef CONFIG_9P_FSCACHE +#include <linux/fscache.h> +#include <linux/spinlock.h> + +extern struct kmem_cache *vcookie_cache; + +struct v9fs_cookie { + spinlock_t lock; + struct inode inode; + struct fscache_cookie *fscache; + struct p9_qid *qid; +}; + +static inline struct v9fs_cookie *v9fs_inode2cookie(const struct inode *inode) +{ + return container_of(inode, struct v9fs_cookie, inode); +} + +extern struct fscache_netfs v9fs_cache_netfs; +extern const struct fscache_cookie_def v9fs_cache_session_index_def; +extern const struct fscache_cookie_def v9fs_cache_inode_index_def; + +extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses); +extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses); + +extern void v9fs_cache_inode_get_cookie(struct inode *inode); +extern void v9fs_cache_inode_put_cookie(struct inode *inode); +extern void v9fs_cache_inode_flush_cookie(struct inode *inode); +extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp); +extern void v9fs_cache_inode_reset_cookie(struct inode *inode); + +extern int __v9fs_cache_register(void); +extern void __v9fs_cache_unregister(void); + +extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp); +extern void __v9fs_fscache_invalidate_page(struct page *page); +extern int __v9fs_readpage_from_fscache(struct inode *inode, + struct page *page); +extern int __v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages); +extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page); + + +/** + * v9fs_cache_register - Register v9fs file system with the cache + */ +static inline int v9fs_cache_register(void) +{ + return __v9fs_cache_register(); +} + +/** + * v9fs_cache_unregister - Unregister v9fs from the cache + */ +static inline void v9fs_cache_unregister(void) +{ + __v9fs_cache_unregister(); +} + +static inline int v9fs_fscache_release_page(struct page *page, + gfp_t gfp) +{ + return __v9fs_fscache_release_page(page, gfp); +} + +static inline void v9fs_fscache_invalidate_page(struct page *page) +{ + __v9fs_fscache_invalidate_page(page); +} + +static inline int v9fs_readpage_from_fscache(struct inode *inode, + struct page *page) +{ + return __v9fs_readpage_from_fscache(inode, page); +} + +static inline int v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + return __v9fs_readpages_from_fscache(inode, mapping, pages, + nr_pages); +} + +static inline void v9fs_readpage_to_fscache(struct inode *inode, + struct page *page) +{ + if (PageFsCache(page)) + __v9fs_readpage_to_fscache(inode, page); +} + +static inline void v9fs_uncache_page(struct inode *inode, struct page *page) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + fscache_uncache_page(vcookie->fscache, page); + BUG_ON(PageFsCache(page)); +} + +static inline void v9fs_vcookie_set_qid(struct inode *inode, + struct p9_qid *qid) +{ + struct v9fs_cookie *vcookie = v9fs_inode2cookie(inode); + spin_lock(&vcookie->lock); + vcookie->qid = qid; + spin_unlock(&vcookie->lock); +} + +#else /* CONFIG_9P_FSCACHE */ + +static inline int v9fs_cache_register(void) +{ + return 1; +} + +static inline void 
v9fs_cache_unregister(void) {} + +static inline int v9fs_fscache_release_page(struct page *page, + gfp_t gfp) { + return 1; +} + +static inline void v9fs_fscache_invalidate_page(struct page *page) {} + +static inline int v9fs_readpage_from_fscache(struct inode *inode, + struct page *page) +{ + return -ENOBUFS; +} + +static inline int v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + return -ENOBUFS; +} + +static inline void v9fs_readpage_to_fscache(struct inode *inode, + struct page *page) +{} + +static inline void v9fs_uncache_page(struct inode *inode, struct page *page) +{} + +static inline void v9fs_vcookie_set_qid(struct inode *inode, + struct p9_qid *qid) +{} + +#endif /* CONFIG_9P_FSCACHE */ +#endif /* _9P_CACHE_H */ diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index f7003cf..cf62b05 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -34,21 +34,25 @@ #include <net/9p/transport.h> #include "v9fs.h" #include "v9fs_vfs.h" +#include "cache.h" + +static DEFINE_SPINLOCK(v9fs_sessionlist_lock); +static LIST_HEAD(v9fs_sessionlist); /* - * Option Parsing (code inspired by NFS code) - * NOTE: each transport will parse its own options - */ + * Option Parsing (code inspired by NFS code) + * NOTE: each transport will parse its own options + */ enum { /* Options that take integer arguments */ Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid, /* String options */ - Opt_uname, Opt_remotename, Opt_trans, + Opt_uname, Opt_remotename, Opt_trans, Opt_cache, Opt_cachetag, /* Options that take no arguments */ Opt_nodevmap, /* Cache options */ - Opt_cache_loose, + Opt_cache_loose, Opt_fscache, /* Access options */ Opt_access, /* Error token */ @@ -63,8 +67,10 @@ static const match_table_t tokens = { {Opt_uname, "uname=%s"}, {Opt_remotename, "aname=%s"}, {Opt_nodevmap, "nodevmap"}, - {Opt_cache_loose, "cache=loose"}, + {Opt_cache, "cache=%s"}, {Opt_cache_loose, "loose"}, + {Opt_fscache, "fscache"}, + {Opt_cachetag, "cachetag=%s"}, {Opt_access, "access=%s"}, {Opt_err, NULL} }; @@ -89,16 +95,16 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) v9ses->afid = ~0; v9ses->debug = 0; v9ses->cache = 0; +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = NULL; +#endif if (!opts) return 0; options = kstrdup(opts, GFP_KERNEL); - if (!options) { - P9_DPRINTK(P9_DEBUG_ERROR, - "failed to allocate copy of option string\n"); - return -ENOMEM; - } + if (!options) + goto fail_option_alloc; while ((p = strsep(&options, ",")) != NULL) { int token; @@ -143,16 +149,33 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) case Opt_cache_loose: v9ses->cache = CACHE_LOOSE; break; + case Opt_fscache: + v9ses->cache = CACHE_FSCACHE; + break; + case Opt_cachetag: +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = match_strdup(&args[0]); +#endif + break; + case Opt_cache: + s = match_strdup(&args[0]); + if (!s) + goto fail_option_alloc; + + if (strcmp(s, "loose") == 0) + v9ses->cache = CACHE_LOOSE; + else if (strcmp(s, "fscache") == 0) + v9ses->cache = CACHE_FSCACHE; + else + v9ses->cache = CACHE_NONE; + kfree(s); + break; case Opt_access: s = match_strdup(&args[0]); - if (!s) { - P9_DPRINTK(P9_DEBUG_ERROR, - "failed to allocate copy" - " of option argument\n"); - ret = -ENOMEM; - break; - } + if (!s) + goto fail_option_alloc; + v9ses->flags &= ~V9FS_ACCESS_MASK; if (strcmp(s, "user") == 0) v9ses->flags |= V9FS_ACCESS_USER; @@ -173,6 +196,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) } 
kfree(options); return ret; + +fail_option_alloc: + P9_DPRINTK(P9_DEBUG_ERROR, + "failed to allocate copy of option argument\n"); + return -ENOMEM; } /** @@ -200,6 +228,10 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, return ERR_PTR(-ENOMEM); } + spin_lock(&v9fs_sessionlist_lock); + list_add(&v9ses->slist, &v9fs_sessionlist); + spin_unlock(&v9fs_sessionlist_lock); + v9ses->flags = V9FS_EXTENDED | V9FS_ACCESS_USER; strcpy(v9ses->uname, V9FS_DEFUSER); strcpy(v9ses->aname, V9FS_DEFANAME); @@ -249,6 +281,11 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, else fid->uid = ~0; +#ifdef CONFIG_9P_FSCACHE + /* register the session for caching */ + v9fs_cache_session_get_cookie(v9ses); +#endif + return fid; error: @@ -268,8 +305,18 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) v9ses->clnt = NULL; } +#ifdef CONFIG_9P_FSCACHE + if (v9ses->fscache) { + v9fs_cache_session_put_cookie(v9ses); + kfree(v9ses->cachetag); + } +#endif __putname(v9ses->uname); __putname(v9ses->aname); + + spin_lock(&v9fs_sessionlist_lock); + list_del(&v9ses->slist); + spin_unlock(&v9fs_sessionlist_lock); } /** @@ -286,25 +333,132 @@ void v9fs_session_cancel(struct v9fs_session_info *v9ses) { extern int v9fs_error_init(void); +static struct kobject *v9fs_kobj; + +#ifdef CONFIG_9P_FSCACHE /** - * v9fs_init - Initialize module + * caches_show - list caches associated with a session + * + * Returns the size of buffer written. + */ + +static ssize_t caches_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t n = 0, count = 0, limit = PAGE_SIZE; + struct v9fs_session_info *v9ses; + + spin_lock(&v9fs_sessionlist_lock); + list_for_each_entry(v9ses, &v9fs_sessionlist, slist) { + if (v9ses->cachetag) { + n = snprintf(buf, limit, "%s\n", v9ses->cachetag); + if (n < 0) { + count = n; + break; + } + + count += n; + limit -= n; + } + } + + spin_unlock(&v9fs_sessionlist_lock); + return count; +} + +static struct kobj_attribute v9fs_attr_cache = __ATTR_RO(caches); +#endif /* CONFIG_9P_FSCACHE */ + +static struct attribute *v9fs_attrs[] = { +#ifdef CONFIG_9P_FSCACHE + &v9fs_attr_cache.attr, +#endif + NULL, +}; + +static struct attribute_group v9fs_attr_group = { + .attrs = v9fs_attrs, +}; + +/** + * v9fs_sysfs_init - Initialize the v9fs sysfs interface + * + */ + +static int v9fs_sysfs_init(void) +{ + v9fs_kobj = kobject_create_and_add("9p", fs_kobj); + if (!v9fs_kobj) + return -ENOMEM; + + if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) { + kobject_put(v9fs_kobj); + return -ENOMEM; + } + + return 0; +} + +/** + * v9fs_sysfs_cleanup - Unregister the v9fs sysfs interface + * + */ + +static void v9fs_sysfs_cleanup(void) +{ + sysfs_remove_group(v9fs_kobj, &v9fs_attr_group); + kobject_put(v9fs_kobj); +} + +/** + * init_v9fs - Initialize module * */ static int __init init_v9fs(void) { + int err; printk(KERN_INFO "Installing v9fs 9p2000 file system support\n"); /* TODO: Setup list of registered trasnport modules */ - return register_filesystem(&v9fs_fs_type); + err = register_filesystem(&v9fs_fs_type); + if (err < 0) { + printk(KERN_ERR "Failed to register filesystem\n"); + return err; + } + + err = v9fs_cache_register(); + if (err < 0) { + printk(KERN_ERR "Failed to register v9fs for caching\n"); + goto out_fs_unreg; + } + + err = v9fs_sysfs_init(); + if (err < 0) { + printk(KERN_ERR "Failed to register with sysfs\n"); + goto out_sysfs_cleanup; + } + + return 0; + +out_sysfs_cleanup: + v9fs_sysfs_cleanup(); + +out_fs_unreg: + 
unregister_filesystem(&v9fs_fs_type); + + return err; } /** - * v9fs_init - shutdown module + * exit_v9fs - shutdown module * */ static void __exit exit_v9fs(void) { + v9fs_sysfs_cleanup(); + v9fs_cache_unregister(); unregister_filesystem(&v9fs_fs_type); } diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 38762bf..019f4cc 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -51,6 +51,7 @@ enum p9_session_flags { enum p9_cache_modes { CACHE_NONE, CACHE_LOOSE, + CACHE_FSCACHE, }; /** @@ -60,6 +61,8 @@ enum p9_cache_modes { * @debug: debug level * @afid: authentication handle * @cache: cache mode of type &p9_cache_modes + * @cachetag: the tag of the cache associated with this session + * @fscache: session cookie associated with FS-Cache * @options: copy of options string given by user * @uname: string user name to mount hierarchy as * @aname: mount specifier for remote hierarchy @@ -68,7 +71,7 @@ enum p9_cache_modes { * @dfltgid: default numeric groupid to mount hierarchy as * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy * @clnt: reference to 9P network client instantiated for this session - * @debugfs_dir: reference to debugfs_dir which can be used for add'l debug + * @slist: reference to list of registered 9p sessions * * This structure holds state for each session instance established during * a sys_mount() . @@ -84,6 +87,10 @@ struct v9fs_session_info { unsigned short debug; unsigned int afid; unsigned int cache; +#ifdef CONFIG_9P_FSCACHE + char *cachetag; + struct fscache_cookie *fscache; +#endif char *uname; /* user name to mount as */ char *aname; /* name of remote hierarchy being mounted */ @@ -92,11 +99,9 @@ struct v9fs_session_info { unsigned int dfltgid; /* default gid for legacy support */ u32 uid; /* if ACCESS_SINGLE, the uid that has access */ struct p9_client *clnt; /* 9p client */ - struct dentry *debugfs_dir; + struct list_head slist; /* list of sessions registered with v9fs */ }; -extern struct dentry *v9fs_debugfs_root; - struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, char *); void v9fs_session_close(struct v9fs_session_info *v9ses); diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index f0c7de7..3a7560e 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -44,7 +44,13 @@ extern const struct file_operations v9fs_dir_operations; extern const struct dentry_operations v9fs_dentry_operations; extern const struct dentry_operations v9fs_cached_dentry_operations; +#ifdef CONFIG_9P_FSCACHE +struct inode *v9fs_alloc_inode(struct super_block *sb); +void v9fs_destroy_inode(struct inode *inode); +#endif + struct inode *v9fs_get_inode(struct super_block *sb, int mode); +void v9fs_clear_inode(struct inode *inode); ino_t v9fs_qid2ino(struct p9_qid *qid); void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); int v9fs_dir_release(struct inode *inode, struct file *filp); diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 9282828..90e3844 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -38,6 +38,7 @@ #include "v9fs.h" #include "v9fs_vfs.h" +#include "cache.h" /** * v9fs_vfs_readpage - read an entire page in from 9P @@ -52,18 +53,31 @@ static int v9fs_vfs_readpage(struct file *filp, struct page *page) int retval; loff_t offset; char *buffer; + struct inode *inode; + inode = page->mapping->host; P9_DPRINTK(P9_DEBUG_VFS, "\n"); + + BUG_ON(!PageLocked(page)); + + retval = v9fs_readpage_from_fscache(inode, page); + if (retval == 0) + return retval; + buffer = kmap(page); offset = page_offset(page); retval = 
v9fs_file_readn(filp, buffer, NULL, PAGE_CACHE_SIZE, offset); - if (retval < 0) + if (retval < 0) { + v9fs_uncache_page(inode, page); goto done; + } memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval); flush_dcache_page(page); SetPageUptodate(page); + + v9fs_readpage_to_fscache(inode, page); retval = 0; done: @@ -72,6 +86,78 @@ done: return retval; } +/** + * v9fs_vfs_readpages - read a set of pages from 9P + * + * @filp: file being read + * @mapping: the address space + * @pages: list of pages to read + * @nr_pages: count of pages to read + * + */ + +static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + int ret = 0; + struct inode *inode; + + inode = mapping->host; + P9_DPRINTK(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp); + + ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); + if (ret == 0) + return ret; + + ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); + P9_DPRINTK(P9_DEBUG_VFS, " = %d\n", ret); + return ret; +} + +/** + * v9fs_release_page - release the private state associated with a page + * + * Returns 1 if the page can be released, false otherwise. + */ + +static int v9fs_release_page(struct page *page, gfp_t gfp) +{ + if (PagePrivate(page)) + return 0; + + return v9fs_fscache_release_page(page, gfp); +} + +/** + * v9fs_invalidate_page - Invalidate a page completely or partially + * + * @page: structure to page + * @offset: offset in the page + */ + +static void v9fs_invalidate_page(struct page *page, unsigned long offset) +{ + if (offset == 0) + v9fs_fscache_invalidate_page(page); +} + +/** + * v9fs_launder_page - Writeback a dirty page + * Since the writes go directly to the server, we simply return a 0 + * here to indicate success. + * + * Returns 0 on success. 
+ */ + +static int v9fs_launder_page(struct page *page) +{ + return 0; +} + const struct address_space_operations v9fs_addr_operations = { .readpage = v9fs_vfs_readpage, + .readpages = v9fs_vfs_readpages, + .releasepage = v9fs_release_page, + .invalidatepage = v9fs_invalidate_page, + .launder_page = v9fs_launder_page, }; diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 68bf2af..3902bf4 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -32,6 +32,7 @@ #include <linux/string.h> #include <linux/inet.h> #include <linux/list.h> +#include <linux/pagemap.h> #include <asm/uaccess.h> #include <linux/idr.h> #include <net/9p/9p.h> @@ -40,6 +41,7 @@ #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" +#include "cache.h" static const struct file_operations v9fs_cached_file_operations; @@ -72,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) return err; } if (omode & P9_OTRUNC) { - inode->i_size = 0; + i_size_write(inode, 0); inode->i_blocks = 0; } if ((file->f_flags & O_APPEND) && (!v9fs_extended(v9ses))) @@ -85,6 +87,10 @@ int v9fs_file_open(struct inode *inode, struct file *file) /* enable cached file options */ if(file->f_op == &v9fs_file_operations) file->f_op = &v9fs_cached_file_operations; + +#ifdef CONFIG_9P_FSCACHE + v9fs_cache_inode_set_cookie(inode, file); +#endif } return 0; @@ -210,6 +216,7 @@ v9fs_file_write(struct file *filp, const char __user * data, struct p9_client *clnt; struct inode *inode = filp->f_path.dentry->d_inode; int origin = *offset; + unsigned long pg_start, pg_end; P9_DPRINTK(P9_DEBUG_VFS, "data %p count %d offset %x\n", data, (int)count, (int)*offset); @@ -225,7 +232,7 @@ v9fs_file_write(struct file *filp, const char __user * data, if (count < rsize) rsize = count; - n = p9_client_write(fid, NULL, data+total, *offset+total, + n = p9_client_write(fid, NULL, data+total, origin+total, rsize); if (n <= 0) break; @@ -234,14 +241,14 @@ v9fs_file_write(struct file *filp, const char __user * data, } while (count > 0); if (total > 0) { - invalidate_inode_pages2_range(inode->i_mapping, origin, - origin+total); + pg_start = origin >> PAGE_CACHE_SHIFT; + pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT; + if (inode->i_mapping && inode->i_mapping->nrpages) + invalidate_inode_pages2_range(inode->i_mapping, + pg_start, pg_end); *offset += total; - } - - if (*offset > inode->i_size) { - inode->i_size = *offset; - inode->i_blocks = (inode->i_size + 512 - 1) >> 9; + i_size_write(inode, i_size_read(inode) + total); + inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9; } if (n < 0) diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 06a223d..5947628 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -40,6 +40,7 @@ #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" +#include "cache.h" static const struct inode_operations v9fs_dir_inode_operations; static const struct inode_operations v9fs_dir_inode_operations_ext; @@ -197,6 +198,39 @@ v9fs_blank_wstat(struct p9_wstat *wstat) wstat->extension = NULL; } +#ifdef CONFIG_9P_FSCACHE +/** + * v9fs_alloc_inode - helper function to allocate an inode + * This callback is executed before setting up the inode so that we + * can associate a vcookie with each inode. 
+ * + */ + +struct inode *v9fs_alloc_inode(struct super_block *sb) +{ + struct v9fs_cookie *vcookie; + vcookie = (struct v9fs_cookie *)kmem_cache_alloc(vcookie_cache, + GFP_KERNEL); + if (!vcookie) + return NULL; + + vcookie->fscache = NULL; + vcookie->qid = NULL; + spin_lock_init(&vcookie->lock); + return &vcookie->inode; +} + +/** + * v9fs_destroy_inode - destroy an inode + * + */ + +void v9fs_destroy_inode(struct inode *inode) +{ + kmem_cache_free(vcookie_cache, v9fs_inode2cookie(inode)); +} +#endif + /** * v9fs_get_inode - helper function to setup an inode * @sb: superblock @@ -326,6 +360,21 @@ error: } */ + +/** + * v9fs_clear_inode - release an inode + * @inode: inode to release + * + */ +void v9fs_clear_inode(struct inode *inode) +{ + filemap_fdatawrite(inode->i_mapping); + +#ifdef CONFIG_9P_FSCACHE + v9fs_cache_inode_put_cookie(inode); +#endif +} + /** * v9fs_inode_from_fid - populate an inode by issuing a attribute request * @v9ses: session information @@ -356,8 +405,14 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, v9fs_stat2inode(st, ret, sb); ret->i_ino = v9fs_qid2ino(&st->qid); + +#ifdef CONFIG_9P_FSCACHE + v9fs_vcookie_set_qid(ret, &st->qid); + v9fs_cache_inode_get_cookie(ret); +#endif p9stat_free(st); kfree(st); + return ret; error: @@ -751,7 +806,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); err = -EPERM; v9ses = v9fs_inode2v9ses(dentry->d_inode); - if (v9ses->cache == CACHE_LOOSE) + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) return simple_getattr(mnt, dentry, stat); fid = v9fs_fid_lookup(dentry); @@ -872,10 +927,10 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, } else inode->i_rdev = 0; - inode->i_size = stat->length; + i_size_write(inode, stat->length); /* not real number of blocks, but 512 byte ones ... */ - inode->i_blocks = (inode->i_size + 512 - 1) >> 9; + inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9; } /** diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 8961f1a..14a8644 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -44,21 +44,9 @@ #include "v9fs_vfs.h" #include "fid.h" -static void v9fs_clear_inode(struct inode *); static const struct super_operations v9fs_super_ops; /** - * v9fs_clear_inode - release an inode - * @inode: inode to release - * - */ - -static void v9fs_clear_inode(struct inode *inode) -{ - filemap_fdatawrite(inode->i_mapping); -} - -/** * v9fs_set_super - set the superblock * @s: super block * @data: file system specific data @@ -220,6 +208,10 @@ v9fs_umount_begin(struct super_block *sb) } static const struct super_operations v9fs_super_ops = { +#ifdef CONFIG_9P_FSCACHE + .alloc_inode = v9fs_alloc_inode, + .destroy_inode = v9fs_destroy_inode, +#endif .statfs = simple_statfs, .clear_inode = v9fs_clear_inode, .show_options = generic_show_options, @@ -109,6 +109,7 @@ source "fs/sysfs/Kconfig" config TMPFS bool "Virtual memory file system support (former shm fs)" + depends on SHMEM help Tmpfs is a file system which keeps all files in virtual memory. 
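One subtlety in the 9p changes above is the inode embedding: struct v9fs_cookie embeds the VFS inode as a member, v9fs_alloc_inode() returns a pointer to that member from a larger vcookie_cache allocation, and v9fs_inode2cookie() recovers the wrapper with container_of(). A minimal userspace sketch of the same pattern — wrapper, inner, and inner2wrapper() are illustrative names, not 9p symbols:

#include <stdio.h>
#include <stddef.h>

/* same construction as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {				/* stands in for struct inode */
	int id;
};

struct wrapper {			/* stands in for struct v9fs_cookie */
	void *private_state;		/* e.g. the fscache cookie */
	struct inner inode;		/* embedded; this is what callers hold */
};

static struct wrapper *inner2wrapper(struct inner *i)
{
	return container_of(i, struct wrapper, inode);
}

int main(void)
{
	struct wrapper w = { .private_state = NULL, .inode = { .id = 42 } };
	struct inner *seen = &w.inode;	/* all the VFS ever sees */
	printf("%s\n", inner2wrapper(seen) == &w ? "wrapper recovered" : "bug");
	return 0;
}

Because the inode is embedded rather than pointed to, no extra lookup structure is needed to get from an inode back to its per-inode cache state.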
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c index 798cb07..3f57ce4 100644 --- a/fs/adfs/inode.c +++ b/fs/adfs/inode.c @@ -19,9 +19,6 @@ static int adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, int create) { - if (block < 0) - goto abort_negative; - if (!create) { if (block >= inode->i_blocks) goto abort_toobig; @@ -34,10 +31,6 @@ adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, /* don't support allocation of blocks yet */ return -EIO; -abort_negative: - adfs_error(inode->i_sb, "block %d < 0", block); - return -EIO; - abort_toobig: return 0; } diff --git a/fs/afs/flock.c b/fs/afs/flock.c index 3ff8bdd..0931bc1 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -21,7 +21,7 @@ static void afs_fl_release_private(struct file_lock *fl); static struct workqueue_struct *afs_lock_manager; static DEFINE_MUTEX(afs_lock_manager_mutex); -static struct file_lock_operations afs_lock_ops = { +static const struct file_lock_operations afs_lock_ops = { .fl_copy_lock = afs_fl_copy_lock, .fl_release_private = afs_fl_release_private, }; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 8630615..852739d 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -28,7 +28,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v); static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf, size_t size, loff_t *_pos); -static struct seq_operations afs_proc_cells_ops = { +static const struct seq_operations afs_proc_cells_ops = { .start = afs_proc_cells_start, .next = afs_proc_cells_next, .stop = afs_proc_cells_stop, @@ -70,7 +70,7 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v, static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v); static int afs_proc_cell_volumes_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_volumes_ops = { +static const struct seq_operations afs_proc_cell_volumes_ops = { .start = afs_proc_cell_volumes_start, .next = afs_proc_cell_volumes_next, .stop = afs_proc_cell_volumes_stop, @@ -95,7 +95,7 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v, static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v); static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_vlservers_ops = { +static const struct seq_operations afs_proc_cell_vlservers_ops = { .start = afs_proc_cell_vlservers_start, .next = afs_proc_cell_vlservers_next, .stop = afs_proc_cell_vlservers_stop, @@ -119,7 +119,7 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v, static void afs_proc_cell_servers_stop(struct seq_file *p, void *v); static int afs_proc_cell_servers_show(struct seq_file *m, void *v); -static struct seq_operations afs_proc_cell_servers_ops = { +static const struct seq_operations afs_proc_cell_servers_ops = { .start = afs_proc_cell_servers_start, .next = afs_proc_cell_servers_next, .stop = afs_proc_cell_servers_stop, diff --git a/fs/afs/write.c b/fs/afs/write.c index c2e7a7f..c63a3c8 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -712,7 +712,6 @@ int afs_writeback_all(struct afs_vnode *vnode) .bdi = mapping->backing_dev_info, .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, - .for_writepages = 1, .range_cyclic = 1, }; int ret; @@ -24,6 +24,7 @@ #include <linux/file.h> #include <linux/mm.h> #include <linux/mman.h> +#include <linux/mmu_context.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/aio.h> @@ -34,7 +35,6 @@ #include <asm/kmap_types.h> 
#include <asm/uaccess.h> -#include <asm/mmu_context.h> #if DEBUG > 1 #define dprintk printk @@ -78,6 +78,7 @@ static int __init aio_setup(void) return 0; } +__initcall(aio_setup); static void aio_free_ring(struct kioctx *ctx) { @@ -380,6 +381,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb) __set_current_state(TASK_RUNNING); return iocb->ki_user_data; } +EXPORT_SYMBOL(wait_on_sync_kiocb); /* exit_aio: called when the last user of mm goes away. At this point, * there is no way for any new requests to be submited or any of the @@ -573,6 +575,7 @@ int aio_put_req(struct kiocb *req) spin_unlock_irq(&ctx->ctx_lock); return ret; } +EXPORT_SYMBOL(aio_put_req); static struct kioctx *lookup_ioctx(unsigned long ctx_id) { @@ -595,51 +598,6 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) } /* - * use_mm - * Makes the calling kernel thread take on the specified - * mm context. - * Called by the retry thread execute retries within the - * iocb issuer's mm context, so that copy_from/to_user - * operations work seamlessly for aio. - * (Note: this routine is intended to be called only - * from a kernel thread context) - */ -static void use_mm(struct mm_struct *mm) -{ - struct mm_struct *active_mm; - struct task_struct *tsk = current; - - task_lock(tsk); - active_mm = tsk->active_mm; - atomic_inc(&mm->mm_count); - tsk->mm = mm; - tsk->active_mm = mm; - switch_mm(active_mm, mm, tsk); - task_unlock(tsk); - - mmdrop(active_mm); -} - -/* - * unuse_mm - * Reverses the effect of use_mm, i.e. releases the - * specified mm context which was earlier taken on - * by the calling kernel thread - * (Note: this routine is intended to be called only - * from a kernel thread context) - */ -static void unuse_mm(struct mm_struct *mm) -{ - struct task_struct *tsk = current; - - task_lock(tsk); - tsk->mm = NULL; - /* active_mm is still 'mm' */ - enter_lazy_tlb(mm, tsk); - task_unlock(tsk); -} - -/* * Queue up a kiocb to be retried. Assumes that the kiocb * has already been marked as kicked, and places it on * the retry run list for the corresponding ioctx, if it @@ -1037,6 +995,7 @@ put_rq: spin_unlock_irqrestore(&ctx->ctx_lock, flags); return ret; } +EXPORT_SYMBOL(aio_complete); /* aio_read_evt * Pull an event off of the ioctx's event ring. Returns the number of @@ -1825,9 +1784,3 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout); return ret; } - -__initcall(aio_setup); - -EXPORT_SYMBOL(aio_complete); -EXPORT_SYMBOL(aio_put_req); -EXPORT_SYMBOL(wait_on_sync_kiocb); diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 47d4a01..d11c51f 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -77,28 +77,24 @@ static const struct address_space_operations anon_aops = { * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. - * All the files created with anon_inode_getfd() will share a single inode, + * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry - * setup. Returns new descriptor or -error. + * setup. Returns the newly created file* or an error pointer. 
*/ -int anon_inode_getfd(const char *name, const struct file_operations *fops, - void *priv, int flags) +struct file *anon_inode_getfile(const char *name, + const struct file_operations *fops, + void *priv, int flags) { struct qstr this; struct dentry *dentry; struct file *file; - int error, fd; + int error; if (IS_ERR(anon_inode_inode)) - return -ENODEV; + return ERR_PTR(-ENODEV); if (fops->owner && !try_module_get(fops->owner)) - return -ENOENT; - - error = get_unused_fd_flags(flags); - if (error < 0) - goto err_module; - fd = error; + return ERR_PTR(-ENOENT); /* * Link the inode to a directory entry by creating a unique name @@ -110,7 +106,7 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, this.hash = 0; dentry = d_alloc(anon_inode_mnt->mnt_sb->s_root, &this); if (!dentry) - goto err_put_unused_fd; + goto err_module; /* * We know the anon_inode inode count is always greater than zero, @@ -136,16 +132,54 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, file->f_version = 0; file->private_data = priv; + return file; + +err_dput: + dput(dentry); +err_module: + module_put(fops->owner); + return ERR_PTR(error); +} +EXPORT_SYMBOL_GPL(anon_inode_getfile); + +/** + * anon_inode_getfd - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describe the "class" + * of the file + * + * @name: [in] name of the "class" of the new file + * @fops: [in] file operations for the new file + * @priv: [in] private data for the new file (will be file's private_data) + * @flags: [in] flags + * + * Creates a new file by hooking it on a single inode. This is useful for files + * that do not need to have a full-fledged inode in order to operate correctly. + * All the files created with anon_inode_getfd() will share a single inode, + * hence saving memory and avoiding code duplication for the file/inode/dentry + * setup. Returns new descriptor or an error code. 
+ */ +int anon_inode_getfd(const char *name, const struct file_operations *fops, + void *priv, int flags) +{ + int error, fd; + struct file *file; + + error = get_unused_fd_flags(flags); + if (error < 0) + return error; + fd = error; + + file = anon_inode_getfile(name, fops, priv, flags); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto err_put_unused_fd; + } fd_install(fd, file); return fd; -err_dput: - dput(dentry); err_put_unused_fd: put_unused_fd(fd); -err_module: - module_put(fops->owner); return error; } EXPORT_SYMBOL_GPL(anon_inode_getfd); diff --git a/fs/autofs/dirhash.c b/fs/autofs/dirhash.c index 2316e94..e947915 100644 --- a/fs/autofs/dirhash.c +++ b/fs/autofs/dirhash.c @@ -90,7 +90,7 @@ struct autofs_dir_ent *autofs_expire(struct super_block *sb, DPRINTK(("autofs: not expirable (not a mounted directory): %s\n", ent->name)); continue; } - while (d_mountpoint(path.dentry) && follow_down(&path)); + while (d_mountpoint(path.dentry) && follow_down(&path)) ; umount_ok = may_umount(path.mnt); path_put(&path); diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 615d549..dd376c1 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -842,7 +842,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = BEFS_SUPER_MAGIC; /* Set real blocksize of fs */ sb_set_blocksize(sb, (ulong) befs_sb->block_size); - sb->s_op = (struct super_operations *) &befs_sops; + sb->s_op = &befs_sops; root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); if (IS_ERR(root)) { ret = PTR_ERR(root); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7c1e65d..b9b3bb5 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1280,9 +1280,6 @@ static int writenote(struct memelfnote *men, struct file *file, #define DUMP_WRITE(addr, nr) \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ goto end_coredump; -#define DUMP_SEEK(off) \ - if (!dump_seek(file, (off))) \ - goto end_coredump; static void fill_elf_header(struct elfhdr *elf, int segs, u16 machine, u32 flags, u8 osabi) @@ -1714,42 +1711,52 @@ struct elf_note_info { int numnote; }; -static int fill_note_info(struct elfhdr *elf, int phdrs, - struct elf_note_info *info, - long signr, struct pt_regs *regs) +static int elf_note_info_init(struct elf_note_info *info) { -#define NUM_NOTES 6 - struct list_head *t; - - info->notes = NULL; - info->prstatus = NULL; - info->psinfo = NULL; - info->fpu = NULL; -#ifdef ELF_CORE_COPY_XFPREGS - info->xfpu = NULL; -#endif + memset(info, 0, sizeof(*info)); INIT_LIST_HEAD(&info->thread_list); - info->notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), - GFP_KERNEL); + /* Allocate space for six ELF notes */ + info->notes = kmalloc(6 * sizeof(struct memelfnote), GFP_KERNEL); if (!info->notes) return 0; info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL); if (!info->psinfo) - return 0; + goto notes_free; info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL); if (!info->prstatus) - return 0; + goto psinfo_free; info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); if (!info->fpu) - return 0; + goto prstatus_free; #ifdef ELF_CORE_COPY_XFPREGS info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); if (!info->xfpu) - return 0; + goto fpu_free; +#endif + return 1; +#ifdef ELF_CORE_COPY_XFPREGS + fpu_free: + kfree(info->fpu); #endif + prstatus_free: + kfree(info->prstatus); + psinfo_free: + kfree(info->psinfo); + notes_free: + kfree(info->notes); + return 0; +} + +static int fill_note_info(struct elfhdr *elf, int phdrs, + struct elf_note_info *info, + long signr, 
struct pt_regs *regs) +{ + struct list_head *t; + + if (!elf_note_info_init(info)) + return 0; - info->thread_status_size = 0; if (signr) { struct core_thread *ct; struct elf_thread_status *ets; @@ -1809,8 +1816,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, #endif return 1; - -#undef NUM_NOTES } static size_t get_note_info_size(struct elf_note_info *info) @@ -2016,7 +2021,8 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un goto end_coredump; /* Align to page */ - DUMP_SEEK(dataoff - foffset); + if (!dump_seek(file, dataoff - foffset)) + goto end_coredump; for (vma = first_vma(current, gate_vma); vma != NULL; vma = next_vma(vma, gate_vma)) { @@ -2027,33 +2033,19 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { struct page *page; - struct vm_area_struct *tmp_vma; - - if (get_user_pages(current, current->mm, addr, 1, 0, 1, - &page, &tmp_vma) <= 0) { - DUMP_SEEK(PAGE_SIZE); - } else { - if (page == ZERO_PAGE(0)) { - if (!dump_seek(file, PAGE_SIZE)) { - page_cache_release(page); - goto end_coredump; - } - } else { - void *kaddr; - flush_cache_page(tmp_vma, addr, - page_to_pfn(page)); - kaddr = kmap(page); - if ((size += PAGE_SIZE) > limit || - !dump_write(file, kaddr, - PAGE_SIZE)) { - kunmap(page); - page_cache_release(page); - goto end_coredump; - } - kunmap(page); - } + int stop; + + page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); + stop = ((size += PAGE_SIZE) > limit) || + !dump_write(file, kaddr, PAGE_SIZE); + kunmap(page); page_cache_release(page); - } + } else + stop = !dump_seek(file, PAGE_SIZE); + if (stop) + goto end_coredump; } } diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 20fbece..38502c6 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -283,20 +283,23 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, } stack_size = exec_params.stack_size; - if (stack_size < interp_params.stack_size) - stack_size = interp_params.stack_size; - if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) executable_stack = EXSTACK_ENABLE_X; else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK) executable_stack = EXSTACK_DISABLE_X; - else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) - executable_stack = EXSTACK_ENABLE_X; - else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK) - executable_stack = EXSTACK_DISABLE_X; else executable_stack = EXSTACK_DEFAULT; + if (stack_size == 0) { + stack_size = interp_params.stack_size; + if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) + executable_stack = EXSTACK_ENABLE_X; + else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK) + executable_stack = EXSTACK_DISABLE_X; + else + executable_stack = EXSTACK_DEFAULT; + } + retval = -ENOEXEC; if (stack_size == 0) goto error; @@ -1325,9 +1328,6 @@ static int writenote(struct memelfnote *men, struct file *file) #define DUMP_WRITE(addr, nr) \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ goto end_coredump; -#define DUMP_SEEK(off) \ - if (!dump_seek(file, (off))) \ - goto end_coredump; static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) { @@ -1518,6 +1518,7 @@ static int elf_fdpic_dump_segments(struct file *file, size_t *size, unsigned long *limit, unsigned long mm_flags) { struct vm_area_struct *vma; + int err = 0; for (vma = current->mm->mmap; vma; vma = vma->vm_next) { unsigned long addr; @@ -1525,43 +1526,26 @@ static int elf_fdpic_dump_segments(struct 
file *file, size_t *size, if (!maydump(vma, mm_flags)) continue; - for (addr = vma->vm_start; - addr < vma->vm_end; - addr += PAGE_SIZE - ) { - struct vm_area_struct *vma; - struct page *page; - - if (get_user_pages(current, current->mm, addr, 1, 0, 1, - &page, &vma) <= 0) { - DUMP_SEEK(file->f_pos + PAGE_SIZE); - } - else if (page == ZERO_PAGE(0)) { - page_cache_release(page); - DUMP_SEEK(file->f_pos + PAGE_SIZE); - } - else { - void *kaddr; - - flush_cache_page(vma, addr, page_to_pfn(page)); - kaddr = kmap(page); - if ((*size += PAGE_SIZE) > *limit || - !dump_write(file, kaddr, PAGE_SIZE) - ) { - kunmap(page); - page_cache_release(page); - return -EIO; - } + for (addr = vma->vm_start; addr < vma->vm_end; + addr += PAGE_SIZE) { + struct page *page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); + *size += PAGE_SIZE; + if (*size > *limit) + err = -EFBIG; + else if (!dump_write(file, kaddr, PAGE_SIZE)) + err = -EIO; kunmap(page); page_cache_release(page); - } + } else if (!dump_seek(file, file->f_pos + PAGE_SIZE)) + err = -EFBIG; + if (err) + goto out; } } - - return 0; - -end_coredump: - return -EFBIG; +out: + return err; } #endif @@ -1802,7 +1786,8 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, goto end_coredump; } - DUMP_SEEK(dataoff); + if (!dump_seek(file, dataoff)) + goto end_coredump; if (elf_fdpic_dump_segments(file, &size, &limit, mm_flags) < 0) goto end_coredump; diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index e92f229..a279665 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -278,8 +278,6 @@ static int decompress_exec( ret = bprm->file->f_op->read(bprm->file, buf, LBUFSIZE, &fpos); if (ret <= 0) break; - if (ret >= (unsigned long) -4096) - break; len -= ret; strm.next_in = buf; @@ -335,7 +333,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp) "(%d != %d)", (unsigned) r, curid, id); goto failed; } else if ( ! 
p->lib_list[id].loaded && - load_flat_shared_library(id, p) > (unsigned long) -4096) { + IS_ERR_VALUE(load_flat_shared_library(id, p))) { printk("BINFMT_FLAT: failed to load library %d", id); goto failed; } @@ -545,7 +543,7 @@ static int load_flat_file(struct linux_binprm * bprm, textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_EXECUTABLE, 0); up_write(¤t->mm->mmap_sem); - if (!textpos || textpos >= (unsigned long) -4096) { + if (!textpos || IS_ERR_VALUE(textpos)) { if (!textpos) textpos = (unsigned long) -ENOMEM; printk("Unable to mmap process text, errno %d\n", (int)-textpos); @@ -560,7 +558,7 @@ static int load_flat_file(struct linux_binprm * bprm, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); - if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) { + if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) { if (!realdatastart) realdatastart = (unsigned long) -ENOMEM; printk("Unable to allocate RAM for process data, errno %d\n", @@ -587,7 +585,7 @@ static int load_flat_file(struct linux_binprm * bprm, result = bprm->file->f_op->read(bprm->file, (char *) datapos, data_len + (relocs * sizeof(unsigned long)), &fpos); } - if (result >= (unsigned long)-4096) { + if (IS_ERR_VALUE(result)) { printk("Unable to read data+bss, errno %d\n", (int)-result); do_munmap(current->mm, textpos, text_len); do_munmap(current->mm, realdatastart, data_len + extra); @@ -607,7 +605,7 @@ static int load_flat_file(struct linux_binprm * bprm, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); - if (!textpos || textpos >= (unsigned long) -4096) { + if (!textpos || IS_ERR_VALUE(textpos)) { if (!textpos) textpos = (unsigned long) -ENOMEM; printk("Unable to allocate RAM for process text/data, errno %d\n", @@ -641,7 +639,7 @@ static int load_flat_file(struct linux_binprm * bprm, fpos = 0; result = bprm->file->f_op->read(bprm->file, (char *) textpos, text_len, &fpos); - if (result < (unsigned long) -4096) + if (!IS_ERR_VALUE(result)) result = decompress_exec(bprm, text_len, (char *) datapos, data_len + (relocs * sizeof(unsigned long)), 0); } @@ -651,13 +649,13 @@ static int load_flat_file(struct linux_binprm * bprm, fpos = 0; result = bprm->file->f_op->read(bprm->file, (char *) textpos, text_len, &fpos); - if (result < (unsigned long) -4096) { + if (!IS_ERR_VALUE(result)) { fpos = ntohl(hdr->data_start); result = bprm->file->f_op->read(bprm->file, (char *) datapos, data_len + (relocs * sizeof(unsigned long)), &fpos); } } - if (result >= (unsigned long)-4096) { + if (IS_ERR_VALUE(result)) { printk("Unable to read code+data+bss, errno %d\n",(int)-result); do_munmap(current->mm, textpos, text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long)); @@ -835,7 +833,7 @@ static int load_flat_shared_library(int id, struct lib_info *libs) res = prepare_binprm(&bprm); - if (res <= (unsigned long)-4096) + if (!IS_ERR_VALUE(res)) res = load_flat_file(&bprm, libs, id, NULL); abort_creds(bprm.cred); @@ -880,7 +878,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) stack_len += FLAT_DATA_ALIGN - 1; /* reserve for upcoming alignment */ res = load_flat_file(bprm, &libinfo, 0, &stack_len); - if (res > (unsigned long)-4096) + if (IS_ERR_VALUE(res)) return res; /* Update data segment pointers for all libraries */ diff --git a/fs/block_dev.c b/fs/block_dev.c index 3581a4e..5d1ed50 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -420,7 +420,6 @@ static void bdev_destroy_inode(struct inode 
*inode) { struct bdev_inode *bdi = BDEV_I(inode); - bdi->bdev.bd_inode_backing_dev_info = NULL; kmem_cache_free(bdev_cachep, bdi); } @@ -1115,7 +1114,7 @@ EXPORT_SYMBOL(revalidate_disk); int check_disk_change(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; - struct block_device_operations * bdops = disk->fops; + const struct block_device_operations *bdops = disk->fops; if (!bdops->media_changed) return 0; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 15831d5..6c41731 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -772,7 +772,7 @@ static void btree_invalidatepage(struct page *page, unsigned long offset) } } -static struct address_space_operations btree_aops = { +static const struct address_space_operations btree_aops = { .readpage = btree_readpage, .writepage = btree_writepage, .writepages = btree_writepages, @@ -1600,6 +1600,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); + sb->s_bdi = &fs_info->bdi; /* * we set the i_size on the btree inode to the max possible int. diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dd86050..d154a3f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -55,13 +55,13 @@ struct btrfs_iget_args { struct btrfs_root *root; }; -static struct inode_operations btrfs_dir_inode_operations; -static struct inode_operations btrfs_symlink_inode_operations; -static struct inode_operations btrfs_dir_ro_inode_operations; -static struct inode_operations btrfs_special_inode_operations; -static struct inode_operations btrfs_file_inode_operations; -static struct address_space_operations btrfs_aops; -static struct address_space_operations btrfs_symlink_aops; +static const struct inode_operations btrfs_dir_inode_operations; +static const struct inode_operations btrfs_symlink_inode_operations; +static const struct inode_operations btrfs_dir_ro_inode_operations; +static const struct inode_operations btrfs_special_inode_operations; +static const struct inode_operations btrfs_file_inode_operations; +static const struct address_space_operations btrfs_aops; +static const struct address_space_operations btrfs_symlink_aops; static struct file_operations btrfs_dir_file_operations; static struct extent_io_ops btrfs_extent_io_ops; @@ -5201,7 +5201,7 @@ static int btrfs_permission(struct inode *inode, int mask) return generic_permission(inode, mask, btrfs_check_acl); } -static struct inode_operations btrfs_dir_inode_operations = { +static const struct inode_operations btrfs_dir_inode_operations = { .getattr = btrfs_getattr, .lookup = btrfs_lookup, .create = btrfs_create, @@ -5219,7 +5219,7 @@ static struct inode_operations btrfs_dir_inode_operations = { .removexattr = btrfs_removexattr, .permission = btrfs_permission, }; -static struct inode_operations btrfs_dir_ro_inode_operations = { +static const struct inode_operations btrfs_dir_ro_inode_operations = { .lookup = btrfs_lookup, .permission = btrfs_permission, }; @@ -5259,7 +5259,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { * * For now we're avoiding this by dropping bmap. 
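 * (With ->bmap left unimplemented, the VFS bmap() helper returns * block 0 for every query, so FIBMAP callers simply see the file as * unmapped.)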
*/ -static struct address_space_operations btrfs_aops = { +static const struct address_space_operations btrfs_aops = { .readpage = btrfs_readpage, .writepage = btrfs_writepage, .writepages = btrfs_writepages, @@ -5272,14 +5272,14 @@ static struct address_space_operations btrfs_aops = { .error_remove_page = generic_error_remove_page, }; -static struct address_space_operations btrfs_symlink_aops = { +static const struct address_space_operations btrfs_symlink_aops = { .readpage = btrfs_readpage, .writepage = btrfs_writepage, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, }; -static struct inode_operations btrfs_file_inode_operations = { +static const struct inode_operations btrfs_file_inode_operations = { .truncate = btrfs_truncate, .getattr = btrfs_getattr, .setattr = btrfs_setattr, @@ -5291,7 +5291,7 @@ static struct inode_operations btrfs_file_inode_operations = { .fallocate = btrfs_fallocate, .fiemap = btrfs_fiemap, }; -static struct inode_operations btrfs_special_inode_operations = { +static const struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, .setattr = btrfs_setattr, .permission = btrfs_permission, @@ -5300,7 +5300,7 @@ static struct inode_operations btrfs_special_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, }; -static struct inode_operations btrfs_symlink_inode_operations = { +static const struct inode_operations btrfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index d6f0806..7b2f401 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -740,7 +740,6 @@ int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, .nr_to_write = mapping->nrpages * 2, .range_start = start, .range_end = end, - .for_writepages = 1, }; return btrfs_writepages(mapping, &wbc); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 6d6d06c..2db17cd 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -51,7 +51,7 @@ #include "export.h" #include "compression.h" -static struct super_operations btrfs_super_ops; +static const struct super_operations btrfs_super_ops; static void btrfs_put_super(struct super_block *sb) { @@ -675,7 +675,7 @@ static int btrfs_unfreeze(struct super_block *sb) return 0; } -static struct super_operations btrfs_super_ops = { +static const struct super_operations btrfs_super_ops = { .delete_inode = btrfs_delete_inode, .put_super = btrfs_put_super, .sync_fs = btrfs_sync_fs, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d91b0de..30c0d45 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2605,7 +2605,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent); cs = btrfs_file_extent_offset(src, extent); cl = btrfs_file_extent_num_bytes(src, - extent);; + extent); if (btrfs_file_extent_compression(src, extent)) { cs = 0; diff --git a/fs/buffer.c b/fs/buffer.c index 90a9886..209f7f1 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private) bh->b_end_io = handler; bh->b_private = private; } +EXPORT_SYMBOL(init_buffer); static int sync_buffer(void *word) { @@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh) smp_mb__after_clear_bit(); wake_up_bit(&bh->b_state, BH_Lock); } +EXPORT_SYMBOL(unlock_buffer); /* * Block until a buffer comes unlocked. 
This doesn't stop it @@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh) { wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); } +EXPORT_SYMBOL(__wait_on_buffer); static void __clear_page_buffers(struct page *page) @@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate) __end_buffer_read_notouch(bh, uptodate); put_bh(bh); } +EXPORT_SYMBOL(end_buffer_read_sync); void end_buffer_write_sync(struct buffer_head *bh, int uptodate) { @@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate) unlock_buffer(bh); put_bh(bh); } +EXPORT_SYMBOL(end_buffer_write_sync); /* * Various filesystems appear to want __find_get_block to be non-blocking. @@ -272,6 +277,7 @@ void invalidate_bdev(struct block_device *bdev) invalidate_bh_lrus(); invalidate_mapping_pages(mapping, 0, -1); } +EXPORT_SYMBOL(invalidate_bdev); /* * Kick pdflush then try to free up some ZONE_NORMAL memory. @@ -410,6 +416,7 @@ still_busy: local_irq_restore(flags); return; } +EXPORT_SYMBOL(end_buffer_async_write); /* * If a page's buffers are under async readin (end_buffer_async_read @@ -438,8 +445,8 @@ static void mark_buffer_async_read(struct buffer_head *bh) set_buffer_async_read(bh); } -void mark_buffer_async_write_endio(struct buffer_head *bh, - bh_end_io_t *handler) +static void mark_buffer_async_write_endio(struct buffer_head *bh, + bh_end_io_t *handler) { bh->b_end_io = handler; set_buffer_async_write(bh); @@ -553,7 +560,7 @@ repeat: return err; } -void do_thaw_all(struct work_struct *work) +static void do_thaw_all(struct work_struct *work) { struct super_block *sb; char b[BDEVNAME_SIZE]; @@ -1172,6 +1179,7 @@ void mark_buffer_dirty(struct buffer_head *bh) } } } +EXPORT_SYMBOL(mark_buffer_dirty); /* * Decrement a buffer_head's reference count. If all buffers against a page @@ -1188,6 +1196,7 @@ void __brelse(struct buffer_head * buf) } WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); } +EXPORT_SYMBOL(__brelse); /* * bforget() is like brelse(), except it discards any @@ -1206,6 +1215,7 @@ void __bforget(struct buffer_head *bh) } __brelse(bh); } +EXPORT_SYMBOL(__bforget); static struct buffer_head *__bread_slow(struct buffer_head *bh) { @@ -2218,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) } return 0; } +EXPORT_SYMBOL(block_read_full_page); /* utility function for filesystems that need to do work on expanding * truncates. 
Uses filesystem pagecache writes to allow the filesystem to @@ -2252,6 +2263,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size) out: return err; } +EXPORT_SYMBOL(generic_cont_expand_simple); static int cont_expand_zero(struct file *file, struct address_space *mapping, loff_t pos, loff_t *bytes) @@ -2352,6 +2364,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, out: return err; } +EXPORT_SYMBOL(cont_write_begin); int block_prepare_write(struct page *page, unsigned from, unsigned to, get_block_t *get_block) @@ -2362,6 +2375,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to, ClearPageUptodate(page); return err; } +EXPORT_SYMBOL(block_prepare_write); int block_commit_write(struct page *page, unsigned from, unsigned to) { @@ -2369,6 +2383,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to) __block_commit_write(inode,page,from,to); return 0; } +EXPORT_SYMBOL(block_commit_write); /* * block_page_mkwrite() is not allowed to change the file size as it gets @@ -2426,6 +2441,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, out: return ret; } +EXPORT_SYMBOL(block_page_mkwrite); /* * nobh_write_begin()'s prereads are special: the buffer_heads are freed @@ -2849,6 +2865,7 @@ unlock: out: return err; } +EXPORT_SYMBOL(block_truncate_page); /* * The generic ->writepage function for buffer-backed address_spaces @@ -2890,6 +2907,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, zero_user_segment(page, offset, PAGE_CACHE_SIZE); return __block_write_full_page(inode, page, get_block, wbc, handler); } +EXPORT_SYMBOL(block_write_full_page_endio); /* * The generic ->writepage function for buffer-backed address_spaces @@ -2900,7 +2918,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, return block_write_full_page_endio(page, get_block, wbc, end_buffer_async_write); } - +EXPORT_SYMBOL(block_write_full_page); sector_t generic_block_bmap(struct address_space *mapping, sector_t block, get_block_t *get_block) @@ -2913,6 +2931,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, get_block(inode, block, &tmp, 0); return tmp.b_blocknr; } +EXPORT_SYMBOL(generic_block_bmap); static void end_bio_bh_io_sync(struct bio *bio, int err) { @@ -2982,6 +3001,7 @@ int submit_bh(int rw, struct buffer_head * bh) bio_put(bio); return ret; } +EXPORT_SYMBOL(submit_bh); /** * ll_rw_block: low-level access to block devices (DEPRECATED) @@ -3043,6 +3063,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) unlock_buffer(bh); } } +EXPORT_SYMBOL(ll_rw_block); /* * For a data-integrity writeout, we need to wait upon any in-progress I/O @@ -3071,6 +3092,7 @@ int sync_dirty_buffer(struct buffer_head *bh) } return ret; } +EXPORT_SYMBOL(sync_dirty_buffer); /* * try_to_free_buffers() checks if all the buffers on this particular page @@ -3185,6 +3207,7 @@ void block_sync_page(struct page *page) if (mapping) blk_run_backing_dev(mapping->backing_dev_info, page); } +EXPORT_SYMBOL(block_sync_page); /* * There are no bdflush tunables left. 
But distributions are @@ -3361,29 +3384,3 @@ void __init buffer_init(void) max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); hotcpu_notifier(buffer_cpu_notify, 0); } - -EXPORT_SYMBOL(__bforget); -EXPORT_SYMBOL(__brelse); -EXPORT_SYMBOL(__wait_on_buffer); -EXPORT_SYMBOL(block_commit_write); -EXPORT_SYMBOL(block_prepare_write); -EXPORT_SYMBOL(block_page_mkwrite); -EXPORT_SYMBOL(block_read_full_page); -EXPORT_SYMBOL(block_sync_page); -EXPORT_SYMBOL(block_truncate_page); -EXPORT_SYMBOL(block_write_full_page); -EXPORT_SYMBOL(block_write_full_page_endio); -EXPORT_SYMBOL(cont_write_begin); -EXPORT_SYMBOL(end_buffer_read_sync); -EXPORT_SYMBOL(end_buffer_write_sync); -EXPORT_SYMBOL(end_buffer_async_write); -EXPORT_SYMBOL(file_fsync); -EXPORT_SYMBOL(generic_block_bmap); -EXPORT_SYMBOL(generic_cont_expand_simple); -EXPORT_SYMBOL(init_buffer); -EXPORT_SYMBOL(invalidate_bdev); -EXPORT_SYMBOL(ll_rw_block); -EXPORT_SYMBOL(mark_buffer_dirty); -EXPORT_SYMBOL(submit_bh); -EXPORT_SYMBOL(sync_dirty_buffer); -EXPORT_SYMBOL(unlock_buffer); diff --git a/fs/char_dev.c b/fs/char_dev.c index 3cbc57f..d6db933 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -264,7 +264,6 @@ int __register_chrdev(unsigned int major, unsigned int baseminor, { struct char_device_struct *cd; struct cdev *cdev; - char *s; int err = -ENOMEM; cd = __register_chrdev_region(major, baseminor, count, name); @@ -278,8 +277,6 @@ int __register_chrdev(unsigned int major, unsigned int baseminor, cdev->owner = fops->owner; cdev->ops = fops; kobject_set_name(&cdev->kobj, "%s", name); - for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) - *s = '!'; err = cdev_add(cdev, MKDEV(cd->major, baseminor), count); if (err) diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 606912d..fea9e89 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -142,7 +142,7 @@ char *cifs_compose_mount_options(const char *sb_mountdata, rc = dns_resolve_server_name_to_ip(*devname, &srvIP); if (rc != 0) { cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", - __func__, *devname, rc));; + __func__, *devname, rc)); goto compose_mount_options_err; } /* md_len = strlen(...) 
+ 12 for 'sep+prefixpath=' @@ -385,7 +385,7 @@ out_err: goto out; } -struct inode_operations cifs_dfs_referral_inode_operations = { +const struct inode_operations cifs_dfs_referral_inode_operations = { .follow_link = cifs_dfs_follow_mountpoint, }; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3610e99..d79ce2e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -50,7 +50,7 @@ #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */ #ifdef CONFIG_CIFS_QUOTA -static struct quotactl_ops cifs_quotactl_ops; +static const struct quotactl_ops cifs_quotactl_ops; #endif /* QUOTA */ int cifsFYI = 0; @@ -517,7 +517,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats) return rc; } -static struct quotactl_ops cifs_quotactl_ops = { +static const struct quotactl_ops cifs_quotactl_ops = { .set_xquota = cifs_xquota_set, .get_xquota = cifs_xquota_get, .set_xstate = cifs_xstate_set, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 094325e..ac2b24c 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -67,7 +67,7 @@ extern int cifs_setattr(struct dentry *, struct iattr *); extern const struct inode_operations cifs_file_inode_ops; extern const struct inode_operations cifs_symlink_inode_ops; -extern struct inode_operations cifs_dfs_referral_inode_operations; +extern const struct inode_operations cifs_dfs_referral_inode_operations; /* Functions related to files and directories */ diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h index 8ccd5ed..d99860a 100644 --- a/fs/coda/coda_int.h +++ b/fs/coda/coda_int.h @@ -2,6 +2,7 @@ #define _CODA_INT_ struct dentry; +struct file; extern struct file_system_type coda_fs_type; extern unsigned long coda_timeout; diff --git a/fs/compat.c b/fs/compat.c index 6d6f98f..3aa4883 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -100,13 +100,6 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, st get_compat_timespec(&tv[1], &t[1])) return -EFAULT; - if ((tv[0].tv_nsec == UTIME_OMIT || tv[0].tv_nsec == UTIME_NOW) - && tv[0].tv_sec != 0) - return -EINVAL; - if ((tv[1].tv_nsec == UTIME_OMIT || tv[1].tv_nsec == UTIME_NOW) - && tv[1].tv_sec != 0) - return -EINVAL; - if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) return 0; } diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 75efb02..d5f8c96 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -18,14 +18,13 @@ #include <linux/mount.h> #include <linux/tty.h> #include <linux/mutex.h> +#include <linux/magic.h> #include <linux/idr.h> #include <linux/devpts_fs.h> #include <linux/parser.h> #include <linux/fsnotify.h> #include <linux/seq_file.h> -#define DEVPTS_SUPER_MAGIC 0x1cd1 - #define DEVPTS_DEFAULT_MODE 0600 /* * ptmx is a new node in /dev/pts and will be unused in legacy (single- diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c index 1d1d274..1c8bb8c 100644 --- a/fs/dlm/debug_fs.c +++ b/fs/dlm/debug_fs.c @@ -386,9 +386,9 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr) return rv; } -static struct seq_operations format1_seq_ops; -static struct seq_operations format2_seq_ops; -static struct seq_operations format3_seq_ops; +static const struct seq_operations format1_seq_ops; +static const struct seq_operations format2_seq_ops; +static const struct seq_operations format3_seq_ops; static void *table_seq_start(struct seq_file *seq, loff_t *pos) { @@ -534,21 +534,21 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr) } } -static struct seq_operations format1_seq_ops = { +static const struct 
seq_operations format1_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; -static struct seq_operations format2_seq_ops = { +static const struct seq_operations format2_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, .show = table_seq_show, }; -static struct seq_operations format3_seq_ops = { +static const struct seq_operations format3_seq_ops = { .start = table_seq_start, .next = table_seq_next, .stop = table_seq_stop, diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 618a60f..240cef1 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -106,6 +106,7 @@ struct connection { #define CF_CONNECT_PENDING 3 #define CF_INIT_PENDING 4 #define CF_IS_OTHERCON 5 +#define CF_CLOSE 6 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; int (*rx_action) (struct connection *); /* What to do when active */ @@ -299,6 +300,8 @@ static void lowcomms_write_space(struct sock *sk) static inline void lowcomms_connect_sock(struct connection *con) { + if (test_bit(CF_CLOSE, &con->flags)) + return; if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags)) queue_work(send_workqueue, &con->swork); } @@ -926,10 +929,8 @@ static void tcp_connect_to_sock(struct connection *con) goto out_err; memset(&saddr, 0, sizeof(saddr)); - if (dlm_nodeid_to_addr(con->nodeid, &saddr)) { - sock_release(sock); + if (dlm_nodeid_to_addr(con->nodeid, &saddr)) goto out_err; - } sock->sk->sk_user_data = con; con->rx_action = receive_from_sock; @@ -1284,7 +1285,6 @@ out: static void send_to_sock(struct connection *con) { int ret = 0; - ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int); const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; struct writequeue_entry *e; int len, offset; @@ -1293,8 +1293,6 @@ static void send_to_sock(struct connection *con) if (con->sock == NULL) goto out_connect; - sendpage = con->sock->ops->sendpage; - spin_lock(&con->writequeue_lock); for (;;) { e = list_entry(con->writequeue.next, struct writequeue_entry, @@ -1309,8 +1307,8 @@ static void send_to_sock(struct connection *con) ret = 0; if (len) { - ret = sendpage(con->sock, e->page, offset, len, - msg_flags); + ret = kernel_sendpage(con->sock, e->page, offset, len, + msg_flags); if (ret == -EAGAIN || ret == 0) { cond_resched(); goto out; @@ -1370,6 +1368,13 @@ int dlm_lowcomms_close(int nodeid) log_print("closing connection to node %d", nodeid); con = nodeid2con(nodeid, 0); if (con) { + clear_bit(CF_CONNECT_PENDING, &con->flags); + clear_bit(CF_WRITE_PENDING, &con->flags); + set_bit(CF_CLOSE, &con->flags); + if (cancel_work_sync(&con->swork)) + log_print("canceled swork for node %d", nodeid); + if (cancel_work_sync(&con->rwork)) + log_print("canceled rwork for node %d", nodeid); clean_one_writequeue(con); close_connection(con, true); } @@ -1395,9 +1400,10 @@ static void process_send_sockets(struct work_struct *work) if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { con->connect_action(con); + set_bit(CF_WRITE_PENDING, &con->flags); } - clear_bit(CF_WRITE_PENDING, &con->flags); - send_to_sock(con); + if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags)) + send_to_sock(con); } diff --git a/fs/drop_caches.c b/fs/drop_caches.c index a2edb79..31f4b0e 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -63,9 +63,9 @@ static void drop_slab(void) } int drop_caches_sysctl_handler(ctl_table *table, int write, - struct file *file, void __user *buffer, size_t *length, loff_t *ppos) + void 
__user *buffer, size_t *length, loff_t *ppos) { - proc_dointvec_minmax(table, write, file, buffer, length, ppos); + proc_dointvec_minmax(table, write, buffer, length, ppos); if (write) { if (sysctl_drop_caches & 1) drop_pagecache(); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 00b30a2..542f625 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -582,7 +582,7 @@ extern const struct inode_operations ecryptfs_dir_iops; extern const struct inode_operations ecryptfs_symlink_iops; extern const struct super_operations ecryptfs_sops; extern const struct dentry_operations ecryptfs_dops; -extern struct address_space_operations ecryptfs_aops; +extern const struct address_space_operations ecryptfs_aops; extern int ecryptfs_verbosity; extern unsigned int ecryptfs_message_buf_len; extern signed long ecryptfs_message_wait_timeout; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 5c6bab9..05772ae 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -545,7 +545,7 @@ static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block) return rc; } -struct address_space_operations ecryptfs_aops = { +const struct address_space_operations ecryptfs_aops = { .writepage = ecryptfs_writepage, .readpage = ecryptfs_readpage, .write_begin = ecryptfs_write_begin, diff --git a/fs/eventfd.c b/fs/eventfd.c index 31d12de..8b47e42 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -68,11 +68,16 @@ int eventfd_signal(struct eventfd_ctx *ctx, int n) } EXPORT_SYMBOL_GPL(eventfd_signal); +static void eventfd_free_ctx(struct eventfd_ctx *ctx) +{ + kfree(ctx); +} + static void eventfd_free(struct kref *kref) { struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); - kfree(ctx); + eventfd_free_ctx(ctx); } /** @@ -298,9 +303,23 @@ struct eventfd_ctx *eventfd_ctx_fileget(struct file *file) } EXPORT_SYMBOL_GPL(eventfd_ctx_fileget); -SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) +/** + * eventfd_file_create - Creates an eventfd file pointer. + * @count: Initial eventfd counter value. + * @flags: Flags for the eventfd file. + * + * This function creates an eventfd file pointer, w/out installing it into + * the fd table. This is useful when the eventfd file is used during the + * initialization of data structures that require extra setup after the eventfd + * creation. So the eventfd creation is split into the file pointer creation + * phase, and the file descriptor installation phase. + * In this way races with userspace closing the newly installed file descriptor + * can be avoided. + * Returns an eventfd file pointer, or a proper error pointer. + */ +struct file *eventfd_file_create(unsigned int count, int flags) { - int fd; + struct file *file; struct eventfd_ctx *ctx; /* Check the EFD_* constants for consistency. */ @@ -308,26 +327,48 @@ SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK); if (flags & ~EFD_FLAGS_SET) - return -EINVAL; + return ERR_PTR(-EINVAL); ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) - return -ENOMEM; + return ERR_PTR(-ENOMEM); kref_init(&ctx->kref); init_waitqueue_head(&ctx->wqh); ctx->count = count; ctx->flags = flags; - /* - * When we call this, the initialization must be complete, since - * anon_inode_getfd() will install the fd. 
- */ - fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx, - flags & EFD_SHARED_FCNTL_FLAGS); - if (fd < 0) - kfree(ctx); + file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, + flags & EFD_SHARED_FCNTL_FLAGS); + if (IS_ERR(file)) + eventfd_free_ctx(ctx); + + return file; +} + +SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) +{ + int fd, error; + struct file *file; + + error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS); + if (error < 0) + return error; + fd = error; + + file = eventfd_file_create(count, flags); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto err_put_unused_fd; + } + fd_install(fd, file); + return fd; + +err_put_unused_fd: + put_unused_fd(fd); + + return error; } SYSCALL_DEFINE1(eventfd, unsigned int, count) diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c @@ -33,7 +33,7 @@ #include <linux/string.h> #include <linux/init.h> #include <linux/pagemap.h> -#include <linux/perf_counter.h> +#include <linux/perf_event.h> #include <linux/highmem.h> #include <linux/spinlock.h> #include <linux/key.h> @@ -55,6 +55,7 @@ #include <linux/kmod.h> #include <linux/fsnotify.h> #include <linux/fs_struct.h> +#include <linux/pipe_fs_i.h> #include <asm/uaccess.h> #include <asm/mmu_context.h> @@ -63,6 +64,7 @@ int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; +unsigned int core_pipe_limit; int suid_dumpable = 0; /* The maximal length of core_pattern is also specified in sysctl.c */ @@ -845,6 +847,9 @@ static int de_thread(struct task_struct *tsk) sig->notify_count = 0; no_thread_group: + if (current->mm) + setmax_mm_hiwater_rss(&sig->maxrss, current->mm); + exit_itimers(sig); flush_itimer_signals(); @@ -923,7 +928,7 @@ void set_task_comm(struct task_struct *tsk, char *buf) task_lock(tsk); strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); - perf_counter_comm(tsk); + perf_event_comm(tsk); } int flush_old_exec(struct linux_binprm * bprm) @@ -997,7 +1002,7 @@ int flush_old_exec(struct linux_binprm * bprm) * security domain: */ if (!get_dumpable(current->mm)) - perf_counter_exit_task(current); + perf_event_exit_task(current); /* An exec changes our domain. We are no longer part of the thread group */ @@ -1354,6 +1359,8 @@ int do_execve(char * filename, if (retval < 0) goto out; + current->stack_start = current->mm->start_stack; + /* execve succeeded */ current->fs->in_exec = 0; current->in_execve = 0; @@ -1388,18 +1395,16 @@ out_ret: return retval; } -int set_binfmt(struct linux_binfmt *new) +void set_binfmt(struct linux_binfmt *new) { - struct linux_binfmt *old = current->binfmt; + struct mm_struct *mm = current->mm; - if (new) { - if (!try_module_get(new->module)) - return -1; - } - current->binfmt = new; - if (old) - module_put(old->module); - return 0; + if (mm->binfmt) + module_put(mm->binfmt->module); + + mm->binfmt = new; + if (new) + __module_get(new->module); } EXPORT_SYMBOL(set_binfmt); @@ -1723,6 +1728,29 @@ int get_dumpable(struct mm_struct *mm) return (ret >= 2) ?
2 : ret; } +static void wait_for_dump_helpers(struct file *file) +{ + struct pipe_inode_info *pipe; + + pipe = file->f_path.dentry->d_inode->i_pipe; + + pipe_lock(pipe); + pipe->readers++; + pipe->writers--; + + while ((pipe->readers > 1) && (!signal_pending(current))) { + wake_up_interruptible_sync(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + pipe_wait(pipe); + } + + pipe->readers--; + pipe->writers++; + pipe_unlock(pipe); + +} + + void do_coredump(long signr, int exit_code, struct pt_regs *regs) { struct core_state core_state; @@ -1739,11 +1767,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; char **helper_argv = NULL; int helper_argc = 0; - char *delimit; + int dump_count = 0; + static atomic_t core_dump_count = ATOMIC_INIT(0); audit_core_dumps(signr); - binfmt = current->binfmt; + binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; @@ -1794,54 +1823,63 @@ lock_kernel(); ispipe = format_corename(corename, signr); unlock_kernel(); - /* - * Don't bother to check the RLIMIT_CORE value if core_pattern points - * to a pipe. Since we're not writing directly to the filesystem - * RLIMIT_CORE doesn't really apply, as no actual core file will be - * created unless the pipe reader choses to write out the core file - * at which point file size limits and permissions will be imposed - * as it does with any other process - */ + if ((!ispipe) && (core_limit < binfmt->min_coredump)) goto fail_unlock; if (ispipe) { + if (core_limit == 0) { + /* + * Normally core limits are irrelevant to pipes, since + * we're not writing to the file system, but we use + * core_limit of 0 here as a special value. Any + * non-zero limit gets set to RLIM_INFINITY below, but + * a limit of 0 skips the dump. This is a consistent + * way to catch recursive crashes. We can still crash + * if the core_pattern binary sets RLIM_CORE = !0 + * but it runs as root, and can do lots of stupid things. + * Note that we use task_tgid_vnr here to grab the pid + * of the process group leader. That way we get the + * right pid if a thread in a multi-threaded + * core_pattern process dies.
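+ * (For illustration, not part of this patch: even with this check a + * root helper can defeat the guard by raising its own limit, e.g. + * struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY }; + * setrlimit(RLIMIT_CORE, &rl); + * so the zero limit is a best-effort recursion guard, not a hard one.)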
+ */ + printk(KERN_WARNING + "Process %d(%s) has RLIMIT_CORE set to 0\n", + task_tgid_vnr(current), current->comm); + printk(KERN_WARNING "Aborting core\n"); + goto fail_unlock; + } + + dump_count = atomic_inc_return(&core_dump_count); + if (core_pipe_limit && (core_pipe_limit < dump_count)) { + printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n", + task_tgid_vnr(current), current->comm); + printk(KERN_WARNING "Skipping core dump\n"); + goto fail_dropcount; + } + helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc); if (!helper_argv) { printk(KERN_WARNING "%s failed to allocate memory\n", __func__); - goto fail_unlock; - } - /* Terminate the string before the first option */ - delimit = strchr(corename, ' '); - if (delimit) - *delimit = '\0'; - delimit = strrchr(helper_argv[0], '/'); - if (delimit) - delimit++; - else - delimit = helper_argv[0]; - if (!strcmp(delimit, current->comm)) { - printk(KERN_NOTICE "Recursive core dump detected, " - "aborting\n"); - goto fail_unlock; + goto fail_dropcount; } core_limit = RLIM_INFINITY; /* SIGPIPE can happen, but it's just never processed */ - if (call_usermodehelper_pipe(corename+1, helper_argv, NULL, + if (call_usermodehelper_pipe(helper_argv[0], helper_argv, NULL, &file)) { printk(KERN_INFO "Core dump to %s pipe failed\n", corename); - goto fail_unlock; + goto fail_dropcount; } } else file = filp_open(corename, O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, 0600); if (IS_ERR(file)) - goto fail_unlock; + goto fail_dropcount; inode = file->f_path.dentry->d_inode; if (inode->i_nlink > 1) goto close_fail; /* multiple links - don't dump */ @@ -1870,7 +1908,12 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs) if (retval) current->signal->group_exit_code |= 0x80; close_fail: + if (ispipe && core_pipe_limit) + wait_for_dump_helpers(file); filp_close(file, NULL); +fail_dropcount: + if (dump_count) + atomic_dec(&core_dump_count); fail_unlock: if (helper_argv) argv_free(helper_argv); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 23701f2..dd7175c 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -70,7 +70,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str if (PTR_ERR(inode) == -ESTALE) { ext2_error(dir->i_sb, __func__, "deleted inode referenced: %lu", - ino); + (unsigned long) ino); return ERR_PTR(-EIO); } else { return ERR_CAST(inode); diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index b72b858..c18fbf3 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -20,7 +20,7 @@ __inode_direct_access(struct inode *inode, sector_t block, void **kaddr, unsigned long *pfn) { struct block_device *bdev = inode->i_sb->s_bdev; - struct block_device_operations *ops = bdev->bd_disk->fops; + const struct block_device_operations *ops = bdev->bd_disk->fops; sector_t sector; sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */ diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c index d336341..451d166 100644 --- a/fs/ext3/fsync.c +++ b/fs/ext3/fsync.c @@ -23,6 +23,7 @@ */ #include <linux/time.h> +#include <linux/blkdev.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/writeback.h> @@ -73,7 +74,7 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) } if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - goto out; + goto flush; /* * The VFS has written the file data. 
If the inode is unaltered @@ -85,7 +86,16 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync) .nr_to_write = 0, /* sys_fsync did this */ }; ret = sync_inode(inode, &wbc); + goto out; } +flush: + /* + * In case we didn't commit a transaction, we have to flush + * disk caches manually so that data really is on persistent + * storage + */ + if (test_opt(inode->i_sb, BARRIER)) + blkdev_issue_flush(inode->i_sb->s_bdev, NULL); out: return ret; } diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 953b430..acf1b14 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -172,10 +172,21 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) * so before we call here everything must be consistently dirtied against * this transaction. */ -static int ext3_journal_test_restart(handle_t *handle, struct inode *inode) +static int truncate_restart_transaction(handle_t *handle, struct inode *inode) { + int ret; + jbd_debug(2, "restarting handle %p\n", handle); - return ext3_journal_restart(handle, blocks_for_truncate(inode)); + /* + * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle. + * At this moment, get_block can be called only for blocks inside + * i_size since page cache has been already dropped and writes are + * blocked by i_mutex. So we can safely drop the truncate_mutex. + */ + mutex_unlock(&EXT3_I(inode)->truncate_mutex); + ret = ext3_journal_restart(handle, blocks_for_truncate(inode)); + mutex_lock(&EXT3_I(inode)->truncate_mutex); + return ret; } /* @@ -2075,7 +2086,7 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode, ext3_journal_dirty_metadata(handle, bh); } ext3_mark_inode_dirty(handle, inode); - ext3_journal_test_restart(handle, inode); + truncate_restart_transaction(handle, inode); if (bh) { BUFFER_TRACE(bh, "retaking write access"); ext3_journal_get_write_access(handle, bh); @@ -2285,7 +2296,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, return; if (try_to_extend_transaction(handle, inode)) { ext3_mark_inode_dirty(handle, inode); - ext3_journal_test_restart(handle, inode); + truncate_restart_transaction(handle, inode); } ext3_free_blocks(handle, inode, nr, 1); @@ -2895,6 +2906,10 @@ static int ext3_do_update_inode(handle_t *handle, struct buffer_head *bh = iloc->bh; int err = 0, rc, block; +again: + /* we can't allow multiple procs in here at once, it's a bit racy */ + lock_buffer(bh); + /* For fields not tracked in the in-memory inode, * initialise them to zero for new inodes. */ if (ei->i_state & EXT3_STATE_NEW) @@ -2954,16 +2969,20 @@ static int ext3_do_update_inode(handle_t *handle, /* If this is the first large file * created, add a flag to the superblock.
*/ + unlock_buffer(bh); err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh); if (err) goto out_brelse; + ext3_update_dynamic_rev(sb); EXT3_SET_RO_COMPAT_FEATURE(sb, EXT3_FEATURE_RO_COMPAT_LARGE_FILE); handle->h_sync = 1; err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); + /* get our lock and start over */ + goto again; } } } @@ -2986,6 +3005,7 @@ static int ext3_do_update_inode(handle_t *handle, raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); + unlock_buffer(bh); rc = ext3_journal_dirty_metadata(handle, bh); if (!err) err = rc; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index a8d80a7..72743d3 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -720,7 +720,7 @@ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); -static struct dquot_operations ext3_quota_operations = { +static const struct dquot_operations ext3_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, @@ -737,7 +737,7 @@ static struct dquot_operations ext3_quota_operations = { .destroy_dquot = dquot_destroy, }; -static struct quotactl_ops ext3_qctl_operations = { +static const struct quotactl_ops ext3_qctl_operations = { .quota_on = ext3_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 418b6f3..d5c0ea2 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -37,7 +37,7 @@ config EXT4DEV_COMPAT To enable backwards compatibility so that systems that are still expecting to mount ext4 filesystems using ext4dev, - chose Y here. This feature will go away by 2.6.31, so + choose Y here. This feature will go away by 2.6.31, so please arrange to get your userspace programs fixed! config EXT4_FS_XATTR @@ -77,3 +77,12 @@ config EXT4_FS_SECURITY If you are not using a security module that requires using extended attributes for file security labels, say N. + +config EXT4_DEBUG + bool "EXT4 debugging support" + depends on EXT4_FS + help + Enables run-time debugging support for the ext4 filesystem. + + If you select Y here, then you will be able to turn on debugging + with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug" diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index e2126d7..1d04189 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -478,7 +478,7 @@ void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, * new bitmap information */ set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); - ext4_mb_update_group_info(grp, blocks_freed); + grp->bb_free += blocks_freed; up_write(&grp->alloc_sem); /* We dirtied the bitmap block */ diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 9714db3..e227eea 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -67,27 +67,29 @@ typedef unsigned int ext4_group_t; /* prefer goal again. 
length */ -#define EXT4_MB_HINT_MERGE 1 +#define EXT4_MB_HINT_MERGE 0x0001 /* blocks already reserved */ -#define EXT4_MB_HINT_RESERVED 2 +#define EXT4_MB_HINT_RESERVED 0x0002 /* metadata is being allocated */ -#define EXT4_MB_HINT_METADATA 4 +#define EXT4_MB_HINT_METADATA 0x0004 /* first blocks in the file */ -#define EXT4_MB_HINT_FIRST 8 +#define EXT4_MB_HINT_FIRST 0x0008 /* search for the best chunk */ -#define EXT4_MB_HINT_BEST 16 +#define EXT4_MB_HINT_BEST 0x0010 /* data is being allocated */ -#define EXT4_MB_HINT_DATA 32 +#define EXT4_MB_HINT_DATA 0x0020 /* don't preallocate (for tails) */ -#define EXT4_MB_HINT_NOPREALLOC 64 +#define EXT4_MB_HINT_NOPREALLOC 0x0040 /* allocate for locality group */ -#define EXT4_MB_HINT_GROUP_ALLOC 128 +#define EXT4_MB_HINT_GROUP_ALLOC 0x0080 /* allocate goal blocks or none */ -#define EXT4_MB_HINT_GOAL_ONLY 256 +#define EXT4_MB_HINT_GOAL_ONLY 0x0100 /* goal is meaningful */ -#define EXT4_MB_HINT_TRY_GOAL 512 +#define EXT4_MB_HINT_TRY_GOAL 0x0200 /* blocks already pre-reserved by delayed allocation */ -#define EXT4_MB_DELALLOC_RESERVED 1024 +#define EXT4_MB_DELALLOC_RESERVED 0x0400 +/* We are doing stream allocation */ +#define EXT4_MB_STREAM_ALLOC 0x0800 struct ext4_allocation_request { @@ -112,6 +114,21 @@ struct ext4_allocation_request { }; /* + * For delayed allocation tracking + */ +struct mpage_da_data { + struct inode *inode; + sector_t b_blocknr; /* start block number of extent */ + size_t b_size; /* size of extent */ + unsigned long b_state; /* state of the extent */ + unsigned long first_page, next_page; /* extent of pages */ + struct writeback_control *wbc; + int io_done; + int pages_written; + int retval; +}; + +/* * Special inodes numbers */ #define EXT4_BAD_INO 1 /* Bad blocks inode */ @@ -251,7 +268,6 @@ struct flex_groups { #define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ #define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ -#define EXT4_EXT_MIGRATE 0x00100000 /* Inode is migrating */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */ @@ -289,6 +305,7 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags) #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */ #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */ #define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */ +#define EXT4_STATE_EXT_MIGRATE 0x00000020 /* Inode is migrating */ /* Used to pass group descriptor data when online resize is done */ struct ext4_new_group_input { @@ -386,6 +403,9 @@ struct ext4_mount_options { #endif }; +/* Max physical block we can address w/o extents */ +#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF + /* * Structure of an inode on the disk */ @@ -456,7 +476,6 @@ struct move_extent { __u64 len; /* block length to be moved */ __u64 moved_len; /* moved block length */ }; -#define MAX_DEFRAG_SIZE ((1UL<<31) - 1) #define EXT4_EPOCH_BITS 2 #define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1) @@ -694,7 +713,6 @@ struct ext4_inode_info { #define EXT4_MOUNT_QUOTA 0x80000 /* Some quota option set */ #define EXT4_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ #define EXT4_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ -#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ #define
EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ @@ -841,6 +859,7 @@ struct ext4_sb_info { unsigned long s_gdb_count; /* Number of group descriptor blocks */ unsigned long s_desc_per_block; /* Number of group descriptors per block */ ext4_group_t s_groups_count; /* Number of groups in the fs */ + ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */ unsigned long s_overhead_last; /* Last calculated overhead */ unsigned long s_blocks_last; /* Last seen block count */ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */ @@ -950,6 +969,7 @@ struct ext4_sb_info { atomic_t s_mb_lost_chunks; atomic_t s_mb_preallocated; atomic_t s_mb_discarded; + atomic_t s_lock_busy; /* locality groups */ struct ext4_locality_group *s_locality_groups; @@ -1340,8 +1360,6 @@ extern void ext4_mb_free_blocks(handle_t *, struct inode *, ext4_fsblk_t, unsigned long, int, unsigned long *); extern int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t i, struct ext4_group_desc *desc); -extern void ext4_mb_update_group_info(struct ext4_group_info *grp, - ext4_grpblk_t add); extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t); extern void ext4_mb_put_buddy_cache_lock(struct super_block *, ext4_group_t, int); @@ -1367,6 +1385,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int); extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); extern int ext4_can_truncate(struct inode *inode); extern void ext4_truncate(struct inode *); +extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); extern void ext4_set_inode_flags(struct inode *); extern void ext4_get_inode_flags(struct ext4_inode_info *); extern int ext4_alloc_da_blocks(struct inode *inode); @@ -1575,15 +1594,18 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize) struct ext4_group_info { unsigned long bb_state; struct rb_root bb_free_root; - unsigned short bb_first_free; - unsigned short bb_free; - unsigned short bb_fragments; + ext4_grpblk_t bb_first_free; /* first free block */ + ext4_grpblk_t bb_free; /* total free blocks */ + ext4_grpblk_t bb_fragments; /* nr of freespace fragments */ struct list_head bb_prealloc_list; #ifdef DOUBLE_CHECK void *bb_bitmap; #endif struct rw_semaphore alloc_sem; - unsigned short bb_counters[]; + ext4_grpblk_t bb_counters[]; /* Nr of free power-of-two-block + * regions, index is order. + * bb_counters[3] = 5 means + * 5 free 8-block regions. */ }; #define EXT4_GROUP_INFO_NEED_INIT_BIT 0 @@ -1591,15 +1613,42 @@ struct ext4_group_info { #define EXT4_MB_GRP_NEED_INIT(grp) \ (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state))) +#define EXT4_MAX_CONTENTION 8 +#define EXT4_CONTENTION_THRESHOLD 2 + static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb, ext4_group_t group) { return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group); } +/* + * Returns true if the filesystem is busy enough that attempts to + * access the block group locks have run into contention. + */ +static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi) +{ + return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD); +} + static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group) { - spin_lock(ext4_group_lock_ptr(sb, group)); + spinlock_t *lock = ext4_group_lock_ptr(sb, group); + if (spin_trylock(lock)) + /* + * We're able to grab the lock right away, so drop the + * lock contention counter.
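+ * (Net effect: s_lock_busy decays toward zero on uncontended + * acquires and saturates at EXT4_MAX_CONTENTION on contended ones, + * so ext4_fs_is_busy() reads it as a cheap, hysteresis-like busy + * signal.)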
+ */ + atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0); + else { + /* + * The lock is busy, so bump the contention counter, + * and then wait on the spin lock. + */ + atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1, + EXT4_MAX_CONTENTION); + spin_lock(lock); + } } static inline void ext4_unlock_group(struct super_block *sb, diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 20a8410..61652f1 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -43,8 +43,7 @@ #define CHECK_BINSEARCH__ /* - * If EXT_DEBUG is defined you can use the 'extdebug' mount option - * to get lots of info about what's going on. + * Turn on EXT_DEBUG to get lots of info about extents operations. */ #define EXT_DEBUG__ #ifdef EXT_DEBUG @@ -138,6 +137,7 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, #define EXT_BREAK 1 #define EXT_REPEAT 2 +/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */ #define EXT_MAX_BLOCK 0xffffffff /* diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index eb27fd0..6a94099 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -44,7 +44,7 @@ int __ext4_journal_forget(const char *where, handle_t *handle, handle, err); } else - brelse(bh); + bforget(bh); return err; } @@ -60,7 +60,7 @@ int __ext4_journal_revoke(const char *where, handle_t *handle, handle, err); } else - brelse(bh); + bforget(bh); return err; } @@ -89,7 +89,10 @@ int __ext4_handle_dirty_metadata(const char *where, handle_t *handle, ext4_journal_abort_handle(where, __func__, bh, handle, err); } else { - mark_buffer_dirty(bh); + if (inode && bh) + mark_buffer_dirty_inode(bh, inode); + else + mark_buffer_dirty(bh); if (inode && inode_needs_sync(inode)) { sync_dirty_buffer(bh); if (buffer_req(bh) && !buffer_uptodate(bh)) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 73ebfb4..7a38325 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -93,7 +93,9 @@ static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb) ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff); } -static int ext4_ext_journal_restart(handle_t *handle, int needed) +static int ext4_ext_truncate_extend_restart(handle_t *handle, + struct inode *inode, + int needed) { int err; @@ -104,7 +106,14 @@ static int ext4_ext_journal_restart(handle_t *handle, int needed) err = ext4_journal_extend(handle, needed); if (err <= 0) return err; - return ext4_journal_restart(handle, needed); + err = ext4_truncate_restart_trans(handle, inode, needed); + /* + * We have dropped i_data_sem so someone might have cached again + * an extent we are going to truncate. 
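+ * Invalidating it below (ext4_ext_invalidate_cache()) keeps a stale + * cached extent from being handed back by ext4_ext_get_blocks() + * once the truncate resumes.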
+ */ + ext4_ext_invalidate_cache(inode); + + return err; } /* @@ -220,57 +229,65 @@ ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, return newblock; } -static int ext4_ext_space_block(struct inode *inode) +static inline int ext4_ext_space_block(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent); + if (!check) { #ifdef AGGRESSIVE_TEST - if (size > 6) - size = 6; + if (size > 6) + size = 6; #endif + } return size; } -static int ext4_ext_space_block_idx(struct inode *inode) +static inline int ext4_ext_space_block_idx(struct inode *inode, int check) { int size; size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header)) / sizeof(struct ext4_extent_idx); + if (!check) { #ifdef AGGRESSIVE_TEST - if (size > 5) - size = 5; + if (size > 5) + size = 5; #endif + } return size; } -static int ext4_ext_space_root(struct inode *inode) +static inline int ext4_ext_space_root(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent); + if (!check) { #ifdef AGGRESSIVE_TEST - if (size > 3) - size = 3; + if (size > 3) + size = 3; #endif + } return size; } -static int ext4_ext_space_root_idx(struct inode *inode) +static inline int ext4_ext_space_root_idx(struct inode *inode, int check) { int size; size = sizeof(EXT4_I(inode)->i_data); size -= sizeof(struct ext4_extent_header); size /= sizeof(struct ext4_extent_idx); + if (!check) { #ifdef AGGRESSIVE_TEST - if (size > 4) - size = 4; + if (size > 4) + size = 4; #endif + } return size; } @@ -284,9 +301,9 @@ int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks) int lcap, icap, rcap, leafs, idxs, num; int newextents = blocks; - rcap = ext4_ext_space_root_idx(inode); - lcap = ext4_ext_space_block(inode); - icap = ext4_ext_space_block_idx(inode); + rcap = ext4_ext_space_root_idx(inode, 0); + lcap = ext4_ext_space_block(inode, 0); + icap = ext4_ext_space_block_idx(inode, 0); /* number of new leaf blocks needed */ num = leafs = (newextents + lcap - 1) / lcap; @@ -311,14 +328,14 @@ ext4_ext_max_entries(struct inode *inode, int depth) if (depth == ext_depth(inode)) { if (depth == 0) - max = ext4_ext_space_root(inode); + max = ext4_ext_space_root(inode, 1); else - max = ext4_ext_space_root_idx(inode); + max = ext4_ext_space_root_idx(inode, 1); } else { if (depth == 0) - max = ext4_ext_space_block(inode); + max = ext4_ext_space_block(inode, 1); else - max = ext4_ext_space_block_idx(inode); + max = ext4_ext_space_block_idx(inode, 1); } return max; @@ -437,8 +454,9 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) ext_debug(" %d->%llu", le32_to_cpu(path->p_idx->ei_block), idx_pblock(path->p_idx)); } else if (path->p_ext) { - ext_debug(" %d:%d:%llu ", + ext_debug(" %d:[%d]%d:%llu ", le32_to_cpu(path->p_ext->ee_block), + ext4_ext_is_uninitialized(path->p_ext), ext4_ext_get_actual_len(path->p_ext), ext_pblock(path->p_ext)); } else @@ -460,8 +478,11 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) eh = path[depth].p_hdr; ex = EXT_FIRST_EXTENT(eh); + ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino); + for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) { - ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block), + ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block), + ext4_ext_is_uninitialized(ex), ext4_ext_get_actual_len(ex), ext_pblock(ex)); } ext_debug("\n"); @@ 
-580,9 +601,10 @@ ext4_ext_binsearch(struct inode *inode, } path->p_ext = l - 1; - ext_debug(" -> %d:%llu:%d ", + ext_debug(" -> %d:%llu:[%d]%d ", le32_to_cpu(path->p_ext->ee_block), ext_pblock(path->p_ext), + ext4_ext_is_uninitialized(path->p_ext), ext4_ext_get_actual_len(path->p_ext)); #ifdef CHECK_BINSEARCH @@ -612,7 +634,7 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode) eh->eh_depth = 0; eh->eh_entries = 0; eh->eh_magic = EXT4_EXT_MAGIC; - eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode)); + eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0)); ext4_mark_inode_dirty(handle, inode); ext4_ext_invalidate_cache(inode); return 0; @@ -837,7 +859,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, neh = ext_block_hdr(bh); neh->eh_entries = 0; - neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode)); + neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; neh->eh_depth = 0; ex = EXT_FIRST_EXTENT(neh); @@ -850,9 +872,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, path[depth].p_ext++; while (path[depth].p_ext <= EXT_MAX_EXTENT(path[depth].p_hdr)) { - ext_debug("move %d:%llu:%d in new leaf %llu\n", + ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", le32_to_cpu(path[depth].p_ext->ee_block), ext_pblock(path[depth].p_ext), + ext4_ext_is_uninitialized(path[depth].p_ext), ext4_ext_get_actual_len(path[depth].p_ext), newblock); /*memmove(ex++, path[depth].p_ext++, @@ -912,7 +935,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, neh = ext_block_hdr(bh); neh->eh_entries = cpu_to_le16(1); neh->eh_magic = EXT4_EXT_MAGIC; - neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode)); + neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); neh->eh_depth = cpu_to_le16(depth - i); fidx = EXT_FIRST_INDEX(neh); fidx->ei_block = border; @@ -1037,9 +1060,9 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, /* old root could have indexes or leaves * so calculate e_max right way */ if (ext_depth(inode)) - neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode)); + neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0)); else - neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode)); + neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); neh->eh_magic = EXT4_EXT_MAGIC; set_buffer_uptodate(bh); unlock_buffer(bh); @@ -1054,7 +1077,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, goto out; curp->p_hdr->eh_magic = EXT4_EXT_MAGIC; - curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode)); + curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0)); curp->p_hdr->eh_entries = cpu_to_le16(1); curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr); @@ -1580,9 +1603,11 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, /* try to insert block into found extent and return */ if (ex && ext4_can_extents_be_merged(inode, ex, newext)) { - ext_debug("append %d block to %d:%d (from %llu)\n", + ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n", + ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), le32_to_cpu(ex->ee_block), + ext4_ext_is_uninitialized(ex), ext4_ext_get_actual_len(ex), ext_pblock(ex)); err = ext4_ext_get_access(handle, inode, path + depth); if (err) @@ -1651,9 +1676,10 @@ has_space: if (!nearex) { /* there is no extent in this leaf, create first one */ - ext_debug("first extent in the leaf: %d:%llu:%d\n", + ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n", 
le32_to_cpu(newext->ee_block), ext_pblock(newext), + ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext)); path[depth].p_ext = EXT_FIRST_EXTENT(eh); } else if (le32_to_cpu(newext->ee_block) @@ -1663,10 +1689,11 @@ has_space: len = EXT_MAX_EXTENT(eh) - nearex; len = (len - 1) * sizeof(struct ext4_extent); len = len < 0 ? 0 : len; - ext_debug("insert %d:%llu:%d after: nearest 0x%p, " + ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, " "move %d from 0x%p to 0x%p\n", le32_to_cpu(newext->ee_block), ext_pblock(newext), + ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), nearex, len, nearex + 1, nearex + 2); memmove(nearex + 2, nearex + 1, len); @@ -1676,10 +1703,11 @@ has_space: BUG_ON(newext->ee_block == nearex->ee_block); len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent); len = len < 0 ? 0 : len; - ext_debug("insert %d:%llu:%d before: nearest 0x%p, " + ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, " "move %d from 0x%p to 0x%p\n", le32_to_cpu(newext->ee_block), ext_pblock(newext), + ext4_ext_is_uninitialized(newext), ext4_ext_get_actual_len(newext), nearex, len, nearex + 1, nearex + 2); memmove(nearex + 1, nearex, len); @@ -2094,7 +2122,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, else uninitialized = 0; - ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len); + ext_debug("remove ext %u:[%d]%d\n", ex_ee_block, + uninitialized, ex_ee_len); path[depth].p_ext = ex; a = ex_ee_block > start ? ex_ee_block : start; @@ -2138,7 +2167,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, } credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); - err = ext4_ext_journal_restart(handle, credits); + err = ext4_ext_truncate_extend_restart(handle, inode, credits); if (err) goto out; @@ -2327,7 +2356,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) if (err == 0) { ext_inode_hdr(inode)->eh_depth = 0; ext_inode_hdr(inode)->eh_max = - cpu_to_le16(ext4_ext_space_root(inode)); + cpu_to_le16(ext4_ext_space_root(inode, 0)); err = ext4_ext_dirty(handle, inode, path); } } @@ -2743,6 +2772,7 @@ insert: } else if (err) goto fix_extent_len; out: + ext4_ext_show_leaf(inode, path); return err ? 
err : allocated; fix_extent_len: @@ -2786,7 +2816,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, struct ext4_allocation_request ar; __clear_bit(BH_New, &bh_result->b_state); - ext_debug("blocks %u/%u requested for inode %u\n", + ext_debug("blocks %u/%u requested for inode %lu\n", iblock, max_blocks, inode->i_ino); /* check in cache */ @@ -2849,7 +2879,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, newblock = iblock - ee_block + ee_start; /* number of remaining blocks in the extent */ allocated = ee_len - (iblock - ee_block); - ext_debug("%u fit into %lu:%d -> %llu\n", iblock, + ext_debug("%u fit into %u:%d -> %llu\n", iblock, ee_block, ee_len, newblock); /* Do not put uninitialized extent in the cache */ @@ -2950,7 +2980,7 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, newblock = ext4_mb_new_blocks(handle, &ar, &err); if (!newblock) goto out2; - ext_debug("allocate new block: goal %llu, found %llu/%lu\n", + ext_debug("allocate new block: goal %llu, found %llu/%u\n", ar.goal, newblock, allocated); /* try to insert new extent into found leaf and return */ diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 83cf641..0747574 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -50,7 +50,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; - int ret = 0; + int err, ret = 0; J_ASSERT(ext4_journal_current_handle() == NULL); @@ -79,6 +79,9 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) goto out; } + if (!journal) + ret = sync_mapping_buffers(inode->i_mapping); + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) goto out; @@ -91,10 +94,12 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, /* sys_fsync did this */ }; - ret = sync_inode(inode, &wbc); - if (journal && (journal->j_flags & JBD2_BARRIER)) - blkdev_issue_flush(inode->i_sb->s_bdev, NULL); + err = sync_inode(inode, &wbc); + if (ret == 0) + ret = err; } out: + if (journal && (journal->j_flags & JBD2_BARRIER)) + blkdev_issue_flush(inode->i_sb->s_bdev, NULL); return ret; } diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 29e6dc7..f3624ea 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1189,7 +1189,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb) x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8); printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", - i, ext4_free_inodes_count(sb, gdp), x); + (unsigned long) i, ext4_free_inodes_count(sb, gdp), x); bitmap_count += x; } brelse(bitmap_bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 349dd6b..064746f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -192,11 +192,24 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode) * so before we call here everything must be consistently dirtied against * this transaction. */ -static int ext4_journal_test_restart(handle_t *handle, struct inode *inode) + int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode, + int nblocks) { + int ret; + + /* + * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this + * moment, get_block can be called only for blocks inside i_size since + * page cache has been already dropped and writes are blocked by + * i_mutex. So we can safely drop the i_data_sem here. 
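+ *
+ * A minimal sketch of the call shape (the truncate paths later in
+ * this patch use blocks_for_truncate(inode) as the credit estimate):
+ *
+ *	if (try_to_extend_transaction(handle, inode)) {
+ *		ext4_mark_inode_dirty(handle, inode);
+ *		ext4_truncate_restart_trans(handle, inode,
+ *					blocks_for_truncate(inode));
+ *	}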
+ */ BUG_ON(EXT4_JOURNAL(inode) == NULL); jbd_debug(2, "restarting handle %p\n", handle); - return ext4_journal_restart(handle, blocks_for_truncate(inode)); + up_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_journal_restart(handle, blocks_for_truncate(inode)); + down_write(&EXT4_I(inode)->i_data_sem); + + return ret; } /* @@ -341,9 +354,7 @@ static int ext4_block_to_path(struct inode *inode, int n = 0; int final = 0; - if (i_block < 0) { - ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0"); - } else if (i_block < direct_blocks) { + if (i_block < direct_blocks) { offsets[n++] = i_block; final = direct_blocks; } else if ((i_block -= direct_blocks) < indirect_blocks) { @@ -551,15 +562,21 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind) * * Normally this function find the preferred place for block allocation, * returns it. + * Because this is only used for non-extent files, we limit the block nr + * to 32 bits. */ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, Indirect *partial) { + ext4_fsblk_t goal; + /* * XXX need to get goal block from mballoc's data structures */ - return ext4_find_near(inode, partial); + goal = ext4_find_near(inode, partial); + goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; + return goal; } /** @@ -640,6 +657,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, if (*err) goto failed_out; + BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS); + target -= count; /* allocate blocks for indirect blocks */ while (index < indirect_blks && count) { @@ -674,6 +693,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, ar.flags = EXT4_MB_HINT_DATA; current_block = ext4_mb_new_blocks(handle, &ar, err); + BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS); if (*err && (target == blks)) { /* @@ -762,8 +782,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode, BUFFER_TRACE(bh, "call get_create_access"); err = ext4_journal_get_create_access(handle, bh); if (err) { + /* Don't brelse(bh) here; it's done in + * ext4_journal_forget() below */ unlock_buffer(bh); - brelse(bh); goto failed; } @@ -1109,16 +1130,15 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) ext4_discard_preallocations(inode); } -static int check_block_validity(struct inode *inode, sector_t logical, - sector_t phys, int len) +static int check_block_validity(struct inode *inode, const char *msg, + sector_t logical, sector_t phys, int len) { if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) { - ext4_error(inode->i_sb, "check_block_validity", + ext4_error(inode->i_sb, msg, "inode #%lu logical block %llu mapped to %llu " "(size %d)", inode->i_ino, (unsigned long long) logical, (unsigned long long) phys, len); - WARN_ON(1); return -EIO; } return 0; @@ -1170,8 +1190,8 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, up_read((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && buffer_mapped(bh)) { - int ret = check_block_validity(inode, block, - bh->b_blocknr, retval); + int ret = check_block_validity(inode, "file system corruption", + block, bh->b_blocknr, retval); if (ret != 0) return ret; } @@ -1235,8 +1255,7 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, * i_data's format changing. 
Force the migrate * to fail by clearing migrate flags */ - EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags & - ~EXT4_EXT_MIGRATE; + EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; } } @@ -1252,8 +1271,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block, up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && buffer_mapped(bh)) { - int ret = check_block_validity(inode, block, - bh->b_blocknr, retval); + int ret = check_block_validity(inode, "file system " + "corruption after allocation", + block, bh->b_blocknr, retval); if (ret != 0) return ret; } @@ -1863,18 +1883,6 @@ static void ext4_da_page_release_reservation(struct page *page, * Delayed allocation stuff */ -struct mpage_da_data { - struct inode *inode; - sector_t b_blocknr; /* start block number of extent */ - size_t b_size; /* size of extent */ - unsigned long b_state; /* state of the extent */ - unsigned long first_page, next_page; /* extent of pages */ - struct writeback_control *wbc; - int io_done; - int pages_written; - int retval; -}; - /* * mpage_da_submit_io - walks through extent of pages and try to write * them with writepage() call back @@ -2329,7 +2337,7 @@ static int __mpage_da_writepage(struct page *page, /* * Rest of the page in the page_vec * redirty then and skip then. We will - * try to to write them again after + * try to write them again after * starting a new transaction */ redirty_page_for_writepage(wbc, page); @@ -2737,6 +2745,7 @@ static int ext4_da_writepages(struct address_space *mapping, long pages_skipped; int range_cyclic, cycled = 1, io_done = 0; int needed_blocks, ret = 0, nr_to_writebump = 0; + loff_t range_start = wbc->range_start; struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); trace_ext4_da_writepages(inode, wbc); @@ -2850,6 +2859,7 @@ retry: mpd.io_done = 1; ret = MPAGE_DA_EXTENT_TAIL; } + trace_ext4_da_write_pages(inode, &mpd); wbc->nr_to_write -= mpd.pages_written; ext4_journal_stop(handle); @@ -2905,6 +2915,7 @@ out_writepages: if (!no_nrwrite_index_update) wbc->no_nrwrite_index_update = 0; wbc->nr_to_write -= nr_to_writebump; + wbc->range_start = range_start; trace_ext4_da_writepages_result(inode, wbc, ret, pages_written); return ret; } @@ -3117,6 +3128,8 @@ out: */ int ext4_alloc_da_blocks(struct inode *inode) { + trace_ext4_alloc_da_blocks(inode); + if (!EXT4_I(inode)->i_reserved_data_blocks && !EXT4_I(inode)->i_reserved_meta_blocks) return 0; @@ -3663,7 +3676,8 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, ext4_handle_dirty_metadata(handle, inode, bh); } ext4_mark_inode_dirty(handle, inode); - ext4_journal_test_restart(handle, inode); + ext4_truncate_restart_trans(handle, inode, + blocks_for_truncate(inode)); if (bh) { BUFFER_TRACE(bh, "retaking write access"); ext4_journal_get_write_access(handle, bh); @@ -3874,7 +3888,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, return; if (try_to_extend_transaction(handle, inode)) { ext4_mark_inode_dirty(handle, inode); - ext4_journal_test_restart(handle, inode); + ext4_truncate_restart_trans(handle, inode, + blocks_for_truncate(inode)); } ext4_free_blocks(handle, inode, nr, 1, 1); @@ -3962,8 +3977,7 @@ void ext4_truncate(struct inode *inode) if (!ext4_can_truncate(inode)) return; - if (ei->i_disksize && inode->i_size == 0 && - !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) + if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE; if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) { @@ -4537,7 +4551,8 @@ static int 
ext4_inode_blocks_set(handle_t *handle, */ static int ext4_do_update_inode(handle_t *handle, struct inode *inode, - struct ext4_iloc *iloc) + struct ext4_iloc *iloc, + int do_sync) { struct ext4_inode *raw_inode = ext4_raw_inode(iloc); struct ext4_inode_info *ei = EXT4_I(inode); @@ -4585,8 +4600,7 @@ static int ext4_do_update_inode(handle_t *handle, if (ext4_inode_blocks_set(handle, raw_inode, ei)) goto out_brelse; raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); - /* clear the migrate flag in the raw_inode */ - raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE); + raw_inode->i_flags = cpu_to_le32(ei->i_flags); if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_HURD)) raw_inode->i_file_acl_high = @@ -4639,10 +4653,22 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); } - BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - rc = ext4_handle_dirty_metadata(handle, inode, bh); - if (!err) - err = rc; + /* + * If we're not using a journal and we were called from + * ext4_write_inode() to sync the inode (making do_sync true), + * we can just use sync_dirty_buffer() directly to do our dirty + * work. Testing s_journal here is a bit redundant but it's + * worth it to avoid potential future trouble. + */ + if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) { + BUFFER_TRACE(bh, "call sync_dirty_buffer"); + sync_dirty_buffer(bh); + } else { + BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); + rc = ext4_handle_dirty_metadata(handle, inode, bh); + if (!err) + err = rc; + } ei->i_state &= ~EXT4_STATE_NEW; out_brelse: @@ -4688,19 +4714,32 @@ out_brelse: */ int ext4_write_inode(struct inode *inode, int wait) { + int err; + if (current->flags & PF_MEMALLOC) return 0; - if (ext4_journal_current_handle()) { - jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); - dump_stack(); - return -EIO; - } + if (EXT4_SB(inode->i_sb)->s_journal) { + if (ext4_journal_current_handle()) { + jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n"); + dump_stack(); + return -EIO; + } - if (!wait) - return 0; + if (!wait) + return 0; + + err = ext4_force_commit(inode->i_sb); + } else { + struct ext4_iloc iloc; - return ext4_force_commit(inode->i_sb); + err = ext4_get_inode_loc(inode, &iloc); + if (err) + return err; + err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE, + inode, &iloc, wait); + } + return err; } /* @@ -4994,7 +5033,7 @@ int ext4_mark_iloc_dirty(handle_t *handle, get_bh(iloc->bh); /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */ - err = ext4_do_update_inode(handle, inode, iloc); + err = ext4_do_update_inode(handle, inode, iloc, 0); put_bh(iloc->bh); return err; } @@ -5285,12 +5324,21 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) else len = PAGE_CACHE_SIZE; + lock_page(page); + /* + * return if we have all the buffers mapped. This avoid + * the need to call write_begin/write_end which does a + * journal_start/journal_stop which can block and take + * long time + */ if (page_has_buffers(page)) { - /* return if we have all the buffers mapped */ if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, - ext4_bh_unmapped)) + ext4_bh_unmapped)) { + unlock_page(page); goto out_unlock; + } } + unlock_page(page); /* * OK, we need to fill the hole... 
Do write_begin write_end * to do block allocation/reservation.We are not holding diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 7050a9c..c1cdf61 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -243,10 +243,9 @@ setversion_out: me.donor_start, me.len, &me.moved_len); fput(donor_filp); - if (!err) - if (copy_to_user((struct move_extent *)arg, - &me, sizeof(me))) - return -EFAULT; + if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) + return -EFAULT; + return err; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index cd25846..e9c6189 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -22,6 +22,7 @@ */ #include "mballoc.h" +#include <linux/debugfs.h> #include <trace/events/ext4.h> /* @@ -622,13 +623,13 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, /* FIXME!! need more doc */ static void ext4_mb_mark_free_simple(struct super_block *sb, - void *buddy, unsigned first, int len, + void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned short min; - unsigned short max; - unsigned short chunk; + ext4_grpblk_t min; + ext4_grpblk_t max; + ext4_grpblk_t chunk; unsigned short border; BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb)); @@ -662,10 +663,10 @@ void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); - unsigned short max = EXT4_BLOCKS_PER_GROUP(sb); - unsigned short i = 0; - unsigned short first; - unsigned short len; + ext4_grpblk_t max = EXT4_BLOCKS_PER_GROUP(sb); + ext4_grpblk_t i = 0; + ext4_grpblk_t first; + ext4_grpblk_t len; unsigned free = 0; unsigned fragments = 0; unsigned long long period = get_cycles(); @@ -743,7 +744,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) char *data; char *bitmap; - mb_debug("init page %lu\n", page->index); + mb_debug(1, "init page %lu\n", page->index); inode = page->mapping->host; sb = inode->i_sb; @@ -822,7 +823,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) set_bitmap_uptodate(bh[i]); bh[i]->b_end_io = end_buffer_read_sync; submit_bh(READ, bh[i]); - mb_debug("read bitmap for group %u\n", first_group + i); + mb_debug(1, "read bitmap for group %u\n", first_group + i); } /* wait for I/O completion */ @@ -862,12 +863,13 @@ static int ext4_mb_init_cache(struct page *page, char *incore) if ((first_block + i) & 1) { /* this is block of buddy */ BUG_ON(incore == NULL); - mb_debug("put buddy for group %u in page %lu/%x\n", + mb_debug(1, "put buddy for group %u in page %lu/%x\n", group, page->index, i * blocksize); grinfo = ext4_get_group_info(sb, group); grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, - sizeof(unsigned short)*(sb->s_blocksize_bits+2)); + sizeof(*grinfo->bb_counters) * + (sb->s_blocksize_bits+2)); /* * incore got set to the group block bitmap below */ @@ -878,7 +880,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) } else { /* this is block of bitmap */ BUG_ON(incore != NULL); - mb_debug("put bitmap for group %u in page %lu/%x\n", + mb_debug(1, "put bitmap for group %u in page %lu/%x\n", group, page->index, i * blocksize); /* see comments in ext4_mb_put_pa() */ @@ -908,6 +910,100 @@ out: return err; } +static noinline_for_stack +int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) +{ + + int ret = 0; + void *bitmap; + int blocks_per_page; + int block, pnum, poff; + int num_grp_locked = 0; + struct ext4_group_info *this_grp; + struct 
ext4_sb_info *sbi = EXT4_SB(sb); + struct inode *inode = sbi->s_buddy_cache; + struct page *page = NULL, *bitmap_page = NULL; + + mb_debug(1, "init group %u\n", group); + blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; + this_grp = ext4_get_group_info(sb, group); + /* + * This ensures that we don't reinit the buddy cache + * page which map to the group from which we are already + * allocating. If we are looking at the buddy cache we would + * have taken a reference using ext4_mb_load_buddy and that + * would have taken the alloc_sem lock. + */ + num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group); + if (!EXT4_MB_GRP_NEED_INIT(this_grp)) { + /* + * somebody initialized the group + * return without doing anything + */ + ret = 0; + goto err; + } + /* + * the buddy cache inode stores the block bitmap + * and buddy information in consecutive blocks. + * So for each group we need two blocks. + */ + block = group * 2; + pnum = block / blocks_per_page; + poff = block % blocks_per_page; + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + if (page) { + BUG_ON(page->mapping != inode->i_mapping); + ret = ext4_mb_init_cache(page, NULL); + if (ret) { + unlock_page(page); + goto err; + } + unlock_page(page); + } + if (page == NULL || !PageUptodate(page)) { + ret = -EIO; + goto err; + } + mark_page_accessed(page); + bitmap_page = page; + bitmap = page_address(page) + (poff * sb->s_blocksize); + + /* init buddy cache */ + block++; + pnum = block / blocks_per_page; + poff = block % blocks_per_page; + page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); + if (page == bitmap_page) { + /* + * If both the bitmap and buddy are in + * the same page we don't need to force + * init the buddy + */ + unlock_page(page); + } else if (page) { + BUG_ON(page->mapping != inode->i_mapping); + ret = ext4_mb_init_cache(page, bitmap); + if (ret) { + unlock_page(page); + goto err; + } + unlock_page(page); + } + if (page == NULL || !PageUptodate(page)) { + ret = -EIO; + goto err; + } + mark_page_accessed(page); +err: + ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked); + if (bitmap_page) + page_cache_release(bitmap_page); + if (page) + page_cache_release(page); + return ret; +} + static noinline_for_stack int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) @@ -922,7 +1018,7 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; - mb_debug("load group %u\n", group); + mb_debug(1, "load group %u\n", group); blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); @@ -941,8 +1037,26 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, * groups mapped by the page is blocked * till we are done with allocation */ +repeat_load_buddy: down_read(e4b->alloc_semp); + if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { + /* we need to check for group need init flag + * with alloc_semp held so that we can be sure + * that new blocks didn't get added to the group + * when we are loading the buddy cache + */ + up_read(e4b->alloc_semp); + /* + * we need full data about the group + * to make a good selection + */ + ret = ext4_mb_init_group(sb, group); + if (ret) + return ret; + goto repeat_load_buddy; + } + /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. 
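The "two consecutive blocks per group" layout noted above makes locating a group's bitmap and buddy pure arithmetic. A minimal standalone sketch (hypothetical helper name, mirroring the pnum/poff computation in ext4_mb_init_group()):

	/*
	 * Map a block group to the page index and in-page block offset
	 * of its bitmap in the buddy-cache inode. The buddy block is
	 * simply block + 1 and may fall on the following page.
	 */
	static void mb_bitmap_location(ext4_group_t group, int blocks_per_page,
				       int *pnum, int *poff)
	{
		int block = group * 2;	/* bitmap block; buddy is block + 1 */

		*pnum = block / blocks_per_page;
		*poff = block % blocks_per_page;
	}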
@@ -1360,7 +1474,7 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, ac->alloc_semp = e4b->alloc_semp; e4b->alloc_semp = NULL; /* store last allocated for subsequent stream allocation */ - if ((ac->ac_flags & EXT4_MB_HINT_DATA)) { + if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { spin_lock(&sbi->s_md_lock); sbi->s_mb_last_group = ac->ac_f_ex.fe_group; sbi->s_mb_last_start = ac->ac_f_ex.fe_start; @@ -1837,97 +1951,6 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb, } -static noinline_for_stack -int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) -{ - - int ret; - void *bitmap; - int blocks_per_page; - int block, pnum, poff; - int num_grp_locked = 0; - struct ext4_group_info *this_grp; - struct ext4_sb_info *sbi = EXT4_SB(sb); - struct inode *inode = sbi->s_buddy_cache; - struct page *page = NULL, *bitmap_page = NULL; - - mb_debug("init group %lu\n", group); - blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; - this_grp = ext4_get_group_info(sb, group); - /* - * This ensures we don't add group - * to this buddy cache via resize - */ - num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group); - if (!EXT4_MB_GRP_NEED_INIT(this_grp)) { - /* - * somebody initialized the group - * return without doing anything - */ - ret = 0; - goto err; - } - /* - * the buddy cache inode stores the block bitmap - * and buddy information in consecutive blocks. - * So for each group we need two blocks. - */ - block = group * 2; - pnum = block / blocks_per_page; - poff = block % blocks_per_page; - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); - if (page) { - BUG_ON(page->mapping != inode->i_mapping); - ret = ext4_mb_init_cache(page, NULL); - if (ret) { - unlock_page(page); - goto err; - } - unlock_page(page); - } - if (page == NULL || !PageUptodate(page)) { - ret = -EIO; - goto err; - } - mark_page_accessed(page); - bitmap_page = page; - bitmap = page_address(page) + (poff * sb->s_blocksize); - - /* init buddy cache */ - block++; - pnum = block / blocks_per_page; - poff = block % blocks_per_page; - page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); - if (page == bitmap_page) { - /* - * If both the bitmap and buddy are in - * the same page we don't need to force - * init the buddy - */ - unlock_page(page); - } else if (page) { - BUG_ON(page->mapping != inode->i_mapping); - ret = ext4_mb_init_cache(page, bitmap); - if (ret) { - unlock_page(page); - goto err; - } - unlock_page(page); - } - if (page == NULL || !PageUptodate(page)) { - ret = -EIO; - goto err; - } - mark_page_accessed(page); -err: - ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked); - if (bitmap_page) - page_cache_release(bitmap_page); - if (page) - page_cache_release(page); - return ret; -} - static noinline_for_stack int ext4_mb_regular_allocator(struct ext4_allocation_context *ac) { @@ -1938,11 +1961,14 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) struct ext4_sb_info *sbi; struct super_block *sb; struct ext4_buddy e4b; - loff_t size, isize; sb = ac->ac_sb; sbi = EXT4_SB(sb); ngroups = ext4_get_groups_count(sb); + /* non-extent files are limited to low blocks/groups */ + if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL)) + ngroups = sbi->s_blockfile_groups; + BUG_ON(ac->ac_status == AC_STATUS_FOUND); /* first, try the goal */ @@ -1974,20 +2000,16 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) } bsbits = ac->ac_sb->s_blocksize_bits; - /* if stream allocation is enabled, use global goal */ - size = ac->ac_o_ex.fe_logical + 
ac->ac_o_ex.fe_len; - isize = i_size_read(ac->ac_inode) >> bsbits; - if (size < isize) - size = isize; - if (size < sbi->s_mb_stream_request && - (ac->ac_flags & EXT4_MB_HINT_DATA)) { + /* if stream allocation is enabled, use global goal */ + if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { /* TBD: may be hot point */ spin_lock(&sbi->s_md_lock); ac->ac_g_ex.fe_group = sbi->s_mb_last_group; ac->ac_g_ex.fe_start = sbi->s_mb_last_start; spin_unlock(&sbi->s_md_lock); } + /* Let's just scan groups to find more-less suitable blocks */ cr = ac->ac_2order ? 0 : 1; /* @@ -2015,27 +2037,6 @@ repeat: if (grp->bb_free == 0) continue; - /* - * if the group is already init we check whether it is - * a good group and if not we don't load the buddy - */ - if (EXT4_MB_GRP_NEED_INIT(grp)) { - /* - * we need full data about the group - * to make a good selection - */ - err = ext4_mb_init_group(sb, group); - if (err) - goto out; - } - - /* - * If the particular group doesn't satisfy our - * criteria we continue with the next group - */ - if (!ext4_mb_good_group(ac, group, cr)) - continue; - err = ext4_mb_load_buddy(sb, group, &e4b); if (err) goto out; @@ -2156,7 +2157,7 @@ static int ext4_mb_seq_history_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s " - "%-5s %-2s %-5s %-5s %-5s %-6s\n", + "%-5s %-2s %-6s %-5s %-5s %-6s\n", "pid", "inode", "original", "goal", "result", "found", "grps", "cr", "flags", "merge", "tail", "broken"); return 0; @@ -2164,7 +2165,7 @@ static int ext4_mb_seq_history_show(struct seq_file *seq, void *v) if (hs->op == EXT4_MB_HISTORY_ALLOC) { fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u " - "%-5u %-5s %-5u %-6u\n"; + "0x%04x %-5s %-5u %-6u\n"; sprintf(buf2, "%u/%d/%u@%u", hs->result.fe_group, hs->result.fe_start, hs->result.fe_len, hs->result.fe_logical); @@ -2205,7 +2206,7 @@ static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v) { } -static struct seq_operations ext4_mb_seq_history_ops = { +static const struct seq_operations ext4_mb_seq_history_ops = { .start = ext4_mb_seq_history_start, .next = ext4_mb_seq_history_next, .stop = ext4_mb_seq_history_stop, @@ -2287,7 +2288,7 @@ static ssize_t ext4_mb_seq_history_write(struct file *file, return count; } -static struct file_operations ext4_mb_seq_history_fops = { +static const struct file_operations ext4_mb_seq_history_fops = { .owner = THIS_MODULE, .open = ext4_mb_seq_history_open, .read = seq_read, @@ -2328,7 +2329,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) struct ext4_buddy e4b; struct sg { struct ext4_group_info info; - unsigned short counters[16]; + ext4_grpblk_t counters[16]; } sg; group--; @@ -2366,7 +2367,7 @@ static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) { } -static struct seq_operations ext4_mb_seq_groups_ops = { +static const struct seq_operations ext4_mb_seq_groups_ops = { .start = ext4_mb_seq_groups_start, .next = ext4_mb_seq_groups_next, .stop = ext4_mb_seq_groups_stop, @@ -2387,7 +2388,7 @@ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file) } -static struct file_operations ext4_mb_seq_groups_fops = { +static const struct file_operations ext4_mb_seq_groups_fops = { .owner = THIS_MODULE, .open = ext4_mb_seq_groups_open, .read = seq_read, @@ -2532,7 +2533,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); init_rwsem(&meta_group_info[i]->alloc_sem); - meta_group_info[i]->bb_free_root.rb_node = NULL;; + 
meta_group_info[i]->bb_free_root.rb_node = NULL; #ifdef DOUBLE_CHECK { @@ -2558,26 +2559,15 @@ exit_meta_group_info: return -ENOMEM; } /* ext4_mb_add_groupinfo */ -/* - * Update an existing group. - * This function is used for online resize - */ -void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add) -{ - grp->bb_free += add; -} - static int ext4_mb_init_backend(struct super_block *sb) { ext4_group_t ngroups = ext4_get_groups_count(sb); ext4_group_t i; - int metalen; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; int num_meta_group_infos; int num_meta_group_infos_max; int array_size; - struct ext4_group_info **meta_group_info; struct ext4_group_desc *desc; /* This is the number of blocks used by GDT */ @@ -2622,22 +2612,6 @@ static int ext4_mb_init_backend(struct super_block *sb) goto err_freesgi; } EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; - - metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb); - for (i = 0; i < num_meta_group_infos; i++) { - if ((i + 1) == num_meta_group_infos) - metalen = sizeof(*meta_group_info) * - (ngroups - - (i << EXT4_DESC_PER_BLOCK_BITS(sb))); - meta_group_info = kmalloc(metalen, GFP_KERNEL); - if (meta_group_info == NULL) { - printk(KERN_ERR "EXT4-fs: can't allocate mem for a " - "buddy group\n"); - goto err_freemeta; - } - sbi->s_group_info[i] = meta_group_info; - } - for (i = 0; i < ngroups; i++) { desc = ext4_get_group_desc(sb, i, NULL); if (desc == NULL) { @@ -2655,7 +2629,6 @@ err_freebuddy: while (i-- > 0) kfree(ext4_get_group_info(sb, i)); i = num_meta_group_infos; -err_freemeta: while (i-- > 0) kfree(sbi->s_group_info[i]); iput(sbi->s_buddy_cache); @@ -2672,14 +2645,14 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) unsigned max; int ret; - i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short); + i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); if (sbi->s_mb_offsets == NULL) { return -ENOMEM; } - i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int); + i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs); sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); if (sbi->s_mb_maxs == NULL) { kfree(sbi->s_mb_offsets); @@ -2758,7 +2731,7 @@ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp) kmem_cache_free(ext4_pspace_cachep, pa); } if (count) - mb_debug("mballoc: %u PAs left\n", count); + mb_debug(1, "mballoc: %u PAs left\n", count); } @@ -2839,7 +2812,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) list_for_each_safe(l, ltmp, &txn->t_private_list) { entry = list_entry(l, struct ext4_free_data, list); - mb_debug("gonna free %u blocks in group %u (0x%p):", + mb_debug(1, "gonna free %u blocks in group %u (0x%p):", entry->count, entry->group, entry); err = ext4_mb_load_buddy(sb, entry->group, &e4b); @@ -2874,9 +2847,43 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) ext4_mb_release_desc(&e4b); } - mb_debug("freed %u blocks in %u structures\n", count, count2); + mb_debug(1, "freed %u blocks in %u structures\n", count, count2); +} + +#ifdef CONFIG_EXT4_DEBUG +u8 mb_enable_debug __read_mostly; + +static struct dentry *debugfs_dir; +static struct dentry *debugfs_debug; + +static void __init ext4_create_debugfs_entry(void) +{ + debugfs_dir = debugfs_create_dir("ext4", NULL); + if (debugfs_dir) + debugfs_debug = debugfs_create_u8("mballoc-debug", + S_IRUGO | S_IWUSR, + debugfs_dir, + &mb_enable_debug); +} + +static void ext4_remove_debugfs_entry(void) +{ + 
debugfs_remove(debugfs_debug); + debugfs_remove(debugfs_dir); } +#else + +static void __init ext4_create_debugfs_entry(void) +{ +} + +static void ext4_remove_debugfs_entry(void) +{ +} + +#endif + int __init init_ext4_mballoc(void) { ext4_pspace_cachep = @@ -2904,6 +2911,7 @@ int __init init_ext4_mballoc(void) kmem_cache_destroy(ext4_ac_cachep); return -ENOMEM; } + ext4_create_debugfs_entry(); return 0; } @@ -2917,6 +2925,7 @@ void exit_ext4_mballoc(void) kmem_cache_destroy(ext4_pspace_cachep); kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_ext_cachep); + ext4_remove_debugfs_entry(); } @@ -3061,7 +3070,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe; else ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; - mb_debug("#%u: goal %u blocks for locality group\n", + mb_debug(1, "#%u: goal %u blocks for locality group\n", current->pid, ac->ac_g_ex.fe_len); } @@ -3180,23 +3189,18 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || ac->ac_o_ex.fe_logical < pa->pa_lstart)); - /* skip PA normalized request doesn't overlap with */ - if (pa->pa_lstart >= end) { - spin_unlock(&pa->pa_lock); - continue; - } - if (pa_end <= start) { + /* skip PAs this normalized request doesn't overlap with */ + if (pa->pa_lstart >= end || pa_end <= start) { spin_unlock(&pa->pa_lock); continue; } BUG_ON(pa->pa_lstart <= start && pa_end >= end); + /* adjust start or end to be adjacent to this pa */ if (pa_end <= ac->ac_o_ex.fe_logical) { BUG_ON(pa_end < start); start = pa_end; - } - - if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { + } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { BUG_ON(pa->pa_lstart > end); end = pa->pa_lstart; } @@ -3251,7 +3255,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } - mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size, + mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size, (unsigned) orig_size, (unsigned) start); } @@ -3300,7 +3304,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, BUG_ON(pa->pa_free < len); pa->pa_free -= len; - mb_debug("use %llu/%u from inode pa %p\n", start, len, pa); + mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa); } /* @@ -3324,7 +3328,7 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, * in on-disk bitmap -- see ext4_mb_release_context() * Other CPUs are prevented from allocating from this pa by lg_mutex */ - mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); + mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); } /* @@ -3382,6 +3386,11 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac) ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len) continue; + /* non-extent files can't have physical blocks past 2^32 */ + if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) && + pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS) + continue; + /* found preallocated blocks, use them */ spin_lock(&pa->pa_lock); if (pa->pa_deleted == 0 && pa->pa_free) { @@ -3503,7 +3512,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, preallocated += len; count++; } - mb_debug("prellocated %u for group %u\n", preallocated, group); + mb_debug(1, "prellocated %u for group %u\n", preallocated, group); } static void ext4_mb_pa_callback(struct rcu_head *head) @@ -3638,7 +3647,7 @@ ext4_mb_new_inode_pa(struct 
ext4_allocation_context *ac) pa->pa_deleted = 0; pa->pa_type = MB_INODE_PA; - mb_debug("new inode pa %p: %llu/%u for %u\n", pa, + mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_inode_pa(ac, pa); @@ -3698,7 +3707,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) pa->pa_deleted = 0; pa->pa_type = MB_GROUP_PA; - mb_debug("new group pa %p: %llu/%u for %u\n", pa, + mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_group_pa(ac, pa); @@ -3777,7 +3786,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, next = mb_find_next_bit(bitmap_bh->b_data, end, bit); start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit + le32_to_cpu(sbi->s_es->s_first_data_block); - mb_debug(" free preallocated %u/%u in group %u\n", + mb_debug(1, " free preallocated %u/%u in group %u\n", (unsigned) start, (unsigned) next - bit, (unsigned) group); free += next - bit; @@ -3868,7 +3877,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, int busy = 0; int free = 0; - mb_debug("discard preallocation for group %u\n", group); + mb_debug(1, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) return 0; @@ -3992,7 +4001,7 @@ void ext4_discard_preallocations(struct inode *inode) return; } - mb_debug("discard preallocation for inode %lu\n", inode->i_ino); + mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino); trace_ext4_discard_preallocations(inode); INIT_LIST_HEAD(&list); @@ -4097,7 +4106,7 @@ static void ext4_mb_return_to_preallocation(struct inode *inode, { BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list)); } -#ifdef MB_DEBUG +#ifdef CONFIG_EXT4_DEBUG static void ext4_mb_show_ac(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; @@ -4139,14 +4148,14 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac) ext4_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start); spin_unlock(&pa->pa_lock); - printk(KERN_ERR "PA:%lu:%d:%u \n", i, - start, pa->pa_len); + printk(KERN_ERR "PA:%u:%d:%u \n", i, + start, pa->pa_len); } ext4_unlock_group(sb, i); if (grp->bb_free == 0) continue; - printk(KERN_ERR "%lu: %d/%d \n", + printk(KERN_ERR "%u: %d/%d \n", i, grp->bb_free, grp->bb_fragments); } printk(KERN_ERR "\n"); @@ -4174,16 +4183,26 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return; + if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) + return; + size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len; - isize = i_size_read(ac->ac_inode) >> bsbits; + isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) + >> bsbits; size = max(size, isize); - /* don't use group allocation for large files */ - if (size >= sbi->s_mb_stream_request) + if ((size == isize) && + !ext4_fs_is_busy(sbi) && + (atomic_read(&ac->ac_inode->i_writecount) == 0)) { + ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; return; + } - if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) + /* don't use group allocation for large files */ + if (size >= sbi->s_mb_stream_request) { + ac->ac_flags |= EXT4_MB_STREAM_ALLOC; return; + } BUG_ON(ac->ac_lg != NULL); /* @@ -4246,7 +4265,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, * locality group. 
this is a policy, actually */ ext4_mb_group_or_file(ac); - mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " + mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, " "left: %u/%u, right %u/%u to %swritable\n", (unsigned) ar->len, (unsigned) ar->logical, (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, @@ -4268,7 +4287,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, struct ext4_prealloc_space *pa, *tmp; struct ext4_allocation_context *ac; - mb_debug("discard locality group preallocation\n"); + mb_debug(1, "discard locality group preallocation\n"); INIT_LIST_HEAD(&discard_list); ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS); diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index c96bb19..188d3d7 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -37,11 +37,19 @@ /* */ -#define MB_DEBUG__ -#ifdef MB_DEBUG -#define mb_debug(fmt, a...) printk(fmt, ##a) +#ifdef CONFIG_EXT4_DEBUG +extern u8 mb_enable_debug; + +#define mb_debug(n, fmt, a...) \ + do { \ + if ((n) <= mb_enable_debug) { \ + printk(KERN_DEBUG "(%s, %d): %s: ", \ + __FILE__, __LINE__, __func__); \ + printk(fmt, ## a); \ + } \ + } while (0) #else -#define mb_debug(fmt, a...) +#define mb_debug(n, fmt, a...) #endif /* @@ -128,8 +136,8 @@ struct ext4_prealloc_space { unsigned pa_deleted; ext4_fsblk_t pa_pstart; /* phys. block */ ext4_lblk_t pa_lstart; /* log. block */ - unsigned short pa_len; /* len of preallocated chunk */ - unsigned short pa_free; /* how many blocks are free */ + ext4_grpblk_t pa_len; /* len of preallocated chunk */ + ext4_grpblk_t pa_free; /* how many blocks are free */ unsigned short pa_type; /* pa type. inode or group */ spinlock_t *pa_obj_lock; struct inode *pa_inode; /* hack, for history only */ @@ -144,7 +152,7 @@ struct ext4_free_extent { ext4_lblk_t fe_logical; ext4_grpblk_t fe_start; ext4_group_t fe_group; - int fe_len; + ext4_grpblk_t fe_len; }; /* diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 313a50b..bf519f2 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -353,17 +353,16 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, down_write(&EXT4_I(inode)->i_data_sem); /* - * if EXT4_EXT_MIGRATE is cleared a block allocation + * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation * happened after we started the migrate. We need to * fail the migrate */ - if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE)) { + if (!(EXT4_I(inode)->i_state & EXT4_STATE_EXT_MIGRATE)) { retval = -EAGAIN; up_write(&EXT4_I(inode)->i_data_sem); goto err_out; } else - EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags & - ~EXT4_EXT_MIGRATE; + EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE; /* * We have the extent map build with the tmp inode. * Now copy the i_data across @@ -517,14 +516,15 @@ int ext4_ext_migrate(struct inode *inode) * when we add extents we extent the journal */ /* - * Even though we take i_mutex we can still cause block allocation - * via mmap write to holes. If we have allocated new blocks we fail - * migrate. New block allocation will clear EXT4_EXT_MIGRATE flag. - * The flag is updated with i_data_sem held to prevent racing with - * block allocation. + * Even though we take i_mutex we can still cause block + * allocation via mmap write to holes. If we have allocated + * new blocks we fail migrate. New block allocation will + * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated + * with i_data_sem held to prevent racing with block + * allocation. 
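+ * The clearing side runs in ext4_get_blocks() with the write lock
+ * held; a sketch of the pairing:
+ *
+ *	down_write(&EXT4_I(inode)->i_data_sem);
+ *	...
+ *	EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
+ *	...
+ *	up_write(&EXT4_I(inode)->i_data_sem);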
*/ down_read((&EXT4_I(inode)->i_data_sem)); - EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE; + EXT4_I(inode)->i_state |= EXT4_STATE_EXT_MIGRATE; up_read((&EXT4_I(inode)->i_data_sem)); handle = ext4_journal_start(inode, 1); @@ -618,7 +618,7 @@ err_out: tmp_inode->i_nlink = 0; ext4_journal_stop(handle); - + unlock_new_inode(tmp_inode); iput(tmp_inode); return retval; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index bbf2dd9..c07a291 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -19,14 +19,31 @@ #include "ext4_extents.h" #include "ext4.h" -#define get_ext_path(path, inode, block, ret) \ - do { \ - path = ext4_ext_find_extent(inode, block, path); \ - if (IS_ERR(path)) { \ - ret = PTR_ERR(path); \ - path = NULL; \ - } \ - } while (0) +/** + * get_ext_path - Find an extent path for designated logical block number. + * + * @inode: an inode which is searched + * @lblock: logical block number to find an extent path + * @path: pointer to an extent path pointer (for output) + * + * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value + * on failure. + */ +static inline int +get_ext_path(struct inode *inode, ext4_lblk_t lblock, + struct ext4_ext_path **path) +{ + int ret = 0; + + *path = ext4_ext_find_extent(inode, lblock, *path); + if (IS_ERR(*path)) { + ret = PTR_ERR(*path); + *path = NULL; + } else if ((*path)[ext_depth(inode)].p_ext == NULL) + ret = -ENODATA; + + return ret; +} /** * copy_extent_status - Copy the extent's initialization status @@ -113,6 +130,31 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path, } /** + * mext_check_null_inode - NULL check for two inodes + * + * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. + */ +static int +mext_check_null_inode(struct inode *inode1, struct inode *inode2, + const char *function) +{ + int ret = 0; + + if (inode1 == NULL) { + ext4_error(inode2->i_sb, function, + "Both inodes should not be NULL: " + "inode1 NULL inode2 %lu", inode2->i_ino); + ret = -EIO; + } else if (inode2 == NULL) { + ext4_error(inode1->i_sb, function, + "Both inodes should not be NULL: " + "inode1 %lu inode2 NULL", inode1->i_ino); + ret = -EIO; + } + return ret; +} + +/** * mext_double_down_read - Acquire two inodes' read semaphore * * @orig_inode: original inode structure @@ -124,8 +166,6 @@ mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode) { struct inode *first = orig_inode, *second = donor_inode; - BUG_ON(orig_inode == NULL || donor_inode == NULL); - /* * Use the inode number to provide the stable locking order instead * of its address, because the C language doesn't guarantee you can @@ -152,8 +192,6 @@ mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode) { struct inode *first = orig_inode, *second = donor_inode; - BUG_ON(orig_inode == NULL || donor_inode == NULL); - /* * Use the inode number to provide the stable locking order instead * of its address, because the C language doesn't guarantee you can @@ -178,8 +216,6 @@ mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode) static void mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode) { - BUG_ON(orig_inode == NULL || donor_inode == NULL); - up_read(&EXT4_I(orig_inode)->i_data_sem); up_read(&EXT4_I(donor_inode)->i_data_sem); } @@ -194,8 +230,6 @@ mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode) static void mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode) { - BUG_ON(orig_inode 
== NULL || donor_inode == NULL); - up_write(&EXT4_I(orig_inode)->i_data_sem); up_write(&EXT4_I(donor_inode)->i_data_sem); } @@ -283,8 +317,8 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, } if (new_flag) { - get_ext_path(orig_path, orig_inode, eblock, err); - if (orig_path == NULL) + err = get_ext_path(orig_inode, eblock, &orig_path); + if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, @@ -293,9 +327,9 @@ mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, } if (end_flag) { - get_ext_path(orig_path, orig_inode, - le32_to_cpu(end_ext->ee_block) - 1, err); - if (orig_path == NULL) + err = get_ext_path(orig_inode, + le32_to_cpu(end_ext->ee_block) - 1, &orig_path); + if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, @@ -519,7 +553,15 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, * oext |-----------| * new_ext |-------| */ - BUG_ON(le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end); + if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { + ext4_error(orig_inode->i_sb, __func__, + "new_ext_end(%u) should be less than or equal to " + "oext->ee_block(%u) + oext_alen(%d) - 1", + new_ext_end, le32_to_cpu(oext->ee_block), + oext_alen); + ret = -EIO; + goto out; + } /* * Case: new_ext is smaller than original extent @@ -543,6 +585,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, ret = mext_insert_extents(handle, orig_inode, orig_path, o_start, o_end, &start_ext, &new_ext, &end_ext); +out: return ret; } @@ -554,8 +597,10 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode, * @orig_off: block offset of original inode * @donor_off: block offset of donor inode * @max_count: the maximun length of extents + * + * Return 0 on success, or a negative error value on failure. 
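+ *
+ * The added range checks below amount to requiring, for both
+ * tmp_oext and tmp_dext (sketch):
+ *
+ *	le32_to_cpu(ext->ee_block) <= orig_off &&
+ *	orig_off <= le32_to_cpu(ext->ee_block) +
+ *			ext4_ext_get_actual_len(ext) - 1
+ *
+ * returning -ENODATA when either extent misses orig_off.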
*/ -static void +static int mext_calc_swap_extents(struct ext4_extent *tmp_dext, struct ext4_extent *tmp_oext, ext4_lblk_t orig_off, ext4_lblk_t donor_off, @@ -564,6 +609,19 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, ext4_lblk_t diff, orig_diff; struct ext4_extent dext_old, oext_old; + BUG_ON(orig_off != donor_off); + + /* original and donor extents have to cover the same block offset */ + if (orig_off < le32_to_cpu(tmp_oext->ee_block) || + le32_to_cpu(tmp_oext->ee_block) + + ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off) + return -ENODATA; + + if (orig_off < le32_to_cpu(tmp_dext->ee_block) || + le32_to_cpu(tmp_dext->ee_block) + + ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off) + return -ENODATA; + dext_old = *tmp_dext; oext_old = *tmp_oext; @@ -591,6 +649,8 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, copy_extent_status(&oext_old, tmp_dext); copy_extent_status(&dext_old, tmp_oext); + + return 0; } /** @@ -631,13 +691,13 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, mext_double_down_write(orig_inode, donor_inode); /* Get the original extent for the block "orig_off" */ - get_ext_path(orig_path, orig_inode, orig_off, err); - if (orig_path == NULL) + err = get_ext_path(orig_inode, orig_off, &orig_path); + if (err) goto out; /* Get the donor extent for the head */ - get_ext_path(donor_path, donor_inode, donor_off, err); - if (donor_path == NULL) + err = get_ext_path(donor_inode, donor_off, &donor_path); + if (err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; @@ -647,13 +707,28 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, dext = donor_path[depth].p_ext; tmp_dext = *dext; - mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, + err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, donor_off, count); + if (err) + goto out; /* Loop for the donor extents */ while (1) { /* The extent for donor must be found. 
*/ - BUG_ON(!dext || donor_off != le32_to_cpu(tmp_dext.ee_block)); + if (!dext) { + ext4_error(donor_inode->i_sb, __func__, + "The extent for donor must be found"); + err = -EIO; + goto out; + } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { + ext4_error(donor_inode->i_sb, __func__, + "Donor offset(%u) and the first block of donor " + "extent(%u) should be equal", + donor_off, + le32_to_cpu(tmp_dext.ee_block)); + err = -EIO; + goto out; + } /* Set donor extent to orig extent */ err = mext_leaf_block(handle, orig_inode, @@ -678,8 +753,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, if (orig_path) ext4_ext_drop_refs(orig_path); - get_ext_path(orig_path, orig_inode, orig_off, err); - if (orig_path == NULL) + err = get_ext_path(orig_inode, orig_off, &orig_path); + if (err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; @@ -692,9 +767,8 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, if (donor_path) ext4_ext_drop_refs(donor_path); - get_ext_path(donor_path, donor_inode, - donor_off, err); - if (donor_path == NULL) + err = get_ext_path(donor_inode, donor_off, &donor_path); + if (err) goto out; depth = ext_depth(donor_inode); dext = donor_path[depth].p_ext; @@ -705,9 +779,10 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, } tmp_dext = *dext; - mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, - donor_off, - count - replaced_count); + err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, + donor_off, count - replaced_count); + if (err) + goto out; } out: @@ -740,7 +815,7 @@ out: * on success, or a negative error value on failure. */ static int -move_extent_par_page(struct file *o_filp, struct inode *donor_inode, +move_extent_per_page(struct file *o_filp, struct inode *donor_inode, pgoff_t orig_page_offset, int data_offset_in_page, int block_len_in_page, int uninit) { @@ -871,6 +946,7 @@ out: if (PageLocked(page)) unlock_page(page); page_cache_release(page); + ext4_journal_stop(handle); } out2: ext4_journal_stop(handle); @@ -897,6 +973,10 @@ mext_check_arguments(struct inode *orig_inode, struct inode *donor_inode, __u64 orig_start, __u64 donor_start, __u64 *len, __u64 moved_len) { + ext4_lblk_t orig_blocks, donor_blocks; + unsigned int blkbits = orig_inode->i_blkbits; + unsigned int blocksize = 1 << blkbits; + /* Regular file check */ if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) { ext4_debug("ext4 move extent: The argument files should be " @@ -960,54 +1040,58 @@ mext_check_arguments(struct inode *orig_inode, return -EINVAL; } - if ((orig_start > MAX_DEFRAG_SIZE) || - (donor_start > MAX_DEFRAG_SIZE) || - (*len > MAX_DEFRAG_SIZE) || - (orig_start + *len > MAX_DEFRAG_SIZE)) { - ext4_debug("ext4 move extent: Can't handle over [%lu] blocks " - "[ino:orig %lu, donor %lu]\n", MAX_DEFRAG_SIZE, + if ((orig_start > EXT_MAX_BLOCK) || + (donor_start > EXT_MAX_BLOCK) || + (*len > EXT_MAX_BLOCK) || + (orig_start + *len > EXT_MAX_BLOCK)) { + ext4_debug("ext4 move extent: Can't handle over [%u] blocks " + "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } if (orig_inode->i_size > donor_inode->i_size) { - if (orig_start >= donor_inode->i_size) { + donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits; + /* TODO: eliminate this artificial restriction */ + if (orig_start >= donor_blocks) { ext4_debug("ext4 move extent: orig start offset " - "[%llu] should be less than donor file size " - "[%lld] [ino:orig %lu, donor_inode 
%lu]\n", - orig_start, donor_inode->i_size, + "[%llu] should be less than donor file blocks " + "[%u] [ino:orig %lu, donor %lu]\n", + orig_start, donor_blocks, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } - if (orig_start + *len > donor_inode->i_size) { + /* TODO: eliminate this artificial restriction */ + if (orig_start + *len > donor_blocks) { ext4_debug("ext4 move extent: End offset [%llu] should " - "be less than donor file size [%lld]." - "So adjust length from %llu to %lld " + "be less than donor file blocks [%u]." + "So adjust length from %llu to %llu " "[ino:orig %lu, donor %lu]\n", - orig_start + *len, donor_inode->i_size, - *len, donor_inode->i_size - orig_start, + orig_start + *len, donor_blocks, + *len, donor_blocks - orig_start, orig_inode->i_ino, donor_inode->i_ino); - *len = donor_inode->i_size - orig_start; + *len = donor_blocks - orig_start; } } else { - if (orig_start >= orig_inode->i_size) { + orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits; + if (orig_start >= orig_blocks) { ext4_debug("ext4 move extent: start offset [%llu] " - "should be less than original file size " - "[%lld] [inode:orig %lu, donor %lu]\n", - orig_start, orig_inode->i_size, + "should be less than original file blocks " + "[%u] [ino:orig %lu, donor %lu]\n", + orig_start, orig_blocks, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } - if (orig_start + *len > orig_inode->i_size) { + if (orig_start + *len > orig_blocks) { ext4_debug("ext4 move extent: Adjust length " - "from %llu to %lld. Because it should be " - "less than original file size " + "from %llu to %llu. Because it should be " + "less than original file blocks " "[ino:orig %lu, donor %lu]\n", - *len, orig_inode->i_size - orig_start, + *len, orig_blocks - orig_start, orig_inode->i_ino, donor_inode->i_ino); - *len = orig_inode->i_size - orig_start; + *len = orig_blocks - orig_start; } } @@ -1027,18 +1111,23 @@ mext_check_arguments(struct inode *orig_inode, * @inode1: the inode structure * @inode2: the inode structure * - * Lock two inodes' i_mutex by i_ino order. This function is moved from - * fs/inode.c. + * Lock two inodes' i_mutex by i_ino order. + * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. */ -static void +static int mext_inode_double_lock(struct inode *inode1, struct inode *inode2) { - if (inode1 == NULL || inode2 == NULL || inode1 == inode2) { - if (inode1) - mutex_lock(&inode1->i_mutex); - else if (inode2) - mutex_lock(&inode2->i_mutex); - return; + int ret = 0; + + BUG_ON(inode1 == NULL && inode2 == NULL); + + ret = mext_check_null_inode(inode1, inode2, __func__); + if (ret < 0) + goto out; + + if (inode1 == inode2) { + mutex_lock(&inode1->i_mutex); + goto out; } if (inode1->i_ino < inode2->i_ino) { @@ -1048,6 +1137,9 @@ mext_inode_double_lock(struct inode *inode1, struct inode *inode2) mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT); mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD); } + +out: + return ret; } /** @@ -1056,17 +1148,28 @@ mext_inode_double_lock(struct inode *inode1, struct inode *inode2) * @inode1: the inode that is released first * @inode2: the inode that is released second * - * This function is moved from fs/inode.c. + * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0. 
*/ -static void +static int mext_inode_double_unlock(struct inode *inode1, struct inode *inode2) { + int ret = 0; + + BUG_ON(inode1 == NULL && inode2 == NULL); + + ret = mext_check_null_inode(inode1, inode2, __func__); + if (ret < 0) + goto out; + if (inode1) mutex_unlock(&inode1->i_mutex); if (inode2 && inode2 != inode1) mutex_unlock(&inode2->i_mutex); + +out: + return ret; } /** @@ -1123,70 +1226,76 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0; ext4_lblk_t rest_blocks; pgoff_t orig_page_offset = 0, seq_end_page; - int ret, depth, last_extent = 0; + int ret1, ret2, depth, last_extent = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; int data_offset_in_page; int block_len_in_page; int uninit; /* protect orig and donor against a truncate */ - mext_inode_double_lock(orig_inode, donor_inode); + ret1 = mext_inode_double_lock(orig_inode, donor_inode); + if (ret1 < 0) + return ret1; mext_double_down_read(orig_inode, donor_inode); /* Check the filesystem environment whether move_extent can be done */ - ret = mext_check_arguments(orig_inode, donor_inode, orig_start, + ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start, donor_start, &len, *moved_len); mext_double_up_read(orig_inode, donor_inode); - if (ret) - goto out2; + if (ret1) + goto out; file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits; block_end = block_start + len - 1; if (file_end < block_end) len -= block_end - file_end; - get_ext_path(orig_path, orig_inode, block_start, ret); - if (orig_path == NULL) - goto out2; + ret1 = get_ext_path(orig_inode, block_start, &orig_path); + if (ret1) + goto out; /* Get path structure to check the hole */ - get_ext_path(holecheck_path, orig_inode, block_start, ret); - if (holecheck_path == NULL) + ret1 = get_ext_path(orig_inode, block_start, &holecheck_path); + if (ret1) goto out; depth = ext_depth(orig_inode); ext_cur = holecheck_path[depth].p_ext; - if (ext_cur == NULL) { - ret = -EINVAL; - goto out; - } /* - * Get proper extent whose ee_block is beyond block_start - * if block_start was within the hole. + * Get proper starting location of block replacement if block_start was + * within the hole. */ if (le32_to_cpu(ext_cur->ee_block) + ext4_ext_get_actual_len(ext_cur) - 1 < block_start) { + /* + * The hole exists between extents or the tail of + * original file. + */ last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { - ret = last_extent; + ret1 = last_extent; goto out; } last_extent = mext_next_extent(orig_inode, orig_path, &ext_dummy); if (last_extent < 0) { - ret = last_extent; + ret1 = last_extent; goto out; } - } - seq_start = block_start; + seq_start = le32_to_cpu(ext_cur->ee_block); + } else if (le32_to_cpu(ext_cur->ee_block) > block_start) + /* The hole exists at the beginning of original file. */ + seq_start = le32_to_cpu(ext_cur->ee_block); + else + seq_start = block_start; /* No blocks within the specified range. 
*/ if (le32_to_cpu(ext_cur->ee_block) > block_end) { ext4_debug("ext4 move extent: The specified range of file " "may be the hole\n"); - ret = -EINVAL; + ret1 = -EINVAL; goto out; } @@ -1206,7 +1315,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, last_extent = mext_next_extent(orig_inode, holecheck_path, &ext_cur); if (last_extent < 0) { - ret = last_extent; + ret1 = last_extent; break; } add_blocks = ext4_ext_get_actual_len(ext_cur); @@ -1258,16 +1367,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, while (orig_page_offset <= seq_end_page) { /* Swap original branches with new branches */ - ret = move_extent_par_page(o_filp, donor_inode, + ret1 = move_extent_per_page(o_filp, donor_inode, orig_page_offset, data_offset_in_page, block_len_in_page, uninit); - if (ret < 0) + if (ret1 < 0) goto out; orig_page_offset++; /* Count how many blocks we have exchanged */ *moved_len += block_len_in_page; - BUG_ON(*moved_len > len); + if (*moved_len > len) { + ext4_error(orig_inode->i_sb, __func__, + "We replaced too many blocks! " + "sum of replaced: %llu requested: %llu", + *moved_len, len); + ret1 = -EIO; + goto out; + } data_offset_in_page = 0; rest_blocks -= block_len_in_page; @@ -1280,17 +1396,16 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, /* Decrease buffer counter */ if (holecheck_path) ext4_ext_drop_refs(holecheck_path); - get_ext_path(holecheck_path, orig_inode, - seq_start, ret); - if (holecheck_path == NULL) + ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path); + if (ret1) break; depth = holecheck_path->p_depth; /* Decrease buffer counter */ if (orig_path) ext4_ext_drop_refs(orig_path); - get_ext_path(orig_path, orig_inode, seq_start, ret); - if (orig_path == NULL) + ret1 = get_ext_path(orig_inode, seq_start, &orig_path); + if (ret1) break; ext_cur = holecheck_path[depth].p_ext; @@ -1307,14 +1422,13 @@ out: ext4_ext_drop_refs(holecheck_path); kfree(holecheck_path); } -out2: - mext_inode_double_unlock(orig_inode, donor_inode); - if (ret) - return ret; + ret2 = mext_inode_double_unlock(orig_inode, donor_inode); - /* All of the specified blocks must be exchanged in succeed */ - BUG_ON(*moved_len != len); + if (ret1) + return ret1; + else if (ret2) + return ret2; return 0; } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 114abe5..42f81d2 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1518,8 +1518,12 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, return retval; if (blocks == 1 && !dx_fallback && - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) - return make_indexed_dir(handle, dentry, inode, bh); + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) { + retval = make_indexed_dir(handle, dentry, inode, bh); + if (retval == -ENOSPC) + brelse(bh); + return retval; + } brelse(bh); } bh = ext4_append(handle, dir, &block, &retval); @@ -1528,7 +1532,10 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, de = (struct ext4_dir_entry_2 *) bh->b_data; de->inode = 0; de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); - return add_dirent_to_buf(handle, dentry, inode, de, bh); + retval = add_dirent_to_buf(handle, dentry, inode, de, bh); + if (retval == -ENOSPC) + brelse(bh); + return retval; } /* @@ -1590,9 +1597,9 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; + memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len =
ext4_rec_len_to_disk(sb->s_blocksize, sb->s_blocksize); - node2->fake.inode = 0; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext4_journal_get_write_access(handle, frame->bh); if (err) @@ -1657,7 +1664,8 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, if (!de) goto cleanup; err = add_dirent_to_buf(handle, dentry, inode, de, bh); - bh = NULL; + if (err != -ENOSPC) + bh = NULL; goto cleanup; journal_error: @@ -2310,7 +2318,7 @@ static int ext4_link(struct dentry *old_dentry, struct inode *inode = old_dentry->d_inode; int err, retries = 0; - if (EXT4_DIR_LINK_MAX(inode)) + if (inode->i_nlink >= EXT4_LINK_MAX) return -EMLINK; /* @@ -2413,7 +2421,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; retval = -EMLINK; if (!new_inode && new_dir != old_dir && - new_dir->i_nlink >= EXT4_LINK_MAX) + EXT4_DIR_LINK_MAX(new_dir)) goto end_rename; } if (!new_bh) { diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 68b0351..3cfc343 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -746,7 +746,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) struct inode *inode = NULL; handle_t *handle; int gdb_off, gdb_num; - int num_grp_locked = 0; int err, err2; gdb_num = input->group / EXT4_DESC_PER_BLOCK(sb); @@ -856,7 +855,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) * using the new disk blocks. */ - num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, input->group); /* Update group descriptor block for new group */ gdp = (struct ext4_group_desc *)((char *)primary->b_data + gdb_off * EXT4_DESC_SIZE(sb)); @@ -875,10 +873,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) * descriptor */ err = ext4_mb_add_groupinfo(sb, input->group, gdp); - if (err) { - ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked); + if (err) goto exit_journal; - } /* * Make the new blocks and inodes valid next. 
We do this before @@ -920,7 +916,6 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) /* Update the global fs size fields */ sbi->s_groups_count++; - ext4_mb_put_buddy_cache_lock(sb, input->group, num_grp_locked); ext4_handle_dirty_metadata(handle, NULL, primary); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8f4f079..df539ba 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -45,6 +45,7 @@ #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" +#include "mballoc.h" #define CREATE_TRACE_POINTS #include <trace/events/ext4.h> @@ -344,7 +345,8 @@ static const char *ext4_decode_error(struct super_block *sb, int errno, errstr = "Out of memory"; break; case -EROFS: - if (!sb || EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT) + if (!sb || (EXT4_SB(sb)->s_journal && + EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; @@ -962,7 +964,7 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, static ssize_t ext4_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); -static struct dquot_operations ext4_quota_operations = { +static const struct dquot_operations ext4_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, @@ -983,7 +985,7 @@ static struct dquot_operations ext4_quota_operations = { .destroy_dquot = dquot_destroy, }; -static struct quotactl_ops ext4_qctl_operations = { +static const struct quotactl_ops ext4_qctl_operations = { .quota_on = ext4_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, @@ -1279,11 +1281,9 @@ static int parse_options(char *options, struct super_block *sb, *journal_devnum = option; break; case Opt_journal_checksum: - set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); - break; + break; /* Kept for backwards compatibility */ case Opt_journal_async_commit: set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT); - set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); break; case Opt_noload: set_opt(sbi->s_mount_opt, NOLOAD); @@ -1695,12 +1695,12 @@ static int ext4_fill_flex_info(struct super_block *sb) gdp = ext4_get_group_desc(sb, i, NULL); flex_group = ext4_flex_group(sbi, i); - atomic_set(&sbi->s_flex_groups[flex_group].free_inodes, - ext4_free_inodes_count(sb, gdp)); - atomic_set(&sbi->s_flex_groups[flex_group].free_blocks, - ext4_free_blks_count(sb, gdp)); - atomic_set(&sbi->s_flex_groups[flex_group].used_dirs, - ext4_used_dirs_count(sb, gdp)); + atomic_add(ext4_free_inodes_count(sb, gdp), + &sbi->s_flex_groups[flex_group].free_inodes); + atomic_add(ext4_free_blks_count(sb, gdp), + &sbi->s_flex_groups[flex_group].free_blocks); + atomic_add(ext4_used_dirs_count(sb, gdp), + &sbi->s_flex_groups[flex_group].used_dirs); } return 1; @@ -2253,6 +2253,49 @@ static struct kobj_type ext4_ktype = { .release = ext4_sb_release, }; +/* + * Check whether this filesystem can be mounted based on + * the features present and the RDONLY/RDWR mount requested. + * Returns 1 if this filesystem can be mounted as requested, + * 0 if it cannot be. 
+ */ +static int ext4_feature_set_ok(struct super_block *sb, int readonly) +{ + if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) { + ext4_msg(sb, KERN_ERR, + "Couldn't mount because of " + "unsupported optional features (%x)", + (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & + ~EXT4_FEATURE_INCOMPAT_SUPP)); + return 0; + } + + if (readonly) + return 1; + + /* Check that feature set is OK for a read-write mount */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { + ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " + "unsupported optional features (%x)", + (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & + ~EXT4_FEATURE_RO_COMPAT_SUPP)); + return 0; + } + /* + * Large file size enabled file system can only be mounted + * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF + */ + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) { + if (sizeof(blkcnt_t) < sizeof(u64)) { + ext4_msg(sb, KERN_ERR, "Filesystem with huge files " + "cannot be mounted RDWR without " + "CONFIG_LBDAF"); + return 0; + } + } + return 1; +} + static int ext4_fill_super(struct super_block *sb, void *data, int silent) __releases(kernel_lock) __acquires(kernel_lock) @@ -2274,7 +2317,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) unsigned int db_count; unsigned int i; int needs_recovery, has_huge_files; - int features; __u64 blocks_count; int err; unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; @@ -2401,39 +2443,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) * previously didn't change the revision level when setting the flags, * so there is a chance incompat flags are set on a rev 0 filesystem. */ - features = EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP); - if (features) { - ext4_msg(sb, KERN_ERR, - "Couldn't mount because of " - "unsupported optional features (%x)", - (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) & - ~EXT4_FEATURE_INCOMPAT_SUPP)); - goto failed_mount; - } - features = EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP); - if (!(sb->s_flags & MS_RDONLY) && features) { - ext4_msg(sb, KERN_ERR, - "Couldn't mount RDWR because of " - "unsupported optional features (%x)", - (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) & - ~EXT4_FEATURE_RO_COMPAT_SUPP)); + if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY))) goto failed_mount; - } - has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, - EXT4_FEATURE_RO_COMPAT_HUGE_FILE); - if (has_huge_files) { - /* - * Large file size enabled file system can only be - * mount if kernel is build with CONFIG_LBDAF - */ - if (sizeof(root->i_blocks) < sizeof(u64) && - !(sb->s_flags & MS_RDONLY)) { - ext4_msg(sb, KERN_ERR, "Filesystem with huge " - "files cannot be mounted read-write " - "without CONFIG_LBDAF"); - goto failed_mount; - } - } + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); if (blocksize < EXT4_MIN_BLOCK_SIZE || @@ -2469,6 +2481,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) } } + has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_HUGE_FILE); sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits, has_huge_files); sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files); @@ -2549,12 +2563,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (ext4_blocks_count(es) > - (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) { + /* + * Test 
whether we have more sectors than will fit in sector_t, + * and whether the max offset is addressable by the page cache. + */ + if ((ext4_blocks_count(es) > + (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) || + (ext4_blocks_count(es) > + (pgoff_t)(~0ULL) >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits))) { ext4_msg(sb, KERN_ERR, "filesystem" - " too large to mount safely"); + " too large to mount safely on this system"); if (sizeof(sector_t) < 8) ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); + ret = -EFBIG; goto failed_mount; } @@ -2595,6 +2616,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } sbi->s_groups_count = blocks_count; + sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count, + (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb))); db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb); sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *), @@ -2729,20 +2752,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount4; } - if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { - jbd2_journal_set_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, + jbd2_journal_set_features(sbi->s_journal, + JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); + if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) + jbd2_journal_set_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); - } else if (test_opt(sb, JOURNAL_CHECKSUM)) { - jbd2_journal_set_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0); + else jbd2_journal_clear_features(sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); - } else { - jbd2_journal_clear_features(sbi->s_journal, - JBD2_FEATURE_COMPAT_CHECKSUM, 0, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); - } /* We have now updated the journal if required, so we can * validate the data journaling mode. */ @@ -3208,7 +3225,18 @@ static int ext4_commit_super(struct super_block *sb, int sync) clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } - es->s_wtime = cpu_to_le32(get_seconds()); + /* + * If the file system is mounted read-only, don't update the + * superblock write time. This avoids updating the superblock + * write time when we are mounting the root file system + * read/only but we need to replay the journal; at that point, + * for people who are east of GMT and who make their clock + * tick in localtime for Windows bug-for-bug compatibility, + * the clock is set in the future, and this will cause e2fsck + * to complain and force a full file system check. + */ + if (!(sb->s_flags & MS_RDONLY)) + es->s_wtime = cpu_to_le32(get_seconds()); es->s_kbytes_written = cpu_to_le64(EXT4_SB(sb)->s_kbytes_written + ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) - @@ -3477,18 +3505,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal) ext4_mark_recovery_complete(sb, es); } else { - int ret; - if ((ret = EXT4_HAS_RO_COMPAT_FEATURE(sb, - ~EXT4_FEATURE_RO_COMPAT_SUPP))) { - ext4_msg(sb, KERN_WARNING, "couldn't " - "remount RDWR because of unsupported " - "optional features (%x)", - (le32_to_cpu(sbi->s_es->s_feature_ro_compat) & - ~EXT4_FEATURE_RO_COMPAT_SUPP)); + /* Make sure we can mount this feature set readwrite */ + if (!ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } - /* * Make sure the group descriptor checksums * are sane. If they aren't, refuse to remount r/w. 
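The feature checks that ext4_feature_set_ok() consolidates above boil down to two mask tests: any incompat bit outside the supported set forbids the mount entirely, while unsupported ro-compat bits only forbid a read-write mount. A minimal standalone sketch of that logic (the SUPP masks below are hypothetical placeholders, not the real EXT4_FEATURE_*_SUPP values):

#include <stdint.h>

#define INCOMPAT_SUPP	0x000001ffu	/* hypothetical: incompat bits this code understands */
#define RO_COMPAT_SUPP	0x0000007fu	/* hypothetical: ro-compat bits this code understands */

static int feature_set_ok(uint32_t incompat, uint32_t ro_compat, int readonly)
{
	if (incompat & ~INCOMPAT_SUPP)	/* unknown incompat bit: refuse any mount */
		return 0;
	if (readonly)			/* a read-only mount tolerates unknown ro-compat bits */
		return 1;
	return !(ro_compat & ~RO_COMPAT_SUPP);	/* read-write needs them all supported */
}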
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 62b31c2..fed5b01 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -810,12 +810,23 @@ inserted: get_bh(new_bh); } else { /* We need to allocate a new block */ - ext4_fsblk_t goal = ext4_group_first_block_no(sb, + ext4_fsblk_t goal, block; + + goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); - ext4_fsblk_t block = ext4_new_meta_blocks(handle, inode, + + /* non-extent files can't have physical blocks past 2^32 */ + if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; + + block = ext4_new_meta_blocks(handle, inode, goal, NULL, &error); if (error) goto cleanup; + + if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) + BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS); + ea_idebug(inode, "creating block %d", block); new_bh = sb_getblk(sb, block); @@ -263,6 +263,79 @@ pid_t f_getown(struct file *filp) return pid; } +static int f_setown_ex(struct file *filp, unsigned long arg) +{ + struct f_owner_ex * __user owner_p = (void * __user)arg; + struct f_owner_ex owner; + struct pid *pid; + int type; + int ret; + + ret = copy_from_user(&owner, owner_p, sizeof(owner)); + if (ret) + return ret; + + switch (owner.type) { + case F_OWNER_TID: + type = PIDTYPE_MAX; + break; + + case F_OWNER_PID: + type = PIDTYPE_PID; + break; + + case F_OWNER_GID: + type = PIDTYPE_PGID; + break; + + default: + return -EINVAL; + } + + rcu_read_lock(); + pid = find_vpid(owner.pid); + if (owner.pid && !pid) + ret = -ESRCH; + else + ret = __f_setown(filp, pid, type, 1); + rcu_read_unlock(); + + return ret; +} + +static int f_getown_ex(struct file *filp, unsigned long arg) +{ + struct f_owner_ex * __user owner_p = (void * __user)arg; + struct f_owner_ex owner; + int ret = 0; + + read_lock(&filp->f_owner.lock); + owner.pid = pid_vnr(filp->f_owner.pid); + switch (filp->f_owner.pid_type) { + case PIDTYPE_MAX: + owner.type = F_OWNER_TID; + break; + + case PIDTYPE_PID: + owner.type = F_OWNER_PID; + break; + + case PIDTYPE_PGID: + owner.type = F_OWNER_GID; + break; + + default: + WARN_ON(1); + ret = -EINVAL; + break; + } + read_unlock(&filp->f_owner.lock); + + if (!ret) + ret = copy_to_user(owner_p, &owner, sizeof(owner)); + return ret; +} + static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, struct file *filp) { @@ -313,6 +386,12 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, case F_SETOWN: err = f_setown(filp, arg, 1); break; + case F_GETOWN_EX: + err = f_getown_ex(filp, arg); + break; + case F_SETOWN_EX: + err = f_setown_ex(filp, arg); + break; case F_GETSIG: err = filp->f_owner.signum; break; @@ -428,8 +507,7 @@ static inline int sigio_perm(struct task_struct *p, static void send_sigio_to_task(struct task_struct *p, struct fown_struct *fown, - int fd, - int reason) + int fd, int reason, int group) { /* * F_SETSIG can change ->signum lockless in parallel, make @@ -461,11 +539,11 @@ static void send_sigio_to_task(struct task_struct *p, else si.si_band = band_table[reason - POLL_IN]; si.si_fd = fd; - if (!group_send_sig_info(signum, &si, p)) + if (!do_send_sig_info(signum, &si, p, group)) break; /* fall-through: fall back on the old plain SIGIO signal */ case 0: - group_send_sig_info(SIGIO, SEND_SIG_PRIV, p); + do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group); } } @@ -474,16 +552,23 @@ void send_sigio(struct fown_struct *fown, int fd, int band) struct task_struct *p; enum pid_type type; struct pid *pid; + int group = 1; read_lock(&fown->lock); + type = fown->pid_type; + if (type == PIDTYPE_MAX) { + 
group = 0; + type = PIDTYPE_PID; + } + pid = fown->pid; if (!pid) goto out_unlock_fown; read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { - send_sigio_to_task(p, fown, fd, band); + send_sigio_to_task(p, fown, fd, band, group); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); out_unlock_fown: @@ -491,10 +576,10 @@ void send_sigio(struct fown_struct *fown, int fd, int band) } static void send_sigurg_to_task(struct task_struct *p, - struct fown_struct *fown) + struct fown_struct *fown, int group) { if (sigio_perm(p, fown, SIGURG)) - group_send_sig_info(SIGURG, SEND_SIG_PRIV, p); + do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group); } int send_sigurg(struct fown_struct *fown) @@ -502,10 +587,17 @@ int send_sigurg(struct fown_struct *fown) struct task_struct *p; enum pid_type type; struct pid *pid; + int group = 1; int ret = 0; read_lock(&fown->lock); + type = fown->pid_type; + if (type == PIDTYPE_MAX) { + group = 0; + type = PIDTYPE_PID; + } + pid = fown->pid; if (!pid) goto out_unlock_fown; @@ -514,7 +606,7 @@ int send_sigurg(struct fown_struct *fown) read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { - send_sigurg_to_task(p, fown); + send_sigurg_to_task(p, fown, group); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); out_unlock_fown: diff --git a/fs/file_table.c b/fs/file_table.c index 334ce39..8eb4404 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -74,14 +74,14 @@ EXPORT_SYMBOL_GPL(get_max_files); * Handle nr_files sysctl */ #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) -int proc_nr_files(ctl_table *table, int write, struct file *filp, +int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { files_stat.nr_files = get_nr_files(); - return proc_dointvec(table, write, filp, buffer, lenp, ppos); + return proc_dointvec(table, write, buffer, lenp, ppos); } #else -int proc_nr_files(ctl_table *table, int write, struct file *filp, +int proc_nr_files(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 628235c..8e1e5e1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -35,21 +35,29 @@ int nr_pdflush_threads; /* + * Passed into wb_writeback(), essentially a subset of writeback_control + */ +struct wb_writeback_args { + long nr_pages; + struct super_block *sb; + enum writeback_sync_modes sync_mode; + int for_kupdate; + int range_cyclic; +}; + +/* * Work items for the bdi_writeback threads */ struct bdi_work { - struct list_head list; - struct list_head wait_list; - struct rcu_head rcu_head; + struct list_head list; /* pending work list */ + struct rcu_head rcu_head; /* for RCU free/clear of work */ - unsigned long seen; - atomic_t pending; + unsigned long seen; /* threads that have seen this work */ + atomic_t pending; /* number of threads still to do work */ - struct super_block *sb; - unsigned long nr_pages; - enum writeback_sync_modes sync_mode; + struct wb_writeback_args args; /* writeback arguments */ - unsigned long state; + unsigned long state; /* flag bits, see WS_* */ }; enum { @@ -66,22 +74,13 @@ static inline bool bdi_work_on_stack(struct bdi_work *work) } static inline void bdi_work_init(struct bdi_work *work, - struct writeback_control *wbc) + struct wb_writeback_args *args) { INIT_RCU_HEAD(&work->rcu_head); - work->sb = wbc->sb; - work->nr_pages = wbc->nr_to_write; - work->sync_mode = wbc->sync_mode; + work->args = *args; work->state = WS_USED; } -static inline void 
bdi_work_init_on_stack(struct bdi_work *work, - struct writeback_control *wbc) -{ - bdi_work_init(work, wbc); - work->state |= WS_ONSTACK; -} - /** * writeback_in_progress - determine whether there is writeback in progress * @bdi: the device's backing_dev_info structure. * @@ -98,6 +97,11 @@ static void bdi_work_clear(struct bdi_work *work) { clear_bit(WS_USED_B, &work->state); smp_mb__after_clear_bit(); + /* + * work can have disappeared at this point. bit waitq functions + * should be able to tolerate this, provided bdi_sched_wait does + * not dereference its pointer argument. + */ wake_up_bit(&work->state, WS_USED_B); } @@ -113,7 +117,8 @@ static void bdi_work_free(struct rcu_head *head) static void wb_work_complete(struct bdi_work *work) { - const enum writeback_sync_modes sync_mode = work->sync_mode; + const enum writeback_sync_modes sync_mode = work->args.sync_mode; + int onstack = bdi_work_on_stack(work); /* * For allocated work, we can clear the done/seen bit right here. @@ -121,9 +126,9 @@ static void wb_work_complete(struct bdi_work *work) * to after the RCU grace period, since the stack could be invalidated * as soon as bdi_work_clear() has done the wakeup. */ - if (!bdi_work_on_stack(work)) + if (!onstack) bdi_work_clear(work); - if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work)) + if (sync_mode == WB_SYNC_NONE || onstack) call_rcu(&work->rcu_head, bdi_work_free); } @@ -146,21 +151,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work) static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) { - if (work) { - work->seen = bdi->wb_mask; - BUG_ON(!work->seen); - atomic_set(&work->pending, bdi->wb_cnt); - BUG_ON(!bdi->wb_cnt); - - /* - * Make sure stores are seen before it appears on the list - */ - smp_mb(); + work->seen = bdi->wb_mask; + BUG_ON(!work->seen); + atomic_set(&work->pending, bdi->wb_cnt); + BUG_ON(!bdi->wb_cnt); - spin_lock(&bdi->wb_lock); - list_add_tail_rcu(&work->list, &bdi->work_list); - spin_unlock(&bdi->wb_lock); - } + /* + * list_add_tail_rcu() contains the necessary barriers to + * make sure the above stores are seen before the item is + * noticed on the list + */ + spin_lock(&bdi->wb_lock); + list_add_tail_rcu(&work->list, &bdi->work_list); + spin_unlock(&bdi->wb_lock); /* * If the default thread isn't there, make sure we add it. When @@ -171,15 +174,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) else { struct bdi_writeback *wb = &bdi->wb; - /* - * If we failed allocating the bdi work item, wake up the wb - * thread always.
As a safety precaution, it'll flush out - * everything - */ - if (!wb_has_dirty_io(wb)) { - if (work) - wb_clear_pending(wb, work); - } else if (wb->task) + if (wb->task) wake_up_process(wb->task); } } @@ -194,48 +189,75 @@ static void bdi_wait_on_work_clear(struct bdi_work *work) TASK_UNINTERRUPTIBLE); } -static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc) +static void bdi_alloc_queue_work(struct backing_dev_info *bdi, + struct wb_writeback_args *args) { struct bdi_work *work; + /* + * This is WB_SYNC_NONE writeback, so if allocation fails just + * wakeup the thread for old dirty data writeback + */ work = kmalloc(sizeof(*work), GFP_ATOMIC); - if (work) - bdi_work_init(work, wbc); + if (work) { + bdi_work_init(work, args); + bdi_queue_work(bdi, work); + } else { + struct bdi_writeback *wb = &bdi->wb; - return work; + if (wb->task) + wake_up_process(wb->task); + } } -void bdi_start_writeback(struct writeback_control *wbc) +/** + * bdi_sync_writeback - start and wait for writeback + * @bdi: the backing device to write from + * @sb: write inodes from this super_block + * + * Description: + * This does WB_SYNC_ALL data integrity writeback and waits for the + * IO to complete. Callers must hold the sb s_umount semaphore for + * reading, to avoid having the super disappear before we are done. + */ +static void bdi_sync_writeback(struct backing_dev_info *bdi, + struct super_block *sb) { - const bool must_wait = wbc->sync_mode == WB_SYNC_ALL; - struct bdi_work work_stack, *work = NULL; + struct wb_writeback_args args = { + .sb = sb, + .sync_mode = WB_SYNC_ALL, + .nr_pages = LONG_MAX, + .range_cyclic = 0, + }; + struct bdi_work work; - if (!must_wait) - work = bdi_alloc_work(wbc); + bdi_work_init(&work, &args); + work.state |= WS_ONSTACK; - if (!work) { - work = &work_stack; - bdi_work_init_on_stack(work, wbc); - } + bdi_queue_work(bdi, &work); + bdi_wait_on_work_clear(&work); +} - bdi_queue_work(wbc->bdi, work); +/** + * bdi_start_writeback - start writeback + * @bdi: the backing device to write from + * @nr_pages: the number of pages to write + * + * Description: + * This does WB_SYNC_NONE opportunistic writeback. The IO is only + * started when this function returns; we make no guarantees on + * completion. Caller need not hold sb s_umount semaphore. + * + */ +void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) +{ + struct wb_writeback_args args = { + .sync_mode = WB_SYNC_NONE, + .nr_pages = nr_pages, + .range_cyclic = 1, + }; - /* - * If the sync mode is WB_SYNC_ALL, block waiting for the work to - * complete. If not, we only need to wait for the work to be started, - * if we allocated it on-stack. We use the same mechanism, if the - * wait bit is set in the bdi_work struct, then threads will not - * clear pending until after they are done. - * - * Note that work == &work_stack if must_wait is true, so we don't - * need to do call_rcu() here ever, since the completion path will - * have done that for us. - */ - if (must_wait || work == &work_stack) { - bdi_wait_on_work_clear(work); - if (work != &work_stack) - call_rcu(&work->rcu_head, bdi_work_free); - } + bdi_alloc_queue_work(bdi, &args); } /* @@ -671,17 +693,16 @@ static inline bool over_bground_thresh(void) * older_than_this takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings.
*/ -static long wb_writeback(struct bdi_writeback *wb, long nr_pages, - struct super_block *sb, - enum writeback_sync_modes sync_mode, int for_kupdate) +static long wb_writeback(struct bdi_writeback *wb, + struct wb_writeback_args *args) { struct writeback_control wbc = { .bdi = wb->bdi, - .sb = sb, - .sync_mode = sync_mode, + .sb = args->sb, + .sync_mode = args->sync_mode, .older_than_this = NULL, - .for_kupdate = for_kupdate, - .range_cyclic = 1, + .for_kupdate = args->for_kupdate, + .range_cyclic = args->range_cyclic, }; unsigned long oldest_jif; long wrote = 0; @@ -691,13 +712,18 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages, oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } + if (!wbc.range_cyclic) { + wbc.range_start = 0; + wbc.range_end = LLONG_MAX; + } for (;;) { /* * Don't flush anything for non-integrity writeback where * no nr_pages was given */ - if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE) + if (!args->for_kupdate && args->nr_pages <= 0 && + args->sync_mode == WB_SYNC_NONE) break; /* @@ -705,7 +731,8 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages, * periodic background writeout and we are below the * background dirty threshold, don't do anything */ - if (for_kupdate && nr_pages <= 0 && !over_bground_thresh()) + if (args->for_kupdate && args->nr_pages <= 0 && + !over_bground_thresh()) break; wbc.more_io = 0; @@ -713,7 +740,7 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages, wbc.nr_to_write = MAX_WRITEBACK_PAGES; wbc.pages_skipped = 0; writeback_inodes_wb(wb, &wbc); - nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; + args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; /* @@ -731,7 +758,11 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages, /* * Return the next bdi_work struct that hasn't been processed by this - * wb thread yet + * wb thread yet. ->seen is initially set for each thread that exists + * for this device, when a thread first notices a piece of work it + * clears its bit. Depending on writeback type, the thread will notify + * completion on either receiving the work (WB_SYNC_NONE) or after + * it is done (WB_SYNC_ALL). 
*/ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb) @@ -741,8 +772,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi, rcu_read_lock(); list_for_each_entry_rcu(work, &bdi->work_list, list) { - if (!test_and_clear_bit(wb->nr, &work->seen)) + if (!test_bit(wb->nr, &work->seen)) continue; + clear_bit(wb->nr, &work->seen); ret = work; break; @@ -767,8 +799,16 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) global_page_state(NR_UNSTABLE_NFS) + (inodes_stat.nr_inodes - inodes_stat.nr_unused); - if (nr_pages) - return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1); + if (nr_pages) { + struct wb_writeback_args args = { + .nr_pages = nr_pages, + .sync_mode = WB_SYNC_NONE, + .for_kupdate = 1, + .range_cyclic = 1, + }; + + return wb_writeback(wb, &args); + } return 0; } @@ -780,35 +820,31 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) { struct backing_dev_info *bdi = wb->bdi; struct bdi_work *work; - long nr_pages, wrote = 0; + long wrote = 0; while ((work = get_next_work_item(bdi, wb)) != NULL) { - enum writeback_sync_modes sync_mode; - - nr_pages = work->nr_pages; + struct wb_writeback_args args = work->args; /* * Override sync mode, in case we must wait for completion */ if (force_wait) - work->sync_mode = sync_mode = WB_SYNC_ALL; - else - sync_mode = work->sync_mode; + work->args.sync_mode = args.sync_mode = WB_SYNC_ALL; /* * If this isn't a data integrity operation, just notify * that we have seen this work and we are now starting it. */ - if (sync_mode == WB_SYNC_NONE) + if (args.sync_mode == WB_SYNC_NONE) wb_clear_pending(wb, work); - wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0); + wrote += wb_writeback(wb, &args); /* * This is a data integrity writeback, so only do the * notification when we have completed the work. */ - if (sync_mode == WB_SYNC_ALL) + if (args.sync_mode == WB_SYNC_ALL) wb_clear_pending(wb, work); } @@ -849,8 +885,7 @@ int bdi_writeback_task(struct bdi_writeback *wb) } wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(wait_jiffies); + schedule_timeout_interruptible(wait_jiffies); try_to_freeze(); } @@ -858,67 +893,28 @@ int bdi_writeback_task(struct bdi_writeback *wb) } /* - * Schedule writeback for all backing devices. Expensive! If this is a data - * integrity operation, writeback will be complete when this returns. If - * we are simply called for WB_SYNC_NONE, then writeback will merely be - * scheduled to run. + * Schedule writeback for all backing devices. This does WB_SYNC_NONE + * writeback, for integrity writeback see bdi_sync_writeback(). */ -static void bdi_writeback_all(struct writeback_control *wbc) +static void bdi_writeback_all(struct super_block *sb, long nr_pages) { - const bool must_wait = wbc->sync_mode == WB_SYNC_ALL; + struct wb_writeback_args args = { + .sb = sb, + .nr_pages = nr_pages, + .sync_mode = WB_SYNC_NONE, + }; struct backing_dev_info *bdi; - struct bdi_work *work; - LIST_HEAD(list); - -restart: - spin_lock(&bdi_lock); - list_for_each_entry(bdi, &bdi_list, bdi_list) { - struct bdi_work *work; + rcu_read_lock(); + list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - /* - * If work allocation fails, do the writes inline. We drop - * the lock and restart the list writeout. This should be OK, - * since this happens rarely and because the writeout should - * eventually make more free memory available. 
- */ - work = bdi_alloc_work(wbc); - if (!work) { - struct writeback_control __wbc; - - /* - * Not a data integrity writeout, just continue - */ - if (!must_wait) - continue; - - spin_unlock(&bdi_lock); - __wbc = *wbc; - __wbc.bdi = bdi; - writeback_inodes_wbc(&__wbc); - goto restart; - } - if (must_wait) - list_add_tail(&work->wait_list, &list); - - bdi_queue_work(bdi, work); + bdi_alloc_queue_work(bdi, &args); } - spin_unlock(&bdi_lock); - - /* - * If this is for WB_SYNC_ALL, wait for pending work to complete - * before returning. - */ - while (!list_empty(&list)) { - work = list_entry(list.next, struct bdi_work, wait_list); - list_del(&work->wait_list); - bdi_wait_on_work_clear(work); - call_rcu(&work->rcu_head, bdi_work_free); - } + rcu_read_unlock(); } /* @@ -927,17 +923,10 @@ restart: */ void wakeup_flusher_threads(long nr_pages) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .older_than_this = NULL, - .range_cyclic = 1, - }; - if (nr_pages == 0) nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); - wbc.nr_to_write = nr_pages; - bdi_writeback_all(&wbc); + bdi_writeback_all(NULL, nr_pages); } static noinline void block_dump___mark_inode_dirty(struct inode *inode) @@ -1084,7 +1073,7 @@ EXPORT_SYMBOL(__mark_inode_dirty); * on the writer throttling path, and we get decent balancing between many * throttled threads: we don't want them all piling up on inode_sync_wait. */ -static void wait_sb_inodes(struct writeback_control *wbc) +static void wait_sb_inodes(struct super_block *sb) { struct inode *inode, *old_inode = NULL; @@ -1092,7 +1081,7 @@ static void wait_sb_inodes(struct writeback_control *wbc) * We need to be protected against the filesystem going from * r/o to r/w or vice versa. */ - WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount)); + WARN_ON(!rwsem_is_locked(&sb->s_umount)); spin_lock(&inode_lock); @@ -1103,7 +1092,7 @@ static void wait_sb_inodes(struct writeback_control *wbc) * In which case, the inode may not be on the dirty list, but * we still have to wait for that writeout. */ - list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) { + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { struct address_space *mapping; if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) @@ -1143,14 +1132,8 @@ static void wait_sb_inodes(struct writeback_control *wbc) * for IO completion of submitted IO. The number of pages submitted is * returned. */ -long writeback_inodes_sb(struct super_block *sb) +void writeback_inodes_sb(struct super_block *sb) { - struct writeback_control wbc = { - .sb = sb, - .sync_mode = WB_SYNC_NONE, - .range_start = 0, - .range_end = LLONG_MAX, - }; unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); long nr_to_write; @@ -1158,9 +1141,7 @@ long writeback_inodes_sb(struct super_block *sb) nr_to_write = nr_dirty + nr_unstable + (inodes_stat.nr_inodes - inodes_stat.nr_unused); - wbc.nr_to_write = nr_to_write; - bdi_writeback_all(&wbc); - return nr_to_write - wbc.nr_to_write; + bdi_writeback_all(sb, nr_to_write); } EXPORT_SYMBOL(writeback_inodes_sb); @@ -1171,20 +1152,10 @@ EXPORT_SYMBOL(writeback_inodes_sb); * This function writes and waits on any dirty inode belonging to this * super_block. The number of pages synced is returned. 
*/ -long sync_inodes_sb(struct super_block *sb) +void sync_inodes_sb(struct super_block *sb) { - struct writeback_control wbc = { - .sb = sb, - .sync_mode = WB_SYNC_ALL, - .range_start = 0, - .range_end = LLONG_MAX, - }; - long nr_to_write = LONG_MAX; /* doesn't actually matter */ - - wbc.nr_to_write = nr_to_write; - bdi_writeback_all(&wbc); - wait_sb_inodes(&wbc); - return nr_to_write - wbc.nr_to_write; + bdi_sync_writeback(sb->s_bdi, sb); + wait_sb_inodes(sb); } EXPORT_SYMBOL(sync_inodes_sb); diff --git a/fs/fuse/control.c b/fs/fuse/control.c index 99c99df..3773fd6 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -61,6 +61,121 @@ static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf, return simple_read_from_buffer(buf, len, ppos, tmp, size); } +static ssize_t fuse_conn_limit_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos, unsigned val) +{ + char tmp[32]; + size_t size = sprintf(tmp, "%u\n", val); + + return simple_read_from_buffer(buf, len, ppos, tmp, size); +} + +static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, unsigned *val, + unsigned global_limit) +{ + unsigned long t; + char tmp[32]; + unsigned limit = (1 << 16) - 1; + int err; + + if (*ppos || count >= sizeof(tmp) - 1) + return -EINVAL; + + if (copy_from_user(tmp, buf, count)) + return -EINVAL; + + tmp[count] = '\0'; + + err = strict_strtoul(tmp, 0, &t); + if (err) + return err; + + if (!capable(CAP_SYS_ADMIN)) + limit = min(limit, global_limit); + + if (t > limit) + return -EINVAL; + + *val = t; + + return count; +} + +static ssize_t fuse_conn_max_background_read(struct file *file, + char __user *buf, size_t len, + loff_t *ppos) +{ + struct fuse_conn *fc; + unsigned val; + + fc = fuse_ctl_file_conn_get(file); + if (!fc) + return 0; + + val = fc->max_background; + fuse_conn_put(fc); + + return fuse_conn_limit_read(file, buf, len, ppos, val); +} + +static ssize_t fuse_conn_max_background_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned val; + ssize_t ret; + + ret = fuse_conn_limit_write(file, buf, count, ppos, &val, + max_user_bgreq); + if (ret > 0) { + struct fuse_conn *fc = fuse_ctl_file_conn_get(file); + if (fc) { + fc->max_background = val; + fuse_conn_put(fc); + } + } + + return ret; +} + +static ssize_t fuse_conn_congestion_threshold_read(struct file *file, + char __user *buf, size_t len, + loff_t *ppos) +{ + struct fuse_conn *fc; + unsigned val; + + fc = fuse_ctl_file_conn_get(file); + if (!fc) + return 0; + + val = fc->congestion_threshold; + fuse_conn_put(fc); + + return fuse_conn_limit_read(file, buf, len, ppos, val); +} + +static ssize_t fuse_conn_congestion_threshold_write(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned val; + ssize_t ret; + + ret = fuse_conn_limit_write(file, buf, count, ppos, &val, + max_user_congthresh); + if (ret > 0) { + struct fuse_conn *fc = fuse_ctl_file_conn_get(file); + if (fc) { + fc->congestion_threshold = val; + fuse_conn_put(fc); + } + } + + return ret; +} + static const struct file_operations fuse_ctl_abort_ops = { .open = nonseekable_open, .write = fuse_conn_abort_write, @@ -71,6 +186,18 @@ static const struct file_operations fuse_ctl_waiting_ops = { .read = fuse_conn_waiting_read, }; +static const struct file_operations fuse_conn_max_background_ops = { + .open = nonseekable_open, + .read = fuse_conn_max_background_read, + .write = fuse_conn_max_background_write, +}; + +static const struct 
file_operations fuse_conn_congestion_threshold_ops = { + .open = nonseekable_open, + .read = fuse_conn_congestion_threshold_read, + .write = fuse_conn_congestion_threshold_write, +}; + static struct dentry *fuse_ctl_add_dentry(struct dentry *parent, struct fuse_conn *fc, const char *name, @@ -127,9 +254,14 @@ int fuse_ctl_add_conn(struct fuse_conn *fc) goto err; if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1, - NULL, &fuse_ctl_waiting_ops) || + NULL, &fuse_ctl_waiting_ops) || !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1, - NULL, &fuse_ctl_abort_ops)) + NULL, &fuse_ctl_abort_ops) || + !fuse_ctl_add_dentry(parent, fc, "max_background", S_IFREG | 0600, + 1, NULL, &fuse_conn_max_background_ops) || + !fuse_ctl_add_dentry(parent, fc, "congestion_threshold", + S_IFREG | 0600, 1, NULL, + &fuse_conn_congestion_threshold_ops)) goto err; return 0; @@ -156,7 +288,7 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc) d_drop(dentry); dput(dentry); } - fuse_control_sb->s_root->d_inode->i_nlink--; + drop_nlink(fuse_control_sb->s_root->d_inode); } static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 6484eb7..51d9e33 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -250,7 +250,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req) static void flush_bg_queue(struct fuse_conn *fc) { - while (fc->active_background < FUSE_MAX_BACKGROUND && + while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; @@ -280,11 +280,11 @@ __releases(&fc->lock) list_del(&req->intr_entry); req->state = FUSE_REQ_FINISHED; if (req->background) { - if (fc->num_background == FUSE_MAX_BACKGROUND) { + if (fc->num_background == fc->max_background) { fc->blocked = 0; wake_up_all(&fc->blocked_waitq); } - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && + if (fc->num_background == fc->congestion_threshold && fc->connected && fc->bdi_initialized) { clear_bdi_congested(&fc->bdi, BLK_RW_SYNC); clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC); @@ -410,9 +410,9 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc, { req->background = 1; fc->num_background++; - if (fc->num_background == FUSE_MAX_BACKGROUND) + if (fc->num_background == fc->max_background) fc->blocked = 1; - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && + if (fc->num_background == fc->congestion_threshold && fc->bdi_initialized) { set_bdi_congested(&fc->bdi, BLK_RW_SYNC); set_bdi_congested(&fc->bdi, BLK_RW_ASYNC); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 52b641f..fc9c79f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -25,12 +25,6 @@ /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 -/** Maximum number of outstanding background requests */ -#define FUSE_MAX_BACKGROUND 12 - -/** Congestion starts at 75% of maximum */ -#define FUSE_CONGESTION_THRESHOLD (FUSE_MAX_BACKGROUND * 75 / 100) - /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -38,7 +32,7 @@ #define FUSE_NAME_MAX 1024 /** Number of dentries for each connection in the control filesystem */ -#define FUSE_CTL_NUM_DENTRIES 3 +#define FUSE_CTL_NUM_DENTRIES 5 /** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem module will check permissions based on the file mode. 
Otherwise no @@ -55,6 +49,10 @@ extern struct list_head fuse_conn_list; /** Global mutex protecting fuse_conn_list and the control filesystem */ extern struct mutex fuse_mutex; +/** Module parameters */ +extern unsigned max_user_bgreq; +extern unsigned max_user_congthresh; + /** FUSE inode */ struct fuse_inode { /** Inode data */ @@ -349,6 +347,12 @@ struct fuse_conn { /** rbtree of fuse_files waiting for poll events indexed by ph */ struct rb_root polled_files; + /** Maximum number of outstanding background requests */ + unsigned max_background; + + /** Number of background requests at which congestion starts */ + unsigned congestion_threshold; + /** Number of requests currently in the background */ unsigned num_background; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 4567db6..6da947d 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -14,6 +14,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/parser.h> #include <linux/statfs.h> #include <linux/random.h> @@ -28,10 +29,34 @@ static struct kmem_cache *fuse_inode_cachep; struct list_head fuse_conn_list; DEFINE_MUTEX(fuse_mutex); +static int set_global_limit(const char *val, struct kernel_param *kp); + +unsigned max_user_bgreq; +module_param_call(max_user_bgreq, set_global_limit, param_get_uint, + &max_user_bgreq, 0644); +__MODULE_PARM_TYPE(max_user_bgreq, "uint"); +MODULE_PARM_DESC(max_user_bgreq, + "Global limit for the maximum number of backgrounded requests an " + "unprivileged user can set"); + +unsigned max_user_congthresh; +module_param_call(max_user_congthresh, set_global_limit, param_get_uint, + &max_user_congthresh, 0644); +__MODULE_PARM_TYPE(max_user_congthresh, "uint"); +MODULE_PARM_DESC(max_user_congthresh, + "Global limit for the maximum congestion threshold an " + "unprivileged user can set"); + #define FUSE_SUPER_MAGIC 0x65735546 #define FUSE_DEFAULT_BLKSIZE 512 +/** Maximum number of outstanding background requests */ +#define FUSE_DEFAULT_MAX_BACKGROUND 12 + +/** Congestion starts at 75% of maximum */ +#define FUSE_DEFAULT_CONGESTION_THRESHOLD (FUSE_DEFAULT_MAX_BACKGROUND * 3 / 4) + struct fuse_mount_data { int fd; unsigned rootmode; @@ -517,6 +542,8 @@ void fuse_conn_init(struct fuse_conn *fc) INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); atomic_set(&fc->num_waiting, 0); + fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; + fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; fc->reqctr = 0; @@ -727,6 +754,54 @@ static const struct super_operations fuse_super_operations = { .show_options = fuse_show_options, }; +static void sanitize_global_limit(unsigned *limit) +{ + if (*limit == 0) + *limit = ((num_physpages << PAGE_SHIFT) >> 13) / + sizeof(struct fuse_req); + + if (*limit >= 1 << 16) + *limit = (1 << 16) - 1; +} + +static int set_global_limit(const char *val, struct kernel_param *kp) +{ + int rv; + + rv = param_set_uint(val, kp); + if (rv) + return rv; + + sanitize_global_limit((unsigned *)kp->arg); + + return 0; +} + +static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg) +{ + int cap_sys_admin = capable(CAP_SYS_ADMIN); + + if (arg->minor < 13) + return; + + sanitize_global_limit(&max_user_bgreq); + sanitize_global_limit(&max_user_congthresh); + + if (arg->max_background) { + fc->max_background = arg->max_background; + + if (!cap_sys_admin && fc->max_background > max_user_bgreq) + fc->max_background = max_user_bgreq; + } + if 
(arg->congestion_threshold) { + fc->congestion_threshold = arg->congestion_threshold; + + if (!cap_sys_admin && + fc->congestion_threshold > max_user_congthresh) + fc->congestion_threshold = max_user_congthresh; + } +} + static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) { struct fuse_init_out *arg = &req->misc.init_out; @@ -736,6 +811,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) else { unsigned long ra_pages; + process_init_limits(fc, arg); + if (arg->minor >= 6) { ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; if (arg->flags & FUSE_ASYNC_READ) @@ -894,6 +971,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (err) goto err_put_conn; + sb->s_bdi = &fc->bdi; + /* Handle umasking inside the fuse code */ if (sb->s_flags & MS_POSIXACL) fc->dont_mask = 1; @@ -1148,6 +1227,9 @@ static int __init fuse_init(void) if (res) goto err_sysfs_cleanup; + sanitize_global_limit(&max_user_bgreq); + sanitize_global_limit(&max_user_congthresh); + return 0; err_sysfs_cleanup: diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index c3ac180..247436c 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -12,7 +12,6 @@ #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/namei.h> -#include <linux/utsname.h> #include <linux/mm.h> #include <linux/xattr.h> #include <linux/posix_acl.h> diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 28c590b..8f1cfb0 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -179,7 +179,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) * always aligned to a 64 bit boundary. * * The size of the buffer is in bytes, but it is assumed that it is - * always ok to to read a complete multiple of 64 bits at the end + * always ok to read a complete multiple of 64 bits at the end * of the block in case the end is not aligned to a natural boundary. * * Return: the block number (bitmap buffer scope) that was found diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a93b885..1333354 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -31,12 +31,10 @@ #include <linux/statfs.h> #include <linux/security.h> #include <linux/ima.h> +#include <linux/magic.h> #include <asm/uaccess.h> -/* some random number */ -#define HUGETLBFS_MAGIC 0x958458f6 - static const struct super_operations hugetlbfs_ops; static const struct address_space_operations hugetlbfs_aops; const struct file_operations hugetlbfs_file_operations; @@ -507,6 +505,13 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; INIT_LIST_HEAD(&inode->i_mapping->private_list); info = HUGETLBFS_I(inode); + /* + * The policy is initialized here even if we are creating a + * private inode because initialization simply creates an + * empty rb tree and calls spin_lock_init(); later when we + * call mpol_free_shared_policy() it will just return because + * the rb tree will still be empty.
+ */ mpol_shared_policy_init(&info->policy, NULL); switch (mode & S_IFMT) { default: @@ -937,7 +942,7 @@ static int can_do_hugetlb_shm(void) } struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, - struct user_struct **user) + struct user_struct **user, int creat_flags) { int error = -ENOMEM; struct file *file; @@ -949,7 +954,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, if (!hugetlbfs_vfsmount) return ERR_PTR(-ENOENT); - if (!can_do_hugetlb_shm()) { + if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { *user = current_user(); if (user_shm_lock(size, *user)) { WARN_ONCE(1, @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/wait.h> +#include <linux/rwsem.h> #include <linux/hash.h> #include <linux/swap.h> #include <linux/security.h> @@ -87,14 +88,18 @@ static struct hlist_head *inode_hashtable __read_mostly; DEFINE_SPINLOCK(inode_lock); /* - * iprune_mutex provides exclusion between the kswapd or try_to_free_pages + * iprune_sem provides exclusion between the kswapd or try_to_free_pages * icache shrinking path, and the umount path. Without this exclusion, * by the time prune_icache calls iput for the inode whose pages it has * been invalidating, or by the time it calls clear_inode & destroy_inode * from its final dispose_list, the struct super_block they refer to * (for inode->i_sb->s_op) may already have been freed and reused. + * + * We make this an rwsem because the fastpath is icache shrinking. In + * some cases a filesystem may be doing a significant amount of work in + * its inode reclaim code, so this should improve parallelism. */ -static DEFINE_MUTEX(iprune_mutex); +static DECLARE_RWSEM(iprune_sem); /* * Statistics gathering.. @@ -123,7 +128,7 @@ static void wake_up_inode(struct inode *inode) int inode_init_always(struct super_block *sb, struct inode *inode) { static const struct address_space_operations empty_aops; - static struct inode_operations empty_iops; + static const struct inode_operations empty_iops; static const struct file_operations empty_fops; struct address_space *const mapping = &inode->i_data; @@ -182,9 +187,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) if (sb->s_bdev) { struct backing_dev_info *bdi; - bdi = sb->s_bdev->bd_inode_backing_dev_info; - if (!bdi) - bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; + bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; mapping->backing_dev_info = bdi; } inode->i_private = NULL; @@ -383,7 +386,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose) /* * We can reschedule here without worrying about the list's * consistency because the per-sb list of inodes must not - * change during umount anymore, and because iprune_mutex keeps + * change during umount anymore, and because iprune_sem keeps * shrink_icache_memory() away. 
*/ cond_resched_lock(&inode_lock); @@ -422,7 +425,7 @@ int invalidate_inodes(struct super_block *sb) int busy; LIST_HEAD(throw_away); - mutex_lock(&iprune_mutex); + down_write(&iprune_sem); spin_lock(&inode_lock); inotify_unmount_inodes(&sb->s_inodes); fsnotify_unmount_inodes(&sb->s_inodes); @@ -430,7 +433,7 @@ int invalidate_inodes(struct super_block *sb) spin_unlock(&inode_lock); dispose_list(&throw_away); - mutex_unlock(&iprune_mutex); + up_write(&iprune_sem); return busy; } @@ -469,7 +472,7 @@ static void prune_icache(int nr_to_scan) int nr_scanned; unsigned long reap = 0; - mutex_lock(&iprune_mutex); + down_read(&iprune_sem); spin_lock(&inode_lock); for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { struct inode *inode; @@ -511,7 +514,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode_lock); dispose_list(&freeable); - mutex_unlock(&iprune_mutex); + up_read(&iprune_sem); } /* @@ -697,13 +700,15 @@ void unlock_new_inode(struct inode *inode) } #endif /* - * This is special! We do not need the spinlock - * when clearing I_LOCK, because we're guaranteed - * that nobody else tries to do anything about the - * state of the inode when it is locked, as we - * just created it (so there can be no old holders - * that haven't tested I_LOCK). + * This is special! We do not need the spinlock when clearing I_LOCK, + * because we're guaranteed that nobody else tries to do anything about + * the state of the inode when it is locked, as we just created it (so + * there can be no old holders that haven't tested I_LOCK). + * However we must emit the memory barrier so that other CPUs reliably + * see the clearing of I_LOCK after the other inode initialisation has + * completed. */ + smp_mb(); WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW)); inode->i_state &= ~(I_LOCK|I_NEW); wake_up_inode(inode); diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c index 61f32f3..b0435dd 100644 --- a/fs/jbd/checkpoint.c +++ b/fs/jbd/checkpoint.c @@ -456,7 +456,7 @@ int cleanup_journal_tail(journal_t *journal) { transaction_t * transaction; tid_t first_tid; - unsigned long blocknr, freed; + unsigned int blocknr, freed; if (is_journal_aborted(journal)) return 1; @@ -502,8 +502,8 @@ int cleanup_journal_tail(journal_t *journal) freed = freed + journal->j_last - journal->j_first; jbd_debug(1, - "Cleaning journal tail from %d to %d (offset %lu), " - "freeing %lu\n", + "Cleaning journal tail from %d to %d (offset %u), " + "freeing %u\n", journal->j_tail_sequence, first_tid, blocknr, freed); journal->j_free += freed; diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 618e21c..4bd8825 100644 --- a/fs/jbd/commit.c +++ b/fs/jbd/commit.c @@ -308,7 +308,7 @@ void journal_commit_transaction(journal_t *journal) int bufs; int flags; int err; - unsigned long blocknr; + unsigned int blocknr; ktime_t start_time; u64 commit_time; char *tagp = NULL; diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index f96f850..bd3c073 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -276,7 +276,7 @@ static void journal_kill_thread(journal_t *journal) int journal_write_metadata_buffer(transaction_t *transaction, struct journal_head *jh_in, struct journal_head **jh_out, - unsigned long blocknr) + unsigned int blocknr) { int need_copy_out = 0; int done_copy_out = 0; @@ -567,9 +567,9 @@ int log_wait_commit(journal_t *journal, tid_t tid) * Log buffer allocation routines: */ -int journal_next_log_block(journal_t *journal, unsigned long *retp) +int journal_next_log_block(journal_t *journal, unsigned int *retp) { - 
unsigned long blocknr; + unsigned int blocknr; spin_lock(&journal->j_state_lock); J_ASSERT(journal->j_free > 1); @@ -590,11 +590,11 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp) * this is a no-op. If needed, we can use j_blk_offset - everything is * ready. */ -int journal_bmap(journal_t *journal, unsigned long blocknr, - unsigned long *retp) +int journal_bmap(journal_t *journal, unsigned int blocknr, + unsigned int *retp) { int err = 0; - unsigned long ret; + unsigned int ret; if (journal->j_inode) { ret = bmap(journal->j_inode, blocknr); @@ -604,7 +604,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr, char b[BDEVNAME_SIZE]; printk(KERN_ALERT "%s: journal block not found " - "at offset %lu on %s\n", + "at offset %u on %s\n", __func__, blocknr, bdevname(journal->j_dev, b)); @@ -630,7 +630,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr, struct journal_head *journal_get_descriptor_buffer(journal_t *journal) { struct buffer_head *bh; - unsigned long blocknr; + unsigned int blocknr; int err; err = journal_next_log_block(journal, &blocknr); @@ -774,7 +774,7 @@ journal_t * journal_init_inode (struct inode *inode) journal_t *journal = journal_init_common(); int err; int n; - unsigned long blocknr; + unsigned int blocknr; if (!journal) return NULL; @@ -846,12 +846,12 @@ static void journal_fail_superblock (journal_t *journal) static int journal_reset(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; - unsigned long first, last; + unsigned int first, last; first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) { - printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n", + printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n", first, last); journal_fail_superblock(journal); return -EINVAL; @@ -885,7 +885,7 @@ static int journal_reset(journal_t *journal) **/ int journal_create(journal_t *journal) { - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; journal_superblock_t *sb; int i, err; @@ -969,14 +969,14 @@ void journal_update_superblock(journal_t *journal, int wait) if (sb->s_start == 0 && journal->j_tail_sequence == journal->j_transaction_sequence) { jbd_debug(1,"JBD: Skipping superblock update on recovered sb " - "(start %ld, seq %d, errno %d)\n", + "(start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); goto out; } spin_lock(&journal->j_state_lock); - jbd_debug(1,"JBD: updating superblock (start %ld, seq %d, errno %d)\n", + jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n", journal->j_tail, journal->j_tail_sequence, journal->j_errno); sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); @@ -1371,7 +1371,7 @@ int journal_flush(journal_t *journal) { int err = 0; transaction_t *transaction = NULL; - unsigned long old_tail; + unsigned int old_tail; spin_lock(&journal->j_state_lock); diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c index db5e982..cb1a49a 100644 --- a/fs/jbd/recovery.c +++ b/fs/jbd/recovery.c @@ -70,7 +70,7 @@ static int do_readahead(journal_t *journal, unsigned int start) { int err; unsigned int max, nbufs, next; - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; struct buffer_head * bufs[MAXBUF]; @@ -132,7 +132,7 @@ static int jread(struct buffer_head **bhp, journal_t *journal, unsigned int offset) { int err; - unsigned long blocknr; + unsigned int blocknr; struct buffer_head *bh; *bhp = NULL; @@ -314,7 +314,7 @@ static int 
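The long run of unsigned long to unsigned int conversions in jbd is not churn: journal block numbers live on disk as 32-bit big-endian fields, so a 32-bit in-memory type matches the format exactly and halves the storage on 64-bit hosts. A sketch of the round trip, with an illustrative tag layout:

	#include <linux/types.h>

	struct example_tag {
		__be32 t_blocknr;	/* on disk: always 32 bits */
		__be32 t_flags;
	};

	static unsigned int example_tag_block(const struct example_tag *tag)
	{
		return be32_to_cpu(tag->t_blocknr);	/* no truncation possible */
	}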
do_one_pass(journal_t *journal, struct recovery_info *info, enum passtype pass) { unsigned int first_commit_ID, next_commit_ID; - unsigned long next_log_block; + unsigned int next_log_block; int err, success = 0; journal_superblock_t * sb; journal_header_t * tmp; @@ -367,14 +367,14 @@ static int do_one_pass(journal_t *journal, if (tid_geq(next_commit_ID, info->end_transaction)) break; - jbd_debug(2, "Scanning for sequence ID %u at %lu/%lu\n", + jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n", next_commit_ID, next_log_block, journal->j_last); /* Skip over each chunk of the transaction looking * either the next descriptor block or the final commit * record. */ - jbd_debug(3, "JBD: checking block %ld\n", next_log_block); + jbd_debug(3, "JBD: checking block %u\n", next_log_block); err = jread(&bh, journal, next_log_block); if (err) goto failed; @@ -429,7 +429,7 @@ static int do_one_pass(journal_t *journal, tagp = &bh->b_data[sizeof(journal_header_t)]; while ((tagp - bh->b_data +sizeof(journal_block_tag_t)) <= journal->j_blocksize) { - unsigned long io_block; + unsigned int io_block; tag = (journal_block_tag_t *) tagp; flags = be32_to_cpu(tag->t_flags); @@ -443,10 +443,10 @@ static int do_one_pass(journal_t *journal, success = err; printk (KERN_ERR "JBD: IO error %d recovering " - "block %ld in log\n", + "block %u in log\n", err, io_block); } else { - unsigned long blocknr; + unsigned int blocknr; J_ASSERT(obh != NULL); blocknr = be32_to_cpu(tag->t_blocknr); @@ -581,7 +581,7 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, max = be32_to_cpu(header->r_count); while (offset < max) { - unsigned long blocknr; + unsigned int blocknr; int err; blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset))); diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c index da6cd9b..ad71732 100644 --- a/fs/jbd/revoke.c +++ b/fs/jbd/revoke.c @@ -101,7 +101,7 @@ struct jbd_revoke_record_s { struct list_head hash; tid_t sequence; /* Used for recovery only */ - unsigned long blocknr; + unsigned int blocknr; }; @@ -126,7 +126,7 @@ static void flush_descriptor(journal_t *, struct journal_head *, int, int); /* Utility functions to maintain the revoke table */ /* Borrowed from buffer.c: this is a tried and tested block hash function */ -static inline int hash(journal_t *journal, unsigned long block) +static inline int hash(journal_t *journal, unsigned int block) { struct jbd_revoke_table_s *table = journal->j_revoke; int hash_shift = table->hash_shift; @@ -136,7 +136,7 @@ static inline int hash(journal_t *journal, unsigned long block) (block << (hash_shift - 12))) & (table->hash_size - 1); } -static int insert_revoke_hash(journal_t *journal, unsigned long blocknr, +static int insert_revoke_hash(journal_t *journal, unsigned int blocknr, tid_t seq) { struct list_head *hash_list; @@ -166,7 +166,7 @@ oom: /* Find a revoke record in the journal's hash table. */ static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal, - unsigned long blocknr) + unsigned int blocknr) { struct list_head *hash_list; struct jbd_revoke_record_s *record; @@ -332,7 +332,7 @@ void journal_destroy_revoke(journal_t *journal) * by one. 
*/ -int journal_revoke(handle_t *handle, unsigned long blocknr, +int journal_revoke(handle_t *handle, unsigned int blocknr, struct buffer_head *bh_in) { struct buffer_head *bh = NULL; @@ -401,7 +401,7 @@ int journal_revoke(handle_t *handle, unsigned long blocknr, } } - jbd_debug(2, "insert revoke for block %lu, bh_in=%p\n", blocknr, bh_in); + jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in); err = insert_revoke_hash(journal, blocknr, handle->h_transaction->t_tid); BUFFER_TRACE(bh_in, "exit"); @@ -644,7 +644,7 @@ static void flush_descriptor(journal_t *journal, */ int journal_set_revoke(journal_t *journal, - unsigned long blocknr, + unsigned int blocknr, tid_t sequence) { struct jbd_revoke_record_s *record; @@ -668,7 +668,7 @@ int journal_set_revoke(journal_t *journal, */ int journal_test_revoke(journal_t *journal, - unsigned long blocknr, + unsigned int blocknr, tid_t sequence) { struct jbd_revoke_record_s *record; diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index c03ac11..006f9ad 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -56,7 +56,8 @@ get_transaction(journal_t *journal, transaction_t *transaction) spin_lock_init(&transaction->t_handle_lock); /* Set up the commit timer for the new transaction. */ - journal->j_commit_timer.expires = round_jiffies(transaction->t_expires); + journal->j_commit_timer.expires = + round_jiffies_up(transaction->t_expires); add_timer(&journal->j_commit_timer); J_ASSERT(journal->j_running_transaction == NULL); @@ -228,6 +229,8 @@ repeat_locked: __log_space_left(journal)); spin_unlock(&transaction->t_handle_lock); spin_unlock(&journal->j_state_lock); + + lock_map_acquire(&handle->h_lockdep_map); out: if (unlikely(new_transaction)) /* It's usually NULL */ kfree(new_transaction); @@ -292,9 +295,6 @@ handle_t *journal_start(journal_t *journal, int nblocks) handle = ERR_PTR(err); goto out; } - - lock_map_acquire(&handle->h_lockdep_map); - out: return handle; } @@ -416,6 +416,7 @@ int journal_restart(handle_t *handle, int nblocks) __log_start_commit(journal, transaction->t_tid); spin_unlock(&journal->j_state_lock); + lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; ret = start_this_handle(journal, handle); return ret; diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 7b4088b..26d991d 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -25,6 +25,7 @@ #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/bio.h> +#include <linux/blkdev.h> #include <trace/events/jbd2.h> /* @@ -133,8 +134,8 @@ static int journal_submit_commit_record(journal_t *journal, bh->b_end_io = journal_end_buffer_io_sync; if (journal->j_flags & JBD2_BARRIER && - !JBD2_HAS_INCOMPAT_FEATURE(journal, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { + !JBD2_HAS_INCOMPAT_FEATURE(journal, + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { set_buffer_ordered(bh); barrier_done = 1; } @@ -220,7 +221,6 @@ static int journal_submit_inode_data_buffers(struct address_space *mapping) .nr_to_write = mapping->nrpages * 2, .range_start = 0, .range_end = i_size_read(mapping->host), - .for_writepages = 1, }; ret = generic_writepages(mapping, &wbc); @@ -707,11 +707,13 @@ start_journal_io: /* Done it all: now write the commit record asynchronously. 
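round_jiffies() can round an expiry time down, letting the commit timer fire before t_expires and commit a transaction early; round_jiffies_up() only ever rounds forward. A minimal sketch of the arming pattern:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static void example_arm_commit_timer(struct timer_list *timer,
					     unsigned long t_expires)
	{
		/* never fires earlier than t_expires, at most one tick later */
		timer->expires = round_jiffies_up(t_expires);
		add_timer(timer);
	}

The companion lock_map_acquire()/lock_map_release() moves keep the lockdep annotation balanced: journal_restart() re-enters start_this_handle(), which now takes the map itself, so the restart path has to drop it first.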
*/ if (JBD2_HAS_INCOMPAT_FEATURE(journal, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) __jbd2_journal_abort_hard(journal); + if (journal->j_flags & JBD2_BARRIER) + blkdev_issue_flush(journal->j_dev, NULL); } /* @@ -834,7 +836,7 @@ wait_for_iobuf: jbd_debug(3, "JBD: commit phase 5\n"); if (!JBD2_HAS_INCOMPAT_FEATURE(journal, - JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { + JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { err = journal_submit_commit_record(journal, commit_transaction, &cbh, crc32_sum); if (err) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e378cb3..53b86e1 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -768,7 +768,7 @@ static void jbd2_seq_history_stop(struct seq_file *seq, void *v) { } -static struct seq_operations jbd2_seq_history_ops = { +static const struct seq_operations jbd2_seq_history_ops = { .start = jbd2_seq_history_start, .next = jbd2_seq_history_next, .stop = jbd2_seq_history_stop, @@ -872,7 +872,7 @@ static void jbd2_seq_info_stop(struct seq_file *seq, void *v) { } -static struct seq_operations jbd2_seq_info_ops = { +static const struct seq_operations jbd2_seq_info_ops = { .start = jbd2_seq_info_start, .next = jbd2_seq_info_next, .stop = jbd2_seq_info_stop, @@ -1187,6 +1187,12 @@ static int journal_reset(journal_t *journal) first = be32_to_cpu(sb->s_first); last = be32_to_cpu(sb->s_maxlen); + if (first + JBD2_MIN_JOURNAL_BLOCKS > last + 1) { + printk(KERN_ERR "JBD: Journal too short (blocks %llu-%llu).\n", + first, last); + journal_fail_superblock(journal); + return -EINVAL; + } journal->j_first = first; journal->j_last = last; diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 6213ac7..a051270 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -57,7 +57,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) INIT_LIST_HEAD(&transaction->t_private_list); /* Set up the commit timer for the new transaction. 
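With JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit record is submitted without a barrier, so the hunk above adds an explicit cache flush to get an equivalent ordering guarantee from the device. As a sketch, using the two-argument signature of this era:

	#include <linux/blkdev.h>
	#include <linux/jbd2.h>

	static void example_post_commit_flush(journal_t *journal)
	{
		/* the async commit record was written without a barrier;
		 * flush the device cache so it cannot precede the data */
		if (journal->j_flags & JBD2_BARRIER)
			blkdev_issue_flush(journal->j_dev, NULL);
	}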
*/ - journal->j_commit_timer.expires = round_jiffies(transaction->t_expires); + journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires); add_timer(&journal->j_commit_timer); J_ASSERT(journal->j_running_transaction == NULL); @@ -238,6 +238,8 @@ repeat_locked: __jbd2_log_space_left(journal)); spin_unlock(&transaction->t_handle_lock); spin_unlock(&journal->j_state_lock); + + lock_map_acquire(&handle->h_lockdep_map); out: if (unlikely(new_transaction)) /* It's usually NULL */ kfree(new_transaction); @@ -303,8 +305,6 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks) handle = ERR_PTR(err); goto out; } - - lock_map_acquire(&handle->h_lockdep_map); out: return handle; } @@ -426,6 +426,7 @@ int jbd2_journal_restart(handle_t *handle, int nblocks) __jbd2_log_start_commit(journal, transaction->t_tid); spin_unlock(&journal->j_state_lock); + lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; ret = start_this_handle(journal, handle); return ret; diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index e958010..3ff50da 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -15,6 +15,7 @@ #include <linux/completion.h> #include <linux/sched.h> #include <linux/freezer.h> +#include <linux/kthread.h> #include "nodelist.h" @@ -31,7 +32,7 @@ void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) /* This must only ever be called when no GC thread is currently running */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) { - pid_t pid; + struct task_struct *tsk; int ret = 0; BUG_ON(c->gc_task); @@ -39,15 +40,16 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_exit); - pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES); - if (pid < 0) { - printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid); + tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); + if (IS_ERR(tsk)) { + printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); complete(&c->gc_thread_exit); - ret = pid; + ret = PTR_ERR(tsk); } else { /* Wait for it... */ - D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); + D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); wait_for_completion(&c->gc_thread_start); + ret = tsk->pid; } return ret; @@ -71,7 +73,6 @@ static int jffs2_garbage_collect_thread(void *_c) { struct jffs2_sb_info *c = _c; - daemonize("jffs2_gcd_mtd%d", c->mtd->index); allow_signal(SIGKILL); allow_signal(SIGSTOP); allow_signal(SIGCONT); @@ -107,6 +108,11 @@ static int jffs2_garbage_collect_thread(void *_c) * the GC thread get there first. */ schedule_timeout_interruptible(msecs_to_jiffies(50)); + if (kthread_should_stop()) { + D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); + goto die; + } + /* Put_super will send a SIGKILL and then wait on the sem. 
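The jffs2 conversion above replaces the raw kernel_thread()/daemonize() pair with the kthread API, which names the thread at creation and provides a sanctioned stop protocol. A condensed sketch, with example_* names standing in for the jffs2 specifics:

	#include <linux/kthread.h>
	#include <linux/err.h>

	struct example_ctx {
		int			mtd_index;
		struct task_struct	*task;
	};

	static int example_gc_thread(void *data)
	{
		while (!kthread_should_stop()) {
			/* ... one garbage-collection pass, then sleep ... */
		}
		return 0;
	}

	static int example_start_gc(struct example_ctx *ctx)
	{
		struct task_struct *tsk;

		tsk = kthread_run(example_gc_thread, ctx,
				  "example_gcd_mtd%d", ctx->mtd_index);
		if (IS_ERR(tsk))
			return PTR_ERR(tsk);
		ctx->task = tsk;
		return 0;
	}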
*/ while (signal_pending(current) || freezing(current)) { diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 9eff2bd..c082868 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c @@ -39,13 +39,13 @@ int __init jffs2_create_slab_caches(void) raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", sizeof(struct jffs2_raw_dirent), - 0, 0, NULL); + 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_dirent_slab) goto err; raw_inode_slab = kmem_cache_create("jffs2_raw_inode", sizeof(struct jffs2_raw_inode), - 0, 0, NULL); + 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_inode_slab) goto err; diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 0035c02..9a80e8e 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -123,7 +123,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child) return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino)); } -static struct export_operations jffs2_export_ops = { +static const struct export_operations jffs2_export_ops = { .get_parent = jffs2_get_parent, .fh_to_dentry = jffs2_fh_to_dentry, .fh_to_parent = jffs2_fh_to_parent, diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 1f3b0fc..fc9032d 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -166,7 +166,7 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) */ if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid) continue; - if (!nlm_cmp_addr(nlm_addr(block->b_host), addr)) + if (!rpc_cmp_addr(nlm_addr(block->b_host), addr)) continue; if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0) continue; diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 4336adb..c81249f 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -458,7 +458,7 @@ static void nlmclnt_locks_release_private(struct file_lock *fl) nlm_put_lockowner(fl->fl_u.nfs_fl.owner); } -static struct file_lock_operations nlmclnt_lock_ops = { +static const struct file_lock_operations nlmclnt_lock_ops = { .fl_copy_lock = nlmclnt_locks_copy_lock, .fl_release_private = nlmclnt_locks_release_private, }; diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 7cb076a..4600c20 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -111,7 +111,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) */ chain = &nlm_hosts[nlm_hash_address(ni->sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { - if (!nlm_cmp_addr(nlm_addr(host), ni->sap)) + if (!rpc_cmp_addr(nlm_addr(host), ni->sap)) continue; /* See if we have an NSM handle for this client */ @@ -125,7 +125,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) if (host->h_server != ni->server) continue; if (ni->server && - !nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap)) + !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) continue; /* Move to head of hash chain. 
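SLAB_HWCACHE_ALIGN pads each object out to a cache-line boundary, so two raw nodes never share a line; that costs a little memory but avoids false sharing and matters when the buffers are written out directly. A sketch with an illustrative object type:

	#include <linux/types.h>
	#include <linux/slab.h>

	struct example_raw_node {
		u32	magic;
		u32	totlen;
		/* ... */
	};

	static struct kmem_cache *example_slab;

	static int __init example_slab_init(void)
	{
		example_slab = kmem_cache_create("example_raw_node",
						 sizeof(struct example_raw_node),
						 0, SLAB_HWCACHE_ALIGN, NULL);
		return example_slab ? 0 : -ENOMEM;
	}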
*/ diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 30c9331..f956651 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -209,7 +209,7 @@ static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) - if (nlm_cmp_addr(nsm_addr(nsm), sap)) + if (rpc_cmp_addr(nsm_addr(nsm), sap)) return nsm; return NULL; } diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index e577a78..d100179 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -705,7 +705,7 @@ static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; } -struct lock_manager_operations nlmsvc_lock_operations = { +const struct lock_manager_operations nlmsvc_lock_operations = { .fl_compare_owner = nlmsvc_same_owner, .fl_notify = nlmsvc_notify_blocked, .fl_grant = nlmsvc_grant_deferred, diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 9e4d6aab..ad478da 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -417,7 +417,7 @@ EXPORT_SYMBOL_GPL(nlmsvc_unlock_all_by_sb); static int nlmsvc_match_ip(void *datap, struct nlm_host *host) { - return nlm_cmp_addr(nlm_srcaddr(host), datap); + return rpc_cmp_addr(nlm_srcaddr(host), datap); } /** diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 0336f2b..b583ab0 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -8,7 +8,6 @@ #include <linux/types.h> #include <linux/sched.h> -#include <linux/utsname.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index e1d5286..ad9dbbc 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -9,7 +9,6 @@ #include <linux/types.h> #include <linux/sched.h> -#include <linux/utsname.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> @@ -434,7 +434,7 @@ static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) return fl->fl_file == try->fl_file; } -static struct lock_manager_operations lease_manager_ops = { +static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, .fl_mylease = lease_mylease_callback, diff --git a/fs/minix/dir.c b/fs/minix/dir.c index d407e7a..6198731 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -308,14 +308,18 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page) struct inode *inode = (struct inode*)mapping->host; char *kaddr = page_address(page); loff_t pos = page_offset(page) + (char*)de - kaddr; - unsigned len = minix_sb(inode->i_sb)->s_dirsize; + struct minix_sb_info *sbi = minix_sb(inode->i_sb); + unsigned len = sbi->s_dirsize; int err; lock_page(page); err = __minix_write_begin(NULL, mapping, pos, len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); if (err == 0) { - de->inode = 0; + if (sbi->s_version == MINIX_V3) + ((minix3_dirent *) de)->inode = 0; + else + de->inode = 0; err = dir_commit_chunk(page, pos, len); } else { unlock_page(page); @@ -440,7 +444,10 @@ void minix_set_link(struct minix_dir_entry *de, struct page *page, err = __minix_write_begin(NULL, mapping, pos, sbi->s_dirsize, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL); if (err == 0) { - de->inode = inode->i_ino; + if (sbi->s_version == MINIX_V3) + ((minix3_dirent *) de)->inode = inode->i_ino; + else + de->inode = inode->i_ino; err = dir_commit_chunk(page, pos, sbi->s_dirsize); } else { unlock_page(page); @@ -470,7 +477,14 @@ ino_t minix_inode_by_name(struct dentry *dentry) ino_t res = 0; if (de) { - res = de->inode; + 
struct address_space *mapping = page->mapping; + struct inode *inode = mapping->host; + struct minix_sb_info *sbi = minix_sb(inode->i_sb); + + if (sbi->s_version == MINIX_V3) + res = ((minix3_dirent *) de)->inode; + else + res = de->inode; dir_put_page(page); } return res; diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 9c59072..b8b5b30 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -1241,7 +1241,7 @@ ncp_date_unix2dos(int unix_date, __le16 *time, __le16 *date) month = 2; } else { nl_day = (year & 3) || day <= 59 ? day : day - 1; - for (month = 0; month < 12; month++) + for (month = 1; month < 12; month++) if (day_n[month] > nl_day) break; } diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index fa038df..53a7ed7 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -442,7 +442,7 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, if (dentry) { struct inode* s_inode = dentry->d_inode; - if (inode) { + if (s_inode) { NCP_FINFO(s_inode)->volNumber = vnum; NCP_FINFO(s_inode)->dirEntNum = de; NCP_FINFO(s_inode)->DosDirNum = dosde; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index e5a2dac..76b0aa0 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -222,7 +222,7 @@ static unsigned decode_sessionid(struct xdr_stream *xdr, p = read_buf(xdr, len); if (unlikely(p == NULL)) - return htonl(NFS4ERR_RESOURCE);; + return htonl(NFS4ERR_RESOURCE); memcpy(sid->data, p, len); return 0; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index e350bd6..63976c0 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -648,8 +648,6 @@ static int nfs_start_lockd(struct nfs_server *server) .hostname = clp->cl_hostname, .address = (struct sockaddr *)&clp->cl_addr, .addrlen = clp->cl_addrlen, - .protocol = server->flags & NFS_MOUNT_TCP ? - IPPROTO_TCP : IPPROTO_UDP, .nfs_version = clp->rpc_ops->version, .noresvport = server->flags & NFS_MOUNT_NORESVPORT ? 
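The casts in the minix hunks above exist because the V3 on-disk directory entry widens the inode field: storing a 16-bit zero through the generic struct leaves half of a V3 inode number behind, so the entry is never really deleted. Schematically, with layouts as in the minix headers and an illustrative accessor:

	struct minix_dir_entry {	/* V1/V2 */
		__u16 inode;
		char name[0];
	};

	struct minix3_dir_entry {	/* V3 */
		__u32 inode;
		char name[0];
	};

	static ino_t example_dirent_ino(struct minix_sb_info *sbi, void *de)
	{
		if (sbi->s_version == MINIX_V3)
			return ((struct minix3_dir_entry *)de)->inode;
		return ((struct minix_dir_entry *)de)->inode;
	}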
1 : 0, @@ -660,6 +658,14 @@ static int nfs_start_lockd(struct nfs_server *server) if (server->flags & NFS_MOUNT_NONLM) return 0; + switch (clp->cl_proto) { + default: + nlm_init.protocol = IPPROTO_TCP; + break; + case XPRT_TRANSPORT_UDP: + nlm_init.protocol = IPPROTO_UDP; + } + host = nlmclnt_init(&nlm_init); if (IS_ERR(host)) return PTR_ERR(host); @@ -787,7 +793,7 @@ static int nfs_init_server(struct nfs_server *server, dprintk("--> nfs_init_server()\n"); #ifdef CONFIG_NFS_V3 - if (data->flags & NFS_MOUNT_VER3) + if (data->version == 3) cl_init.rpc_ops = &nfs_v3_clientops; #endif @@ -933,10 +939,6 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str goto out_error; nfs_server_set_fsinfo(server, &fsinfo); - error = bdi_init(&server->backing_dev_info); - if (error) - goto out_error; - /* Get some general file system info */ if (server->namelen == 0) { @@ -968,6 +970,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve target->acdirmin = source->acdirmin; target->acdirmax = source->acdirmax; target->caps = source->caps; + target->options = source->options; } /* @@ -995,6 +998,12 @@ static struct nfs_server *nfs_alloc_server(void) return NULL; } + if (bdi_init(&server->backing_dev_info)) { + nfs_free_iostats(server->io_stats); + kfree(server); + return NULL; + } + return server; } @@ -1529,7 +1538,7 @@ static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_server_list_stop(struct seq_file *p, void *v); static int nfs_server_list_show(struct seq_file *m, void *v); -static struct seq_operations nfs_server_list_ops = { +static const struct seq_operations nfs_server_list_ops = { .start = nfs_server_list_start, .next = nfs_server_list_next, .stop = nfs_server_list_stop, @@ -1550,7 +1559,7 @@ static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos); static void nfs_volume_list_stop(struct seq_file *p, void *v); static int nfs_volume_list_show(struct seq_file *m, void *v); -static struct seq_operations nfs_volume_list_ops = { +static const struct seq_operations nfs_volume_list_ops = { .start = nfs_volume_list_start, .next = nfs_volume_list_next, .stop = nfs_volume_list_stop, diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 379be67..70fad69 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -58,17 +58,34 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp) /* * Get the cache cookie for an NFS superblock. We have to handle * uniquification here because the cache doesn't do it for us. + * + * The default uniquifier is just an empty string, but it may be overridden + * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent + * superblock across an automount point of some nature. 
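nfs_start_lockd can no longer derive the lockd transport from NFS_MOUNT_TCP alone, since cl_proto carries an XPRT_TRANSPORT_* value that may name something other than a plain TCP socket; everything that is not explicitly UDP now falls back to TCP. The mapping reduced to a helper, as a sketch:

	#include <linux/in.h>
	#include <linux/sunrpc/xprtsock.h>

	static int example_nlm_ipproto(int cl_proto)
	{
		switch (cl_proto) {
		case XPRT_TRANSPORT_UDP:
			return IPPROTO_UDP;
		default:			/* TCP and anything else */
			return IPPROTO_TCP;
		}
	}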
*/ -void nfs_fscache_get_super_cookie(struct super_block *sb, - struct nfs_parsed_mount_data *data) +void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, + struct nfs_clone_mount *mntdata) { struct nfs_fscache_key *key, *xkey; struct nfs_server *nfss = NFS_SB(sb); struct rb_node **p, *parent; - const char *uniq = data->fscache_uniq ?: ""; int diff, ulen; - ulen = strlen(uniq); + if (uniq) { + ulen = strlen(uniq); + } else if (mntdata) { + struct nfs_server *mnt_s = NFS_SB(mntdata->sb); + if (mnt_s->fscache_key) { + uniq = mnt_s->fscache_key->key.uniquifier; + ulen = mnt_s->fscache_key->key.uniq_len; + } + } + + if (!uniq) { + uniq = ""; + ulen = 1; + } + key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL); if (!key) return; diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 6e809bb..b9c572d 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -74,7 +74,8 @@ extern void nfs_fscache_get_client_cookie(struct nfs_client *); extern void nfs_fscache_release_client_cookie(struct nfs_client *); extern void nfs_fscache_get_super_cookie(struct super_block *, - struct nfs_parsed_mount_data *); + const char *, + struct nfs_clone_mount *); extern void nfs_fscache_release_super_cookie(struct super_block *); extern void nfs_fscache_init_inode_cookie(struct inode *); @@ -173,7 +174,8 @@ static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {} static inline void nfs_fscache_get_super_cookie( struct super_block *sb, - struct nfs_parsed_mount_data *data) + const char *uniq, + struct nfs_clone_mount *mntdata) { } static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {} diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index c862c93..5e078b2 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -13,7 +13,6 @@ #include <linux/time.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/utsname.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/in.h> diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index ee6a13f..3f8881d 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -7,7 +7,6 @@ */ #include <linux/mm.h> -#include <linux/utsname.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/sunrpc/clnt.h> diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 35869a4..5fe5492 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -10,7 +10,6 @@ #include <linux/time.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/utsname.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/in.h> diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index be6544a..ed7c269 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -36,7 +36,6 @@ */ #include <linux/mm.h> -#include <linux/utsname.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/string.h> diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 1434080..2ef4fec 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -638,7 +638,7 @@ static void nfs4_fl_release_lock(struct file_lock *fl) nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); } -static struct file_lock_operations nfs4_fl_lock_ops = { +static const struct file_lock_operations nfs4_fl_lock_ops = { .fl_copy_lock = nfs4_fl_copy_lock, .fl_release_private = nfs4_fl_release_lock, }; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index cfc30d3..83ad47c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -39,7 +39,6 @@ #include <linux/time.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/utsname.h> #include <linux/errno.h> 
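The rewritten nfs_fscache_get_super_cookie() resolves the uniquifier from three sources in priority order; a compact restatement of that selection, assuming the same fields as in the hunk:

	/* 1. an explicit fsc=... mount option       -> caller passes uniq
	 * 2. a clone/automount of a cached parent   -> inherit the parent key
	 * 3. otherwise                              -> the default "" key   */
	if (uniq) {
		ulen = strlen(uniq);
	} else if (mntdata && NFS_SB(mntdata->sb)->fscache_key) {
		uniq = NFS_SB(mntdata->sb)->fscache_key->key.uniquifier;
		ulen = NFS_SB(mntdata->sb)->fscache_key->key.uniq_len;
	} else {
		uniq = "";
		ulen = 1;
	}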
#include <linux/string.h> #include <linux/in.h> diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 7be72d9..ef58385 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -32,7 +32,6 @@ #include <linux/slab.h> #include <linux/time.h> #include <linux/mm.h> -#include <linux/utsname.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/in.h> diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 867f705..810770f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -728,6 +728,27 @@ static void nfs_umount_begin(struct super_block *sb) unlock_kernel(); } +static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(int flags) +{ + struct nfs_parsed_mount_data *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (data) { + data->flags = flags; + data->rsize = NFS_MAX_FILE_IO_SIZE; + data->wsize = NFS_MAX_FILE_IO_SIZE; + data->acregmin = NFS_DEF_ACREGMIN; + data->acregmax = NFS_DEF_ACREGMAX; + data->acdirmin = NFS_DEF_ACDIRMIN; + data->acdirmax = NFS_DEF_ACDIRMAX; + data->nfs_server.port = NFS_UNSPEC_PORT; + data->auth_flavors[0] = RPC_AUTH_UNIX; + data->auth_flavor_len = 1; + data->minorversion = 0; + } + return data; +} + /* * Sanity-check a server address provided by the mount command. * @@ -1430,10 +1451,13 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args, int status; if (args->mount_server.version == 0) { - if (args->flags & NFS_MOUNT_VER3) - args->mount_server.version = NFS_MNT3_VERSION; - else - args->mount_server.version = NFS_MNT_VERSION; + switch (args->version) { + default: + args->mount_server.version = NFS_MNT3_VERSION; + break; + case 2: + args->mount_server.version = NFS_MNT_VERSION; + } } request.version = args->mount_server.version; @@ -1634,20 +1658,6 @@ static int nfs_validate_mount_data(void *options, if (data == NULL) goto out_no_data; - args->flags = (NFS_MOUNT_VER3 | NFS_MOUNT_TCP); - args->rsize = NFS_MAX_FILE_IO_SIZE; - args->wsize = NFS_MAX_FILE_IO_SIZE; - args->acregmin = NFS_DEF_ACREGMIN; - args->acregmax = NFS_DEF_ACREGMAX; - args->acdirmin = NFS_DEF_ACDIRMIN; - args->acdirmax = NFS_DEF_ACDIRMAX; - args->mount_server.port = NFS_UNSPEC_PORT; - args->nfs_server.port = NFS_UNSPEC_PORT; - args->nfs_server.protocol = XPRT_TRANSPORT_TCP; - args->auth_flavors[0] = RPC_AUTH_UNIX; - args->auth_flavor_len = 1; - args->minorversion = 0; - switch (data->version) { case 1: data->namlen = 0; @@ -1778,7 +1788,7 @@ static int nfs_validate_mount_data(void *options, } #ifndef CONFIG_NFS_V3 - if (args->flags & NFS_MOUNT_VER3) + if (args->version == 3) goto out_v3_not_compiled; #endif /* !CONFIG_NFS_V3 */ @@ -1918,6 +1928,8 @@ static inline void nfs_initialise_sb(struct super_block *sb) if (server->flags & NFS_MOUNT_NOAC) sb->s_flags |= MS_SYNCHRONOUS; + sb->s_bdi = &server->backing_dev_info; + nfs_super_set_maxbytes(sb, server->maxfilesize); } @@ -1934,7 +1946,7 @@ static void nfs_fill_super(struct super_block *sb, if (data->bsize) sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits); - if (server->flags & NFS_MOUNT_VER3) { + if (server->nfs_client->rpc_ops->version == 3) { /* The VFS shouldn't apply the umask to mode bits. We will do * so ourselves when necessary. */ @@ -1958,7 +1970,7 @@ static void nfs_clone_super(struct super_block *sb, sb->s_blocksize = old_sb->s_blocksize; sb->s_maxbytes = old_sb->s_maxbytes; - if (server->flags & NFS_MOUNT_VER3) { + if (server->nfs_client->rpc_ops->version == 3) { /* The VFS shouldn't apply the umask to mode bits. We will do * so ourselves when necessary. 
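nfs_alloc_parsed_mount_data() folds the default initialisation previously duplicated in the v2/v3 and v4 validators into one place; callers seed only the flags that differ. Usage as the later hunks apply it, in sketch form:

	/* v2/v3 text-based mount path: */
	data = nfs_alloc_parsed_mount_data(NFS_MOUNT_VER3 | NFS_MOUNT_TCP);

	/* v4 path: no legacy flags, version set by the validator itself */
	data = nfs_alloc_parsed_mount_data(0);

	if (data == NULL)
		return -ENOMEM;
	/* ... parse mount options into *data ... */
	kfree(data);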
*/ @@ -2092,7 +2104,7 @@ static int nfs_get_sb(struct file_system_type *fs_type, }; int error = -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = nfs_alloc_parsed_mount_data(NFS_MOUNT_VER3 | NFS_MOUNT_TCP); mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL); if (data == NULL || mntfh == NULL) goto out_free_fh; @@ -2142,7 +2154,8 @@ static int nfs_get_sb(struct file_system_type *fs_type, if (!s->s_root) { /* initial superblock/root creation */ nfs_fill_super(s, data); - nfs_fscache_get_super_cookie(s, data); + nfs_fscache_get_super_cookie( + s, data ? data->fscache_uniq : NULL, NULL); } mntroot = nfs_get_root(s, mntfh); @@ -2188,8 +2201,8 @@ static void nfs_kill_super(struct super_block *s) { struct nfs_server *server = NFS_SB(s); - bdi_unregister(&server->backing_dev_info); kill_anon_super(s); + bdi_unregister(&server->backing_dev_info); nfs_fscache_release_super_cookie(s); nfs_free_server(server); } @@ -2243,6 +2256,7 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, if (!s->s_root) { /* initial superblock/root creation */ nfs_clone_super(s, data->sb); + nfs_fscache_get_super_cookie(s, NULL, data); } mntroot = nfs_get_root(s, data->fh); @@ -2360,18 +2374,7 @@ static int nfs4_validate_mount_data(void *options, if (data == NULL) goto out_no_data; - args->rsize = NFS_MAX_FILE_IO_SIZE; - args->wsize = NFS_MAX_FILE_IO_SIZE; - args->acregmin = NFS_DEF_ACREGMIN; - args->acregmax = NFS_DEF_ACREGMAX; - args->acdirmin = NFS_DEF_ACDIRMIN; - args->acdirmax = NFS_DEF_ACDIRMAX; - args->nfs_server.port = NFS_UNSPEC_PORT; - args->auth_flavors[0] = RPC_AUTH_UNIX; - args->auth_flavor_len = 1; args->version = 4; - args->minorversion = 0; - switch (data->version) { case 1: if (data->host_addrlen > sizeof(args->nfs_server.address)) @@ -2506,7 +2509,8 @@ static int nfs4_remote_get_sb(struct file_system_type *fs_type, if (!s->s_root) { /* initial superblock/root creation */ nfs4_fill_super(s); - nfs_fscache_get_super_cookie(s, data); + nfs_fscache_get_super_cookie( + s, data ? 
data->fscache_uniq : NULL, NULL); } mntroot = nfs4_get_root(s, mntfh); @@ -2654,7 +2658,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type, struct nfs_parsed_mount_data *data; int error = -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL); + data = nfs_alloc_parsed_mount_data(0); if (data == NULL) goto out_free_data; @@ -2739,6 +2743,7 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags, if (!s->s_root) { /* initial superblock/root creation */ nfs4_clone_super(s, data->sb); + nfs_fscache_get_super_cookie(s, NULL, data); } mntroot = nfs4_get_root(s, data->fh); @@ -2820,6 +2825,7 @@ static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type, if (!s->s_root) { /* initial superblock/root creation */ nfs4_fill_super(s); + nfs_fscache_get_super_cookie(s, NULL, data); } mntroot = nfs4_get_root(s, &mntfh); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 120acad..53eb26c 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1490,7 +1490,6 @@ static int nfs_write_mapping(struct address_space *mapping, int how) .nr_to_write = LONG_MAX, .range_start = 0, .range_end = LLONG_MAX, - .for_writepages = 1, }; return __nfs_write_mapping(mapping, &wbc, how); diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index d946264..c1c9e03 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1341,6 +1341,8 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp) if (rv) goto out; rv = check_nfsd_access(exp, rqstp); + if (rv) + fh_put(fhp); out: exp_put(exp); return rv; @@ -1515,7 +1517,7 @@ static int e_show(struct seq_file *m, void *p) return svc_export_show(m, &svc_export_cache, cp); } -struct seq_operations nfs_exports_op = { +const struct seq_operations nfs_exports_op = { .start = e_start, .next = e_next, .stop = e_stop, diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 01d4ec1..edf926e 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -814,17 +814,6 @@ encode_entry_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, return p; } -static __be32 * -encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, - struct svc_fh *fhp) -{ - p = encode_post_op_attr(cd->rqstp, p, fhp); - *p++ = xdr_one; /* yes, a file handle follows */ - p = encode_fh(p, fhp); - fh_put(fhp); - return p; -} - static int compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, const char *name, int namlen) @@ -836,29 +825,54 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp, dparent = cd->fh.fh_dentry; exp = cd->fh.fh_export; - fh_init(fhp, NFS3_FHSIZE); if (isdotent(name, namlen)) { if (namlen == 2) { dchild = dget_parent(dparent); if (dchild == dparent) { /* filesystem root - cannot return filehandle for ".." 
*/ dput(dchild); - return 1; + return -ENOENT; } } else dchild = dget(dparent); } else dchild = lookup_one_len(name, dparent, namlen); if (IS_ERR(dchild)) - return 1; - if (d_mountpoint(dchild) || - fh_compose(fhp, exp, dchild, &cd->fh) != 0 || - !dchild->d_inode) - rv = 1; + return -ENOENT; + rv = -ENOENT; + if (d_mountpoint(dchild)) + goto out; + rv = fh_compose(fhp, exp, dchild, &cd->fh); + if (rv) + goto out; + if (!dchild->d_inode) + goto out; + rv = 0; +out: dput(dchild); return rv; } +__be32 *encode_entryplus_baggage(struct nfsd3_readdirres *cd, __be32 *p, const char *name, int namlen) +{ + struct svc_fh fh; + int err; + + fh_init(&fh, NFS3_FHSIZE); + err = compose_entry_fh(cd, &fh, name, namlen); + if (err) { + *p++ = 0; + *p++ = 0; + goto out; + } + p = encode_post_op_attr(cd->rqstp, p, &fh); + *p++ = xdr_one; /* yes, a file handle follows */ + p = encode_fh(p, &fh); +out: + fh_put(&fh); + return p; +} + /* * Encode a directory entry. This one works for both normal readdir * and readdirplus. @@ -929,16 +943,8 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, p = encode_entry_baggage(cd, p, name, namlen, ino); - /* throw in readdirplus baggage */ - if (plus) { - struct svc_fh fh; - - if (compose_entry_fh(cd, &fh, name, namlen) > 0) { - *p++ = 0; - *p++ = 0; - } else - p = encode_entryplus_baggage(cd, p, &fh); - } + if (plus) + p = encode_entryplus_baggage(cd, p, name, namlen); num_entry_words = p - cd->buffer; } else if (cd->rqstp->rq_respages[pn+1] != NULL) { /* temporarily encode entry into next page, then move back to @@ -951,17 +957,8 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen, p1 = encode_entry_baggage(cd, p1, name, namlen, ino); - /* throw in readdirplus baggage */ - if (plus) { - struct svc_fh fh; - - if (compose_entry_fh(cd, &fh, name, namlen) > 0) { - /* zero out the filehandle */ - *p1++ = 0; - *p1++ = 0; - } else - p1 = encode_entryplus_baggage(cd, p1, &fh); - } + if (plus) + p = encode_entryplus_baggage(cd, p1, name, namlen); /* determine entry word length and lengths to go in pages */ num_entry_words = p1 - tmp; diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 54b8b41..725d02f 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -321,7 +321,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, deny = ~pas.group & pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; - ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; + ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; @@ -335,7 +335,7 @@ _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; - ace->access_mask = mask_from_posix(deny, flags); + ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who = pa->e_id; ace++; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3fd23f7..24e8d78 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -43,25 +43,30 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/clnt.h> +#include <linux/sunrpc/svcsock.h> #include <linux/nfsd/nfsd.h> #include <linux/nfsd/state.h> #include <linux/sunrpc/sched.h> #include <linux/nfs4.h> +#include <linux/sunrpc/xprtsock.h> #define NFSDDBG_FACILITY NFSDDBG_PROC #define NFSPROC4_CB_NULL 0 #define NFSPROC4_CB_COMPOUND 1 +#define NFS4_STATEID_SIZE 16 /* Index of predefined Linux callback 
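The new encode_entryplus_baggage() never aborts the reply: on any failure it emits the two zero words, which in XDR terms mark both the post-op attributes and the filehandle as absent. A minimal sketch of that optional-field convention, with an illustrative helper:

	#include <linux/sunrpc/xdr.h>

	/* encode an optional u32: a boolean word says whether a body follows */
	static __be32 *example_encode_opt_u32(__be32 *p, const u32 *val)
	{
		if (!val) {
			*p++ = xdr_zero;	/* absent */
			return p;
		}
		*p++ = xdr_one;			/* present */
		*p++ = cpu_to_be32(*val);
		return p;
	}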
client operations */ enum { - NFSPROC4_CLNT_CB_NULL = 0, + NFSPROC4_CLNT_CB_NULL = 0, NFSPROC4_CLNT_CB_RECALL, + NFSPROC4_CLNT_CB_SEQUENCE, }; enum nfs_cb_opnum4 { OP_CB_RECALL = 4, + OP_CB_SEQUENCE = 11, }; #define NFS4_MAXTAGLEN 20 @@ -70,17 +75,29 @@ enum nfs_cb_opnum4 { #define NFS4_dec_cb_null_sz 0 #define cb_compound_enc_hdr_sz 4 #define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2)) +#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2) +#define cb_sequence_enc_sz (sessionid_sz + 4 + \ + 1 /* no referring calls list yet */) +#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4) + #define op_enc_sz 1 #define op_dec_sz 2 #define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) #define enc_stateid_sz (NFS4_STATEID_SIZE >> 2) #define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ + cb_sequence_enc_sz + \ 1 + enc_stateid_sz + \ enc_nfs4_fh_sz) #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ + cb_sequence_dec_sz + \ op_dec_sz) +struct nfs4_rpc_args { + void *args_op; + struct nfsd4_cb_sequence args_seq; +}; + /* * Generic encode routines from fs/nfs/nfs4xdr.c */ @@ -137,11 +154,13 @@ xdr_error: \ } while (0) struct nfs4_cb_compound_hdr { - int status; - u32 ident; + /* args */ + u32 ident; /* minorversion 0 only */ u32 nops; __be32 *nops_p; u32 minorversion; + /* res */ + int status; u32 taglen; char *tag; }; @@ -238,6 +257,27 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp, hdr->nops++; } +static void +encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args, + struct nfs4_cb_compound_hdr *hdr) +{ + __be32 *p; + + if (hdr->minorversion == 0) + return; + + RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20); + + WRITE32(OP_CB_SEQUENCE); + WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN); + WRITE32(args->cbs_clp->cl_cb_seq_nr); + WRITE32(0); /* slotid, always 0 */ + WRITE32(0); /* highest slotid always 0 */ + WRITE32(0); /* cachethis always 0 */ + WRITE32(0); /* FIXME: support referring_call_lists */ + hdr->nops++; +} + static int nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p) { @@ -249,15 +289,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p) } static int -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args) +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, + struct nfs4_rpc_args *rpc_args) { struct xdr_stream xdr; + struct nfs4_delegation *args = rpc_args->args_op; struct nfs4_cb_compound_hdr hdr = { .ident = args->dl_ident, + .minorversion = rpc_args->args_seq.cbs_minorversion, }; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_cb_compound_hdr(&xdr, &hdr); + encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr); encode_cb_recall(&xdr, args, &hdr); encode_cb_nops(&hdr); return 0; @@ -299,6 +343,57 @@ decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) return 0; } +/* + * Our current back channel implmentation supports a single backchannel + * with a single slot. + */ +static int +decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res, + struct rpc_rqst *rqstp) +{ + struct nfs4_sessionid id; + int status; + u32 dummy; + __be32 *p; + + if (res->cbs_minorversion == 0) + return 0; + + status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE); + if (status) + return status; + + /* + * If the server returns different values for sessionID, slotID or + * sequence number, the server is looney tunes. 
+ */ + status = -ESERVERFAULT; + + READ_BUF(NFS4_MAX_SESSIONID_LEN + 16); + memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); + p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN); + if (memcmp(id.data, res->cbs_clp->cl_sessionid.data, + NFS4_MAX_SESSIONID_LEN)) { + dprintk("%s Invalid session id\n", __func__); + goto out; + } + READ32(dummy); + if (dummy != res->cbs_clp->cl_cb_seq_nr) { + dprintk("%s Invalid sequence number\n", __func__); + goto out; + } + READ32(dummy); /* slotid must be 0 */ + if (dummy != 0) { + dprintk("%s Invalid slotid\n", __func__); + goto out; + } + /* FIXME: process highest slotid and target highest slotid */ + status = 0; +out: + return status; +} + + static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p) { @@ -306,7 +401,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p) } static int -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p) +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, + struct nfsd4_cb_sequence *seq) { struct xdr_stream xdr; struct nfs4_cb_compound_hdr hdr; @@ -316,6 +412,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p) status = decode_cb_compound_hdr(&xdr, &hdr); if (status) goto out; + if (seq) { + status = decode_cb_sequence(&xdr, seq, rqstp); + if (status) + goto out; + } status = decode_cb_op_hdr(&xdr, OP_CB_RECALL); out: return status; @@ -377,16 +478,15 @@ static int max_cb_time(void) int setup_callback_client(struct nfs4_client *clp) { - struct sockaddr_in addr; struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_timeout timeparms = { .to_initval = max_cb_time(), .to_retries = 0, }; struct rpc_create_args args = { - .protocol = IPPROTO_TCP, - .address = (struct sockaddr *)&addr, - .addrsize = sizeof(addr), + .protocol = XPRT_TRANSPORT_TCP, + .address = (struct sockaddr *) &cb->cb_addr, + .addrsize = cb->cb_addrlen, .timeout = &timeparms, .program = &cb_program, .prognumber = cb->cb_prog, @@ -399,13 +499,10 @@ int setup_callback_client(struct nfs4_client *clp) if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) return -EINVAL; - - /* Initialize address */ - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_port = htons(cb->cb_port); - addr.sin_addr.s_addr = htonl(cb->cb_addr); - + if (cb->cb_minorversion) { + args.bc_xprt = clp->cl_cb_xprt; + args.protocol = XPRT_TRANSPORT_BC_TCP; + } /* Create RPC client */ client = rpc_create(&args); if (IS_ERR(client)) { @@ -439,42 +536,29 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = { .rpc_call_done = nfsd4_cb_probe_done, }; -static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb) -{ - struct auth_cred acred = { - .machine_cred = 1 - }; +static struct rpc_cred *callback_cred; - /* - * Note in the gss case this doesn't actually have to wait for a - * gss upcall (or any calls to the client); this just creates a - * non-uptodate cred which the rpc state machine will fill in with - * a refresh_upcall later. 
- */ - return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred, - RPCAUTH_LOOKUP_NEW); +int set_callback_cred(void) +{ + callback_cred = rpc_lookup_machine_cred(); + if (!callback_cred) + return -ENOMEM; + return 0; } + void do_probe_callback(struct nfs4_client *clp) { struct nfs4_cb_conn *cb = &clp->cl_cb_conn; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], .rpc_argp = clp, + .rpc_cred = callback_cred }; - struct rpc_cred *cred; int status; - cred = lookup_cb_cred(cb); - if (IS_ERR(cred)) { - status = PTR_ERR(cred); - goto out; - } - cb->cb_cred = cred; - msg.rpc_cred = cb->cb_cred; status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT, &nfsd4_cb_probe_ops, (void *)clp); -out: if (status) { warn_no_callback_path(clp, status); put_nfs4_client(clp); @@ -503,11 +587,95 @@ nfsd4_probe_callback(struct nfs4_client *clp) do_probe_callback(clp); } +/* + * There's currently a single callback channel slot. + * If the slot is available, then mark it busy. Otherwise, set the + * thread for sleeping on the callback RPC wait queue. + */ +static int nfsd41_cb_setup_sequence(struct nfs4_client *clp, + struct rpc_task *task) +{ + struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; + u32 *ptr = (u32 *)clp->cl_sessionid.data; + int status = 0; + + dprintk("%s: %u:%u:%u:%u\n", __func__, + ptr[0], ptr[1], ptr[2], ptr[3]); + + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { + rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); + dprintk("%s slot is busy\n", __func__); + status = -EAGAIN; + goto out; + } + + /* + * We'll need the clp during XDR encoding and decoding, + * and the sequence during decoding to verify the reply + */ + args->args_seq.cbs_clp = clp; + task->tk_msg.rpc_resp = &args->args_seq; + +out: + dprintk("%s status=%d\n", __func__, status); + return status; +} + +/* + * TODO: cb_sequence should support referring call lists, cachethis, multiple + * slots, and mark callback channel down on communication errors. + */ +static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) +{ + struct nfs4_delegation *dp = calldata; + struct nfs4_client *clp = dp->dl_client; + struct nfs4_rpc_args *args = task->tk_msg.rpc_argp; + u32 minorversion = clp->cl_cb_conn.cb_minorversion; + int status = 0; + + args->args_seq.cbs_minorversion = minorversion; + if (minorversion) { + status = nfsd41_cb_setup_sequence(clp, task); + if (status) { + if (status != -EAGAIN) { + /* terminate rpc task */ + task->tk_status = status; + task->tk_action = NULL; + } + return; + } + } + rpc_call_start(task); +} + +static void nfsd4_cb_done(struct rpc_task *task, void *calldata) +{ + struct nfs4_delegation *dp = calldata; + struct nfs4_client *clp = dp->dl_client; + + dprintk("%s: minorversion=%d\n", __func__, + clp->cl_cb_conn.cb_minorversion); + + if (clp->cl_cb_conn.cb_minorversion) { + /* No need for lock, access serialized in nfsd4_cb_prepare */ + ++clp->cl_cb_seq_nr; + clear_bit(0, &clp->cl_cb_slot_busy); + rpc_wake_up_next(&clp->cl_cb_waitq); + dprintk("%s: freed slot, new seqid=%d\n", __func__, + clp->cl_cb_seq_nr); + + /* We're done looking into the sequence information */ + task->tk_msg.rpc_resp = NULL; + } +} + static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) { struct nfs4_delegation *dp = calldata; struct nfs4_client *clp = dp->dl_client; + nfsd4_cb_done(task, calldata); + switch (task->tk_status) { case -EIO: /* Network partition? 
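The nfsd4_cb_prepare()/nfsd4_cb_done() pair above implements a one-slot session on the backchannel: a single busy bit is the entire slot table, and the RPC wait queue serialises the contenders. The core of that discipline, condensed from the hunks:

	/* prepare: try to take the only slot */
	if (test_and_set_bit(0, &clp->cl_cb_slot_busy)) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		return -EAGAIN;		/* prepare reruns when woken */
	}

	/* done: bump the sequence, hand the slot to the next waiter */
	++clp->cl_cb_seq_nr;		/* serialised by the slot itself */
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_wake_up_next(&clp->cl_cb_waitq);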
*/ @@ -520,16 +688,19 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) break; default: /* success, or error we can't handle */ - return; + goto done; } if (dp->dl_retries--) { rpc_delay(task, 2*HZ); task->tk_status = 0; rpc_restart_call(task); + return; } else { atomic_set(&clp->cl_cb_conn.cb_set, 0); warn_no_callback_path(clp, task->tk_status); } +done: + kfree(task->tk_msg.rpc_argp); } static void nfsd4_cb_recall_release(void *calldata) @@ -542,6 +713,7 @@ static void nfsd4_cb_recall_release(void *calldata) } static const struct rpc_call_ops nfsd4_cb_recall_ops = { + .rpc_call_prepare = nfsd4_cb_prepare, .rpc_call_done = nfsd4_cb_recall_done, .rpc_release = nfsd4_cb_recall_release, }; @@ -554,17 +726,24 @@ nfsd4_cb_recall(struct nfs4_delegation *dp) { struct nfs4_client *clp = dp->dl_client; struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client; + struct nfs4_rpc_args *args; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL], - .rpc_argp = dp, - .rpc_cred = clp->cl_cb_conn.cb_cred + .rpc_cred = callback_cred }; - int status; + int status = -ENOMEM; + args = kzalloc(sizeof(*args), GFP_KERNEL); + if (!args) + goto out; + args->args_op = dp; + msg.rpc_argp = args; dp->dl_retries = 1; status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT, &nfsd4_cb_recall_ops, dp); +out: if (status) { + kfree(args); put_nfs4_client(clp); nfs4_put_delegation(dp); } diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index cdfa86f..ba2c199 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -38,7 +38,6 @@ #include <linux/init.h> #include <linux/mm.h> -#include <linux/utsname.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/sunrpc/clnt.h> diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 7c88017..bebc0c2 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -68,7 +68,6 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, u32 *bmval, u32 *writable) { struct dentry *dentry = cstate->current_fh.fh_dentry; - struct svc_export *exp = cstate->current_fh.fh_export; /* * Check about attributes are supported by the NFSv4 server or not. @@ -80,17 +79,13 @@ check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return nfserr_attrnotsupp; /* - * Check FATTR4_WORD0_ACL & FATTR4_WORD0_FS_LOCATIONS can be supported + * Check FATTR4_WORD0_ACL can be supported * in current environment or not. */ if (bmval[0] & FATTR4_WORD0_ACL) { if (!IS_POSIXACL(dentry->d_inode)) return nfserr_attrnotsupp; } - if (bmval[0] & FATTR4_WORD0_FS_LOCATIONS) { - if (exp->ex_fslocs.locations == NULL) - return nfserr_attrnotsupp; - } /* * According to spec, read-only attributes return ERR_INVAL. @@ -123,6 +118,35 @@ nfsd4_check_open_attributes(struct svc_rqst *rqstp, return status; } +static int +is_create_with_attrs(struct nfsd4_open *open) +{ + return open->op_create == NFS4_OPEN_CREATE + && (open->op_createmode == NFS4_CREATE_UNCHECKED + || open->op_createmode == NFS4_CREATE_GUARDED + || open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1); +} + +/* + * if error occurs when setting the acl, just clear the acl bit + * in the returned attr bitmap. 
+ */ +static void +do_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, + struct nfs4_acl *acl, u32 *bmval) +{ + __be32 status; + + status = nfsd4_set_nfs4_acl(rqstp, fhp, acl); + if (status) + /* + * We should probably fail the whole open at this point, + * but we've already created the file, so it's too late; + * So this seems the least of evils: + */ + bmval[0] &= ~FATTR4_WORD0_ACL; +} + static inline void fh_dup2(struct svc_fh *dst, struct svc_fh *src) { @@ -206,6 +230,9 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o if (status) goto out; + if (is_create_with_attrs(open) && open->op_acl != NULL) + do_set_nfs4_acl(rqstp, &resfh, open->op_acl, open->op_bmval); + set_change_info(&open->op_cinfo, current_fh); fh_dup2(current_fh, &resfh); @@ -536,12 +563,17 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = nfserr_badtype; } - if (!status) { - fh_unlock(&cstate->current_fh); - set_change_info(&create->cr_cinfo, &cstate->current_fh); - fh_dup2(&cstate->current_fh, &resfh); - } + if (status) + goto out; + + if (create->cr_acl != NULL) + do_set_nfs4_acl(rqstp, &resfh, create->cr_acl, + create->cr_bmval); + fh_unlock(&cstate->current_fh); + set_change_info(&create->cr_cinfo, &cstate->current_fh); + fh_dup2(&cstate->current_fh, &resfh); +out: fh_put(&resfh); return status; } @@ -947,34 +979,6 @@ static struct nfsd4_operation nfsd4_ops[]; static const char *nfsd4_op_name(unsigned opnum); /* - * This is a replay of a compound for which no cache entry pages - * were used. Encode the sequence operation, and if cachethis is FALSE - * encode the uncache rep error on the next operation. - */ -static __be32 -nfsd4_enc_uncached_replay(struct nfsd4_compoundargs *args, - struct nfsd4_compoundres *resp) -{ - struct nfsd4_op *op; - - dprintk("--> %s resp->opcnt %d ce_cachethis %u \n", __func__, - resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis); - - /* Encode the replayed sequence operation */ - BUG_ON(resp->opcnt != 1); - op = &args->ops[resp->opcnt - 1]; - nfsd4_encode_operation(resp, op); - - /*return nfserr_retry_uncached_rep in next operation. */ - if (resp->cstate.slot->sl_cache_entry.ce_cachethis == 0) { - op = &args->ops[resp->opcnt++]; - op->status = nfserr_retry_uncached_rep; - nfsd4_encode_operation(resp, op); - } - return op->status; -} - -/* * Enforce NFSv4.1 COMPOUND ordering rules. * * TODO: @@ -1083,13 +1087,10 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, BUG_ON(op->status == nfs_ok); encode_op: - /* Only from SEQUENCE or CREATE_SESSION */ + /* Only from SEQUENCE */ if (resp->cstate.status == nfserr_replay_cache) { dprintk("%s NFS4.1 replay from cache\n", __func__); - if (nfsd4_not_cached(resp)) - status = nfsd4_enc_uncached_replay(args, resp); - else - status = op->status; + status = op->status; goto out; } if (op->status == nfserr_replay_me) { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 980a216..2153f9b 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -55,6 +55,7 @@ #include <linux/lockd/bind.h> #include <linux/module.h> #include <linux/sunrpc/svcauth_gss.h> +#include <linux/sunrpc/clnt.h> #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -413,36 +414,65 @@ gen_sessionid(struct nfsd4_session *ses) } /* - * Give the client the number of slots it requests bound by - * NFSD_MAX_SLOTS_PER_SESSION and by sv_drc_max_pages. 
+ * The protocol defines ca_maxresponsesize_cached to include the size of + * the rpc header, but all we need to cache is the data starting after + * the end of the initial SEQUENCE operation--the rest we regenerate + * each time. Therefore we can advertise a ca_maxresponsesize_cached + * value that is the number of bytes in our cache plus a few additional + * bytes. In order to stay on the safe side, and not promise more than + * we can cache, those additional bytes must be the minimum possible: 24 + * bytes of rpc header (xid through accept state, with AUTH_NULL + * verifier), 12 for the compound header (with zero-length tag), and 44 + * for the SEQUENCE op response: + */ +#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) + +/* + * Give the client the number of slots it requests, each with a + * cached-response size bounded by NFSD_SLOT_CACHE_SIZE, + * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more + * than NFSD_MAX_SLOTS_PER_SESSION. * - * If we run out of pages (sv_drc_pages_used == sv_drc_max_pages) we - * should (up to a point) re-negotiate active sessions and reduce their - * slot usage to make rooom for new connections. For now we just fail the - * create session. + * If we run out of reserved DRC memory we should (up to a point) + * re-negotiate active sessions and reduce their slot usage to make + * room for new connections. For now we just fail the create session. */ -static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan) +static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan) { - int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT; + int mem, size = fchan->maxresp_cached; if (fchan->maxreqs < 1) return nfserr_inval; - else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) - fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; - spin_lock(&nfsd_serv->sv_lock); - if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages) - np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used; - nfsd_serv->sv_drc_pages_used += np; - spin_unlock(&nfsd_serv->sv_lock); + if (size < NFSD_MIN_HDR_SEQ_SZ) + size = NFSD_MIN_HDR_SEQ_SZ; + size -= NFSD_MIN_HDR_SEQ_SZ; + if (size > NFSD_SLOT_CACHE_SIZE) + size = NFSD_SLOT_CACHE_SIZE; + + /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */ + mem = fchan->maxreqs * size; + if (mem > NFSD_MAX_MEM_PER_SESSION) { + fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size; + if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) + fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; + mem = fchan->maxreqs * size; + } - if (np <= 0) { - status = nfserr_resource; - fchan->maxreqs = 0; - } else - fchan->maxreqs = np / NFSD_PAGES_PER_SLOT; + spin_lock(&nfsd_drc_lock); + /* bound the total session drc memory usage */ + if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) { + fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size; + mem = fchan->maxreqs * size; + } + nfsd_drc_mem_used += mem; + spin_unlock(&nfsd_drc_lock); - return status; + if (fchan->maxreqs == 0) + return nfserr_serverfault; + + fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ; + return 0; } /* @@ -466,36 +496,41 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp, fchan->maxresp_sz = maxcount; session_fchan->maxresp_sz = fchan->maxresp_sz; - /* Set the max response cached size our default which is - * a multiple of PAGE_SIZE and small */ - session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE; - fchan->maxresp_cached = session_fchan->maxresp_cached; - /* Use the client's maxops if possible */ if (fchan->maxops > 
NFSD_MAX_OPS_PER_COMPOUND) fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND; session_fchan->maxops = fchan->maxops; - /* try to use the client requested number of slots */ - if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) - fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; - /* FIXME: Error means no more DRC pages so the server should * recover pages from existing sessions. For now fail session * creation. */ - status = set_forechannel_maxreqs(fchan); + status = set_forechannel_drc_size(fchan); + session_fchan->maxresp_cached = fchan->maxresp_cached; session_fchan->maxreqs = fchan->maxreqs; + + dprintk("%s status %d\n", __func__, status); return status; } +static void +free_session_slots(struct nfsd4_session *ses) +{ + int i; + + for (i = 0; i < ses->se_fchannel.maxreqs; i++) + kfree(ses->se_slots[i]); +} + static int alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) { struct nfsd4_session *new, tmp; - int idx, status = nfserr_resource, slotsize; + struct nfsd4_slot *sp; + int idx, slotsize, cachesize, i; + int status; memset(&tmp, 0, sizeof(tmp)); @@ -506,14 +541,27 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, if (status) goto out; - /* allocate struct nfsd4_session and slot table in one piece */ - slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot); + BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot) + + sizeof(struct nfsd4_session) > PAGE_SIZE); + + status = nfserr_serverfault; + /* allocate struct nfsd4_session and slot table pointers in one piece */ + slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *); new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); if (!new) goto out; memcpy(new, &tmp, sizeof(*new)); + /* allocate each struct nfsd4_slot and data cache in one piece */ + cachesize = new->se_fchannel.maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; + for (i = 0; i < new->se_fchannel.maxreqs; i++) { + sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); + if (!sp) + goto out_free; + new->se_slots[i] = sp; + } + new->se_client = clp; gen_sessionid(new); idx = hash_sessionid(&new->se_sessionid); @@ -530,6 +578,10 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, status = nfs_ok; out: return status; +out_free: + free_session_slots(new); + kfree(new); + goto out; } /* caller must hold sessionid_lock */ @@ -572,19 +624,16 @@ release_session(struct nfsd4_session *ses) nfsd4_put_session(ses); } -static void nfsd4_release_respages(struct page **respages, short resused); - void free_session(struct kref *kref) { struct nfsd4_session *ses; - int i; ses = container_of(kref, struct nfsd4_session, se_ref); - for (i = 0; i < ses->se_fchannel.maxreqs; i++) { - struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry; - nfsd4_release_respages(e->ce_respages, e->ce_resused); - } + spin_lock(&nfsd_drc_lock); + nfsd_drc_mem_used -= ses->se_fchannel.maxreqs * NFSD_SLOT_CACHE_SIZE; + spin_unlock(&nfsd_drc_lock); + free_session_slots(ses); kfree(ses); } @@ -647,18 +696,14 @@ shutdown_callback_client(struct nfs4_client *clp) clp->cl_cb_conn.cb_client = NULL; rpc_shutdown_client(clnt); } - if (clp->cl_cb_conn.cb_cred) { - put_rpccred(clp->cl_cb_conn.cb_cred); - clp->cl_cb_conn.cb_cred = NULL; - } } static inline void free_client(struct nfs4_client *clp) { shutdown_callback_client(clp); - nfsd4_release_respages(clp->cl_slot.sl_cache_entry.ce_respages, - clp->cl_slot.sl_cache_entry.ce_resused); + if (clp->cl_cb_xprt) + svc_xprt_put(clp->cl_cb_xprt); if (clp->cl_cred.cr_group_info) 
put_group_info(clp->cl_cred.cr_group_info); kfree(clp->cl_principal); @@ -714,25 +759,6 @@ expire_client(struct nfs4_client *clp) put_nfs4_client(clp); } -static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir) -{ - struct nfs4_client *clp; - - clp = alloc_client(name); - if (clp == NULL) - return NULL; - memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); - atomic_set(&clp->cl_count, 1); - atomic_set(&clp->cl_cb_conn.cb_set, 0); - INIT_LIST_HEAD(&clp->cl_idhash); - INIT_LIST_HEAD(&clp->cl_strhash); - INIT_LIST_HEAD(&clp->cl_openowners); - INIT_LIST_HEAD(&clp->cl_delegations); - INIT_LIST_HEAD(&clp->cl_sessions); - INIT_LIST_HEAD(&clp->cl_lru); - return clp; -} - static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) { memcpy(target->cl_verifier.data, source->data, @@ -795,6 +821,46 @@ static void gen_confirm(struct nfs4_client *clp) *p++ = i++; } +static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, + struct svc_rqst *rqstp, nfs4_verifier *verf) +{ + struct nfs4_client *clp; + struct sockaddr *sa = svc_addr(rqstp); + char *princ; + + clp = alloc_client(name); + if (clp == NULL) + return NULL; + + princ = svc_gss_principal(rqstp); + if (princ) { + clp->cl_principal = kstrdup(princ, GFP_KERNEL); + if (clp->cl_principal == NULL) { + free_client(clp); + return NULL; + } + } + + memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); + atomic_set(&clp->cl_count, 1); + atomic_set(&clp->cl_cb_conn.cb_set, 0); + INIT_LIST_HEAD(&clp->cl_idhash); + INIT_LIST_HEAD(&clp->cl_strhash); + INIT_LIST_HEAD(&clp->cl_openowners); + INIT_LIST_HEAD(&clp->cl_delegations); + INIT_LIST_HEAD(&clp->cl_sessions); + INIT_LIST_HEAD(&clp->cl_lru); + clear_bit(0, &clp->cl_cb_slot_busy); + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); + copy_verf(clp, verf); + rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); + clp->cl_flavor = rqstp->rq_flavor; + copy_cred(&clp->cl_cred, &rqstp->rq_cred); + gen_confirm(clp); + + return clp; +} + static int check_name(struct xdr_netobj name) { if (name.len == 0) @@ -902,93 +968,40 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, return NULL; } -/* a helper function for parse_callback */ -static int -parse_octet(unsigned int *lenp, char **addrp) -{ - unsigned int len = *lenp; - char *p = *addrp; - int n = -1; - char c; - - for (;;) { - if (!len) - break; - len--; - c = *p++; - if (c == '.') - break; - if ((c < '0') || (c > '9')) { - n = -1; - break; - } - if (n < 0) - n = 0; - n = (n * 10) + (c - '0'); - if (n > 255) { - n = -1; - break; - } - } - *lenp = len; - *addrp = p; - return n; -} - -/* parse and set the setclientid ipv4 callback address */ -static int -parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp) -{ - int temp = 0; - u32 cbaddr = 0; - u16 cbport = 0; - u32 addrlen = addr_len; - char *addr = addr_val; - int i, shift; - - /* ipaddress */ - shift = 24; - for(i = 4; i > 0 ; i--) { - if ((temp = parse_octet(&addrlen, &addr)) < 0) { - return 0; - } - cbaddr |= (temp << shift); - if (shift > 0) - shift -= 8; - } - *cbaddrp = cbaddr; - - /* port */ - shift = 8; - for(i = 2; i > 0 ; i--) { - if ((temp = parse_octet(&addrlen, &addr)) < 0) { - return 0; - } - cbport |= (temp << shift); - if (shift > 0) - shift -= 8; - } - *cbportp = cbport; - return 1; -} - static void -gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se) +gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) { struct nfs4_cb_conn *cb 
= &clp->cl_cb_conn; - - /* Currently, we only support tcp for the callback channel */ - if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3)) + unsigned short expected_family; + + /* Currently, we only support tcp and tcp6 for the callback channel */ + if (se->se_callback_netid_len == 3 && + !memcmp(se->se_callback_netid_val, "tcp", 3)) + expected_family = AF_INET; + else if (se->se_callback_netid_len == 4 && + !memcmp(se->se_callback_netid_val, "tcp6", 4)) + expected_family = AF_INET6; + else goto out_err; - if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val, - &cb->cb_addr, &cb->cb_port))) + cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, + se->se_callback_addr_len, + (struct sockaddr *) &cb->cb_addr, + sizeof(cb->cb_addr)); + + if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family) goto out_err; + + if (cb->cb_addr.ss_family == AF_INET6) + ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid; + cb->cb_minorversion = 0; cb->cb_prog = se->se_callback_prog; cb->cb_ident = se->se_callback_ident; return; out_err: + cb->cb_addr.ss_family = AF_UNSPEC; + cb->cb_addrlen = 0; dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " "will not receive delegations\n", clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); @@ -996,175 +1009,87 @@ out_err: return; } -void -nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) -{ - struct nfsd4_compoundres *resp = rqstp->rq_resp; - - resp->cstate.statp = statp; -} - /* - * Dereference the result pages. + * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size. */ -static void -nfsd4_release_respages(struct page **respages, short resused) +void +nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { - int i; + struct nfsd4_slot *slot = resp->cstate.slot; + unsigned int base; - dprintk("--> %s\n", __func__); - for (i = 0; i < resused; i++) { - if (!respages[i]) - continue; - put_page(respages[i]); - respages[i] = NULL; - } -} + dprintk("--> %s slot %p\n", __func__, slot); -static void -nfsd4_copy_pages(struct page **topages, struct page **frompages, short count) -{ - int i; + slot->sl_opcnt = resp->opcnt; + slot->sl_status = resp->cstate.status; - for (i = 0; i < count; i++) { - topages[i] = frompages[i]; - if (!topages[i]) - continue; - get_page(topages[i]); + if (nfsd4_not_cached(resp)) { + slot->sl_datalen = 0; + return; } + slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap; + base = (char *)resp->cstate.datap - + (char *)resp->xbuf->head[0].iov_base; + if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data, + slot->sl_datalen)) + WARN(1, "%s: sessions DRC could not cache compound\n", __func__); + return; } /* - * Cache the reply pages up to NFSD_PAGES_PER_SLOT + 1, clearing the previous - * pages. We add a page to NFSD_PAGES_PER_SLOT for the case where the total - * length of the XDR response is less than se_fmaxresp_cached - * (NFSD_PAGES_PER_SLOT * PAGE_SIZE) but the xdr_buf pages is used for a - * of the reply (e.g. readdir). + * Encode the replay sequence operation from the slot values. + * If cachethis is FALSE encode the uncached rep error on the next + * operation which sets resp->p and increments resp->opcnt for + * nfs4svc_encode_compoundres. * - * Store the base and length of the rq_req.head[0] page - * of the NFSv4.1 data, just past the rpc header. 
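The gen_callback() rewrite above drops the hand-rolled parse_ipv4() in favor of rpc_uaddr2sockaddr(). For reference, an NFSv4 "universal address" appends the port, high byte first, as two extra dotted decimals; a minimal IPv4-only parser in plain C (illustrative, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

static int parse_uaddr4(const char *uaddr, uint32_t *ip, uint16_t *port)
{
	unsigned a, b, c, d, p1, p2;

	if (sscanf(uaddr, "%u.%u.%u.%u.%u.%u", &a, &b, &c, &d, &p1, &p2) != 6)
		return -1;
	if (a > 255 || b > 255 || c > 255 || d > 255 || p1 > 255 || p2 > 255)
		return -1;
	*ip = (uint32_t)(a << 24 | b << 16 | c << 8 | d);
	*port = (uint16_t)(p1 << 8 | p2);
	return 0;
}

With this encoding, "192.0.2.5.3.255" names host 192.0.2.5, port 3 * 256 + 255 = 1023.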
*/ -void -nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) +static __be32 +nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, + struct nfsd4_compoundres *resp) { - struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; - struct svc_rqst *rqstp = resp->rqstp; - struct nfsd4_compoundargs *args = rqstp->rq_argp; - struct nfsd4_op *op = &args->ops[resp->opcnt]; - struct kvec *resv = &rqstp->rq_res.head[0]; - - dprintk("--> %s entry %p\n", __func__, entry); - - /* Don't cache a failed OP_SEQUENCE. */ - if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status) - return; + struct nfsd4_op *op; + struct nfsd4_slot *slot = resp->cstate.slot; - nfsd4_release_respages(entry->ce_respages, entry->ce_resused); - entry->ce_opcnt = resp->opcnt; - entry->ce_status = resp->cstate.status; + dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__, + resp->opcnt, resp->cstate.slot->sl_cachethis); - /* - * Don't need a page to cache just the sequence operation - the slot - * does this for us! - */ + /* Encode the replayed sequence operation */ + op = &args->ops[resp->opcnt - 1]; + nfsd4_encode_operation(resp, op); - if (nfsd4_not_cached(resp)) { - entry->ce_resused = 0; - entry->ce_rpchdrlen = 0; - dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__, - resp->cstate.slot->sl_cache_entry.ce_cachethis); - return; - } - entry->ce_resused = rqstp->rq_resused; - if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1) - entry->ce_resused = NFSD_PAGES_PER_SLOT + 1; - nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages, - entry->ce_resused); - entry->ce_datav.iov_base = resp->cstate.statp; - entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0])); - /* Current request rpc header length*/ - entry->ce_rpchdrlen = (char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0]); -} - -/* - * We keep the rpc header, but take the nfs reply from the replycache. - */ -static int -nfsd41_copy_replay_data(struct nfsd4_compoundres *resp, - struct nfsd4_cache_entry *entry) -{ - struct svc_rqst *rqstp = resp->rqstp; - struct kvec *resv = &resp->rqstp->rq_res.head[0]; - int len; - - /* Current request rpc header length*/ - len = (char *)resp->cstate.statp - - (char *)page_address(rqstp->rq_respages[0]); - if (entry->ce_datav.iov_len + len > PAGE_SIZE) { - dprintk("%s v41 cached reply too large (%Zd).\n", __func__, - entry->ce_datav.iov_len); - return 0; + /* Return nfserr_retry_uncached_rep in next operation. */ + if (args->opcnt > 1 && slot->sl_cachethis == 0) { + op = &args->ops[resp->opcnt++]; + op->status = nfserr_retry_uncached_rep; + nfsd4_encode_operation(resp, op); } - /* copy the cached reply nfsd data past the current rpc header */ - memcpy((char *)resv->iov_base + len, entry->ce_datav.iov_base, - entry->ce_datav.iov_len); - resv->iov_len = len + entry->ce_datav.iov_len; - return 1; + return op->status; } /* - * Keep the first page of the replay. Copy the NFSv4.1 data from the first - * cached page. Replace any futher replay pages from the cache. + * The sequence operation is not cached because we can use the slot and + * session values. 
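nfsd4_store_cache_entry() and nfsd4_replay_cache_entry() above split a reply at the end of the SEQUENCE op: everything after it is saved into the slot and copied back verbatim on replay. In outline (buffer sizes and names are illustrative):

#include <string.h>

struct slot {
	size_t datalen;
	unsigned char data[1024];  /* stand-in for sl_data[] */
};

/* Save the bytes that follow the SEQUENCE op; the patch relies on
 * nfsd4_check_drc_limit() to keep the reply within the slot's cache. */
static void store_reply(struct slot *s, const unsigned char *reply,
			size_t seq_end, size_t reply_len)
{
	s->datalen = reply_len - seq_end;
	if (s->datalen > sizeof(s->data))
		s->datalen = sizeof(s->data);
	memcpy(s->data, reply + seq_end, s->datalen);
}

/* On replay, re-encode SEQUENCE first, then splice the saved bytes in. */
static size_t replay_reply(const struct slot *s, unsigned char *reply,
			   size_t seq_end)
{
	memcpy(reply + seq_end, s->data, s->datalen);
	return seq_end + s->datalen;   /* new end of the reply */
}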
*/ __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq) { - struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry; + struct nfsd4_slot *slot = resp->cstate.slot; __be32 status; - dprintk("--> %s entry %p\n", __func__, entry); - - /* - * If this is just the sequence operation, we did not keep - * a page in the cache entry because we can just use the - * slot info stored in struct nfsd4_sequence that was checked - * against the slot in nfsd4_sequence(). - * - * This occurs when seq->cachethis is FALSE, or when the client - * session inactivity timer fires and a solo sequence operation - * is sent (lease renewal). - */ - if (seq && nfsd4_not_cached(resp)) { - seq->maxslots = resp->cstate.session->se_fchannel.maxreqs; - return nfs_ok; - } - - if (!nfsd41_copy_replay_data(resp, entry)) { - /* - * Not enough room to use the replay rpc header, send the - * cached header. Release all the allocated result pages. - */ - svc_free_res_pages(resp->rqstp); - nfsd4_copy_pages(resp->rqstp->rq_respages, entry->ce_respages, - entry->ce_resused); - } else { - /* Release all but the first allocated result page */ + dprintk("--> %s slot %p\n", __func__, slot); - resp->rqstp->rq_resused--; - svc_free_res_pages(resp->rqstp); + /* Either returns 0 or nfserr_retry_uncached */ + status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); + if (status == nfserr_retry_uncached_rep) + return status; - nfsd4_copy_pages(&resp->rqstp->rq_respages[1], - &entry->ce_respages[1], - entry->ce_resused - 1); - } + /* The sequence operation has been encoded, cstate->datap set. */ + memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen); - resp->rqstp->rq_resused = entry->ce_resused; - resp->opcnt = entry->ce_opcnt; - resp->cstate.iovlen = entry->ce_datav.iov_len + entry->ce_rpchdrlen; - status = entry->ce_status; + resp->opcnt = slot->sl_opcnt; + resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen); + status = slot->sl_status; return status; } @@ -1194,13 +1119,15 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, int status; unsigned int strhashval; char dname[HEXDIR_LEN]; + char addr_str[INET6_ADDRSTRLEN]; nfs4_verifier verf = exid->verifier; - u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr; + struct sockaddr *sa = svc_addr(rqstp); + rpc_ntop(sa, addr_str, sizeof(addr_str)); dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " - " ip_addr=%u flags %x, spa_how %d\n", + "ip_addr=%s flags %x, spa_how %d\n", __func__, rqstp, exid, exid->clname.len, exid->clname.data, - ip_addr, exid->flags, exid->spa_how); + addr_str, exid->flags, exid->spa_how); if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A)) return nfserr_inval; @@ -1281,28 +1208,23 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, out_new: /* Normal case */ - new = create_client(exid->clname, dname); + new = create_client(exid->clname, dname, rqstp, &verf); if (new == NULL) { - status = nfserr_resource; + status = nfserr_serverfault; goto out; } - copy_verf(new, &verf); - copy_cred(&new->cl_cred, &rqstp->rq_cred); - new->cl_addr = ip_addr; gen_clid(new); - gen_confirm(new); add_to_unconfirmed(new, strhashval); out_copy: exid->clientid.cl_boot = new->cl_clientid.cl_boot; exid->clientid.cl_id = new->cl_clientid.cl_id; - new->cl_slot.sl_seqid = 0; exid->seqid = 1; nfsd4_set_ex_flags(new, exid); dprintk("nfsd4_exchange_id seqid %d flags %x\n", - new->cl_slot.sl_seqid, new->cl_exchange_flags); + new->cl_cs_slot.sl_seqid, new->cl_exchange_flags); status = nfs_ok; out: @@ -1313,40 +1235,60 @@ 
error: } static int -check_slot_seqid(u32 seqid, struct nfsd4_slot *slot) +check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) { - dprintk("%s enter. seqid %d slot->sl_seqid %d\n", __func__, seqid, - slot->sl_seqid); + dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, + slot_seqid); /* The slot is in use, and no response has been sent. */ - if (slot->sl_inuse) { - if (seqid == slot->sl_seqid) + if (slot_inuse) { + if (seqid == slot_seqid) return nfserr_jukebox; else return nfserr_seq_misordered; } /* Normal */ - if (likely(seqid == slot->sl_seqid + 1)) + if (likely(seqid == slot_seqid + 1)) return nfs_ok; /* Replay */ - if (seqid == slot->sl_seqid) + if (seqid == slot_seqid) return nfserr_replay_cache; /* Wraparound */ - if (seqid == 1 && (slot->sl_seqid + 1) == 0) + if (seqid == 1 && (slot_seqid + 1) == 0) return nfs_ok; /* Misordered replay or misordered new request */ return nfserr_seq_misordered; } +/* + * Cache the create session result into the create session single DRC + * slot cache by saving the xdr structure. sl_seqid has been set. + * Do this for solo or embedded create session operations. + */ +static void +nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, + struct nfsd4_clid_slot *slot, int nfserr) +{ + slot->sl_status = nfserr; + memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); +} + +static __be32 +nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, + struct nfsd4_clid_slot *slot) +{ + memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); + return slot->sl_status; +} + __be32 nfsd4_create_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_create_session *cr_ses) { - u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr; - struct nfsd4_compoundres *resp = rqstp->rq_resp; + struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; - struct nfsd4_slot *slot = NULL; + struct nfsd4_clid_slot *cs_slot = NULL; int status = 0; nfs4_lock_state(); @@ -1354,40 +1296,38 @@ nfsd4_create_session(struct svc_rqst *rqstp, conf = find_confirmed_client(&cr_ses->clientid); if (conf) { - slot = &conf->cl_slot; - status = check_slot_seqid(cr_ses->seqid, slot); + cs_slot = &conf->cl_cs_slot; + status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status == nfserr_replay_cache) { dprintk("Got a create_session replay! 
seqid= %d\n", - slot->sl_seqid); - cstate->slot = slot; - cstate->status = status; + cs_slot->sl_seqid); /* Return the cached reply status */ - status = nfsd4_replay_cache_entry(resp, NULL); + status = nfsd4_replay_create_session(cr_ses, cs_slot); goto out; - } else if (cr_ses->seqid != conf->cl_slot.sl_seqid + 1) { + } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) { status = nfserr_seq_misordered; dprintk("Sequence misordered!\n"); dprintk("Expected seqid= %d but got seqid= %d\n", - slot->sl_seqid, cr_ses->seqid); + cs_slot->sl_seqid, cr_ses->seqid); goto out; } - conf->cl_slot.sl_seqid++; + cs_slot->sl_seqid++; } else if (unconf) { if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || - (ip_addr != unconf->cl_addr)) { + !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { status = nfserr_clid_inuse; goto out; } - slot = &unconf->cl_slot; - status = check_slot_seqid(cr_ses->seqid, slot); + cs_slot = &unconf->cl_cs_slot; + status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { /* an unconfirmed replay returns misordered */ status = nfserr_seq_misordered; - goto out; + goto out_cache; } - slot->sl_seqid++; /* from 0 to 1 */ + cs_slot->sl_seqid++; /* from 0 to 1 */ move_to_confirmed(unconf); /* @@ -1396,6 +1336,19 @@ nfsd4_create_session(struct svc_rqst *rqstp, cr_ses->flags &= ~SESSION4_PERSIST; cr_ses->flags &= ~SESSION4_RDMA; + if (cr_ses->flags & SESSION4_BACK_CHAN) { + unconf->cl_cb_xprt = rqstp->rq_xprt; + svc_xprt_get(unconf->cl_cb_xprt); + rpc_copy_addr( + (struct sockaddr *)&unconf->cl_cb_conn.cb_addr, + sa); + unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa); + unconf->cl_cb_conn.cb_minorversion = + cstate->minorversion; + unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog; + unconf->cl_cb_seq_nr = 1; + nfsd4_probe_callback(unconf); + } conf = unconf; } else { status = nfserr_stale_clientid; @@ -1408,12 +1361,11 @@ nfsd4_create_session(struct svc_rqst *rqstp, memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN); - cr_ses->seqid = slot->sl_seqid; + cr_ses->seqid = cs_slot->sl_seqid; - slot->sl_inuse = true; - cstate->slot = slot; - /* Ensure a page is used for the cache */ - slot->sl_cache_entry.ce_cachethis = 1; +out_cache: + /* cache solo and embedded create sessions under the state lock */ + nfsd4_cache_create_session(cr_ses, cs_slot, status); out: nfs4_unlock_state(); dprintk("%s returns %d\n", __func__, ntohl(status)); @@ -1478,18 +1430,23 @@ nfsd4_sequence(struct svc_rqst *rqstp, if (seq->slotid >= session->se_fchannel.maxreqs) goto out; - slot = &session->se_slots[seq->slotid]; + slot = session->se_slots[seq->slotid]; dprintk("%s: slotid %d\n", __func__, seq->slotid); - status = check_slot_seqid(seq->seqid, slot); + /* We do not negotiate the number of slots yet, so set the + * maxslots to the session maxreqs which is used to encode + * sr_highest_slotid and the sr_target_slot id to maxslots */ + seq->maxslots = session->se_fchannel.maxreqs; + + status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse); if (status == nfserr_replay_cache) { cstate->slot = slot; cstate->session = session; /* Return the cached reply status and set cstate->status - * for nfsd4_svc_encode_compoundres processing */ + * for nfsd4_proc_compound processing */ status = nfsd4_replay_cache_entry(resp, seq); cstate->status = nfserr_replay_cache; - goto replay_cache; + goto out; } if (status) goto out; @@ -1497,23 +1454,23 @@ nfsd4_sequence(struct svc_rqst *rqstp, /* Success! 
bump slot seqid */ slot->sl_inuse = true; slot->sl_seqid = seq->seqid; - slot->sl_cache_entry.ce_cachethis = seq->cachethis; - /* Always set the cache entry cachethis for solo sequence */ - if (nfsd4_is_solo_sequence(resp)) - slot->sl_cache_entry.ce_cachethis = 1; + slot->sl_cachethis = seq->cachethis; cstate->slot = slot; cstate->session = session; -replay_cache: - /* Renew the clientid on success and on replay. - * Hold a session reference until done processing the compound: + /* Hold a session reference until done processing the compound: * nfsd4_put_session called only if the cstate slot is set. */ - renew_client(session->se_client); nfsd4_get_session(session); out: spin_unlock(&sessionid_lock); + /* Renew the clientid on success and on replay */ + if (cstate->session) { + nfs4_lock_state(); + renew_client(session->se_client); + nfs4_unlock_state(); + } dprintk("%s: return %d\n", __func__, ntohl(status)); return status; } @@ -1522,7 +1479,7 @@ __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid *setclid) { - struct sockaddr_in *sin = svc_addr_in(rqstp); + struct sockaddr *sa = svc_addr(rqstp); struct xdr_netobj clname = { .len = setclid->se_namelen, .data = setclid->se_name, @@ -1531,7 +1488,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unsigned int strhashval; struct nfs4_client *conf, *unconf, *new; __be32 status; - char *princ; char dname[HEXDIR_LEN]; if (!check_name(clname)) @@ -1554,8 +1510,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, /* RFC 3530 14.2.33 CASE 0: */ status = nfserr_clid_inuse; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { - dprintk("NFSD: setclientid: string in use by client" - " at %pI4\n", &conf->cl_addr); + char addr_str[INET6_ADDRSTRLEN]; + rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, + sizeof(addr_str)); + dprintk("NFSD: setclientid: string in use by client " + "at %s\n", addr_str); goto out; } } @@ -1573,7 +1532,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, */ if (unconf) expire_client(unconf); - new = create_client(clname, dname); + new = create_client(clname, dname, rqstp, &clverifier); if (new == NULL) goto out; gen_clid(new); @@ -1590,7 +1549,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, */ expire_client(unconf); } - new = create_client(clname, dname); + new = create_client(clname, dname, rqstp, &clverifier); if (new == NULL) goto out; copy_clid(new, conf); @@ -1600,7 +1559,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * probable client reboot; state will be removed if * confirmed. */ - new = create_client(clname, dname); + new = create_client(clname, dname, rqstp, &clverifier); if (new == NULL) goto out; gen_clid(new); @@ -1611,25 +1570,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * confirmed. 
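check_slot_seqid(), shown earlier in this patch, is a four-outcome state machine over 32-bit sequence numbers. A standalone model that can be unit-tested in user space (nfserr_jukebox becomes SEQ_IN_PROGRESS here):

#include <stdint.h>

enum seq_result { SEQ_OK, SEQ_REPLAY, SEQ_MISORDERED, SEQ_IN_PROGRESS };

static enum seq_result check_seqid(uint32_t seqid, uint32_t slot_seqid,
				   int slot_inuse)
{
	if (slot_inuse)                 /* request still being processed */
		return seqid == slot_seqid ? SEQ_IN_PROGRESS : SEQ_MISORDERED;
	if (seqid == slot_seqid + 1)    /* normal progression */
		return SEQ_OK;
	if (seqid == slot_seqid)        /* retransmission: serve from cache */
		return SEQ_REPLAY;
	if (seqid == 1 && slot_seqid + 1 == 0)
		return SEQ_OK;          /* 32-bit wraparound */
	return SEQ_MISORDERED;
}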
*/ expire_client(unconf); - new = create_client(clname, dname); + new = create_client(clname, dname, rqstp, &clverifier); if (new == NULL) goto out; gen_clid(new); } - copy_verf(new, &clverifier); - new->cl_addr = sin->sin_addr.s_addr; - new->cl_flavor = rqstp->rq_flavor; - princ = svc_gss_principal(rqstp); - if (princ) { - new->cl_principal = kstrdup(princ, GFP_KERNEL); - if (new->cl_principal == NULL) { - free_client(new); - goto out; - } - } - copy_cred(&new->cl_cred, &rqstp->rq_cred); - gen_confirm(new); - gen_callback(new, setclid); + gen_callback(new, setclid, rpc_get_scope_id(sa)); add_to_unconfirmed(new, strhashval); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; @@ -1651,7 +1597,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid_confirm *setclientid_confirm) { - struct sockaddr_in *sin = svc_addr_in(rqstp); + struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; nfs4_verifier confirm = setclientid_confirm->sc_confirm; clientid_t * clid = &setclientid_confirm->sc_clientid; @@ -1670,9 +1616,9 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, unconf = find_unconfirmed_client(clid); status = nfserr_clid_inuse; - if (conf && conf->cl_addr != sin->sin_addr.s_addr) + if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa)) goto out; - if (unconf && unconf->cl_addr != sin->sin_addr.s_addr) + if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa)) goto out; /* @@ -2163,7 +2109,7 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) return -EAGAIN; } -static struct lock_manager_operations nfsd_lease_mng_ops = { +static const struct lock_manager_operations nfsd_lease_mng_ops = { .fl_break = nfsd_break_deleg_cb, .fl_release_private = nfsd_release_deleg_cb, .fl_copy_lock = nfsd_copy_lock_deleg_cb, @@ -3368,7 +3314,7 @@ nfs4_transform_lock_offset(struct file_lock *lock) /* Hack!: For now, we're defining this just so we can use a pointer to it * as a unique cookie to identify our (NFSv4's) posix locks. 
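All four setclientid branches above now go through create_client(), which owns every failure point. The shape of the pattern, in plain user-space C with illustrative names: allocate, do the fallible duplication first, unwind on failure, and leave infallible field setup for last.

#include <stdlib.h>
#include <string.h>

struct client {
	char *principal;   /* duplicated first: the only step that can fail */
	unsigned flags;
};

static struct client *client_create(const char *princ)
{
	struct client *clp = calloc(1, sizeof(*clp));

	if (!clp)
		return NULL;
	if (princ) {
		clp->principal = strdup(princ);
		if (!clp->principal) {
			free(clp);   /* unwind the partial object */
			return NULL;
		}
	}
	clp->flags = 1;   /* infallible initialization happens last */
	return clp;
}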
*/ -static struct lock_manager_operations nfsd_posix_mng_ops = { +static const struct lock_manager_operations nfsd_posix_mng_ops = { }; static inline void @@ -4072,7 +4018,7 @@ set_max_delegations(void) /* initialization to perform when the nfsd service is started: */ -static void +static int __nfs4_state_start(void) { unsigned long grace_time; @@ -4084,19 +4030,26 @@ __nfs4_state_start(void) printk(KERN_INFO "NFSD: starting %ld-second grace period\n", grace_time/HZ); laundry_wq = create_singlethread_workqueue("nfsd4"); + if (laundry_wq == NULL) + return -ENOMEM; queue_delayed_work(laundry_wq, &laundromat_work, grace_time); set_max_delegations(); + return set_callback_cred(); } -void +int nfs4_state_start(void) { + int ret; + if (nfs4_init) - return; + return 0; nfsd4_load_reboot_recovery_data(); - __nfs4_state_start(); + ret = __nfs4_state_start(); + if (ret) + return ret; nfs4_init = 1; - return; + return 0; } time_t diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2dcc7fe..0fbd50c 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1599,7 +1599,8 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location, static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *stat) { struct svc_fh tmp_fh; - char *path, *rootpath; + char *path = NULL, *rootpath; + size_t rootlen; fh_init(&tmp_fh, NFS4_FHSIZE); *stat = exp_pseudoroot(rqstp, &tmp_fh); @@ -1609,14 +1610,18 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 * path = exp->ex_pathname; - if (strncmp(path, rootpath, strlen(rootpath))) { + rootlen = strlen(rootpath); + if (strncmp(path, rootpath, rootlen)) { dprintk("nfsd: fs_locations failed;" "%s is not contained in %s\n", path, rootpath); *stat = nfserr_notsupp; - return NULL; + path = NULL; + goto out; } - - return path + strlen(rootpath); + path += rootlen; +out: + fh_put(&tmp_fh); + return path; } /* @@ -1793,11 +1798,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, goto out_nfserr; } } - if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) { - if (exp->ex_fslocs.locations == NULL) { - bmval0 &= ~FATTR4_WORD0_FS_LOCATIONS; - } - } if ((buflen -= 16) < 0) goto out_resource; @@ -1825,8 +1825,6 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, goto out_resource; if (!aclsupport) word0 &= ~FATTR4_WORD0_ACL; - if (!exp->ex_fslocs.locations) - word0 &= ~FATTR4_WORD0_FS_LOCATIONS; if (!word2) { WRITE32(2); WRITE32(word0); @@ -3064,6 +3062,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, WRITE32(0); ADJUST_ARGS(); + resp->cstate.datap = p; /* DRC cache data pointer */ return 0; } @@ -3166,7 +3165,7 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp) return status; session = resp->cstate.session; - if (session == NULL || slot->sl_cache_entry.ce_cachethis == 0) + if (session == NULL || slot->sl_cachethis == 0) return status; if (resp->opcnt >= args->opcnt) @@ -3291,6 +3290,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo /* * All that remains is to write the tag and operation count... 
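nfs4_state_start() above changes from void to int so that allocation failures propagate to the caller (nfsd_svc() picks this up later in the patch). The conversion pattern in miniature, with an assumed fallible helper standing in for create_singlethread_workqueue():

#include <errno.h>
#include <stdlib.h>

static int subsystem_up;
static void *g_worker;

static void *create_worker(void)   /* stand-in for the workqueue */
{
	return malloc(64);
}

static int __subsystem_start(void)
{
	g_worker = create_worker();
	if (g_worker == NULL)
		return -ENOMEM;    /* this failure was silently ignored before */
	return 0;
}

int subsystem_start(void)
{
	int ret;

	if (subsystem_up)
		return 0;          /* already running is not an error */
	ret = __subsystem_start();
	if (ret)
		return ret;
	subsystem_up = 1;
	return 0;
}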
*/ + struct nfsd4_compound_state *cs = &resp->cstate; struct kvec *iov; p = resp->tagp; *p++ = htonl(resp->taglen); @@ -3304,17 +3304,11 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo iov = &rqstp->rq_res.head[0]; iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base; BUG_ON(iov->iov_len > PAGE_SIZE); - if (nfsd4_has_session(&resp->cstate)) { - if (resp->cstate.status == nfserr_replay_cache && - !nfsd4_not_cached(resp)) { - iov->iov_len = resp->cstate.iovlen; - } else { - nfsd4_store_cache_entry(resp); - dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); - resp->cstate.slot->sl_inuse = 0; - } - if (resp->cstate.session) - nfsd4_put_session(resp->cstate.session); + if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) { + nfsd4_store_cache_entry(resp); + dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); + resp->cstate.slot->sl_inuse = false; + nfsd4_put_session(resp->cstate.session); } return 1; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 7e906c5..00388d2 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -174,12 +174,13 @@ static const struct file_operations exports_operations = { }; extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); +extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); static struct file_operations pool_stats_operations = { .open = nfsd_pool_stats_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = nfsd_pool_stats_release, .owner = THIS_MODULE, }; @@ -776,10 +777,7 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size) size -= len; mesg += len; } - - mutex_unlock(&nfsd_mutex); - return (mesg-buf); - + rv = mesg - buf; out_free: kfree(nthreads); mutex_unlock(&nfsd_mutex); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 8847f3f..01965b2 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -397,44 +397,51 @@ static inline void _fh_update_old(struct dentry *dentry, fh->ofh_dirino = 0; } -__be32 -fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, - struct svc_fh *ref_fh) +static bool is_root_export(struct svc_export *exp) { - /* ref_fh is a reference file handle. - * if it is non-null and for the same filesystem, then we should compose - * a filehandle which is of the same version, where possible. - * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca - * Then create a 32byte filehandle using nfs_fhbase_old - * - */ + return exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root; +} - u8 version; - u8 fsid_type = 0; - struct inode * inode = dentry->d_inode; - struct dentry *parent = dentry->d_parent; - __u32 *datap; - dev_t ex_dev = exp->ex_path.dentry->d_inode->i_sb->s_dev; - int root_export = (exp->ex_path.dentry == exp->ex_path.dentry->d_sb->s_root); +static struct super_block *exp_sb(struct svc_export *exp) +{ + return exp->ex_path.dentry->d_inode->i_sb; +} - dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n", - MAJOR(ex_dev), MINOR(ex_dev), - (long) exp->ex_path.dentry->d_inode->i_ino, - parent->d_name.name, dentry->d_name.name, - (inode ? 
inode->i_ino : 0)); +static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) +{ + switch (fsid_type) { + case FSID_DEV: + if (!old_valid_dev(exp_sb(exp)->s_dev)) + return 0; + /* FALL THROUGH */ + case FSID_MAJOR_MINOR: + case FSID_ENCODE_DEV: + return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV; + case FSID_NUM: + return exp->ex_flags & NFSEXP_FSID; + case FSID_UUID8: + case FSID_UUID16: + if (!is_root_export(exp)) + return 0; + /* fall through */ + case FSID_UUID4_INUM: + case FSID_UUID16_INUM: + return exp->ex_uuid != NULL; + } + return 1; +} - /* Choose filehandle version and fsid type based on - * the reference filehandle (if it is in the same export) - * or the export options. - */ - retry: + +static void set_version_and_fsid_type(struct svc_fh *fhp, struct svc_export *exp, struct svc_fh *ref_fh) +{ + u8 version; + u8 fsid_type; +retry: version = 1; if (ref_fh && ref_fh->fh_export == exp) { version = ref_fh->fh_handle.fh_version; fsid_type = ref_fh->fh_handle.fh_fsid_type; - if (ref_fh == fhp) - fh_put(ref_fh); ref_fh = NULL; switch (version) { @@ -447,58 +454,66 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, goto retry; } - /* Need to check that this type works for this - * export point. As the fsid -> filesystem mapping - * was guided by user-space, there is no guarantee - * that the filesystem actually supports that fsid - * type. If it doesn't we loop around again without - * ref_fh set. + /* + * As the fsid -> filesystem mapping was guided by + * user-space, there is no guarantee that the filesystem + * actually supports that fsid type. If it doesn't we + * loop around again without ref_fh set. */ - switch(fsid_type) { - case FSID_DEV: - if (!old_valid_dev(ex_dev)) - goto retry; - /* FALL THROUGH */ - case FSID_MAJOR_MINOR: - case FSID_ENCODE_DEV: - if (!(exp->ex_path.dentry->d_inode->i_sb->s_type->fs_flags - & FS_REQUIRES_DEV)) - goto retry; - break; - case FSID_NUM: - if (! (exp->ex_flags & NFSEXP_FSID)) - goto retry; - break; - case FSID_UUID8: - case FSID_UUID16: - if (!root_export) - goto retry; - /* fall through */ - case FSID_UUID4_INUM: - case FSID_UUID16_INUM: - if (exp->ex_uuid == NULL) - goto retry; - break; - } + if (!fsid_type_ok_for_exp(fsid_type, exp)) + goto retry; } else if (exp->ex_flags & NFSEXP_FSID) { fsid_type = FSID_NUM; } else if (exp->ex_uuid) { if (fhp->fh_maxsize >= 64) { - if (root_export) + if (is_root_export(exp)) fsid_type = FSID_UUID16; else fsid_type = FSID_UUID16_INUM; } else { - if (root_export) + if (is_root_export(exp)) fsid_type = FSID_UUID8; else fsid_type = FSID_UUID4_INUM; } - } else if (!old_valid_dev(ex_dev)) + } else if (!old_valid_dev(exp_sb(exp)->s_dev)) /* for newer device numbers, we must use a newer fsid format */ fsid_type = FSID_ENCODE_DEV; else fsid_type = FSID_DEV; + fhp->fh_handle.fh_version = version; + if (version) + fhp->fh_handle.fh_fsid_type = fsid_type; +} + +__be32 +fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, + struct svc_fh *ref_fh) +{ + /* ref_fh is a reference file handle. + * if it is non-null and for the same filesystem, then we should compose + * a filehandle which is of the same version, where possible. 
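set_version_and_fsid_type() above trusts the reference filehandle's fsid type only once: if the export cannot actually support it, the loop retries with the reference cleared and falls back to the export options. The control flow in isolation (types and the predicate are placeholders):

static int pick_fsid_type(int have_ref, int ref_type, int def_type,
			  int (*ok_for_export)(int))
{
retry:
	if (have_ref) {
		int type = ref_type;

		have_ref = 0;            /* consult the reference only once */
		if (!ok_for_export(type))
			goto retry;      /* fall back to the export options */
		return type;
	}
	return def_type;                 /* derived from export flags/uuid */
}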
+ * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca, + * then create a 32-byte filehandle using nfs_fhbase_old + * + */ + + struct inode * inode = dentry->d_inode; + struct dentry *parent = dentry->d_parent; + __u32 *datap; + dev_t ex_dev = exp_sb(exp)->s_dev; + + dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n", + MAJOR(ex_dev), MINOR(ex_dev), + (long) exp->ex_path.dentry->d_inode->i_ino, + parent->d_name.name, dentry->d_name.name, + (inode ? inode->i_ino : 0)); + + /* Choose filehandle version and fsid type based on + * the reference filehandle (if it is in the same export) + * or the export options. + */ + set_version_and_fsid_type(fhp, exp, ref_fh); if (ref_fh == fhp) fh_put(ref_fh); @@ -516,7 +531,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, fhp->fh_export = exp; cache_get(&exp->h); - if (version == 0xca) { + if (fhp->fh_handle.fh_version == 0xca) { /* old style filehandle please */ memset(&fhp->fh_handle.fh_base, 0, NFS_FHSIZE); fhp->fh_handle.fh_size = NFS_FHSIZE; @@ -530,22 +545,22 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, _fh_update_old(dentry, exp, &fhp->fh_handle); } else { int len; - fhp->fh_handle.fh_version = 1; fhp->fh_handle.fh_auth_type = 0; datap = fhp->fh_handle.fh_auth+0; - fhp->fh_handle.fh_fsid_type = fsid_type; - mk_fsid(fsid_type, datap, ex_dev, + mk_fsid(fhp->fh_handle.fh_fsid_type, datap, ex_dev, exp->ex_path.dentry->d_inode->i_ino, exp->ex_fsid, exp->ex_uuid); - len = key_len(fsid_type); + len = key_len(fhp->fh_handle.fh_fsid_type); datap += len/4; fhp->fh_handle.fh_size = 4 + len; if (inode) _fh_update(fhp, exp, dentry); - if (fhp->fh_handle.fh_fileid_type == 255) + if (fhp->fh_handle.fh_fileid_type == 255) { + fh_put(fhp); return nfserr_opnotsupp; + } } return 0; @@ -639,8 +654,7 @@ enum fsid_source fsid_source(struct svc_fh *fhp) case FSID_DEV: case FSID_ENCODE_DEV: case FSID_MAJOR_MINOR: - if (fhp->fh_export->ex_path.dentry->d_inode->i_sb->s_type->fs_flags - & FS_REQUIRES_DEV) + if (exp_sb(fhp->fh_export)->s_type->fs_flags & FS_REQUIRES_DEV) return FSIDSOURCE_DEV; break; case FSID_NUM: diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 24d58ad..67ea83e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -34,6 +34,7 @@ #include <linux/nfsd/syscall.h> #include <linux/lockd/bind.h> #include <linux/nfsacl.h> +#include <linux/seq_file.h> #define NFSDDBG_FACILITY NFSDDBG_SVC @@ -66,6 +67,16 @@ struct timeval nfssvc_boot; DEFINE_MUTEX(nfsd_mutex); struct svc_serv *nfsd_serv; +/* + * nfsd_drc_lock protects nfsd_drc_max_mem and nfsd_drc_mem_used. + * nfsd_drc_max_mem limits the total amount of memory available for + * version 4.1 DRC caches. + * nfsd_drc_mem_used tracks the current version 4.1 DRC memory usage. 
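The globals introduced below are only ever read and written under nfsd_drc_lock; set_forechannel_drc_size() and free_session() earlier in the patch follow the same discipline. A user-space model of the reserve/release pair using a pthread mutex (names and sizes illustrative):

#include <pthread.h>

static pthread_mutex_t drc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int drc_max_mem = 1024 * 1024;
static unsigned int drc_mem_used;

/* Reserve up to `want` bytes; like the patch, trim rather than fail. */
static unsigned int drc_reserve(unsigned int want)
{
	unsigned int got = want;

	pthread_mutex_lock(&drc_lock);
	if (drc_mem_used + got > drc_max_mem)
		got = drc_max_mem - drc_mem_used;
	drc_mem_used += got;
	pthread_mutex_unlock(&drc_lock);
	return got;
}

/* Give the reservation back when the session is freed. */
static void drc_release(unsigned int amount)
{
	pthread_mutex_lock(&drc_lock);
	drc_mem_used -= amount;
	pthread_mutex_unlock(&drc_lock);
}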
+ */ +spinlock_t nfsd_drc_lock; +unsigned int nfsd_drc_max_mem; +unsigned int nfsd_drc_mem_used; + #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) static struct svc_stat nfsd_acl_svcstats; static struct svc_version * nfsd_acl_version[] = { @@ -235,13 +246,12 @@ void nfsd_reset_versions(void) */ static void set_max_drc(void) { - /* The percent of nr_free_buffer_pages used by the V4.1 server DRC */ - #define NFSD_DRC_SIZE_SHIFT 7 - nfsd_serv->sv_drc_max_pages = nr_free_buffer_pages() - >> NFSD_DRC_SIZE_SHIFT; - nfsd_serv->sv_drc_pages_used = 0; - dprintk("%s svc_drc_max_pages %u\n", __func__, - nfsd_serv->sv_drc_max_pages); + #define NFSD_DRC_SIZE_SHIFT 10 + nfsd_drc_max_mem = (nr_free_buffer_pages() + >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; + nfsd_drc_mem_used = 0; + spin_lock_init(&nfsd_drc_lock); + dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); } int nfsd_create_serv(void) @@ -401,7 +411,9 @@ nfsd_svc(unsigned short port, int nrservs) error = nfsd_racache_init(2*nrservs); if (error<0) goto out; - nfs4_state_start(); + error = nfs4_state_start(); + if (error) + goto out; nfsd_reset_versions(); @@ -569,10 +581,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) + rqstp->rq_res.head[0].iov_len; rqstp->rq_res.head[0].iov_len += sizeof(__be32); - /* NFSv4.1 DRC requires statp */ - if (rqstp->rq_vers == 4) - nfsd4_set_statp(rqstp, statp); - /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); @@ -607,7 +615,25 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) int nfsd_pool_stats_open(struct inode *inode, struct file *file) { - if (nfsd_serv == NULL) + int ret; + mutex_lock(&nfsd_mutex); + if (nfsd_serv == NULL) { + mutex_unlock(&nfsd_mutex); return -ENODEV; - return svc_pool_stats_open(nfsd_serv, file); + } + /* bump up the pseudo refcount while traversing */ + svc_get(nfsd_serv); + ret = svc_pool_stats_open(nfsd_serv, file); + mutex_unlock(&nfsd_mutex); + return ret; +} + +int nfsd_pool_stats_release(struct inode *inode, struct file *file) +{ + int ret = seq_release(inode, file); + mutex_lock(&nfsd_mutex); + /* this function really, really should have been called svc_put() */ + svc_destroy(nfsd_serv); + mutex_unlock(&nfsd_mutex); + return ret; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 8fa09bf..a293f02 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -89,6 +89,12 @@ struct raparm_hbucket { #define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1) static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE]; +static inline int +nfsd_v4client(struct svc_rqst *rq) +{ + return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4; +} + /* * Called from nfsd_lookup and encode_dirent. Check if we have crossed * a mount point. 
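The nfsd_pool_stats_open()/nfsd_pool_stats_release() pairing above takes a reference on nfsd_serv at open and drops it at release, so the server cannot be torn down while the stats file is held open. The same shape in user-space C (the mutex and refcount are stand-ins for nfsd_mutex and svc_get/svc_destroy):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct server { int refs; };

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static struct server *the_server;

static int stats_open(void)
{
	pthread_mutex_lock(&big_lock);
	if (the_server == NULL) {
		pthread_mutex_unlock(&big_lock);
		return -ENODEV;
	}
	the_server->refs++;              /* svc_get() analogue */
	pthread_mutex_unlock(&big_lock);
	return 0;
}

/* Must only be called after a successful stats_open(). */
static void stats_release(void)
{
	pthread_mutex_lock(&big_lock);
	if (--the_server->refs == 0) {   /* svc_destroy() analogue */
		free(the_server);
		the_server = NULL;
	}
	pthread_mutex_unlock(&big_lock);
}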
@@ -115,7 +121,8 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, path_put(&path); goto out; } - if ((exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) { + if (nfsd_v4client(rqstp) || + (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) { /* successfully crossed mount point */ /* * This is subtle: path.dentry is *not* on path.mnt diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index c668bca..6a2711f 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -46,7 +46,7 @@ void nilfs_btnode_cache_init_once(struct address_space *btnc) INIT_LIST_HEAD(&btnc->i_mmap_nonlinear); } -static struct address_space_operations def_btnode_aops = { +static const struct address_space_operations def_btnode_aops = { .sync_page = block_sync_page, }; diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 6bd84a0..fc8278c 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -151,7 +151,7 @@ struct file_operations nilfs_file_operations = { .splice_read = generic_file_splice_read, }; -struct inode_operations nilfs_file_inode_operations = { +const struct inode_operations nilfs_file_inode_operations = { .truncate = nilfs_truncate, .setattr = nilfs_setattr, .permission = nilfs_permission, diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 1b3c2bb..e6de0a2 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -52,7 +52,7 @@ #include "dat.h" #include "ifile.h" -static struct address_space_operations def_gcinode_aops = { +static const struct address_space_operations def_gcinode_aops = { .sync_page = block_sync_page, }; diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 807e584..2d2c501 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -238,7 +238,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, return size; } -struct address_space_operations nilfs_aops = { +const struct address_space_operations nilfs_aops = { .writepage = nilfs_writepage, .readpage = nilfs_readpage, .sync_page = block_sync_page, diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 156bf60..b18c499 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -427,12 +427,12 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) } -static struct address_space_operations def_mdt_aops = { +static const struct address_space_operations def_mdt_aops = { .writepage = nilfs_mdt_write_page, .sync_page = block_sync_page, }; -static struct inode_operations def_mdt_iops; +static const struct inode_operations def_mdt_iops; static struct file_operations def_mdt_fops; /* diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index df70dad..ed02e88 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -448,7 +448,7 @@ out: return err; } -struct inode_operations nilfs_dir_inode_operations = { +const struct inode_operations nilfs_dir_inode_operations = { .create = nilfs_create, .lookup = nilfs_lookup, .link = nilfs_link, @@ -462,12 +462,12 @@ struct inode_operations nilfs_dir_inode_operations = { .permission = nilfs_permission, }; -struct inode_operations nilfs_special_inode_operations = { +const struct inode_operations nilfs_special_inode_operations = { .setattr = nilfs_setattr, .permission = nilfs_permission, }; -struct inode_operations nilfs_symlink_inode_operations = { +const struct inode_operations nilfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 724c637..bad7368 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -295,12 
+295,12 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *); * Inodes and files operations */ extern struct file_operations nilfs_dir_operations; -extern struct inode_operations nilfs_file_inode_operations; +extern const struct inode_operations nilfs_file_inode_operations; extern struct file_operations nilfs_file_operations; -extern struct address_space_operations nilfs_aops; -extern struct inode_operations nilfs_dir_inode_operations; -extern struct inode_operations nilfs_special_inode_operations; -extern struct inode_operations nilfs_symlink_inode_operations; +extern const struct address_space_operations nilfs_aops; +extern const struct inode_operations nilfs_dir_inode_operations; +extern const struct inode_operations nilfs_special_inode_operations; +extern const struct inode_operations nilfs_symlink_inode_operations; /* * filesystem type diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 55f3d6b..644e667 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -504,7 +504,7 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs) return 0; } -static struct super_operations nilfs_sops = { +static const struct super_operations nilfs_sops = { .alloc_inode = nilfs_alloc_inode, .destroy_inode = nilfs_destroy_inode, .dirty_inode = nilfs_dirty_inode, @@ -560,7 +560,7 @@ nilfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, nilfs_nfs_get_inode); } -static struct export_operations nilfs_export_ops = { +static const struct export_operations nilfs_export_ops = { .fh_to_dentry = nilfs_fh_to_dentry, .fh_to_parent = nilfs_fh_to_parent, .get_parent = nilfs_get_parent, diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index d4168e2..ad391a8 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -591,9 +591,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) nilfs->ns_mount_state = le16_to_cpu(sbp->s_state); - bdi = nilfs->ns_bdev->bd_inode_backing_dev_info; - if (!bdi) - bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info; + bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info; nilfs->ns_bdi = bdi ? : &default_backing_dev_info; /* Finding last segment */ diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 4350d49..663c0e3 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2146,46 +2146,6 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, } /** - * ntfs_file_writev - - * - * Basically the same as generic_file_writev() except that it ends up calling - * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock(). 
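A large share of the nilfs2 hunks above simply add const to operations tables. Qualifying a vtable const lets the compiler place it in read-only data and turns any stray write into a build- or fault-time error; the idiom in miniature:

struct example_ops {
	int (*open)(void);
	void (*release)(void);
};

static int ex_open(void) { return 0; }
static void ex_release(void) { }

/* const: the table lands in .rodata and cannot be patched at run time */
static const struct example_ops ex_ops = {
	.open    = ex_open,
	.release = ex_release,
};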
- */ -static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov, - unsigned long nr_segs, loff_t *ppos) -{ - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - struct kiocb kiocb; - ssize_t ret; - - mutex_lock(&inode->i_mutex); - init_sync_kiocb(&kiocb, file); - ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); - if (ret == -EIOCBQUEUED) - ret = wait_on_sync_kiocb(&kiocb); - mutex_unlock(&inode->i_mutex); - if (ret > 0) { - int err = generic_write_sync(file, *ppos - ret, ret); - if (err < 0) - ret = err; - } - return ret; -} - -/** - * ntfs_file_write - simple wrapper for ntfs_file_writev() - */ -static ssize_t ntfs_file_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct iovec local_iov = { .iov_base = (void __user *)buf, - .iov_len = count }; - - return ntfs_file_writev(file, &local_iov, 1, ppos); -} - -/** * ntfs_file_fsync - sync a file to disk * @filp: file to be synced * @dentry: dentry describing the file to sync @@ -2247,7 +2207,7 @@ const struct file_operations ntfs_file_ops = { .read = do_sync_read, /* Read from file. */ .aio_read = generic_file_aio_read, /* Async read from file. */ #ifdef NTFS_RW - .write = ntfs_file_write, /* Write to file. */ + .write = do_sync_write, /* Write to file. */ .aio_write = ntfs_file_aio_write, /* Async write to file. */ /*.release = ,*/ /* Last file is closed. See fs/ext2/file.c:: diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index 50931b1..8b2549f 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h @@ -829,7 +829,7 @@ enum { /* Note, FILE_ATTR_VALID_SET_FLAGS masks out the old DOS VolId, the F_A_DEVICE, F_A_DIRECTORY, F_A_SPARSE_FILE, F_A_REPARSE_POINT, F_A_COMPRESSED, and F_A_ENCRYPTED and preserves the rest. This mask - is used to to obtain all flags that are valid for setting. */ + is used to obtain all flags that are valid for setting. */ /* * The flag FILE_ATTR_DUP_FILENAME_INDEX_PRESENT is present in all * FILENAME_ATTR attributes but not in the STANDARD_INFORMATION diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index cd0be3f..a44b14c 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h @@ -47,7 +47,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); /* return (void *)__get_free_page(gfp_mask); */ } - if (likely(size >> PAGE_SHIFT < num_physpages)) + if (likely((size >> PAGE_SHIFT) < totalram_pages)) return __vmalloc(size, gfp_mask, PAGE_KERNEL); return NULL; } diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 0159607..31f25ce 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -28,6 +28,7 @@ ocfs2-objs := \ locks.o \ mmap.o \ namei.o \ + refcounttree.o \ resize.o \ slot_map.o \ suballoc.o \ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index ab513dd..38a42f5 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -49,10 +49,21 @@ #include "super.h" #include "uptodate.h" #include "xattr.h" +#include "refcounttree.h" #include "buffer_head_io.h" +enum ocfs2_contig_type { + CONTIG_NONE = 0, + CONTIG_LEFT, + CONTIG_RIGHT, + CONTIG_LEFTRIGHT, +}; +static enum ocfs2_contig_type + ocfs2_extent_rec_contig(struct super_block *sb, + struct ocfs2_extent_rec *ext, + struct ocfs2_extent_rec *insert_rec); /* * Operations for a specific extent tree type. * @@ -79,18 +90,30 @@ struct ocfs2_extent_tree_operations { * that value. new_clusters is the delta, and must be * added to the total. Required. 
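The deleted ntfs_file_writev()/ntfs_file_write() above duplicated what the generic do_sync_write() helper already does: run the async write path and, if the request queued, wait for the kiocb to complete, which is why the .write method can now point at do_sync_write directly. The pattern modeled in user space (illustrative names, completion is immediate in this toy):

#include <stddef.h>

struct async_op {
	int queued;
	long result;
};

#define OP_QUEUED (-100)   /* stand-in for -EIOCBQUEUED */

static long submit_write(struct async_op *op, size_t len)
{
	op->result = (long)len;   /* this model completes immediately... */
	op->queued = 1;           /* ...but still reports "queued" */
	return OP_QUEUED;
}

static long wait_for(struct async_op *op)
{
	return op->result;        /* wait_on_sync_kiocb() analogue */
}

static long sync_write(size_t len)
{
	struct async_op op = { 0, 0 };
	long ret = submit_write(&op, len);

	if (ret == OP_QUEUED)     /* queued: block until the result is in */
		ret = wait_for(&op);
	return ret;
}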
*/ - void (*eo_update_clusters)(struct inode *inode, - struct ocfs2_extent_tree *et, + void (*eo_update_clusters)(struct ocfs2_extent_tree *et, u32 new_clusters); /* + * If this extent tree is supported by an extent map, insert + * a record into the map. + */ + void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec); + + /* + * If this extent tree is supported by an extent map, truncate the + * map to clusters, + */ + void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et, + u32 clusters); + + /* * If ->eo_insert_check() exists, it is called before rec is * inserted into the extent tree. It is optional. */ - int (*eo_insert_check)(struct inode *inode, - struct ocfs2_extent_tree *et, + int (*eo_insert_check)(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); - int (*eo_sanity_check)(struct inode *inode, struct ocfs2_extent_tree *et); + int (*eo_sanity_check)(struct ocfs2_extent_tree *et); /* * -------------------------------------------------------------- @@ -109,8 +132,17 @@ struct ocfs2_extent_tree_operations { * it exists. If it does not, et->et_max_leaf_clusters is set * to 0 (unlimited). Optional. */ - void (*eo_fill_max_leaf_clusters)(struct inode *inode, - struct ocfs2_extent_tree *et); + void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et); + + /* + * ->eo_extent_contig test whether the 2 ocfs2_extent_rec + * are contiguous or not. Optional. Don't need to set it if use + * ocfs2_extent_rec as the tree leaf. + */ + enum ocfs2_contig_type + (*eo_extent_contig)(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *ext, + struct ocfs2_extent_rec *insert_rec); }; @@ -121,19 +153,22 @@ struct ocfs2_extent_tree_operations { static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et); static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 blkno); -static void ocfs2_dinode_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters); -static int ocfs2_dinode_insert_check(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec); +static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, + u32 clusters); +static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec); -static int ocfs2_dinode_sanity_check(struct inode *inode, - struct ocfs2_extent_tree *et); +static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et); static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et); static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = { .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk, .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk, .eo_update_clusters = ocfs2_dinode_update_clusters, + .eo_extent_map_insert = ocfs2_dinode_extent_map_insert, + .eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate, .eo_insert_check = ocfs2_dinode_insert_check, .eo_sanity_check = ocfs2_dinode_sanity_check, .eo_fill_root_el = ocfs2_dinode_fill_root_el, @@ -156,40 +191,53 @@ static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et) return le64_to_cpu(di->i_last_eb_blk); } -static void ocfs2_dinode_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { + struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); struct ocfs2_dinode 
*di = et->et_object; le32_add_cpu(&di->i_clusters, clusters); - spin_lock(&OCFS2_I(inode)->ip_lock); - OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters); - spin_unlock(&OCFS2_I(inode)->ip_lock); + spin_lock(&oi->ip_lock); + oi->ip_clusters = le32_to_cpu(di->i_clusters); + spin_unlock(&oi->ip_lock); } -static int ocfs2_dinode_insert_check(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec) +{ + struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; + + ocfs2_extent_map_insert_rec(inode, rec); +} + +static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et, + u32 clusters) +{ + struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode; + + ocfs2_extent_map_trunc(inode, clusters); +} + +static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci); + struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb); - BUG_ON(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL); + BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL); mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) && - (OCFS2_I(inode)->ip_clusters != - le32_to_cpu(rec->e_cpos)), + (oi->ip_clusters != le32_to_cpu(rec->e_cpos)), "Device %s, asking for sparse allocation: inode %llu, " "cpos %u, clusters %u\n", osb->dev_str, - (unsigned long long)OCFS2_I(inode)->ip_blkno, - rec->e_cpos, - OCFS2_I(inode)->ip_clusters); + (unsigned long long)oi->ip_blkno, + rec->e_cpos, oi->ip_clusters); return 0; } -static int ocfs2_dinode_sanity_check(struct inode *inode, - struct ocfs2_extent_tree *et) +static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dinode *di = et->et_object; @@ -229,8 +277,7 @@ static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et) return le64_to_cpu(vb->vb_xv->xr_last_eb_blk); } -static void ocfs2_xattr_value_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_value_buf *vb = et->et_object; @@ -252,12 +299,11 @@ static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et) et->et_root_el = &xb->xb_attrs.xb_root.xt_list; } -static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct inode *inode, - struct ocfs2_extent_tree *et) +static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et) { + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); et->et_max_leaf_clusters = - ocfs2_clusters_for_bytes(inode->i_sb, - OCFS2_MAX_XATTR_TREE_LEAF_SIZE); + ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE); } static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, @@ -277,8 +323,7 @@ static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) return le64_to_cpu(xt->xt_last_eb_blk); } -static void ocfs2_xattr_tree_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_xattr_block *xb = et->et_object; @@ -309,8 +354,7 @@ static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et) return le64_to_cpu(dx_root->dr_last_eb_blk); } -static void ocfs2_dx_root_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static void 
ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { struct ocfs2_dx_root_block *dx_root = et->et_object; @@ -318,8 +362,7 @@ static void ocfs2_dx_root_update_clusters(struct inode *inode, le32_add_cpu(&dx_root->dr_clusters, clusters); } -static int ocfs2_dx_root_sanity_check(struct inode *inode, - struct ocfs2_extent_tree *et) +static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et) { struct ocfs2_dx_root_block *dx_root = et->et_object; @@ -343,8 +386,54 @@ static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = { .eo_fill_root_el = ocfs2_dx_root_fill_root_el, }; +static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et) +{ + struct ocfs2_refcount_block *rb = et->et_object; + + et->et_root_el = &rb->rf_list; +} + +static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et, + u64 blkno) +{ + struct ocfs2_refcount_block *rb = et->et_object; + + rb->rf_last_eb_blk = cpu_to_le64(blkno); +} + +static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et) +{ + struct ocfs2_refcount_block *rb = et->et_object; + + return le64_to_cpu(rb->rf_last_eb_blk); +} + +static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et, + u32 clusters) +{ + struct ocfs2_refcount_block *rb = et->et_object; + + le32_add_cpu(&rb->rf_clusters, clusters); +} + +static enum ocfs2_contig_type +ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *ext, + struct ocfs2_extent_rec *insert_rec) +{ + return CONTIG_NONE; +} + +static struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = { + .eo_set_last_eb_blk = ocfs2_refcount_tree_set_last_eb_blk, + .eo_get_last_eb_blk = ocfs2_refcount_tree_get_last_eb_blk, + .eo_update_clusters = ocfs2_refcount_tree_update_clusters, + .eo_fill_root_el = ocfs2_refcount_tree_fill_root_el, + .eo_extent_contig = ocfs2_refcount_tree_extent_contig, +}; + static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh, ocfs2_journal_access_func access, void *obj, @@ -352,6 +441,7 @@ static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, { et->et_ops = ops; et->et_root_bh = bh; + et->et_ci = ci; et->et_root_journal_access = access; if (!obj) obj = (void *)bh->b_data; @@ -361,41 +451,49 @@ static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et, if (!et->et_ops->eo_fill_max_leaf_clusters) et->et_max_leaf_clusters = 0; else - et->et_ops->eo_fill_max_leaf_clusters(inode, et); + et->et_ops->eo_fill_max_leaf_clusters(et); } void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh) { - __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_di, + __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di, NULL, &ocfs2_dinode_et_ops); } void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh) { - __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_xb, + __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb, NULL, &ocfs2_xattr_tree_et_ops); } void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct ocfs2_xattr_value_buf *vb) { - __ocfs2_init_extent_tree(et, inode, vb->vb_bh, vb->vb_access, vb, + __ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb, 
&ocfs2_xattr_value_et_ops); } void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh) { - __ocfs2_init_extent_tree(et, inode, bh, ocfs2_journal_access_dr, + __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr, NULL, &ocfs2_dx_root_et_ops); } +void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et, + struct ocfs2_caching_info *ci, + struct buffer_head *bh) +{ + __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb, + NULL, &ocfs2_refcount_tree_et_ops); +} + static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et, u64 new_last_eb_blk) { @@ -407,78 +505,71 @@ static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et) return et->et_ops->eo_get_last_eb_blk(et); } -static inline void ocfs2_et_update_clusters(struct inode *inode, - struct ocfs2_extent_tree *et, +static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et, u32 clusters) { - et->et_ops->eo_update_clusters(inode, et, clusters); + et->et_ops->eo_update_clusters(et, clusters); +} + +static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec) +{ + if (et->et_ops->eo_extent_map_insert) + et->et_ops->eo_extent_map_insert(et, rec); +} + +static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et, + u32 clusters) +{ + if (et->et_ops->eo_extent_map_truncate) + et->et_ops->eo_extent_map_truncate(et, clusters); } static inline int ocfs2_et_root_journal_access(handle_t *handle, - struct inode *inode, struct ocfs2_extent_tree *et, int type) { - return et->et_root_journal_access(handle, inode, et->et_root_bh, + return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh, type); } -static inline int ocfs2_et_insert_check(struct inode *inode, - struct ocfs2_extent_tree *et, +static inline enum ocfs2_contig_type + ocfs2_et_extent_contig(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *rec, + struct ocfs2_extent_rec *insert_rec) +{ + if (et->et_ops->eo_extent_contig) + return et->et_ops->eo_extent_contig(et, rec, insert_rec); + + return ocfs2_extent_rec_contig( + ocfs2_metadata_cache_get_super(et->et_ci), + rec, insert_rec); +} + +static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *rec) { int ret = 0; if (et->et_ops->eo_insert_check) - ret = et->et_ops->eo_insert_check(inode, et, rec); + ret = et->et_ops->eo_insert_check(et, rec); return ret; } -static inline int ocfs2_et_sanity_check(struct inode *inode, - struct ocfs2_extent_tree *et) +static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et) { int ret = 0; if (et->et_ops->eo_sanity_check) - ret = et->et_ops->eo_sanity_check(inode, et); + ret = et->et_ops->eo_sanity_check(et); return ret; } static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc); static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb); - -/* - * Structures which describe a path through a btree, and functions to - * manipulate them. - * - * The idea here is to be as generic as possible with the tree - * manipulation code. 
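All per-root variation in this file now lives behind ocfs2_extent_tree_operations, and the refcount tree above shows the minimal wiring. As a hypothetical illustration (ocfs2_demo_block and the ocfs2_demo_* helpers are invented, not part of this patch), a new on-disk root would plug in the same way:

struct ocfs2_demo_block {			/* invented root format */
	struct ocfs2_extent_list	db_list;
	__le64				db_last_eb_blk;
	__le32				db_clusters;
};

static void ocfs2_demo_fill_root_el(struct ocfs2_extent_tree *et)
{
	struct ocfs2_demo_block *db = et->et_object;

	et->et_root_el = &db->db_list;
}

static void ocfs2_demo_set_last_eb_blk(struct ocfs2_extent_tree *et,
				       u64 blkno)
{
	struct ocfs2_demo_block *db = et->et_object;

	db->db_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_demo_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	struct ocfs2_demo_block *db = et->et_object;

	return le64_to_cpu(db->db_last_eb_blk);
}

static void ocfs2_demo_update_clusters(struct ocfs2_extent_tree *et,
				       u32 clusters)
{
	struct ocfs2_demo_block *db = et->et_object;

	le32_add_cpu(&db->db_clusters, clusters);
}

static struct ocfs2_extent_tree_operations ocfs2_demo_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_demo_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_demo_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_demo_update_clusters,
	.eo_fill_root_el	= ocfs2_demo_fill_root_el,
	/* eo_extent_contig left unset: ocfs2_extent_rec_contig()
	 * is the default, as the dinode and xattr trees rely on */
};

Note that every callback now takes only the extent tree; the owning inode, when one exists, is reachable through et->et_ci, which is the point of the conversion.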
- */ -struct ocfs2_path_item { - struct buffer_head *bh; - struct ocfs2_extent_list *el; -}; - -#define OCFS2_MAX_PATH_DEPTH 5 - -struct ocfs2_path { - int p_tree_depth; - ocfs2_journal_access_func p_root_access; - struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH]; -}; - -#define path_root_bh(_path) ((_path)->p_node[0].bh) -#define path_root_el(_path) ((_path)->p_node[0].el) -#define path_root_access(_path)((_path)->p_root_access) -#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh) -#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el) -#define path_num_items(_path) ((_path)->p_tree_depth + 1) - -static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path, - u32 cpos); -static void ocfs2_adjust_rightmost_records(struct inode *inode, - handle_t *handle, +static void ocfs2_adjust_rightmost_records(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec); /* @@ -486,7 +577,7 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode, * to build another path. Generally, this involves freeing the buffer * heads. */ -static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) +void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) { int i, start = 0, depth = 0; struct ocfs2_path_item *node; @@ -515,7 +606,7 @@ static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root) path->p_tree_depth = depth; } -static void ocfs2_free_path(struct ocfs2_path *path) +void ocfs2_free_path(struct ocfs2_path *path) { if (path) { ocfs2_reinit_path(path, 0); @@ -613,13 +704,13 @@ static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh, return path; } -static struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path) +struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path) { return ocfs2_new_path(path_root_bh(path), path_root_el(path), path_root_access(path)); } -static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et) +struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et) { return ocfs2_new_path(et->et_root_bh, et->et_root_el, et->et_root_journal_access); @@ -632,10 +723,10 @@ static struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et) * I don't like the way this function's name looks next to * ocfs2_journal_access_path(), but I don't have a better one. */ -static int ocfs2_path_bh_journal_access(handle_t *handle, - struct inode *inode, - struct ocfs2_path *path, - int idx) +int ocfs2_path_bh_journal_access(handle_t *handle, + struct ocfs2_caching_info *ci, + struct ocfs2_path *path, + int idx) { ocfs2_journal_access_func access = path_root_access(path); @@ -645,15 +736,16 @@ static int ocfs2_path_bh_journal_access(handle_t *handle, if (idx) access = ocfs2_journal_access_eb; - return access(handle, inode, path->p_node[idx].bh, + return access(handle, ci, path->p_node[idx].bh, OCFS2_JOURNAL_ACCESS_WRITE); } /* * Convenience function to journal all components in a path. 
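The path accessors deleted above (path_root_bh() through path_num_items()) are not gone: the helpers here lose their static at the same time, so struct ocfs2_path and its macros evidently move to a shared header for use by the new refcount code. A usage sketch (demo_dump_path is illustrative, not in the patch): a populated path runs from the root at p_node[0] down to the leaf at p_node[p_tree_depth].

static void demo_dump_path(struct ocfs2_path *path)
{
	int i;

	/* assumes the path was filled in by ocfs2_find_path() */
	for (i = 0; i < path_num_items(path); i++)
		printk(KERN_DEBUG "level %d: block %llu, depth %u\n", i,
		       (unsigned long long)path->p_node[i].bh->b_blocknr,
		       le16_to_cpu(path->p_node[i].el->l_tree_depth));

	/* path_leaf_el(path) is shorthand for the last entry's list */
}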
*/ -static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle, - struct ocfs2_path *path) +int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, + handle_t *handle, + struct ocfs2_path *path) { int i, ret = 0; @@ -661,7 +753,7 @@ static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle, goto out; for(i = 0; i < path_num_items(path); i++) { - ret = ocfs2_path_bh_journal_access(handle, inode, path, i); + ret = ocfs2_path_bh_journal_access(handle, ci, path, i); if (ret < 0) { mlog_errno(ret); goto out; @@ -702,17 +794,9 @@ int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster) return ret; } -enum ocfs2_contig_type { - CONTIG_NONE = 0, - CONTIG_LEFT, - CONTIG_RIGHT, - CONTIG_LEFTRIGHT, -}; - - /* * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and - * ocfs2_extent_contig only work properly against leaf nodes! + * ocfs2_extent_rec_contig only work properly against leaf nodes! */ static int ocfs2_block_extent_contig(struct super_block *sb, struct ocfs2_extent_rec *ext, @@ -738,9 +822,9 @@ static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left, } static enum ocfs2_contig_type - ocfs2_extent_contig(struct inode *inode, - struct ocfs2_extent_rec *ext, - struct ocfs2_extent_rec *insert_rec) + ocfs2_extent_rec_contig(struct super_block *sb, + struct ocfs2_extent_rec *ext, + struct ocfs2_extent_rec *insert_rec) { u64 blkno = le64_to_cpu(insert_rec->e_blkno); @@ -753,12 +837,12 @@ static enum ocfs2_contig_type return CONTIG_NONE; if (ocfs2_extents_adjacent(ext, insert_rec) && - ocfs2_block_extent_contig(inode->i_sb, ext, blkno)) + ocfs2_block_extent_contig(sb, ext, blkno)) return CONTIG_RIGHT; blkno = le64_to_cpu(ext->e_blkno); if (ocfs2_extents_adjacent(insert_rec, ext) && - ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno)) + ocfs2_block_extent_contig(sb, insert_rec, blkno)) return CONTIG_LEFT; return CONTIG_NONE; @@ -853,13 +937,13 @@ static int ocfs2_validate_extent_block(struct super_block *sb, return 0; } -int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, +int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno, struct buffer_head **bh) { int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, eb_blkno, &tmp, + rc = ocfs2_read_block(ci, eb_blkno, &tmp, ocfs2_validate_extent_block); /* If ocfs2_read_block() got us a new bh, pass it up. */ @@ -874,7 +958,6 @@ int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, * How many free extents have we got before we need more meta data? 
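ocfs2_extent_contig() becomes ocfs2_extent_rec_contig() and needs only a super_block, because the test is pure arithmetic on two leaf records. In sketch form (demo_recs_contig is illustrative; the real code splits this across ocfs2_extents_adjacent() and ocfs2_block_extent_contig()), "left contiguous with right" means the cluster range and the block range both butt up exactly:

static int demo_recs_contig(struct super_block *sb,
			    struct ocfs2_extent_rec *left,
			    struct ocfs2_extent_rec *right)
{
	u32 cpos_end = le32_to_cpu(left->e_cpos) +
		       le16_to_cpu(left->e_leaf_clusters);
	u64 blk_end = le64_to_cpu(left->e_blkno) +
		      ocfs2_clusters_to_blocks(sb,
				le16_to_cpu(left->e_leaf_clusters));

	/* logical clusters and physical blocks must both be adjacent */
	return cpos_end == le32_to_cpu(right->e_cpos) &&
	       blk_end == le64_to_cpu(right->e_blkno);
}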
*/ int ocfs2_num_free_extents(struct ocfs2_super *osb, - struct inode *inode, struct ocfs2_extent_tree *et) { int retval; @@ -889,7 +972,8 @@ int ocfs2_num_free_extents(struct ocfs2_super *osb, last_eb_blk = ocfs2_et_get_last_eb_blk(et); if (last_eb_blk) { - retval = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh); + retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk, + &eb_bh); if (retval < 0) { mlog_errno(retval); goto bail; @@ -913,9 +997,8 @@ bail: * sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and * l_count for you */ -static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, - handle_t *handle, - struct inode *inode, +static int ocfs2_create_new_meta_bhs(handle_t *handle, + struct ocfs2_extent_tree *et, int wanted, struct ocfs2_alloc_context *meta_ac, struct buffer_head *bhs[]) @@ -924,6 +1007,8 @@ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, u16 suballoc_bit_start; u32 num_got; u64 first_blkno; + struct ocfs2_super *osb = + OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); struct ocfs2_extent_block *eb; mlog_entry_void(); @@ -949,9 +1034,10 @@ static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, mlog_errno(status); goto bail; } - ocfs2_set_new_buffer_uptodate(inode, bhs[i]); + ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]); - status = ocfs2_journal_access_eb(handle, inode, bhs[i], + status = ocfs2_journal_access_eb(handle, et->et_ci, + bhs[i], OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1023,7 +1109,6 @@ static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el) * extent block's rightmost record. */ static int ocfs2_adjust_rightmost_branch(handle_t *handle, - struct inode *inode, struct ocfs2_extent_tree *et) { int status; @@ -1037,7 +1122,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle, return status; } - status = ocfs2_find_path(inode, path, UINT_MAX); + status = ocfs2_find_path(et->et_ci, path, UINT_MAX); if (status < 0) { mlog_errno(status); goto out; @@ -1050,7 +1135,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle, goto out; } - status = ocfs2_journal_access_path(inode, handle, path); + status = ocfs2_journal_access_path(et->et_ci, handle, path); if (status < 0) { mlog_errno(status); goto out; @@ -1059,7 +1144,7 @@ static int ocfs2_adjust_rightmost_branch(handle_t *handle, el = path_leaf_el(path); rec = &el->l_recs[le32_to_cpu(el->l_next_free_rec) - 1]; - ocfs2_adjust_rightmost_records(inode, handle, path, rec); + ocfs2_adjust_rightmost_records(handle, et, path, rec); out: ocfs2_free_path(path); @@ -1068,7 +1153,7 @@ out: /* * Add an entire tree branch to our inode. eb_bh is the extent block - * to start at, if we don't want to start the branch at the dinode + * to start at, if we don't want to start the branch at the root * structure. * * last_eb_bh is required as we have to update it's next_leaf pointer @@ -1077,9 +1162,7 @@ out: * the new branch will be 'empty' in the sense that every block will * contain a single record with cluster count == 0. 
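ocfs2_num_free_extents() now works purely through et->et_ci: read the rightmost leaf named by the last_eb_blk callback (or fall back to the root list when the tree has no leaves yet) and report its unused record slots. The arithmetic at the bottom of it is just:

/* free record slots in the rightmost extent list (sketch) */
retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);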
*/ -static int ocfs2_add_branch(struct ocfs2_super *osb, - handle_t *handle, - struct inode *inode, +static int ocfs2_add_branch(handle_t *handle, struct ocfs2_extent_tree *et, struct buffer_head *eb_bh, struct buffer_head **last_eb_bh, @@ -1123,7 +1206,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, if (root_end > new_cpos) { mlog(0, "adjust the cluster end from %u to %u\n", root_end, new_cpos); - status = ocfs2_adjust_rightmost_branch(handle, inode, et); + status = ocfs2_adjust_rightmost_branch(handle, et); if (status) { mlog_errno(status); goto bail; @@ -1139,7 +1222,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, goto bail; } - status = ocfs2_create_new_meta_bhs(osb, handle, inode, new_blocks, + status = ocfs2_create_new_meta_bhs(handle, et, new_blocks, meta_ac, new_eb_bhs); if (status < 0) { mlog_errno(status); @@ -1161,7 +1244,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb_el = &eb->h_list; - status = ocfs2_journal_access_eb(handle, inode, bh, + status = ocfs2_journal_access_eb(handle, et->et_ci, bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1201,20 +1284,20 @@ static int ocfs2_add_branch(struct ocfs2_super *osb, * journal_dirty erroring as it won't unless we've aborted the * handle (in which case we would never be here) so reserving * the write with journal_access is all we need to do. */ - status = ocfs2_journal_access_eb(handle, inode, *last_eb_bh, + status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } - status = ocfs2_et_root_journal_access(handle, inode, et, + status = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; } if (eb_bh) { - status = ocfs2_journal_access_eb(handle, inode, eb_bh, + status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1274,9 +1357,7 @@ bail: * returns back the new extent block so you can add a branch to it * after this call. */ -static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, - handle_t *handle, - struct inode *inode, +static int ocfs2_shift_tree_depth(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_alloc_context *meta_ac, struct buffer_head **ret_new_eb_bh) @@ -1290,7 +1371,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, mlog_entry_void(); - status = ocfs2_create_new_meta_bhs(osb, handle, inode, 1, meta_ac, + status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac, &new_eb_bh); if (status < 0) { mlog_errno(status); @@ -1304,7 +1385,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, eb_el = &eb->h_list; root_el = et->et_root_el; - status = ocfs2_journal_access_eb(handle, inode, new_eb_bh, + status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1323,7 +1404,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, goto bail; } - status = ocfs2_et_root_journal_access(handle, inode, et, + status = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1379,9 +1460,7 @@ bail: * * return status < 0 indicates an error. 
*/ -static int ocfs2_find_branch_target(struct ocfs2_super *osb, - struct inode *inode, - struct ocfs2_extent_tree *et, +static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et, struct buffer_head **target_bh) { int status = 0, i; @@ -1399,19 +1478,21 @@ static int ocfs2_find_branch_target(struct ocfs2_super *osb, while(le16_to_cpu(el->l_tree_depth) > 1) { if (le16_to_cpu(el->l_next_free_rec) == 0) { - ocfs2_error(inode->i_sb, "Dinode %llu has empty " + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu has empty " "extent list (next_free_rec == 0)", - (unsigned long long)OCFS2_I(inode)->ip_blkno); + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); status = -EIO; goto bail; } i = le16_to_cpu(el->l_next_free_rec) - 1; blkno = le64_to_cpu(el->l_recs[i].e_blkno); if (!blkno) { - ocfs2_error(inode->i_sb, "Dinode %llu has extent " + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu has extent " "list where extent # %d has no physical " "block start", - (unsigned long long)OCFS2_I(inode)->ip_blkno, i); + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i); status = -EIO; goto bail; } @@ -1419,7 +1500,7 @@ static int ocfs2_find_branch_target(struct ocfs2_super *osb, brelse(bh); bh = NULL; - status = ocfs2_read_extent_block(inode, blkno, &bh); + status = ocfs2_read_extent_block(et->et_ci, blkno, &bh); if (status < 0) { mlog_errno(status); goto bail; @@ -1460,20 +1541,18 @@ bail: * * *last_eb_bh will be updated by ocfs2_add_branch(). */ -static int ocfs2_grow_tree(struct inode *inode, handle_t *handle, - struct ocfs2_extent_tree *et, int *final_depth, - struct buffer_head **last_eb_bh, +static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et, + int *final_depth, struct buffer_head **last_eb_bh, struct ocfs2_alloc_context *meta_ac) { int ret, shift; struct ocfs2_extent_list *el = et->et_root_el; int depth = le16_to_cpu(el->l_tree_depth); - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *bh = NULL; BUG_ON(meta_ac == NULL); - shift = ocfs2_find_branch_target(osb, inode, et, &bh); + shift = ocfs2_find_branch_target(et, &bh); if (shift < 0) { ret = shift; mlog_errno(ret); @@ -1490,8 +1569,7 @@ static int ocfs2_grow_tree(struct inode *inode, handle_t *handle, /* ocfs2_shift_tree_depth will return us a buffer with * the new extent block (so we can pass that to * ocfs2_add_branch). */ - ret = ocfs2_shift_tree_depth(osb, handle, inode, et, - meta_ac, &bh); + ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -1517,7 +1595,7 @@ static int ocfs2_grow_tree(struct inode *inode, handle_t *handle, /* call ocfs2_add_branch to add the final part of the tree with * the new data. */ mlog(0, "add branch. bh = %p\n", bh); - ret = ocfs2_add_branch(osb, handle, inode, et, bh, last_eb_bh, + ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, meta_ac); if (ret < 0) { mlog_errno(ret); @@ -1687,7 +1765,7 @@ set_and_inc: * * The array index of the subtree root is passed back. */ -static int ocfs2_find_subtree_root(struct inode *inode, +static int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et, struct ocfs2_path *left, struct ocfs2_path *right) { @@ -1705,10 +1783,10 @@ static int ocfs2_find_subtree_root(struct inode *inode, * The caller didn't pass two adjacent paths. 
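Condensed control flow of ocfs2_grow_tree() after the conversion (a sketch; error handling and logging elided, and the depth-1 special case stated from memory of this code, not from the hunks shown). The target search decides everything: if some interior node still has a free record, a branch is grafted there; if every level is full, the tree gains a level first.

shift = ocfs2_find_branch_target(et, &bh);	/* lowest level with room */
depth = le16_to_cpu(et->et_root_el->l_tree_depth);
if (shift == depth) {
	/* no room anywhere: grow a new root level above the old one */
	ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
	depth++;
	if (depth == 1)
		goto done;	/* a depth-0 root that just grew has room */
}
/* graft a branch of empty leaves at bh; *last_eb_bh is updated */
ret = ocfs2_add_branch(handle, et, bh, last_eb_bh, meta_ac);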
*/ mlog_bug_on_msg(i > left->p_tree_depth, - "Inode %lu, left depth %u, right depth %u\n" + "Owner %llu, left depth %u, right depth %u\n" "left leaf blk %llu, right leaf blk %llu\n", - inode->i_ino, left->p_tree_depth, - right->p_tree_depth, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + left->p_tree_depth, right->p_tree_depth, (unsigned long long)path_leaf_bh(left)->b_blocknr, (unsigned long long)path_leaf_bh(right)->b_blocknr); } while (left->p_node[i].bh->b_blocknr == @@ -1725,7 +1803,7 @@ typedef void (path_insert_t)(void *, struct buffer_head *); * This code can be called with a cpos larger than the tree, in which * case it will return the rightmost path. */ -static int __ocfs2_find_path(struct inode *inode, +static int __ocfs2_find_path(struct ocfs2_caching_info *ci, struct ocfs2_extent_list *root_el, u32 cpos, path_insert_t *func, void *data) { @@ -1736,15 +1814,14 @@ static int __ocfs2_find_path(struct inode *inode, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; - struct ocfs2_inode_info *oi = OCFS2_I(inode); el = root_el; while (el->l_tree_depth) { if (le16_to_cpu(el->l_next_free_rec) == 0) { - ocfs2_error(inode->i_sb, - "Inode %llu has empty extent list at " + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has empty extent list at " "depth %u\n", - (unsigned long long)oi->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(ci), le16_to_cpu(el->l_tree_depth)); ret = -EROFS; goto out; @@ -1767,10 +1844,10 @@ static int __ocfs2_find_path(struct inode *inode, blkno = le64_to_cpu(el->l_recs[i].e_blkno); if (blkno == 0) { - ocfs2_error(inode->i_sb, - "Inode %llu has bad blkno in extent list " + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has bad blkno in extent list " "at depth %u (index %d)\n", - (unsigned long long)oi->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(ci), le16_to_cpu(el->l_tree_depth), i); ret = -EROFS; goto out; @@ -1778,7 +1855,7 @@ static int __ocfs2_find_path(struct inode *inode, brelse(bh); bh = NULL; - ret = ocfs2_read_extent_block(inode, blkno, &bh); + ret = ocfs2_read_extent_block(ci, blkno, &bh); if (ret) { mlog_errno(ret); goto out; @@ -1789,10 +1866,10 @@ static int __ocfs2_find_path(struct inode *inode, if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { - ocfs2_error(inode->i_sb, - "Inode %llu has bad count in extent list " + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has bad count in extent list " "at block %llu (next free=%u, count=%u)\n", - (unsigned long long)oi->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)bh->b_blocknr, le16_to_cpu(el->l_next_free_rec), le16_to_cpu(el->l_count)); @@ -1836,14 +1913,14 @@ static void find_path_ins(void *data, struct buffer_head *bh) ocfs2_path_insert_eb(fp->path, fp->index, bh); fp->index++; } -static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path, - u32 cpos) +int ocfs2_find_path(struct ocfs2_caching_info *ci, + struct ocfs2_path *path, u32 cpos) { struct find_path_data data; data.index = 1; data.path = path; - return __ocfs2_find_path(inode, path_root_el(path), cpos, + return __ocfs2_find_path(ci, path_root_el(path), cpos, find_path_ins, &data); } @@ -1868,13 +1945,14 @@ static void find_leaf_ins(void *data, struct buffer_head *bh) * * This function doesn't handle non btree extent lists. 
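__ocfs2_find_path() distrusts every interior node it descends through, and all three corruption checks now blame the generic "Owner %llu" (the metadata cache owner) instead of an inode. In sketch form, the checks are:

if (le16_to_cpu(el->l_next_free_rec) == 0)	/* empty interior list */
	return -EROFS;
if (blkno == 0)					/* record with no block */
	return -EROFS;
if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count))
	return -EROFS;				/* used count > capacity */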
*/ -int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el, - u32 cpos, struct buffer_head **leaf_bh) +int ocfs2_find_leaf(struct ocfs2_caching_info *ci, + struct ocfs2_extent_list *root_el, u32 cpos, + struct buffer_head **leaf_bh) { int ret; struct buffer_head *bh = NULL; - ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh); + ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh); if (ret) { mlog_errno(ret); goto out; @@ -1980,7 +2058,7 @@ static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el, * - When we've adjusted the last extent record in the left path leaf and the * 1st extent record in the right path leaf during cross extent block merge. */ -static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle, +static void ocfs2_complete_edge_insert(handle_t *handle, struct ocfs2_path *left_path, struct ocfs2_path *right_path, int subtree_index) @@ -2058,8 +2136,8 @@ static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle, mlog_errno(ret); } -static int ocfs2_rotate_subtree_right(struct inode *inode, - handle_t *handle, +static int ocfs2_rotate_subtree_right(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, int subtree_index) @@ -2075,10 +2153,10 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, left_el = path_leaf_el(left_path); if (left_el->l_next_free_rec != left_el->l_count) { - ocfs2_error(inode->i_sb, + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), "Inode %llu has non-full interior leaf node %llu" "(next free = %u)", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), (unsigned long long)left_leaf_bh->b_blocknr, le16_to_cpu(left_el->l_next_free_rec)); return -EROFS; @@ -2094,7 +2172,7 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); @@ -2102,14 +2180,14 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, } for(i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); @@ -2123,7 +2201,7 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, /* This is a code error, not a disk corruption. 
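The descent is parameterized: __ocfs2_find_path() hands every buffer it visits to a path_insert_t callback, which is how one walker serves both ocfs2_find_path() (keep the whole path) and ocfs2_find_leaf() (keep just the leaf). A hypothetical third user, invented here for illustration, could count levels without retaining any references:

static void demo_count_ins(void *data, struct buffer_head *bh)
{
	/* no get_bh(): this callback keeps nothing */
	(*(int *)data)++;
}

static int demo_count_levels(struct ocfs2_caching_info *ci,
			     struct ocfs2_extent_list *root_el, u32 cpos)
{
	int levels = 0;
	int ret;

	ret = __ocfs2_find_path(ci, root_el, cpos, demo_count_ins, &levels);
	return ret ? ret : levels;
}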
*/ mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails " "because rightmost leaf block %llu is empty\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), (unsigned long long)right_leaf_bh->b_blocknr); ocfs2_create_empty_extent(right_el); @@ -2157,8 +2235,8 @@ static int ocfs2_rotate_subtree_right(struct inode *inode, goto out; } - ocfs2_complete_edge_insert(inode, handle, left_path, right_path, - subtree_index); + ocfs2_complete_edge_insert(handle, left_path, right_path, + subtree_index); out: return ret; @@ -2248,10 +2326,18 @@ static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth, int op_credits, struct ocfs2_path *path) { + int ret; int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits; - if (handle->h_buffer_credits < credits) - return ocfs2_extend_trans(handle, credits); + if (handle->h_buffer_credits < credits) { + ret = ocfs2_extend_trans(handle, + credits - handle->h_buffer_credits); + if (ret) + return ret; + + if (unlikely(handle->h_buffer_credits < credits)) + return ocfs2_extend_trans(handle, credits); + } return 0; } @@ -2321,8 +2407,8 @@ static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) * *ret_left_path will contain a valid path which can be passed to * ocfs2_insert_path(). */ -static int ocfs2_rotate_tree_right(struct inode *inode, - handle_t *handle, +static int ocfs2_rotate_tree_right(handle_t *handle, + struct ocfs2_extent_tree *et, enum ocfs2_split_type split, u32 insert_cpos, struct ocfs2_path *right_path, @@ -2331,6 +2417,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, int ret, start, orig_credits = handle->h_buffer_credits; u32 cpos; struct ocfs2_path *left_path = NULL; + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); *ret_left_path = NULL; @@ -2341,7 +2428,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, goto out; } - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos); + ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos); if (ret) { mlog_errno(ret); goto out; @@ -2379,7 +2466,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, mlog(0, "Rotating a tree: ins. 
cpos: %u, left path cpos: %u\n", insert_cpos, cpos); - ret = ocfs2_find_path(inode, left_path, cpos); + ret = ocfs2_find_path(et->et_ci, left_path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -2387,10 +2474,11 @@ static int ocfs2_rotate_tree_right(struct inode *inode, mlog_bug_on_msg(path_leaf_bh(left_path) == path_leaf_bh(right_path), - "Inode %lu: error during insert of %u " + "Owner %llu: error during insert of %u " "(left path cpos %u) results in two identical " "paths ending at %llu\n", - inode->i_ino, insert_cpos, cpos, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + insert_cpos, cpos, (unsigned long long) path_leaf_bh(left_path)->b_blocknr); @@ -2416,7 +2504,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, goto out_ret_path; } - start = ocfs2_find_subtree_root(inode, left_path, right_path); + start = ocfs2_find_subtree_root(et, left_path, right_path); mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", start, @@ -2430,7 +2518,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, goto out; } - ret = ocfs2_rotate_subtree_right(inode, handle, left_path, + ret = ocfs2_rotate_subtree_right(handle, et, left_path, right_path, start); if (ret) { mlog_errno(ret); @@ -2462,8 +2550,7 @@ static int ocfs2_rotate_tree_right(struct inode *inode, */ ocfs2_mv_path(right_path, left_path); - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, - &cpos); + ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos); if (ret) { mlog_errno(ret); goto out; @@ -2477,7 +2564,8 @@ out_ret_path: return ret; } -static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, +static int ocfs2_update_edge_lengths(handle_t *handle, + struct ocfs2_extent_tree *et, int subtree_index, struct ocfs2_path *path) { int i, idx, ret; @@ -2502,7 +2590,7 @@ static int ocfs2_update_edge_lengths(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_journal_access_path(inode, handle, path); + ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); goto out; @@ -2532,7 +2620,8 @@ out: return ret; } -static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, +static void ocfs2_unlink_path(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_cached_dealloc_ctxt *dealloc, struct ocfs2_path *path, int unlink_start) { @@ -2554,12 +2643,12 @@ static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, mlog(ML_ERROR, "Inode %llu, attempted to remove extent block " "%llu with %u records\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), (unsigned long long)le64_to_cpu(eb->h_blkno), le16_to_cpu(el->l_next_free_rec)); ocfs2_journal_dirty(handle, bh); - ocfs2_remove_from_cache(inode, bh); + ocfs2_remove_from_cache(et->et_ci, bh); continue; } @@ -2572,11 +2661,12 @@ static void ocfs2_unlink_path(struct inode *inode, handle_t *handle, if (ret) mlog_errno(ret); - ocfs2_remove_from_cache(inode, bh); + ocfs2_remove_from_cache(et->et_ci, bh); } } -static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle, +static void ocfs2_unlink_subtree(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, int subtree_index, @@ -2607,17 +2697,17 @@ static void ocfs2_unlink_subtree(struct inode *inode, handle_t *handle, ocfs2_journal_dirty(handle, root_bh); ocfs2_journal_dirty(handle, path_leaf_bh(left_path)); - ocfs2_unlink_path(inode, handle, dealloc, right_path, + ocfs2_unlink_path(handle, et, 
dealloc, right_path, subtree_index + 1); } -static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, +static int ocfs2_rotate_subtree_left(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, int subtree_index, struct ocfs2_cached_dealloc_ctxt *dealloc, - int *deleted, - struct ocfs2_extent_tree *et) + int *deleted) { int ret, i, del_right_subtree = 0, right_has_empty = 0; struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path); @@ -2653,7 +2743,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, return -EAGAIN; if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) { - ret = ocfs2_journal_access_eb(handle, inode, + ret = ocfs2_journal_access_eb(handle, et->et_ci, path_leaf_bh(right_path), OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { @@ -2672,7 +2762,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, * We have to update i_last_eb_blk during the meta * data delete. */ - ret = ocfs2_et_root_journal_access(handle, inode, et, + ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -2688,7 +2778,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, */ BUG_ON(right_has_empty && !del_right_subtree); - ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); @@ -2696,14 +2786,14 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, } for(i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); @@ -2740,9 +2830,9 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, mlog_errno(ret); if (del_right_subtree) { - ocfs2_unlink_subtree(inode, handle, left_path, right_path, + ocfs2_unlink_subtree(handle, et, left_path, right_path, subtree_index, dealloc); - ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, + ret = ocfs2_update_edge_lengths(handle, et, subtree_index, left_path); if (ret) { mlog_errno(ret); @@ -2766,7 +2856,7 @@ static int ocfs2_rotate_subtree_left(struct inode *inode, handle_t *handle, *deleted = 1; } else - ocfs2_complete_edge_insert(inode, handle, left_path, right_path, + ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); out: @@ -2852,8 +2942,8 @@ out: return ret; } -static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, - handle_t *handle, +static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path) { int ret; @@ -2863,7 +2953,7 @@ static int ocfs2_rotate_rightmost_leaf_left(struct inode *inode, if (!ocfs2_is_empty_extent(&el->l_recs[0])) return 0; - ret = ocfs2_path_bh_journal_access(handle, inode, path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path, path_num_items(path) - 1); if (ret) { mlog_errno(ret); @@ -2880,24 +2970,24 @@ out: return ret; } -static int __ocfs2_rotate_tree_left(struct inode *inode, - handle_t *handle, int orig_credits, +static int __ocfs2_rotate_tree_left(handle_t *handle, + struct ocfs2_extent_tree *et, + int orig_credits, struct ocfs2_path *path, 
struct ocfs2_cached_dealloc_ctxt *dealloc, - struct ocfs2_path **empty_extent_path, - struct ocfs2_extent_tree *et) + struct ocfs2_path **empty_extent_path) { int ret, subtree_root, deleted; u32 right_cpos; struct ocfs2_path *left_path = NULL; struct ocfs2_path *right_path = NULL; + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); BUG_ON(!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0]))); *empty_extent_path = NULL; - ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, path, - &right_cpos); + ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos); if (ret) { mlog_errno(ret); goto out; @@ -2920,13 +3010,13 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, } while (right_cpos) { - ret = ocfs2_find_path(inode, right_path, right_cpos); + ret = ocfs2_find_path(et->et_ci, right_path, right_cpos); if (ret) { mlog_errno(ret); goto out; } - subtree_root = ocfs2_find_subtree_root(inode, left_path, + subtree_root = ocfs2_find_subtree_root(et, left_path, right_path); mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n", @@ -2946,16 +3036,16 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, * Caller might still want to make changes to the * tree root, so re-add it to the journal here. */ - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, 0); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_rotate_subtree_left(inode, handle, left_path, + ret = ocfs2_rotate_subtree_left(handle, et, left_path, right_path, subtree_root, - dealloc, &deleted, et); + dealloc, &deleted); if (ret == -EAGAIN) { /* * The rotation has to temporarily stop due to @@ -2982,7 +3072,7 @@ static int __ocfs2_rotate_tree_left(struct inode *inode, ocfs2_mv_path(left_path, right_path); - ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, + ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &right_cpos); if (ret) { mlog_errno(ret); @@ -2997,10 +3087,10 @@ out: return ret; } -static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, +static int ocfs2_remove_rightmost_path(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, - struct ocfs2_cached_dealloc_ctxt *dealloc, - struct ocfs2_extent_tree *et) + struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret, subtree_index; u32 cpos; @@ -3009,7 +3099,7 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, struct ocfs2_extent_list *el; - ret = ocfs2_et_sanity_check(inode, et); + ret = ocfs2_et_sanity_check(et); if (ret) goto out; /* @@ -3024,13 +3114,14 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_journal_access_path(inode, handle, path); + ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos); + ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci), + path, &cpos); if (ret) { mlog_errno(ret); goto out; @@ -3048,23 +3139,23 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_find_path(inode, left_path, cpos); + ret = ocfs2_find_path(et->et_ci, left_path, cpos); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access_path(inode, handle, left_path); + ret = ocfs2_journal_access_path(et->et_ci, handle, left_path); if (ret) { mlog_errno(ret); goto out; } - subtree_index = ocfs2_find_subtree_root(inode, left_path, path); + subtree_index = 
ocfs2_find_subtree_root(et, left_path, path); - ocfs2_unlink_subtree(inode, handle, left_path, path, + ocfs2_unlink_subtree(handle, et, left_path, path, subtree_index, dealloc); - ret = ocfs2_update_edge_lengths(inode, handle, subtree_index, + ret = ocfs2_update_edge_lengths(handle, et, subtree_index, left_path); if (ret) { mlog_errno(ret); @@ -3078,10 +3169,10 @@ static int ocfs2_remove_rightmost_path(struct inode *inode, handle_t *handle, * 'path' is also the leftmost path which * means it must be the only one. This gets * handled differently because we want to - * revert the inode back to having extents + * revert the root back to having extents * in-line. */ - ocfs2_unlink_path(inode, handle, dealloc, path, 1); + ocfs2_unlink_path(handle, et, dealloc, path, 1); el = et->et_root_el; el->l_tree_depth = 0; @@ -3114,10 +3205,10 @@ out: * the rightmost tree leaf record is removed so the caller is * responsible for detecting and correcting that. */ -static int ocfs2_rotate_tree_left(struct inode *inode, handle_t *handle, +static int ocfs2_rotate_tree_left(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, - struct ocfs2_cached_dealloc_ctxt *dealloc, - struct ocfs2_extent_tree *et) + struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret, orig_credits = handle->h_buffer_credits; struct ocfs2_path *tmp_path = NULL, *restart_path = NULL; @@ -3134,8 +3225,7 @@ rightmost_no_delete: * Inline extents. This is trivially handled, so do * it up front. */ - ret = ocfs2_rotate_rightmost_leaf_left(inode, handle, - path); + ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path); if (ret) mlog_errno(ret); goto out; @@ -3151,7 +3241,7 @@ rightmost_no_delete: * * 1) is handled via ocfs2_rotate_rightmost_leaf_left() * 2a) we need the left branch so that we can update it with the unlink - * 2b) we need to bring the inode back to inline extents. + * 2b) we need to bring the root back to inline extents. */ eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; @@ -3167,9 +3257,9 @@ rightmost_no_delete: if (le16_to_cpu(el->l_next_free_rec) == 0) { ret = -EIO; - ocfs2_error(inode->i_sb, - "Inode %llu has empty extent block at %llu", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu has empty extent block at %llu", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), (unsigned long long)le64_to_cpu(eb->h_blkno)); goto out; } @@ -3183,8 +3273,8 @@ rightmost_no_delete: * nonempty list. */ - ret = ocfs2_remove_rightmost_path(inode, handle, path, - dealloc, et); + ret = ocfs2_remove_rightmost_path(handle, et, path, + dealloc); if (ret) mlog_errno(ret); goto out; @@ -3195,8 +3285,8 @@ rightmost_no_delete: * and restarting from there. 
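The restart machinery described in the comment above is driven by the try_rotate loop that follows below. Condensed (a sketch; mlog calls and cleanup elided), with -EAGAIN treated as "stopped early, resume from restart_path":

ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
			       dealloc, &restart_path);
if (ret && ret != -EAGAIN)
	goto out;

while (restart_path) {
	tmp_path = restart_path;
	restart_path = NULL;

	ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
				       tmp_path, dealloc, &restart_path);
	if (ret && ret != -EAGAIN)
		goto out;

	ocfs2_free_path(tmp_path);
	tmp_path = NULL;
}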
*/ try_rotate: - ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, path, - dealloc, &restart_path, et); + ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path, + dealloc, &restart_path); if (ret && ret != -EAGAIN) { mlog_errno(ret); goto out; @@ -3206,9 +3296,9 @@ try_rotate: tmp_path = restart_path; restart_path = NULL; - ret = __ocfs2_rotate_tree_left(inode, handle, orig_credits, + ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, tmp_path, dealloc, - &restart_path, et); + &restart_path); if (ret && ret != -EAGAIN) { mlog_errno(ret); goto out; @@ -3259,7 +3349,7 @@ static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el, } } -static int ocfs2_get_right_path(struct inode *inode, +static int ocfs2_get_right_path(struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path **ret_right_path) { @@ -3276,8 +3366,8 @@ static int ocfs2_get_right_path(struct inode *inode, left_el = path_leaf_el(left_path); BUG_ON(left_el->l_next_free_rec != left_el->l_count); - ret = ocfs2_find_cpos_for_right_leaf(inode->i_sb, left_path, - &right_cpos); + ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci), + left_path, &right_cpos); if (ret) { mlog_errno(ret); goto out; @@ -3293,7 +3383,7 @@ static int ocfs2_get_right_path(struct inode *inode, goto out; } - ret = ocfs2_find_path(inode, right_path, right_cpos); + ret = ocfs2_find_path(et->et_ci, right_path, right_cpos); if (ret) { mlog_errno(ret); goto out; @@ -3313,9 +3403,9 @@ out: * For index == l_count - 1, the "next" means the 1st extent rec of the * next extent block. */ -static int ocfs2_merge_rec_right(struct inode *inode, - struct ocfs2_path *left_path, +static int ocfs2_merge_rec_right(struct ocfs2_path *left_path, handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *split_rec, int index) { @@ -3336,7 +3426,7 @@ static int ocfs2_merge_rec_right(struct inode *inode, if (index == le16_to_cpu(el->l_next_free_rec) - 1 && le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) { /* we meet with a cross extent block merge. 
*/ - ret = ocfs2_get_right_path(inode, left_path, &right_path); + ret = ocfs2_get_right_path(et, left_path, &right_path); if (ret) { mlog_errno(ret); goto out; @@ -3355,8 +3445,8 @@ static int ocfs2_merge_rec_right(struct inode *inode, le16_to_cpu(left_rec->e_leaf_clusters) != le32_to_cpu(right_rec->e_cpos)); - subtree_index = ocfs2_find_subtree_root(inode, - left_path, right_path); + subtree_index = ocfs2_find_subtree_root(et, left_path, + right_path); ret = ocfs2_extend_rotate_transaction(handle, subtree_index, handle->h_buffer_credits, @@ -3369,7 +3459,7 @@ static int ocfs2_merge_rec_right(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); @@ -3378,14 +3468,14 @@ static int ocfs2_merge_rec_right(struct inode *inode, for (i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); @@ -3398,7 +3488,7 @@ static int ocfs2_merge_rec_right(struct inode *inode, right_rec = &el->l_recs[index + 1]; } - ret = ocfs2_path_bh_journal_access(handle, inode, left_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, path_num_items(left_path) - 1); if (ret) { mlog_errno(ret); @@ -3409,7 +3499,8 @@ static int ocfs2_merge_rec_right(struct inode *inode, le32_add_cpu(&right_rec->e_cpos, -split_clusters); le64_add_cpu(&right_rec->e_blkno, - -ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); + -ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci), + split_clusters)); le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters); ocfs2_cleanup_merge(el, index); @@ -3423,8 +3514,8 @@ static int ocfs2_merge_rec_right(struct inode *inode, if (ret) mlog_errno(ret); - ocfs2_complete_edge_insert(inode, handle, left_path, - right_path, subtree_index); + ocfs2_complete_edge_insert(handle, left_path, right_path, + subtree_index); } out: if (right_path) @@ -3432,7 +3523,7 @@ out: return ret; } -static int ocfs2_get_left_path(struct inode *inode, +static int ocfs2_get_left_path(struct ocfs2_extent_tree *et, struct ocfs2_path *right_path, struct ocfs2_path **ret_left_path) { @@ -3445,7 +3536,7 @@ static int ocfs2_get_left_path(struct inode *inode, /* This function shouldn't be called for non-trees. */ BUG_ON(right_path->p_tree_depth == 0); - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, + ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci), right_path, &left_cpos); if (ret) { mlog_errno(ret); @@ -3462,7 +3553,7 @@ static int ocfs2_get_left_path(struct inode *inode, goto out; } - ret = ocfs2_find_path(inode, left_path, left_cpos); + ret = ocfs2_find_path(et->et_ci, left_path, left_cpos); if (ret) { mlog_errno(ret); goto out; @@ -3485,12 +3576,11 @@ out: * remove the rightmost leaf extent block in the right_path and change * the right path to indicate the new rightmost path. 
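Both merge helpers come down to the same three-field update on adjacent leaf records. ocfs2_merge_rec_right() grows the right-hand neighbour backwards over the range being surrendered; the left merge is the mirror image with the signs flipped. In sketch form, with sb obtained from ocfs2_metadata_cache_get_super(et->et_ci):

u32 split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);

/* shrink the record giving up its tail... */
le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);

/* ...and extend the right neighbour back over it: earlier start
 * cluster, earlier start block, more clusters */
le32_add_cpu(&right_rec->e_cpos, -split_clusters);
le64_add_cpu(&right_rec->e_blkno,
	     -ocfs2_clusters_to_blocks(sb, split_clusters));
le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);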
*/ -static int ocfs2_merge_rec_left(struct inode *inode, - struct ocfs2_path *right_path, +static int ocfs2_merge_rec_left(struct ocfs2_path *right_path, handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, - struct ocfs2_extent_tree *et, int index) { int ret, i, subtree_index = 0, has_empty_extent = 0; @@ -3508,7 +3598,7 @@ static int ocfs2_merge_rec_left(struct inode *inode, right_rec = &el->l_recs[index]; if (index == 0) { /* we meet with a cross extent block merge. */ - ret = ocfs2_get_left_path(inode, right_path, &left_path); + ret = ocfs2_get_left_path(et, right_path, &left_path); if (ret) { mlog_errno(ret); goto out; @@ -3524,8 +3614,8 @@ static int ocfs2_merge_rec_left(struct inode *inode, le16_to_cpu(left_rec->e_leaf_clusters) != le32_to_cpu(split_rec->e_cpos)); - subtree_index = ocfs2_find_subtree_root(inode, - left_path, right_path); + subtree_index = ocfs2_find_subtree_root(et, left_path, + right_path); ret = ocfs2_extend_rotate_transaction(handle, subtree_index, handle->h_buffer_credits, @@ -3538,7 +3628,7 @@ static int ocfs2_merge_rec_left(struct inode *inode, root_bh = left_path->p_node[subtree_index].bh; BUG_ON(root_bh != right_path->p_node[subtree_index].bh); - ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, subtree_index); if (ret) { mlog_errno(ret); @@ -3547,14 +3637,14 @@ static int ocfs2_merge_rec_left(struct inode *inode, for (i = subtree_index + 1; i < path_num_items(right_path); i++) { - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, i); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_path_bh_journal_access(handle, inode, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path, i); if (ret) { mlog_errno(ret); @@ -3567,7 +3657,7 @@ static int ocfs2_merge_rec_left(struct inode *inode, has_empty_extent = 1; } - ret = ocfs2_path_bh_journal_access(handle, inode, right_path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path, path_num_items(right_path) - 1); if (ret) { mlog_errno(ret); @@ -3586,7 +3676,8 @@ static int ocfs2_merge_rec_left(struct inode *inode, le32_add_cpu(&right_rec->e_cpos, split_clusters); le64_add_cpu(&right_rec->e_blkno, - ocfs2_clusters_to_blocks(inode->i_sb, split_clusters)); + ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci), + split_clusters)); le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters); ocfs2_cleanup_merge(el, index); @@ -3608,9 +3699,9 @@ static int ocfs2_merge_rec_left(struct inode *inode, if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 && le16_to_cpu(el->l_next_free_rec) == 1) { - ret = ocfs2_remove_rightmost_path(inode, handle, + ret = ocfs2_remove_rightmost_path(handle, et, right_path, - dealloc, et); + dealloc); if (ret) { mlog_errno(ret); goto out; @@ -3622,7 +3713,7 @@ static int ocfs2_merge_rec_left(struct inode *inode, ocfs2_mv_path(right_path, left_path); left_path = NULL; } else - ocfs2_complete_edge_insert(inode, handle, left_path, + ocfs2_complete_edge_insert(handle, left_path, right_path, subtree_index); } out: @@ -3631,15 +3722,13 @@ out: return ret; } -static int ocfs2_try_to_merge_extent(struct inode *inode, - handle_t *handle, +static int ocfs2_try_to_merge_extent(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, int split_index, struct ocfs2_extent_rec *split_rec, struct ocfs2_cached_dealloc_ctxt *dealloc, - 
struct ocfs2_merge_ctxt *ctxt, - struct ocfs2_extent_tree *et) - + struct ocfs2_merge_ctxt *ctxt) { int ret = 0; struct ocfs2_extent_list *el = path_leaf_el(path); @@ -3655,8 +3744,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, * extents - having more than one in a leaf is * illegal. */ - ret = ocfs2_rotate_tree_left(inode, handle, path, - dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; @@ -3685,8 +3773,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, * prevoius extent block. It is more efficient and easier * if we do merge_right first and merge_left later. */ - ret = ocfs2_merge_rec_right(inode, path, - handle, split_rec, + ret = ocfs2_merge_rec_right(path, handle, et, split_rec, split_index); if (ret) { mlog_errno(ret); @@ -3699,8 +3786,7 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0])); /* The merge left us with an empty extent, remove it. */ - ret = ocfs2_rotate_tree_left(inode, handle, path, - dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; @@ -3712,18 +3798,15 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, * Note that we don't pass split_rec here on purpose - * we've merged it into the rec already. */ - ret = ocfs2_merge_rec_left(inode, path, - handle, rec, - dealloc, et, - split_index); + ret = ocfs2_merge_rec_left(path, handle, et, rec, + dealloc, split_index); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_rotate_tree_left(inode, handle, path, - dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); /* * Error from this last rotate is not critical, so * print but don't bubble it up. @@ -3740,19 +3823,16 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, * the record on the left (hence the left merge). */ if (ctxt->c_contig_type == CONTIG_RIGHT) { - ret = ocfs2_merge_rec_left(inode, - path, - handle, split_rec, - dealloc, et, + ret = ocfs2_merge_rec_left(path, handle, et, + split_rec, dealloc, split_index); if (ret) { mlog_errno(ret); goto out; } } else { - ret = ocfs2_merge_rec_right(inode, - path, - handle, split_rec, + ret = ocfs2_merge_rec_right(path, handle, + et, split_rec, split_index); if (ret) { mlog_errno(ret); @@ -3765,8 +3845,8 @@ static int ocfs2_try_to_merge_extent(struct inode *inode, * The merge may have left an empty extent in * our leaf. Try to rotate it away. */ - ret = ocfs2_rotate_tree_left(inode, handle, path, - dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, + dealloc); if (ret) mlog_errno(ret); ret = 0; @@ -3812,10 +3892,10 @@ static void ocfs2_subtract_from_rec(struct super_block *sb, * list. If this leaf is part of an allocation tree, it is assumed * that the tree above has been prepared. 
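Decision flow of ocfs2_try_to_merge_extent(), condensed as a sketch (journaling, transaction sizing, and error paths elided; the first branch's exact condition is stated from memory of this function, not from the hunks shown):

if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
	/* the split swallows the record and the leaf already holds
	 * an empty extent: just rotate the empty extent away */
	return ocfs2_rotate_tree_left(handle, et, path, dealloc);
}

if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
	/* contiguous both ways: right merge first (no walk back to
	 * the previous extent block), fold the result left, then
	 * rotate the leftover empty extent away */
	ocfs2_merge_rec_right(path, handle, et, split_rec, split_index);
	ocfs2_merge_rec_left(path, handle, et, rec, dealloc, split_index);
	ocfs2_rotate_tree_left(handle, et, path, dealloc);
} else if (ctxt->c_contig_type == CONTIG_RIGHT) {
	/* the new range continues the record on its left */
	ocfs2_merge_rec_left(path, handle, et, split_rec, dealloc,
			     split_index);
} else {
	ocfs2_merge_rec_right(path, handle, et, split_rec, split_index);
}
/* a single merge can also leave an empty extent; it gets rotated
 * away as well */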
*/ -static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, +static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et, + struct ocfs2_extent_rec *insert_rec, struct ocfs2_extent_list *el, - struct ocfs2_insert_type *insert, - struct inode *inode) + struct ocfs2_insert_type *insert) { int i = insert->ins_contig_index; unsigned int range; @@ -3827,7 +3907,8 @@ static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos)); BUG_ON(i == -1); rec = &el->l_recs[i]; - ocfs2_subtract_from_rec(inode->i_sb, insert->ins_split, rec, + ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci), + insert->ins_split, rec, insert_rec); goto rotate; } @@ -3869,10 +3950,10 @@ static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec, mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >= le16_to_cpu(el->l_count), - "inode %lu, depth %u, count %u, next free %u, " + "owner %llu, depth %u, count %u, next free %u, " "rec.cpos %u, rec.clusters %u, " "insert.cpos %u, insert.clusters %u\n", - inode->i_ino, + ocfs2_metadata_cache_owner(et->et_ci), le16_to_cpu(el->l_tree_depth), le16_to_cpu(el->l_count), le16_to_cpu(el->l_next_free_rec), @@ -3900,8 +3981,8 @@ rotate: ocfs2_rotate_leaf(el, insert_rec); } -static void ocfs2_adjust_rightmost_records(struct inode *inode, - handle_t *handle, +static void ocfs2_adjust_rightmost_records(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec) { @@ -3919,9 +4000,9 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode, next_free = le16_to_cpu(el->l_next_free_rec); if (next_free == 0) { - ocfs2_error(inode->i_sb, - "Dinode %llu has a bad extent list", - (unsigned long long)OCFS2_I(inode)->ip_blkno); + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu has a bad extent list", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); ret = -EIO; return; } @@ -3941,7 +4022,8 @@ static void ocfs2_adjust_rightmost_records(struct inode *inode, } } -static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, +static int ocfs2_append_rec_to_path(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_path *right_path, struct ocfs2_path **ret_left_path) @@ -3969,8 +4051,8 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) { u32 left_cpos; - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, - &left_cpos); + ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci), + right_path, &left_cpos); if (ret) { mlog_errno(ret); goto out; @@ -3992,7 +4074,8 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_find_path(inode, left_path, left_cpos); + ret = ocfs2_find_path(et->et_ci, left_path, + left_cpos); if (ret) { mlog_errno(ret); goto out; @@ -4005,13 +4088,13 @@ static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle, } } - ret = ocfs2_journal_access_path(inode, handle, right_path); + ret = ocfs2_journal_access_path(et->et_ci, handle, right_path); if (ret) { mlog_errno(ret); goto out; } - ocfs2_adjust_rightmost_records(inode, handle, right_path, insert_rec); + ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec); *ret_left_path = left_path; ret = 0; @@ -4022,7 +4105,7 @@ out: return ret; } -static void ocfs2_split_record(struct inode *inode, 
+static void ocfs2_split_record(struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, struct ocfs2_extent_rec *split_rec, @@ -4095,7 +4178,8 @@ static void ocfs2_split_record(struct inode *inode, } rec = &el->l_recs[index]; - ocfs2_subtract_from_rec(inode->i_sb, split, rec, split_rec); + ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci), + split, rec, split_rec); ocfs2_rotate_leaf(insert_el, split_rec); } @@ -4107,8 +4191,8 @@ static void ocfs2_split_record(struct inode *inode, * in. left_path should only be passed in if we need to update that * portion of the tree after an edge insert. */ -static int ocfs2_insert_path(struct inode *inode, - handle_t *handle, +static int ocfs2_insert_path(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *left_path, struct ocfs2_path *right_path, struct ocfs2_extent_rec *insert_rec, @@ -4134,7 +4218,7 @@ static int ocfs2_insert_path(struct inode *inode, goto out; } - ret = ocfs2_journal_access_path(inode, handle, left_path); + ret = ocfs2_journal_access_path(et->et_ci, handle, left_path); if (ret < 0) { mlog_errno(ret); goto out; @@ -4145,7 +4229,7 @@ static int ocfs2_insert_path(struct inode *inode, * Pass both paths to the journal. The majority of inserts * will be touching all components anyway. */ - ret = ocfs2_journal_access_path(inode, handle, right_path); + ret = ocfs2_journal_access_path(et->et_ci, handle, right_path); if (ret < 0) { mlog_errno(ret); goto out; @@ -4157,7 +4241,7 @@ static int ocfs2_insert_path(struct inode *inode, * of splits, but it's easier to just let one separate * function sort it all out. */ - ocfs2_split_record(inode, left_path, right_path, + ocfs2_split_record(et, left_path, right_path, insert_rec, insert->ins_split); /* @@ -4171,8 +4255,8 @@ static int ocfs2_insert_path(struct inode *inode, if (ret) mlog_errno(ret); } else - ocfs2_insert_at_leaf(insert_rec, path_leaf_el(right_path), - insert, inode); + ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path), + insert); ret = ocfs2_journal_dirty(handle, leaf_bh); if (ret) @@ -4185,10 +4269,10 @@ static int ocfs2_insert_path(struct inode *inode, * * XXX: Should we extend the transaction here? 
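ocfs2_insert_path() keeps the same shape under the new argument order: both paths are journaled through the tree's caching info up front (as the comment notes, most inserts touch every component anyway), then the write is dispatched to the split helper or to the plain leaf insert. Condensed from the hunks above, error paths elided:

    if (left_path) {
        ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
        ...
    }
    ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);

    if (insert->ins_split != SPLIT_NONE)
        ocfs2_split_record(et, left_path, right_path,
                           insert_rec, insert->ins_split);
    else
        ocfs2_insert_at_leaf(et, insert_rec,
                             path_leaf_el(right_path), insert);

    ret = ocfs2_journal_dirty(handle, leaf_bh);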
*/ - subtree_index = ocfs2_find_subtree_root(inode, left_path, + subtree_index = ocfs2_find_subtree_root(et, left_path, right_path); - ocfs2_complete_edge_insert(inode, handle, left_path, - right_path, subtree_index); + ocfs2_complete_edge_insert(handle, left_path, right_path, + subtree_index); } ret = 0; @@ -4196,8 +4280,7 @@ out: return ret; } -static int ocfs2_do_insert_extent(struct inode *inode, - handle_t *handle, +static int ocfs2_do_insert_extent(handle_t *handle, struct ocfs2_extent_tree *et, struct ocfs2_extent_rec *insert_rec, struct ocfs2_insert_type *type) @@ -4210,7 +4293,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, el = et->et_root_el; - ret = ocfs2_et_root_journal_access(handle, inode, et, + ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4218,7 +4301,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, } if (le16_to_cpu(el->l_tree_depth) == 0) { - ocfs2_insert_at_leaf(insert_rec, el, type, inode); + ocfs2_insert_at_leaf(et, insert_rec, el, type); goto out_update_clusters; } @@ -4241,7 +4324,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, cpos = UINT_MAX; } - ret = ocfs2_find_path(inode, right_path, cpos); + ret = ocfs2_find_path(et->et_ci, right_path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -4260,7 +4343,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, * can wind up skipping both of these two special cases... */ if (rotate) { - ret = ocfs2_rotate_tree_right(inode, handle, type->ins_split, + ret = ocfs2_rotate_tree_right(handle, et, type->ins_split, le32_to_cpu(insert_rec->e_cpos), right_path, &left_path); if (ret) { @@ -4272,7 +4355,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, * ocfs2_rotate_tree_right() might have extended the * transaction without re-journaling our tree root. 
*/ - ret = ocfs2_et_root_journal_access(handle, inode, et, + ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4280,7 +4363,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, } } else if (type->ins_appending == APPEND_TAIL && type->ins_contig != CONTIG_LEFT) { - ret = ocfs2_append_rec_to_path(inode, handle, insert_rec, + ret = ocfs2_append_rec_to_path(handle, et, insert_rec, right_path, &left_path); if (ret) { mlog_errno(ret); @@ -4288,7 +4371,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, } } - ret = ocfs2_insert_path(inode, handle, left_path, right_path, + ret = ocfs2_insert_path(handle, et, left_path, right_path, insert_rec, type); if (ret) { mlog_errno(ret); @@ -4297,7 +4380,7 @@ static int ocfs2_do_insert_extent(struct inode *inode, out_update_clusters: if (type->ins_split == SPLIT_NONE) - ocfs2_et_update_clusters(inode, et, + ocfs2_et_update_clusters(et, le16_to_cpu(insert_rec->e_leaf_clusters)); ret = ocfs2_journal_dirty(handle, et->et_root_bh); @@ -4312,7 +4395,8 @@ out: } static enum ocfs2_contig_type -ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, +ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et, + struct ocfs2_path *path, struct ocfs2_extent_list *el, int index, struct ocfs2_extent_rec *split_rec) { @@ -4324,12 +4408,12 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, struct ocfs2_path *left_path = NULL, *right_path = NULL; struct buffer_head *bh; struct ocfs2_extent_block *eb; + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); if (index > 0) { rec = &el->l_recs[index - 1]; } else if (path->p_tree_depth > 0) { - status = ocfs2_find_cpos_for_left_leaf(inode->i_sb, - path, &left_cpos); + status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); if (status) goto out; @@ -4338,7 +4422,8 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (!left_path) goto out; - status = ocfs2_find_path(inode, left_path, left_cpos); + status = ocfs2_find_path(et->et_ci, left_path, + left_cpos); if (status) goto out; @@ -4348,7 +4433,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, le16_to_cpu(new_el->l_count)) { bh = path_leaf_bh(left_path); eb = (struct ocfs2_extent_block *)bh->b_data; - ocfs2_error(inode->i_sb, + ocfs2_error(sb, "Extent block #%llu has an " "invalid l_next_free_rec of " "%d. 
It should have " @@ -4373,7 +4458,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (split_rec->e_cpos == el->l_recs[index].e_cpos) ret = CONTIG_RIGHT; } else { - ret = ocfs2_extent_contig(inode, rec, split_rec); + ret = ocfs2_et_extent_contig(et, rec, split_rec); } } @@ -4382,8 +4467,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, rec = &el->l_recs[index + 1]; else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) && path->p_tree_depth > 0) { - status = ocfs2_find_cpos_for_right_leaf(inode->i_sb, - path, &right_cpos); + status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos); if (status) goto out; @@ -4394,7 +4478,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (!right_path) goto out; - status = ocfs2_find_path(inode, right_path, right_cpos); + status = ocfs2_find_path(et->et_ci, right_path, right_cpos); if (status) goto out; @@ -4404,7 +4488,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (le16_to_cpu(new_el->l_next_free_rec) <= 1) { bh = path_leaf_bh(right_path); eb = (struct ocfs2_extent_block *)bh->b_data; - ocfs2_error(inode->i_sb, + ocfs2_error(sb, "Extent block #%llu has an " "invalid l_next_free_rec of %d", (unsigned long long)le64_to_cpu(eb->h_blkno), @@ -4419,7 +4503,7 @@ ocfs2_figure_merge_contig_type(struct inode *inode, struct ocfs2_path *path, if (rec) { enum ocfs2_contig_type contig_type; - contig_type = ocfs2_extent_contig(inode, rec, split_rec); + contig_type = ocfs2_et_extent_contig(et, rec, split_rec); if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT) ret = CONTIG_LEFTRIGHT; @@ -4436,11 +4520,10 @@ out: return ret; } -static void ocfs2_figure_contig_type(struct inode *inode, +static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et, struct ocfs2_insert_type *insert, struct ocfs2_extent_list *el, - struct ocfs2_extent_rec *insert_rec, - struct ocfs2_extent_tree *et) + struct ocfs2_extent_rec *insert_rec) { int i; enum ocfs2_contig_type contig_type = CONTIG_NONE; @@ -4448,8 +4531,8 @@ static void ocfs2_figure_contig_type(struct inode *inode, BUG_ON(le16_to_cpu(el->l_tree_depth) != 0); for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { - contig_type = ocfs2_extent_contig(inode, &el->l_recs[i], - insert_rec); + contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i], + insert_rec); if (contig_type != CONTIG_NONE) { insert->ins_contig_index = i; break; @@ -4530,8 +4613,7 @@ set_tail_append: * All of the information is stored on the ocfs2_insert_type * structure. */ -static int ocfs2_figure_insert_type(struct inode *inode, - struct ocfs2_extent_tree *et, +static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et, struct buffer_head **last_eb_bh, struct ocfs2_extent_rec *insert_rec, int *free_records, @@ -4555,7 +4637,7 @@ static int ocfs2_figure_insert_type(struct inode *inode, * ocfs2_figure_insert_type() and ocfs2_add_branch() * may want it later. 
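Putting the insert pieces together: ocfs2_do_insert_extent() is now driven entirely by the tree, journaling the root through the et wrapper, taking the depth-0 fast path, and otherwise locating the target leaf via et->et_ci before rotating or appending. Abridged from the hunks above:

    ret = ocfs2_et_root_journal_access(handle, et,
                                       OCFS2_JOURNAL_ACCESS_WRITE);

    if (le16_to_cpu(el->l_tree_depth) == 0) {
        ocfs2_insert_at_leaf(et, insert_rec, el, type);
        goto out_update_clusters;
    }

    ret = ocfs2_find_path(et->et_ci, right_path, cpos);

    if (rotate)
        ret = ocfs2_rotate_tree_right(handle, et, type->ins_split,
                                      le32_to_cpu(insert_rec->e_cpos),
                                      right_path, &left_path);
    else if (type->ins_appending == APPEND_TAIL &&
             type->ins_contig != CONTIG_LEFT)
        ret = ocfs2_append_rec_to_path(handle, et, insert_rec,
                                       right_path, &left_path);

    ret = ocfs2_insert_path(handle, et, left_path, right_path,
                            insert_rec, type);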
*/ - ret = ocfs2_read_extent_block(inode, + ret = ocfs2_read_extent_block(et->et_ci, ocfs2_et_get_last_eb_blk(et), &bh); if (ret) { @@ -4578,7 +4660,7 @@ static int ocfs2_figure_insert_type(struct inode *inode, le16_to_cpu(el->l_next_free_rec); if (!insert->ins_tree_depth) { - ocfs2_figure_contig_type(inode, insert, el, insert_rec, et); + ocfs2_figure_contig_type(et, insert, el, insert_rec); ocfs2_figure_appending_type(insert, el, insert_rec); return 0; } @@ -4596,7 +4678,7 @@ static int ocfs2_figure_insert_type(struct inode *inode, * us the rightmost tree path. This is accounted for below in * the appending code. */ - ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos)); + ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos)); if (ret) { mlog_errno(ret); goto out; @@ -4612,7 +4694,7 @@ static int ocfs2_figure_insert_type(struct inode *inode, * into two types of appends: simple record append, or a * rotate inside the tail leaf. */ - ocfs2_figure_contig_type(inode, insert, el, insert_rec, et); + ocfs2_figure_contig_type(et, insert, el, insert_rec); /* * The insert code isn't quite ready to deal with all cases of @@ -4657,13 +4739,11 @@ out: } /* - * Insert an extent into an inode btree. + * Insert an extent into a btree. * - * The caller needs to update fe->i_clusters + * The caller needs to update the owning btree's cluster count. */ -int ocfs2_insert_extent(struct ocfs2_super *osb, - handle_t *handle, - struct inode *inode, +int ocfs2_insert_extent(handle_t *handle, struct ocfs2_extent_tree *et, u32 cpos, u64 start_blk, @@ -4677,21 +4757,22 @@ int ocfs2_insert_extent(struct ocfs2_super *osb, struct ocfs2_insert_type insert = {0, }; struct ocfs2_extent_rec rec; - mlog(0, "add %u clusters at position %u to inode %llu\n", - new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno); + mlog(0, "add %u clusters at position %u to owner %llu\n", + new_clusters, cpos, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); memset(&rec, 0, sizeof(rec)); rec.e_cpos = cpu_to_le32(cpos); rec.e_blkno = cpu_to_le64(start_blk); rec.e_leaf_clusters = cpu_to_le16(new_clusters); rec.e_flags = flags; - status = ocfs2_et_insert_check(inode, et, &rec); + status = ocfs2_et_insert_check(et, &rec); if (status) { mlog_errno(status); goto bail; } - status = ocfs2_figure_insert_type(inode, et, &last_eb_bh, &rec, + status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec, &free_records, &insert); if (status < 0) { mlog_errno(status); @@ -4705,7 +4786,7 @@ int ocfs2_insert_extent(struct ocfs2_super *osb, free_records, insert.ins_tree_depth); if (insert.ins_contig == CONTIG_NONE && free_records == 0) { - status = ocfs2_grow_tree(inode, handle, et, + status = ocfs2_grow_tree(handle, et, &insert.ins_tree_depth, &last_eb_bh, meta_ac); if (status) { @@ -4715,11 +4796,11 @@ int ocfs2_insert_extent(struct ocfs2_super *osb, } /* Finally, we can add clusters. This might rotate the tree for us. */ - status = ocfs2_do_insert_extent(inode, handle, et, &rec, &insert); + status = ocfs2_do_insert_extent(handle, et, &rec, &insert); if (status < 0) mlog_errno(status); - else if (et->et_ops == &ocfs2_dinode_et_ops) - ocfs2_extent_map_insert_rec(inode, &rec); + else + ocfs2_et_extent_map_insert(et, &rec); bail: brelse(last_eb_bh); @@ -4735,13 +4816,11 @@ bail: * it is not limited to the file storage. Any extent tree can use this * function if it implements the proper ocfs2_extent_tree. 
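Two things fall out of the reworked ocfs2_insert_extent() above. First, callers no longer supply osb or the inode; a handle plus an initialized tree is enough. The aops.c hunk later in this patch shows a converted call site:

    ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
    ret = ocfs2_insert_extent(handle, &et, cpos, start_blk,
                              new_clusters, flags, meta_ac);

Second, the post-insert extent-map update stops testing et->et_ops == &ocfs2_dinode_et_ops and goes through the generic ocfs2_et_extent_map_insert(et, &rec) operation, presumably a no-op for trees that keep no extent map.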
*/ -int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, - struct inode *inode, +int ocfs2_add_clusters_in_btree(handle_t *handle, + struct ocfs2_extent_tree *et, u32 *logical_offset, u32 clusters_to_add, int mark_unwritten, - struct ocfs2_extent_tree *et, - handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret) @@ -4752,13 +4831,15 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, u32 bit_off, num_bits; u64 block; u8 flags = 0; + struct ocfs2_super *osb = + OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci)); BUG_ON(!clusters_to_add); if (mark_unwritten) flags = OCFS2_EXT_UNWRITTEN; - free_extents = ocfs2_num_free_extents(osb, inode, et); + free_extents = ocfs2_num_free_extents(osb, et); if (free_extents < 0) { status = free_extents; mlog_errno(status); @@ -4795,7 +4876,7 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, BUG_ON(num_bits > clusters_to_add); /* reserve our write early -- insert_extent may update the tree root */ - status = ocfs2_et_root_journal_access(handle, inode, et, + status = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -4803,10 +4884,10 @@ int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, } block = ocfs2_clusters_to_blocks(osb->sb, bit_off); - mlog(0, "Allocating %u clusters at block %u for inode %llu\n", - num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); - status = ocfs2_insert_extent(osb, handle, inode, et, - *logical_offset, block, + mlog(0, "Allocating %u clusters at block %u for owner %llu\n", + num_bits, bit_off, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci)); + status = ocfs2_insert_extent(handle, et, *logical_offset, block, num_bits, flags, meta_ac); if (status < 0) { mlog_errno(status); @@ -4856,10 +4937,9 @@ static void ocfs2_make_right_split_rec(struct super_block *sb, split_rec->e_flags = rec->e_flags; } -static int ocfs2_split_and_insert(struct inode *inode, - handle_t *handle, - struct ocfs2_path *path, +static int ocfs2_split_and_insert(handle_t *handle, struct ocfs2_extent_tree *et, + struct ocfs2_path *path, struct buffer_head **last_eb_bh, int split_index, struct ocfs2_extent_rec *orig_split_rec, @@ -4892,7 +4972,7 @@ leftright: if (le16_to_cpu(rightmost_el->l_next_free_rec) == le16_to_cpu(rightmost_el->l_count)) { - ret = ocfs2_grow_tree(inode, handle, et, + ret = ocfs2_grow_tree(handle, et, &depth, last_eb_bh, meta_ac); if (ret) { mlog_errno(ret); @@ -4921,8 +5001,8 @@ leftright: */ insert.ins_split = SPLIT_RIGHT; - ocfs2_make_right_split_rec(inode->i_sb, &tmprec, insert_range, - &rec); + ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci), + &tmprec, insert_range, &rec); split_rec = tmprec; @@ -4930,7 +5010,7 @@ leftright: do_leftright = 1; } - ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); + ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert); if (ret) { mlog_errno(ret); goto out; @@ -4946,7 +5026,7 @@ leftright: ocfs2_reinit_path(path, 1); cpos = le32_to_cpu(split_rec.e_cpos); - ret = ocfs2_find_path(inode, path, cpos); + ret = ocfs2_find_path(et->et_ci, path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -4961,8 +5041,8 @@ out: return ret; } -static int ocfs2_replace_extent_rec(struct inode *inode, - handle_t *handle, +static int ocfs2_replace_extent_rec(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, struct ocfs2_extent_list *el, int split_index, @@ -4970,7 +5050,7 
@@ static int ocfs2_replace_extent_rec(struct inode *inode, { int ret; - ret = ocfs2_path_bh_journal_access(handle, inode, path, + ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path, path_num_items(path) - 1); if (ret) { mlog_errno(ret); @@ -4985,9 +5065,8 @@ out: } /* - * Mark part or all of the extent record at split_index in the leaf - * pointed to by path as written. This removes the unwritten - * extent flag. + * Split part or all of the extent record at split_index in the leaf + * pointed to by path. Merge with the contiguous extent record if needed. * * Care is taken to handle contiguousness so as to not grow the tree. * @@ -5004,14 +5083,13 @@ out: * have been brought into cache (and pinned via the journal), so the * extra overhead is not expressed in terms of disk reads. */ -static int __ocfs2_mark_extent_written(struct inode *inode, - struct ocfs2_extent_tree *et, - handle_t *handle, - struct ocfs2_path *path, - int split_index, - struct ocfs2_extent_rec *split_rec, - struct ocfs2_alloc_context *meta_ac, - struct ocfs2_cached_dealloc_ctxt *dealloc) +int ocfs2_split_extent(handle_t *handle, + struct ocfs2_extent_tree *et, + struct ocfs2_path *path, + int split_index, + struct ocfs2_extent_rec *split_rec, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) { int ret = 0; struct ocfs2_extent_list *el = path_leaf_el(path); @@ -5020,12 +5098,6 @@ static int __ocfs2_mark_extent_written(struct inode *inode, struct ocfs2_merge_ctxt ctxt; struct ocfs2_extent_list *rightmost_el; - if (!(rec->e_flags & OCFS2_EXT_UNWRITTEN)) { - ret = -EIO; - mlog_errno(ret); - goto out; - } - if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) || ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) < (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) { @@ -5034,19 +5106,19 @@ static int __ocfs2_mark_extent_written(struct inode *inode, goto out; } - ctxt.c_contig_type = ocfs2_figure_merge_contig_type(inode, path, el, + ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el, split_index, split_rec); /* * The core merge / split code wants to know how much room is - * left in this inodes allocation tree, so we pass the + * left in this allocation tree, so we pass the * rightmost extent list. */ if (path->p_tree_depth) { struct ocfs2_extent_block *eb; - ret = ocfs2_read_extent_block(inode, + ret = ocfs2_read_extent_block(et->et_ci, ocfs2_et_get_last_eb_blk(et), &last_eb_bh); if (ret) { @@ -5073,19 +5145,18 @@ static int __ocfs2_mark_extent_written(struct inode *inode, if (ctxt.c_contig_type == CONTIG_NONE) { if (ctxt.c_split_covers_rec) - ret = ocfs2_replace_extent_rec(inode, handle, - path, el, + ret = ocfs2_replace_extent_rec(handle, et, path, el, split_index, split_rec); else - ret = ocfs2_split_and_insert(inode, handle, path, et, + ret = ocfs2_split_and_insert(handle, et, path, &last_eb_bh, split_index, split_rec, meta_ac); if (ret) mlog_errno(ret); } else { - ret = ocfs2_try_to_merge_extent(inode, handle, path, + ret = ocfs2_try_to_merge_extent(handle, et, path, split_index, split_rec, - dealloc, &ctxt, et); + dealloc, &ctxt); if (ret) mlog_errno(ret); } @@ -5096,46 +5167,31 @@ out: } /* - * Mark the already-existing extent at cpos as written for len clusters. + * Change the flags of the already-existing extent at cpos for len clusters. + * + * new_flags: the flags we want to set. + * clear_flags: the flags we want to clear. + * phys: the new physical offset we want this new extent starts from. 
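__ocfs2_mark_extent_written() is promoted here to the public ocfs2_split_extent(), and its unwritten-only sanity check on rec->e_flags is dropped so the same split/merge engine can serve any flag transition. The core dispatch is unchanged; from the hunks above:

    ctxt.c_contig_type = ocfs2_figure_merge_contig_type(et, path, el,
                                                        split_index,
                                                        split_rec);

    if (ctxt.c_contig_type == CONTIG_NONE) {
        if (ctxt.c_split_covers_rec)
            ret = ocfs2_replace_extent_rec(handle, et, path, el,
                                           split_index, split_rec);
        else
            ret = ocfs2_split_and_insert(handle, et, path,
                                         &last_eb_bh, split_index,
                                         split_rec, meta_ac);
    } else {
        ret = ocfs2_try_to_merge_extent(handle, et, path,
                                        split_index, split_rec,
                                        dealloc, &ctxt);
    }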
* * If the existing extent is larger than the request, initiate a * split. An attempt will be made at merging with adjacent extents. * * The caller is responsible for passing down meta_ac if we'll need it. */ -int ocfs2_mark_extent_written(struct inode *inode, - struct ocfs2_extent_tree *et, - handle_t *handle, u32 cpos, u32 len, u32 phys, - struct ocfs2_alloc_context *meta_ac, - struct ocfs2_cached_dealloc_ctxt *dealloc) +int ocfs2_change_extent_flag(handle_t *handle, + struct ocfs2_extent_tree *et, + u32 cpos, u32 len, u32 phys, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int new_flags, int clear_flags) { int ret, index; - u64 start_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys); + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); + u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys); struct ocfs2_extent_rec split_rec; struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el; - - mlog(0, "Inode %lu cpos %u, len %u, phys %u (%llu)\n", - inode->i_ino, cpos, len, phys, (unsigned long long)start_blkno); - - if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { - ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " - "that are being written to, but the feature bit " - "is not set in the super block.", - (unsigned long long)OCFS2_I(inode)->ip_blkno); - ret = -EROFS; - goto out; - } - - /* - * XXX: This should be fixed up so that we just re-insert the - * next extent records. - * - * XXX: This is a hack on the extent tree, maybe it should be - * an op? - */ - if (et->et_ops == &ocfs2_dinode_et_ops) - ocfs2_extent_map_trunc(inode, 0); + struct ocfs2_extent_rec *rec; left_path = ocfs2_new_path_from_et(et); if (!left_path) { @@ -5144,7 +5200,7 @@ int ocfs2_mark_extent_written(struct inode *inode, goto out; } - ret = ocfs2_find_path(inode, left_path, cpos); + ret = ocfs2_find_path(et->et_ci, left_path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -5153,34 +5209,102 @@ int ocfs2_mark_extent_written(struct inode *inode, index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { - ocfs2_error(inode->i_sb, - "Inode %llu has an extent at cpos %u which can no " + ocfs2_error(sb, + "Owner %llu has an extent at cpos %u which can no " "longer be found.\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); + (unsigned long long) + ocfs2_metadata_cache_owner(et->et_ci), cpos); ret = -EROFS; goto out; } + ret = -EIO; + rec = &el->l_recs[index]; + if (new_flags && (rec->e_flags & new_flags)) { + mlog(ML_ERROR, "Owner %llu tried to set %d flags on an " + "extent that already had them", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + new_flags); + goto out; + } + + if (clear_flags && !(rec->e_flags & clear_flags)) { + mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an " + "extent that didn't have them", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + clear_flags); + goto out; + } + memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec)); split_rec.e_cpos = cpu_to_le32(cpos); split_rec.e_leaf_clusters = cpu_to_le16(len); split_rec.e_blkno = cpu_to_le64(start_blkno); - split_rec.e_flags = path_leaf_el(left_path)->l_recs[index].e_flags; - split_rec.e_flags &= ~OCFS2_EXT_UNWRITTEN; - - ret = __ocfs2_mark_extent_written(inode, et, handle, left_path, - index, &split_rec, meta_ac, - dealloc); + split_rec.e_flags = rec->e_flags; + if (new_flags) + split_rec.e_flags |= new_flags; + if (clear_flags) + split_rec.e_flags &= ~clear_flags; + + ret 
= ocfs2_split_extent(handle, et, left_path, + index, &split_rec, meta_ac, + dealloc); if (ret) mlog_errno(ret); out: ocfs2_free_path(left_path); return ret; + } -static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, - handle_t *handle, struct ocfs2_path *path, +/* + * Mark the already-existing extent at cpos as written for len clusters. + * This removes the unwritten extent flag. + * + * If the existing extent is larger than the request, initiate a + * split. An attempt will be made at merging with adjacent extents. + * + * The caller is responsible for passing down meta_ac if we'll need it. + */ +int ocfs2_mark_extent_written(struct inode *inode, + struct ocfs2_extent_tree *et, + handle_t *handle, u32 cpos, u32 len, u32 phys, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + + mlog(0, "Inode %lu cpos %u, len %u, phys clusters %u\n", + inode->i_ino, cpos, len, phys); + + if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) { + ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents " + "that are being written to, but the feature bit " + "is not set in the super block.", + (unsigned long long)OCFS2_I(inode)->ip_blkno); + ret = -EROFS; + goto out; + } + + /* + * XXX: This should be fixed up so that we just re-insert the + * next extent records. + */ + ocfs2_et_extent_map_truncate(et, 0); + + ret = ocfs2_change_extent_flag(handle, et, cpos, + len, phys, meta_ac, dealloc, + 0, OCFS2_EXT_UNWRITTEN); + if (ret) + mlog_errno(ret); + +out: + return ret; +} + +static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et, + struct ocfs2_path *path, int index, u32 new_range, struct ocfs2_alloc_context *meta_ac) { @@ -5197,11 +5321,12 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, */ el = path_leaf_el(path); rec = &el->l_recs[index]; - ocfs2_make_right_split_rec(inode->i_sb, &split_rec, new_range, rec); + ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci), + &split_rec, new_range, rec); depth = path->p_tree_depth; if (depth > 0) { - ret = ocfs2_read_extent_block(inode, + ret = ocfs2_read_extent_block(et->et_ci, ocfs2_et_get_last_eb_blk(et), &last_eb_bh); if (ret < 0) { @@ -5224,7 +5349,7 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, if (le16_to_cpu(rightmost_el->l_next_free_rec) == le16_to_cpu(rightmost_el->l_count)) { - ret = ocfs2_grow_tree(inode, handle, et, &depth, &last_eb_bh, + ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh, meta_ac); if (ret) { mlog_errno(ret); @@ -5238,7 +5363,7 @@ static int ocfs2_split_tree(struct inode *inode, struct ocfs2_extent_tree *et, insert.ins_split = SPLIT_RIGHT; insert.ins_tree_depth = depth; - ret = ocfs2_do_insert_extent(inode, handle, et, &split_rec, &insert); + ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert); if (ret) mlog_errno(ret); @@ -5247,23 +5372,23 @@ out: return ret; } -static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, +static int ocfs2_truncate_rec(handle_t *handle, + struct ocfs2_extent_tree *et, struct ocfs2_path *path, int index, struct ocfs2_cached_dealloc_ctxt *dealloc, - u32 cpos, u32 len, - struct ocfs2_extent_tree *et) + u32 cpos, u32 len) { int ret; u32 left_cpos, rec_range, trunc_range; int wants_rotate = 0, is_rightmost_tree_rec = 0; - struct super_block *sb = inode->i_sb; + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el = 
path_leaf_el(path); struct ocfs2_extent_rec *rec; struct ocfs2_extent_block *eb; if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) { - ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; @@ -5295,14 +5420,13 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, * by this leaf and the one to it's left. * * There are two cases we can skip: - * 1) Path is the leftmost one in our inode tree. + * 1) Path is the leftmost one in our btree. * 2) The leaf is rightmost and will be empty after * we remove the extent record - the rotate code * knows how to update the newly formed edge. */ - ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, - &left_cpos); + ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos); if (ret) { mlog_errno(ret); goto out; @@ -5316,7 +5440,8 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_find_path(inode, left_path, left_cpos); + ret = ocfs2_find_path(et->et_ci, left_path, + left_cpos); if (ret) { mlog_errno(ret); goto out; @@ -5332,13 +5457,13 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, goto out; } - ret = ocfs2_journal_access_path(inode, handle, path); + ret = ocfs2_journal_access_path(et->et_ci, handle, path); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_journal_access_path(inode, handle, left_path); + ret = ocfs2_journal_access_path(et->et_ci, handle, left_path); if (ret) { mlog_errno(ret); goto out; @@ -5361,7 +5486,7 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, * be deleted by the rotate code. */ rec = &el->l_recs[next_free - 1]; - ocfs2_adjust_rightmost_records(inode, handle, path, + ocfs2_adjust_rightmost_records(handle, et, path, rec); } } else if (le32_to_cpu(rec->e_cpos) == cpos) { @@ -5373,11 +5498,12 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, /* Remove rightmost portion of the record */ le16_add_cpu(&rec->e_leaf_clusters, -len); if (is_rightmost_tree_rec) - ocfs2_adjust_rightmost_records(inode, handle, path, rec); + ocfs2_adjust_rightmost_records(handle, et, path, rec); } else { /* Caller should have trapped this. 
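ocfs2_change_extent_flag() above is what makes the rename worthwhile: it validates the requested transition, builds a split record carrying the adjusted flags, and hands it to ocfs2_split_extent(), leaving ocfs2_mark_extent_written() as a thin wrapper that clears OCFS2_EXT_UNWRITTEN. A minimal user-space sketch of just the validation and flag arithmetic follows; the flag values and the helper name are illustrative stand-ins, not the kernel's:

    #include <stdio.h>

    #define EXT_UNWRITTEN  0x01   /* stand-in for OCFS2_EXT_UNWRITTEN */
    #define EXT_REFCOUNTED 0x02   /* stand-in for OCFS2_EXT_REFCOUNTED */

    /* Mirrors the checks ocfs2_change_extent_flag() makes before
     * splitting: refuse to set a flag already present, or to clear
     * one that is absent (the kernel logs and returns -EIO). */
    static int change_flags(unsigned char *e_flags,
                            int new_flags, int clear_flags)
    {
        if (new_flags && (*e_flags & new_flags))
            return -1;
        if (clear_flags && !(*e_flags & clear_flags))
            return -1;
        *e_flags = (unsigned char)((*e_flags | new_flags) & ~clear_flags);
        return 0;
    }

    int main(void)
    {
        unsigned char flags = EXT_UNWRITTEN;

        /* mark-written == change_flags(..., 0, EXT_UNWRITTEN) */
        printf("%d\n", change_flags(&flags, 0, EXT_UNWRITTEN)); /* 0 */
        printf("%d\n", change_flags(&flags, 0, EXT_UNWRITTEN)); /* -1 */
        return 0;
    }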
*/ - mlog(ML_ERROR, "Inode %llu: Invalid record truncate: (%u, %u) " - "(%u, %u)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, + mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) " + "(%u, %u)\n", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), le32_to_cpu(rec->e_cpos), le16_to_cpu(rec->e_leaf_clusters), cpos, len); BUG(); @@ -5386,14 +5512,14 @@ static int ocfs2_truncate_rec(struct inode *inode, handle_t *handle, if (left_path) { int subtree_index; - subtree_index = ocfs2_find_subtree_root(inode, left_path, path); - ocfs2_complete_edge_insert(inode, handle, left_path, path, + subtree_index = ocfs2_find_subtree_root(et, left_path, path); + ocfs2_complete_edge_insert(handle, left_path, path, subtree_index); } ocfs2_journal_dirty(handle, path_leaf_bh(path)); - ret = ocfs2_rotate_tree_left(inode, handle, path, dealloc, et); + ret = ocfs2_rotate_tree_left(handle, et, path, dealloc); if (ret) { mlog_errno(ret); goto out; @@ -5404,9 +5530,9 @@ out: return ret; } -int ocfs2_remove_extent(struct inode *inode, +int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et, - u32 cpos, u32 len, handle_t *handle, + u32 cpos, u32 len, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc) { @@ -5416,7 +5542,11 @@ int ocfs2_remove_extent(struct inode *inode, struct ocfs2_extent_list *el; struct ocfs2_path *path = NULL; - ocfs2_extent_map_trunc(inode, 0); + /* + * XXX: Why are we truncating to 0 instead of wherever this + * affects us? + */ + ocfs2_et_extent_map_truncate(et, 0); path = ocfs2_new_path_from_et(et); if (!path) { @@ -5425,7 +5555,7 @@ int ocfs2_remove_extent(struct inode *inode, goto out; } - ret = ocfs2_find_path(inode, path, cpos); + ret = ocfs2_find_path(et->et_ci, path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -5434,10 +5564,11 @@ int ocfs2_remove_extent(struct inode *inode, el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { - ocfs2_error(inode->i_sb, - "Inode %llu has an extent at cpos %u which can no " + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu has an extent at cpos %u which can no " "longer be found.\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + cpos); ret = -EROFS; goto out; } @@ -5464,20 +5595,21 @@ int ocfs2_remove_extent(struct inode *inode, BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range); - mlog(0, "Inode %llu, remove (cpos %u, len %u). Existing index %d " + mlog(0, "Owner %llu, remove (cpos %u, len %u). 
Existing index %d " "(cpos %u, len %u)\n", - (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos, len, index, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), + cpos, len, index, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) { - ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, - cpos, len, et); + ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, + cpos, len); if (ret) { mlog_errno(ret); goto out; } } else { - ret = ocfs2_split_tree(inode, et, handle, path, index, + ret = ocfs2_split_tree(handle, et, path, index, trunc_range, meta_ac); if (ret) { mlog_errno(ret); @@ -5490,7 +5622,7 @@ int ocfs2_remove_extent(struct inode *inode, */ ocfs2_reinit_path(path, 1); - ret = ocfs2_find_path(inode, path, cpos); + ret = ocfs2_find_path(et->et_ci, path, cpos); if (ret) { mlog_errno(ret); goto out; @@ -5499,9 +5631,9 @@ int ocfs2_remove_extent(struct inode *inode, el = path_leaf_el(path); index = ocfs2_search_extent_list(el, cpos); if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { - ocfs2_error(inode->i_sb, - "Inode %llu: split at cpos %u lost record.", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu: split at cpos %u lost record.", + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos); ret = -EROFS; goto out; @@ -5515,18 +5647,18 @@ int ocfs2_remove_extent(struct inode *inode, rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (rec_range != trunc_range) { - ocfs2_error(inode->i_sb, - "Inode %llu: error after split at cpos %u" + ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci), + "Owner %llu: error after split at cpos %u" "trunc len %u, existing record is (%u,%u)", - (unsigned long long)OCFS2_I(inode)->ip_blkno, + (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), cpos, len, le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec)); ret = -EROFS; goto out; } - ret = ocfs2_truncate_rec(inode, handle, path, index, dealloc, - cpos, len, et); + ret = ocfs2_truncate_rec(handle, et, path, index, dealloc, + cpos, len); if (ret) { mlog_errno(ret); goto out; @@ -5573,7 +5705,7 @@ int ocfs2_remove_btree_range(struct inode *inode, goto out; } - ret = ocfs2_et_root_journal_access(handle, inode, et, + ret = ocfs2_et_root_journal_access(handle, et, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -5583,14 +5715,13 @@ int ocfs2_remove_btree_range(struct inode *inode, vfs_dq_free_space_nodirty(inode, ocfs2_clusters_to_bytes(inode->i_sb, len)); - ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac, - dealloc); + ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); if (ret) { mlog_errno(ret); goto out_commit; } - ocfs2_et_update_clusters(inode, et, -len); + ocfs2_et_update_clusters(et, -len); ret = ocfs2_journal_dirty(handle, et->et_root_bh); if (ret) { @@ -5690,7 +5821,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, goto bail; } - status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -5752,7 +5883,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, while (i >= 0) { /* Caller has given us at least enough credits to * update the truncate log dinode */ - status = ocfs2_journal_access_di(handle, tl_inode, tl_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), 
tl_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -6010,7 +6141,7 @@ int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb, tl->tl_used = 0; ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check); - status = ocfs2_write_block(osb, tl_bh, tl_inode); + status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode)); if (status < 0) { mlog_errno(status); goto bail; @@ -6400,9 +6531,9 @@ ocfs2_find_per_slot_free_list(int type, return fl; } -static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, - int type, int slot, u64 blkno, - unsigned int bit) +int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, + int type, int slot, u64 blkno, + unsigned int bit) { int ret; struct ocfs2_per_slot_free_list *fl; @@ -6518,7 +6649,7 @@ static int ocfs2_find_new_last_ext_blk(struct inode *inode, goto out; } - ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh); + ret = ocfs2_find_leaf(INODE_CACHE(inode), path_root_el(path), cpos, &bh); if (ret) { mlog_errno(ret); goto out; @@ -6551,7 +6682,7 @@ out: */ static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path, handle_t *handle, struct ocfs2_truncate_context *tc, - u32 clusters_to_del, u64 *delete_start) + u32 clusters_to_del, u64 *delete_start, u8 *flags) { int ret, i, index = path->p_tree_depth; u32 new_edge = 0; @@ -6561,6 +6692,7 @@ static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path, struct ocfs2_extent_rec *rec; *delete_start = 0; + *flags = 0; while (index >= 0) { bh = path->p_node[index].bh; @@ -6648,6 +6780,7 @@ find_tail_record: *delete_start = le64_to_cpu(rec->e_blkno) + ocfs2_clusters_to_blocks(inode->i_sb, le16_to_cpu(rec->e_leaf_clusters)); + *flags = rec->e_flags; /* * If it's now empty, remove this record. @@ -6719,7 +6852,7 @@ delete: mlog(0, "deleting this extent block.\n"); - ocfs2_remove_from_cache(inode, bh); + ocfs2_remove_from_cache(INODE_CACHE(inode), bh); BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0])); BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos)); @@ -6747,7 +6880,8 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, struct buffer_head *fe_bh, handle_t *handle, struct ocfs2_truncate_context *tc, - struct ocfs2_path *path) + struct ocfs2_path *path, + struct ocfs2_alloc_context *meta_ac) { int status; struct ocfs2_dinode *fe; @@ -6755,6 +6889,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, struct ocfs2_extent_list *el; struct buffer_head *last_eb_bh = NULL; u64 delete_blk = 0; + u8 rec_flags; fe = (struct ocfs2_dinode *) fe_bh->b_data; @@ -6769,14 +6904,14 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, * Each component will be touched, so we might as well journal * here to avoid having to handle errors later. 
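ocfs2_remove_extent() keeps its two-pass shape: a range that lines up with a record boundary is truncated directly, while anything else is first split at trunc_range, after which the path is re-found and the now-aligned record truncated. From the hunks above, error handling elided:

    if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
        ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
                                 cpos, len);
    } else {
        ret = ocfs2_split_tree(handle, et, path, index,
                               trunc_range, meta_ac);

        /* the split rearranged the tree; find our record again */
        ocfs2_reinit_path(path, 1);
        ret = ocfs2_find_path(et->et_ci, path, cpos);
        index = ocfs2_search_extent_list(path_leaf_el(path), cpos);

        ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
                                 cpos, len);
    }

The XXX added at the top of the function is an honest question, not new behavior: the extent map was already being truncated from cluster 0 rather than from the affected range.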
*/ - status = ocfs2_journal_access_path(inode, handle, path); + status = ocfs2_journal_access_path(INODE_CACHE(inode), handle, path); if (status < 0) { mlog_errno(status); goto bail; } if (last_eb_bh) { - status = ocfs2_journal_access_eb(handle, inode, last_eb_bh, + status = ocfs2_journal_access_eb(handle, INODE_CACHE(inode), last_eb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -6810,7 +6945,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, inode->i_blocks = ocfs2_inode_sector_count(inode); status = ocfs2_trim_tree(inode, path, handle, tc, - clusters_to_del, &delete_blk); + clusters_to_del, &delete_blk, &rec_flags); if (status) { mlog_errno(status); goto bail; @@ -6842,8 +6977,16 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, } if (delete_blk) { - status = ocfs2_truncate_log_append(osb, handle, delete_blk, - clusters_to_del); + if (rec_flags & OCFS2_EXT_REFCOUNTED) + status = ocfs2_decrease_refcount(inode, handle, + ocfs2_blocks_to_clusters(osb->sb, + delete_blk), + clusters_to_del, meta_ac, + &tc->tc_dealloc, 1); + else + status = ocfs2_truncate_log_append(osb, handle, + delete_blk, + clusters_to_del); if (status < 0) { mlog_errno(status); goto bail; @@ -6863,9 +7006,9 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) return 0; } -static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys) +void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, + unsigned int from, unsigned int to, + struct page *page, int zero, u64 *phys) { int ret, partial = 0; @@ -6933,20 +7076,16 @@ out: ocfs2_unlock_and_free_pages(pages, numpages); } -static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, + struct page **pages, int *num) { int numpages, ret = 0; - struct super_block *sb = inode->i_sb; struct address_space *mapping = inode->i_mapping; unsigned long index; loff_t last_page_bytes; BUG_ON(start > end); - BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != - (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); - numpages = 0; last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_CACHE_SHIFT; @@ -6974,6 +7113,17 @@ out: return ret; } +static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, + struct page **pages, int *num) +{ + struct super_block *sb = inode->i_sb; + + BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != + (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); + + return ocfs2_grab_pages(inode, start, end, pages, num); +} + /* * Zero the area past i_size but still within an allocated * cluster. This avoids exposing nonzero data on subsequent file @@ -7138,7 +7288,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, goto out_unlock; } - ret = ocfs2_journal_access_di(handle, inode, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -7218,9 +7368,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * this proves to be false, we could always re-build * the in-inode data from our pages. 
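This is where the patch turns functional rather than purely mechanical: ocfs2_trim_tree() grows an out-parameter reporting the e_flags of the record it trimmed, and ocfs2_do_truncate() uses it to route refcounted extents through the refcount machinery instead of the truncate log. Verbatim from the hunk:

    if (rec_flags & OCFS2_EXT_REFCOUNTED)
        status = ocfs2_decrease_refcount(inode, handle,
                                         ocfs2_blocks_to_clusters(osb->sb,
                                                                  delete_blk),
                                         clusters_to_del, meta_ac,
                                         &tc->tc_dealloc, 1);
    else
        status = ocfs2_truncate_log_append(osb, handle,
                                           delete_blk,
                                           clusters_to_del);

The same stretch also exports ocfs2_map_and_dirty_page() and splits ocfs2_grab_eof_pages() into a generic ocfs2_grab_pages() plus a wrapper that keeps the same-cluster BUG_ON, groundwork the refcount/reflink code can reuse.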
*/ - ocfs2_init_dinode_extent_tree(&et, inode, di_bh); - ret = ocfs2_insert_extent(osb, handle, inode, &et, - 0, block, 1, 0, NULL); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); + ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL); if (ret) { mlog_errno(ret); goto out_commit; @@ -7262,11 +7411,14 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb, { int status, i, credits, tl_sem = 0; u32 clusters_to_del, new_highest_cpos, range; + u64 blkno = 0; struct ocfs2_extent_list *el; handle_t *handle = NULL; struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_path *path = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_refcount_tree *ref_tree = NULL; mlog_entry_void(); @@ -7292,10 +7444,12 @@ start: goto bail; } + credits = 0; + /* * Truncate always works against the rightmost tree branch. */ - status = ocfs2_find_path(inode, path, UINT_MAX); + status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX); if (status) { mlog_errno(status); goto bail; @@ -7332,10 +7486,15 @@ start: clusters_to_del = 0; } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) { clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]); + blkno = le64_to_cpu(el->l_recs[i].e_blkno); } else if (range > new_highest_cpos) { clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) + le32_to_cpu(el->l_recs[i].e_cpos)) - new_highest_cpos; + blkno = le64_to_cpu(el->l_recs[i].e_blkno) + + ocfs2_clusters_to_blocks(inode->i_sb, + ocfs2_rec_clusters(el, &el->l_recs[i]) - + clusters_to_del); } else { status = 0; goto bail; @@ -7344,6 +7503,29 @@ start: mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n", clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr); + if (el->l_recs[i].e_flags & OCFS2_EXT_REFCOUNTED && clusters_to_del) { + BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & + OCFS2_HAS_REFCOUNT_FL)); + + status = ocfs2_lock_refcount_tree(osb, + le64_to_cpu(di->i_refcount_loc), + 1, &ref_tree, NULL); + if (status) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_prepare_refcount_change_for_del(inode, fe_bh, + blkno, + clusters_to_del, + &credits, + &meta_ac); + if (status < 0) { + mlog_errno(status); + goto bail; + } + } + mutex_lock(&tl_inode->i_mutex); tl_sem = 1; /* ocfs2_truncate_log_needs_flush guarantees us at least one @@ -7357,7 +7539,7 @@ start: } } - credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, + credits += ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, (struct ocfs2_dinode *)fe_bh->b_data, el); handle = ocfs2_start_trans(osb, credits); @@ -7369,7 +7551,7 @@ start: } status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle, - tc, path); + tc, path, meta_ac); if (status < 0) { mlog_errno(status); goto bail; @@ -7383,6 +7565,16 @@ start: ocfs2_reinit_path(path, 1); + if (meta_ac) { + ocfs2_free_alloc_context(meta_ac); + meta_ac = NULL; + } + + if (ref_tree) { + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + ref_tree = NULL; + } + /* * The check above will catch the case where we've truncated * away all allocation. 
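On the ocfs2_commit_truncate() side, any pass that deletes from a refcounted extent must hold the refcount tree lock and reserve extra journal credits first. Note the bookkeeping: credits now starts each pass at 0, blkno is computed to point at the first cluster actually being removed, and ocfs2_calc_tree_trunc_credits() is added with += so that ocfs2_prepare_refcount_change_for_del() can contribute its share beforehand. Abridged:

    if (el->l_recs[i].e_flags & OCFS2_EXT_REFCOUNTED && clusters_to_del) {
        status = ocfs2_lock_refcount_tree(osb,
                                          le64_to_cpu(di->i_refcount_loc),
                                          1, &ref_tree, NULL);

        status = ocfs2_prepare_refcount_change_for_del(inode, fe_bh,
                                                       blkno,
                                                       clusters_to_del,
                                                       &credits,
                                                       &meta_ac);
    }
    ...
    credits += ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
                                             (struct ocfs2_dinode *)fe_bh->b_data,
                                             el);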
@@ -7399,6 +7591,12 @@ bail: if (handle) ocfs2_commit_trans(osb, handle); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + if (ref_tree) + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + ocfs2_run_deallocs(osb, &tc->tc_dealloc); ocfs2_free_path(path); @@ -7445,7 +7643,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb, ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); if (fe->id2.i_list.l_tree_depth) { - status = ocfs2_read_extent_block(inode, + status = ocfs2_read_extent_block(INODE_CACHE(inode), le64_to_cpu(fe->i_last_eb_blk), &last_eb_bh); if (status < 0) { @@ -7507,7 +7705,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, goto out; } - ret = ocfs2_journal_access_di(handle, inode, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 353254b..9c122d5 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -45,7 +45,8 @@ * * ocfs2_extent_tree contains info for the root of the b-tree, it must have a * root ocfs2_extent_list and a root_bh so that they can be used in the b-tree - * functions. With metadata ecc, we now call different journal_access + * functions. It needs the ocfs2_caching_info structure associated with + * I/O on the tree. With metadata ecc, we now call different journal_access * functions for each type of metadata, so it must have the * root_journal_access function. * ocfs2_extent_tree_operations abstract the normal operations we do for @@ -56,6 +57,7 @@ struct ocfs2_extent_tree { struct ocfs2_extent_tree_operations *et_ops; struct buffer_head *et_root_bh; struct ocfs2_extent_list *et_root_el; + struct ocfs2_caching_info *et_ci; ocfs2_journal_access_func et_root_journal_access; void *et_object; unsigned int et_max_leaf_clusters; @@ -66,31 +68,32 @@ struct ocfs2_extent_tree { * specified object buffer. */ void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh); void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh); struct ocfs2_xattr_value_buf; void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct ocfs2_xattr_value_buf *vb); void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh); +void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et, + struct ocfs2_caching_info *ci, + struct buffer_head *bh); /* * Read an extent block into *bh. If *bh is NULL, a bh will be * allocated. This is a cached read. The extent block will be validated * with ocfs2_validate_extent_block(). 
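The alloc.h hunk above is the heart of the interface change: the extent tree now carries the caching info it does I/O through, and every initializer takes that instead of an inode. After the hunk the structure reads:

    struct ocfs2_extent_tree {
        struct ocfs2_extent_tree_operations *et_ops;
        struct buffer_head *et_root_bh;
        struct ocfs2_extent_list *et_root_el;
        struct ocfs2_caching_info *et_ci;
        ocfs2_journal_access_func et_root_journal_access;
        void *et_object;
        unsigned int et_max_leaf_clusters;
    };

The new ocfs2_init_refcount_extent_tree() declaration is the payoff: a refcount block can root an extent tree without any inode backing it.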
*/ -int ocfs2_read_extent_block(struct inode *inode, u64 eb_blkno, +int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno, struct buffer_head **bh); struct ocfs2_alloc_context; -int ocfs2_insert_extent(struct ocfs2_super *osb, - handle_t *handle, - struct inode *inode, +int ocfs2_insert_extent(handle_t *handle, struct ocfs2_extent_tree *et, u32 cpos, u64 start_blk, @@ -103,25 +106,36 @@ enum ocfs2_alloc_restarted { RESTART_TRANS, RESTART_META }; -int ocfs2_add_clusters_in_btree(struct ocfs2_super *osb, - struct inode *inode, +int ocfs2_add_clusters_in_btree(handle_t *handle, + struct ocfs2_extent_tree *et, u32 *logical_offset, u32 clusters_to_add, int mark_unwritten, - struct ocfs2_extent_tree *et, - handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret); struct ocfs2_cached_dealloc_ctxt; +struct ocfs2_path; +int ocfs2_split_extent(handle_t *handle, + struct ocfs2_extent_tree *et, + struct ocfs2_path *path, + int split_index, + struct ocfs2_extent_rec *split_rec, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc); int ocfs2_mark_extent_written(struct inode *inode, struct ocfs2_extent_tree *et, handle_t *handle, u32 cpos, u32 len, u32 phys, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc); -int ocfs2_remove_extent(struct inode *inode, - struct ocfs2_extent_tree *et, - u32 cpos, u32 len, handle_t *handle, +int ocfs2_change_extent_flag(handle_t *handle, + struct ocfs2_extent_tree *et, + u32 cpos, u32 len, u32 phys, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int new_flags, int clear_flags); +int ocfs2_remove_extent(handle_t *handle, struct ocfs2_extent_tree *et, + u32 cpos, u32 len, struct ocfs2_alloc_context *meta_ac, struct ocfs2_cached_dealloc_ctxt *dealloc); int ocfs2_remove_btree_range(struct inode *inode, @@ -130,7 +144,6 @@ int ocfs2_remove_btree_range(struct inode *inode, struct ocfs2_cached_dealloc_ctxt *dealloc); int ocfs2_num_free_extents(struct ocfs2_super *osb, - struct inode *inode, struct ocfs2_extent_tree *et); /* @@ -195,6 +208,9 @@ static inline void ocfs2_init_dealloc_ctxt(struct ocfs2_cached_dealloc_ctxt *c) } int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, u64 blkno, unsigned int bit); +int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt, + int type, int slot, u64 blkno, + unsigned int bit); static inline int ocfs2_dealloc_has_cluster(struct ocfs2_cached_dealloc_ctxt *c) { return c->c_global_allocator != NULL; @@ -222,8 +238,9 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb, int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, unsigned int start, unsigned int end, int trunc); -int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el, - u32 cpos, struct buffer_head **leaf_bh); +int ocfs2_find_leaf(struct ocfs2_caching_info *ci, + struct ocfs2_extent_list *root_el, u32 cpos, + struct buffer_head **leaf_bh); int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster); /* @@ -254,4 +271,50 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) return !rec->e_leaf_clusters; } +int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, + struct page **pages, int *num); +void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, + unsigned int from, unsigned int to, + struct page *page, int zero, u64 *phys); +/* + * Structures which describe a path 
through a btree, and functions to + * manipulate them. + * + * The idea here is to be as generic as possible with the tree + * manipulation code. + */ +struct ocfs2_path_item { + struct buffer_head *bh; + struct ocfs2_extent_list *el; +}; + +#define OCFS2_MAX_PATH_DEPTH 5 + +struct ocfs2_path { + int p_tree_depth; + ocfs2_journal_access_func p_root_access; + struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH]; +}; + +#define path_root_bh(_path) ((_path)->p_node[0].bh) +#define path_root_el(_path) ((_path)->p_node[0].el) +#define path_root_access(_path)((_path)->p_root_access) +#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh) +#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el) +#define path_num_items(_path) ((_path)->p_tree_depth + 1) + +void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root); +void ocfs2_free_path(struct ocfs2_path *path); +int ocfs2_find_path(struct ocfs2_caching_info *ci, + struct ocfs2_path *path, + u32 cpos); +struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path); +struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et); +int ocfs2_path_bh_journal_access(handle_t *handle, + struct ocfs2_caching_info *ci, + struct ocfs2_path *path, + int idx); +int ocfs2_journal_access_path(struct ocfs2_caching_info *ci, + handle_t *handle, + struct ocfs2_path *path); #endif /* OCFS2_ALLOC_H */ diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 747f15e..deb2b13 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -44,6 +44,7 @@ #include "suballoc.h" #include "super.h" #include "symlink.h" +#include "refcounttree.h" #include "buffer_head_io.h" @@ -126,8 +127,8 @@ bail: return err; } -static int ocfs2_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) +int ocfs2_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create) { int err = 0; unsigned int ext_flags; @@ -590,6 +591,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, goto bail; } + /* We should already CoW the refcounted extent. */ + BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); /* * get_more_blocks() expects us to describe a hole by clearing * the mapped bit on bh_result(). @@ -687,6 +690,10 @@ static ssize_t ocfs2_direct_IO(int rw, if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; + /* Fallback to buffered I/O if we are appending. */ + if (i_size_read(inode) <= offset) + return 0; + ret = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, @@ -1259,7 +1266,8 @@ static int ocfs2_write_cluster(struct address_space *mapping, goto out; } } else if (unwritten) { - ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), + wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, wc->w_handle, cpos, 1, phys, meta_ac, &wc->w_dealloc); @@ -1448,6 +1456,9 @@ static int ocfs2_populate_write_desc(struct inode *inode, goto out; } + /* We should already CoW the refcountd extent. */ + BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); + /* * Assume worst case - that we're writing in * the middle of the extent. 
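Exporting struct ocfs2_path, its accessor macros, and the find/journal helpers lets code outside alloc.c — the refcount tree above all — walk a btree with the idiom alloc.c uses internally. Assembled from calls that appear elsewhere in this patch:

    struct ocfs2_path *path = ocfs2_new_path_from_et(et);

    ret = ocfs2_find_path(et->et_ci, path, cpos);
    el = path_leaf_el(path);
    index = ocfs2_search_extent_list(el, cpos);

    ret = ocfs2_journal_access_path(et->et_ci, handle, path);
    ...
    ocfs2_free_path(path);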
@@ -1528,7 +1539,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } - ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { ocfs2_commit_trans(osb, handle); @@ -1699,6 +1710,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, goto out; } + ret = ocfs2_check_range_for_refcount(inode, pos, len); + if (ret < 0) { + mlog_errno(ret); + goto out; + } else if (ret == 1) { + ret = ocfs2_refcount_cow(inode, di_bh, + wc->w_cpos, wc->w_clen, UINT_MAX); + if (ret) { + mlog_errno(ret); + goto out; + } + } + ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc, &extents_to_split); if (ret) { @@ -1726,7 +1750,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), clusters_to_alloc, extents_to_split); - ocfs2_init_dinode_extent_tree(&et, inode, wc->w_di_bh); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), + wc->w_di_bh); ret = ocfs2_lock_allocators(inode, &et, clusters_to_alloc, extents_to_split, &data_ac, &meta_ac); @@ -1773,7 +1798,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, * We don't want this to fail in ocfs2_write_end(), so do it * here. */ - ret = ocfs2_journal_access_di(handle, inode, wc->w_di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 503e492..c48e93f 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -57,6 +57,8 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, struct buffer_head *di_bh); int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size); +int ocfs2_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int create); /* all ocfs2_dio_end_io()'s fault */ #define ocfs2_iocb_is_rw_locked(iocb) \ test_bit(0, (unsigned long *)&iocb->private) diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index 15c8e6d..d43d34a 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c @@ -52,12 +52,12 @@ enum ocfs2_state_bits { BUFFER_FNS(NeedsValidate, needs_validate); int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, - struct inode *inode) + struct ocfs2_caching_info *ci) { int ret = 0; - mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n", - (unsigned long long)bh->b_blocknr, inode); + mlog_entry("(bh->b_blocknr = %llu, ci=%p)\n", + (unsigned long long)bh->b_blocknr, ci); BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO); BUG_ON(buffer_jbd(bh)); @@ -70,7 +70,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, goto out; } - mutex_lock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_lock(ci); lock_buffer(bh); set_buffer_uptodate(bh); @@ -85,7 +85,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, wait_on_buffer(bh); if (buffer_uptodate(bh)) { - ocfs2_set_buffer_uptodate(inode, bh); + ocfs2_set_buffer_uptodate(ci, bh); } else { /* We don't need to remove the clustered uptodate * information for this bh as it's not marked locally @@ -94,7 +94,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, put_bh(bh); } - mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_unlock(ci); out: mlog_exit(ret); return ret; @@ -177,7 +177,7 @@ bail: return status; } -int ocfs2_read_blocks(struct inode *inode, u64 
block, int nr, +int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, struct buffer_head *bh)) @@ -185,11 +185,12 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, int status = 0; int i, ignore_cache = 0; struct buffer_head *bh; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); - mlog_entry("(inode=%p, block=(%llu), nr=(%d), flags=%d)\n", - inode, (unsigned long long)block, nr, flags); + mlog_entry("(ci=%p, block=(%llu), nr=(%d), flags=%d)\n", + ci, (unsigned long long)block, nr, flags); - BUG_ON(!inode); + BUG_ON(!ci); BUG_ON((flags & OCFS2_BH_READAHEAD) && (flags & OCFS2_BH_IGNORE_CACHE)); @@ -212,12 +213,12 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, goto bail; } - mutex_lock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_lock(ci); for (i = 0 ; i < nr ; i++) { if (bhs[i] == NULL) { - bhs[i] = sb_getblk(inode->i_sb, block++); + bhs[i] = sb_getblk(sb, block++); if (bhs[i] == NULL) { - mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_unlock(ci); status = -EIO; mlog_errno(status); goto bail; @@ -250,11 +251,11 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, * before our is-it-in-flight check. */ - if (!ignore_cache && !ocfs2_buffer_uptodate(inode, bh)) { + if (!ignore_cache && !ocfs2_buffer_uptodate(ci, bh)) { mlog(ML_UPTODATE, - "bh (%llu), inode %llu not uptodate\n", + "bh (%llu), owner %llu not uptodate\n", (unsigned long long)bh->b_blocknr, - (unsigned long long)OCFS2_I(inode)->ip_blkno); + (unsigned long long)ocfs2_metadata_cache_owner(ci)); /* We're using ignore_cache here to say * "go to disk" */ ignore_cache = 1; @@ -283,7 +284,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, * previously submitted request than we are * done here. */ if ((flags & OCFS2_BH_READAHEAD) - && ocfs2_buffer_read_ahead(inode, bh)) + && ocfs2_buffer_read_ahead(ci, bh)) continue; lock_buffer(bh); @@ -305,7 +306,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, * buffer lock. */ if (!(flags & OCFS2_BH_IGNORE_CACHE) && !(flags & OCFS2_BH_READAHEAD) - && ocfs2_buffer_uptodate(inode, bh)) { + && ocfs2_buffer_uptodate(ci, bh)) { unlock_buffer(bh); continue; } @@ -327,7 +328,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, if (!(flags & OCFS2_BH_READAHEAD)) { /* We know this can't have changed as we hold the - * inode sem. Avoid doing any work on the bh if the + * owner sem. Avoid doing any work on the bh if the * journal has it. */ if (!buffer_jbd(bh)) wait_on_buffer(bh); @@ -351,7 +352,7 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, * that better not have changed */ BUG_ON(buffer_jbd(bh)); clear_buffer_needs_validate(bh); - status = validate(inode->i_sb, bh); + status = validate(sb, bh); if (status) { put_bh(bh); bhs[i] = NULL; @@ -363,9 +364,9 @@ int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, /* Always set the buffer in the cache, even if it was * a forced read, or read-ahead which hasn't yet * completed. 
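Keying ocfs2_read_blocks() by struct ocfs2_caching_info instead of struct inode is what lets the refcount tree added later in this series reuse the same read path. A sketch of a single validated read through the converted API; the validator below is a stand-in for a real checker such as ocfs2_validate_inode_block():

static int example_validate(struct super_block *sb, struct buffer_head *bh)
{
	/* A real validator checks the block signature and metadata ECC. */
	return 0;
}

static int example_read_one(struct inode *inode, u64 blkno,
			    struct buffer_head **bh)
{
	/* INODE_CACHE() resolves the inode's ocfs2_caching_info; any
	 * other cache owner can be passed the same way. */
	return ocfs2_read_block(INODE_CACHE(inode), blkno, bh,
				example_validate);
}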
*/ - ocfs2_set_buffer_uptodate(inode, bh); + ocfs2_set_buffer_uptodate(ci, bh); } - mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_unlock(ci); mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", (unsigned long long)block, nr, @@ -399,7 +400,7 @@ static void ocfs2_check_super_or_backup(struct super_block *sb, /* * Write super block and backups doesn't need to collaborate with journal, - * so we don't need to lock ip_io_mutex and inode doesn't need to bea passed + * so we don't need to lock ip_io_mutex and ci doesn't need to be passed * into this function. */ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h index c75d682..b97bcc6 100644 --- a/fs/ocfs2/buffer_head_io.h +++ b/fs/ocfs2/buffer_head_io.h @@ -33,7 +33,7 @@ void ocfs2_end_buffer_io_sync(struct buffer_head *bh, int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh, - struct inode *inode); + struct ocfs2_caching_info *ci); int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, unsigned int nr, struct buffer_head *bhs[]); @@ -44,7 +44,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, * be set even for a READAHEAD call, as it marks the buffer for later * validation. */ -int ocfs2_read_blocks(struct inode *inode, u64 block, int nr, +int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, struct buffer_head *bh)); @@ -55,7 +55,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb, #define OCFS2_BH_IGNORE_CACHE 1 #define OCFS2_BH_READAHEAD 8 -static inline int ocfs2_read_block(struct inode *inode, u64 off, +static inline int ocfs2_read_block(struct ocfs2_caching_info *ci, u64 off, struct buffer_head **bh, int (*validate)(struct super_block *sb, struct buffer_head *bh)) @@ -68,7 +68,7 @@ static inline int ocfs2_read_block(struct inode *inode, u64 off, goto bail; } - status = ocfs2_read_blocks(inode, off, 1, bh, 0, validate); + status = ocfs2_read_blocks(ci, off, 1, bh, 0, validate); bail: return status; diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c index 96df541..1cd2934 100644 --- a/fs/ocfs2/cluster/masklog.c +++ b/fs/ocfs2/cluster/masklog.c @@ -111,6 +111,7 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = { define_mask(EXPORT), define_mask(XATTR), define_mask(QUOTA), + define_mask(REFCOUNT), define_mask(ERROR), define_mask(NOTICE), define_mask(KTHREAD), diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index 696c32e..9b4d117 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -113,6 +113,7 @@ #define ML_EXPORT 0x0000000010000000ULL /* ocfs2 export operations */ #define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ #define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ +#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ /* bits that are infrequently given and frequently matched in the high word */ #define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ #define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */ diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index f842487..cfb2be7 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -163,7 +163,7 @@ static void nst_seq_stop(struct seq_file *seq, void *v) { } -static struct seq_operations nst_seq_ops = { +static const struct seq_operations nst_seq_ops =
{ .start = nst_seq_start, .next = nst_seq_next, .stop = nst_seq_stop, @@ -344,7 +344,7 @@ static void sc_seq_stop(struct seq_file *seq, void *v) { } -static struct seq_operations sc_seq_ops = { +static const struct seq_operations sc_seq_ops = { .start = sc_seq_start, .next = sc_seq_next, .stop = sc_seq_stop, diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index b358f3b..28c3ec2 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -176,7 +176,7 @@ static int ocfs2_dx_dir_link_trailer(struct inode *dir, handle_t *handle, struct ocfs2_dx_root_block *dx_root; struct ocfs2_dir_block_trailer *trailer; - ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh, + ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -564,7 +564,8 @@ static int ocfs2_read_dir_block_direct(struct inode *dir, u64 phys, int ret; struct buffer_head *tmp = *bh; - ret = ocfs2_read_block(dir, phys, &tmp, ocfs2_validate_dir_block); + ret = ocfs2_read_block(INODE_CACHE(dir), phys, &tmp, + ocfs2_validate_dir_block); if (ret) { mlog_errno(ret); goto out; @@ -622,7 +623,8 @@ static int ocfs2_read_dx_root(struct inode *dir, struct ocfs2_dinode *di, u64 blkno = le64_to_cpu(di->i_dx_root); struct buffer_head *tmp = *dx_root_bh; - ret = ocfs2_read_block(dir, blkno, &tmp, ocfs2_validate_dx_root); + ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp, + ocfs2_validate_dx_root); /* If ocfs2_read_block() got us a new bh, pass it up. */ if (!ret && !*dx_root_bh) @@ -662,7 +664,8 @@ static int ocfs2_read_dx_leaf(struct inode *dir, u64 blkno, int ret; struct buffer_head *tmp = *dx_leaf_bh; - ret = ocfs2_read_block(dir, blkno, &tmp, ocfs2_validate_dx_leaf); + ret = ocfs2_read_block(INODE_CACHE(dir), blkno, &tmp, + ocfs2_validate_dx_leaf); /* If ocfs2_read_block() got us a new bh, pass it up. */ if (!ret && !*dx_leaf_bh) @@ -680,7 +683,7 @@ static int ocfs2_read_dx_leaves(struct inode *dir, u64 start, int num, { int ret; - ret = ocfs2_read_blocks(dir, start, num, dx_leaf_bhs, 0, + ret = ocfs2_read_blocks(INODE_CACHE(dir), start, num, dx_leaf_bhs, 0, ocfs2_validate_dx_leaf); if (ret) mlog_errno(ret); @@ -802,7 +805,8 @@ static int ocfs2_dx_dir_lookup_rec(struct inode *inode, struct ocfs2_extent_rec *rec = NULL; if (el->l_tree_depth) { - ret = ocfs2_find_leaf(inode, el, major_hash, &eb_bh); + ret = ocfs2_find_leaf(INODE_CACHE(inode), el, major_hash, + &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -1133,7 +1137,8 @@ int ocfs2_update_entry(struct inode *dir, handle_t *handle, if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) access = ocfs2_journal_access_di; - ret = access(handle, dir, de_bh, OCFS2_JOURNAL_ACCESS_WRITE); + ret = access(handle, INODE_CACHE(dir), de_bh, + OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; @@ -1176,7 +1181,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir, goto bail; } if (de == de_del) { - status = access(handle, dir, bh, + status = access(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { status = -EIO; @@ -1326,7 +1331,7 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir, * the entry count needs to be updated. Also, we might be * adding to the start of the free list. 
*/ - ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh, + ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1334,7 +1339,7 @@ static int ocfs2_delete_entry_dx(handle_t *handle, struct inode *dir, } if (!ocfs2_dx_root_inline(dx_root)) { - ret = ocfs2_journal_access_dl(handle, dir, + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), lookup->dl_dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { @@ -1493,7 +1498,7 @@ static int __ocfs2_dx_dir_leaf_insert(struct inode *dir, handle_t *handle, int ret; struct ocfs2_dx_leaf *dx_leaf; - ret = ocfs2_journal_access_dl(handle, dir, dx_leaf_bh, + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1523,7 +1528,7 @@ static int ocfs2_dx_dir_insert(struct inode *dir, handle_t *handle, struct ocfs2_dx_root_block *dx_root; struct buffer_head *dx_root_bh = lookup->dl_dx_root_bh; - ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh, + ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1645,11 +1650,13 @@ int __ocfs2_add_entry(handle_t *handle, */ if (ocfs2_free_list_at_root(lookup)) { bh = lookup->dl_dx_root_bh; - retval = ocfs2_journal_access_dr(handle, dir, bh, + retval = ocfs2_journal_access_dr(handle, + INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } else { bh = lookup->dl_prev_leaf_bh; - retval = ocfs2_journal_access_db(handle, dir, bh, + retval = ocfs2_journal_access_db(handle, + INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_WRITE); } if (retval) { @@ -1700,11 +1707,13 @@ int __ocfs2_add_entry(handle_t *handle, } if (insert_bh == parent_fe_bh) - status = ocfs2_journal_access_di(handle, dir, + status = ocfs2_journal_access_di(handle, + INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); else { - status = ocfs2_journal_access_db(handle, dir, + status = ocfs2_journal_access_db(handle, + INODE_CACHE(dir), insert_bh, OCFS2_JOURNAL_ACCESS_WRITE); @@ -2280,7 +2289,7 @@ static int ocfs2_fill_new_dir_id(struct ocfs2_super *osb, struct ocfs2_inline_data *data = &di->id2.i_data; unsigned int size = le16_to_cpu(data->id_count); - ret = ocfs2_journal_access_di(handle, inode, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -2332,9 +2341,9 @@ static int ocfs2_fill_new_dir_el(struct ocfs2_super *osb, goto bail; } - ocfs2_set_new_buffer_uptodate(inode, new_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); - status = ocfs2_journal_access_db(handle, inode, new_bh, + status = ocfs2_journal_access_db(handle, INODE_CACHE(inode), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -2418,9 +2427,9 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, ret = -EIO; goto out; } - ocfs2_set_new_buffer_uptodate(dir, dx_root_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dx_root_bh); - ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh, + ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); @@ -2454,7 +2463,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb, if (ret) mlog_errno(ret); - ret = ocfs2_journal_access_di(handle, dir, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); @@ -2495,9 +2504,9 @@ static int 
ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb, } dx_leaves[i] = bh; - ocfs2_set_new_buffer_uptodate(dir, bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), bh); - ret = ocfs2_journal_access_dl(handle, dir, bh, + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret < 0) { mlog_errno(ret); @@ -2582,7 +2591,6 @@ static int ocfs2_dx_dir_new_cluster(struct inode *dir, { int ret; u64 phys_blkno; - struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); ret = __ocfs2_dx_dir_new_cluster(dir, cpos, handle, data_ac, dx_leaves, num_dx_leaves, &phys_blkno); @@ -2591,7 +2599,7 @@ static int ocfs2_dx_dir_new_cluster(struct inode *dir, goto out; } - ret = ocfs2_insert_extent(osb, handle, dir, et, cpos, phys_blkno, 1, 0, + ret = ocfs2_insert_extent(handle, et, cpos, phys_blkno, 1, 0, meta_ac); if (ret) mlog_errno(ret); @@ -2895,7 +2903,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, struct ocfs2_extent_tree dx_et; int did_quota = 0, bytes_allocated = 0; - ocfs2_init_dinode_extent_tree(&et, dir, di_bh); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), di_bh); alloc = ocfs2_clusters_for_bytes(sb, bytes); dx_alloc = 0; @@ -3005,9 +3013,9 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, goto out_commit; } - ocfs2_set_new_buffer_uptodate(dir, dirdata_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), dirdata_bh); - ret = ocfs2_journal_access_db(handle, dir, dirdata_bh, + ret = ocfs2_journal_access_db(handle, INODE_CACHE(dir), dirdata_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); @@ -3060,7 +3068,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, * We let the later dirent insert modify c/mtime - to the user * the data hasn't changed. */ - ret = ocfs2_journal_access_di(handle, dir, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (ret) { mlog_errno(ret); @@ -3085,7 +3093,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, * This should never fail as our extent list is empty and all * related blocks have been journaled already. 
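Every conversion in this file preserves the same journaling discipline: a buffer is declared to the journal with the matching ocfs2_journal_access_*() wrapper before it is modified, and dirtied afterwards. An illustrative caller of the reworked API (the function and its modification step are placeholders, not from the patch):

static int example_touch_dinode(handle_t *handle, struct inode *inode,
				struct buffer_head *di_bh)
{
	int ret;

	/* The wrapper is now keyed by the metadata cache, not the inode. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* ... modify the dinode in di_bh->b_data here ... */

	return ocfs2_journal_dirty(handle, di_bh);
}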
*/ - ret = ocfs2_insert_extent(osb, handle, dir, &et, 0, blkno, len, + ret = ocfs2_insert_extent(handle, &et, 0, blkno, len, 0, NULL); if (ret) { mlog_errno(ret); @@ -3117,8 +3125,10 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, ocfs2_dx_dir_index_root_block(dir, dx_root_bh, dirdata_bh); } else { - ocfs2_init_dx_root_extent_tree(&dx_et, dir, dx_root_bh); - ret = ocfs2_insert_extent(osb, handle, dir, &dx_et, 0, + ocfs2_init_dx_root_extent_tree(&dx_et, + INODE_CACHE(dir), + dx_root_bh); + ret = ocfs2_insert_extent(handle, &dx_et, 0, dx_insert_blkno, 1, 0, NULL); if (ret) mlog_errno(ret); @@ -3138,7 +3148,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh, } blkno = ocfs2_clusters_to_blocks(dir->i_sb, bit_off); - ret = ocfs2_insert_extent(osb, handle, dir, &et, 1, + ret = ocfs2_insert_extent(handle, &et, 1, blkno, len, 0, NULL); if (ret) { mlog_errno(ret); @@ -3337,8 +3347,9 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb, spin_lock(&OCFS2_I(dir)->ip_lock); if (dir_i_size == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)) { spin_unlock(&OCFS2_I(dir)->ip_lock); - ocfs2_init_dinode_extent_tree(&et, dir, parent_fe_bh); - num_free_extents = ocfs2_num_free_extents(osb, dir, &et); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(dir), + parent_fe_bh); + num_free_extents = ocfs2_num_free_extents(osb, &et); if (num_free_extents < 0) { status = num_free_extents; mlog_errno(status); @@ -3387,9 +3398,9 @@ do_extend: goto bail; } - ocfs2_set_new_buffer_uptodate(dir, new_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(dir), new_bh); - status = ocfs2_journal_access_db(handle, dir, new_bh, + status = ocfs2_journal_access_db(handle, INODE_CACHE(dir), new_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -3829,7 +3840,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, (unsigned long long)OCFS2_I(dir)->ip_blkno, (unsigned long long)leaf_blkno, insert_hash); - ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh); + ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data; /* @@ -3885,7 +3896,7 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, } did_quota = 1; - ret = ocfs2_journal_access_dl(handle, dir, dx_leaf_bh, + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -3949,7 +3960,8 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir, } for (i = 0; i < num_dx_leaves; i++) { - ret = ocfs2_journal_access_dl(handle, dir, orig_dx_leaves[i], + ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), + orig_dx_leaves[i], OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4165,7 +4177,7 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir, * failure to add the dx_root_bh to the journal won't result * us losing clusters. */ - ret = ocfs2_journal_access_dr(handle, dir, dx_root_bh, + ret = ocfs2_journal_access_dr(handle, INODE_CACHE(dir), dx_root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4207,9 +4219,8 @@ static int ocfs2_expand_inline_dx_root(struct inode *dir, /* This should never fail considering we start with an empty * dx_root. 
*/ - ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh); - ret = ocfs2_insert_extent(osb, handle, dir, &et, 0, - insert_blkno, 1, 0, NULL); + ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); + ret = ocfs2_insert_extent(handle, &et, 0, insert_blkno, 1, 0, NULL); if (ret) mlog_errno(ret); did_quota = 0; @@ -4469,7 +4480,7 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir, goto out_unlock; } - ret = ocfs2_journal_access_di(handle, dir, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(dir), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4532,7 +4543,7 @@ int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh) if (ocfs2_dx_root_inline(dx_root)) goto remove_index; - ocfs2_init_dx_root_extent_tree(&et, dir, dx_root_bh); + ocfs2_init_dx_root_extent_tree(&et, INODE_CACHE(dir), dx_root_bh); /* XXX: What if dr_clusters is too large? */ while (le32_to_cpu(dx_root->dr_clusters)) { @@ -4565,7 +4576,7 @@ remove_index: goto out; } - ocfs2_remove_from_cache(dir, dx_root_bh); + ocfs2_remove_from_cache(INODE_CACHE(dir), dx_root_bh); out: ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &dealloc); diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index 81eff8e..01cf8cc 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c index 75997b4..ca96bce 100644 --- a/fs/ocfs2/dlm/dlmconvert.c +++ b/fs/ocfs2/dlm/dlmconvert.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index df52f70..ca46002 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -27,7 +27,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/sysctl.h> #include <linux/spinlock.h> #include <linux/debugfs.h> @@ -683,7 +682,7 @@ static int lockres_seq_show(struct seq_file *s, void *v) return 0; } -static struct seq_operations debug_lockres_ops = { +static const struct seq_operations debug_lockres_ops = { .start = lockres_seq_start, .stop = lockres_seq_stop, .next = lockres_seq_next, diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 4d9e6b2..0334000 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -28,7 +28,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/delay.h> diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 83a9f29..437698e 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index f8b653f..83bcaf2 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include 
<linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 43e6e32..d9fa3d2 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index d490b66..52ec020 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> @@ -212,14 +211,18 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm, spin_lock(&dlm->spinlock); } + spin_lock(&res->spinlock); if (!list_empty(&res->purge)) { mlog(0, "removing lockres %.*s:%p from purgelist, " "master = %d\n", res->lockname.len, res->lockname.name, res, master); list_del_init(&res->purge); + spin_unlock(&res->spinlock); dlm_lockres_put(res); dlm->purge_count--; - } + } else + spin_unlock(&res->spinlock); + __dlm_unhash_lockres(res); /* lockres is not in the hash now. drop the flag and wake up diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c index 756f5b0..00f53b2 100644 --- a/fs/ocfs2/dlm/dlmunlock.c +++ b/fs/ocfs2/dlm/dlmunlock.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 110bb57..0d38d67 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -53,6 +53,7 @@ #include "super.h" #include "uptodate.h" #include "quota.h" +#include "refcounttree.h" #include "buffer_head_io.h" @@ -110,6 +111,11 @@ static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb, static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres); +static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres, + int new_level); +static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres, + int blocking); + #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres) /* This aids in debugging situations where a bad LVB might be involved. 
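The dlmthread.c hunk above closes a race: whether the lockres sits on the purge list can change between the list_empty() test and the list_del_init() unless both run under res->spinlock. Reduced to a generic helper, the pattern looks like this (an illustrative sketch, not kernel code):

#include <linux/list.h>
#include <linux/spinlock.h>

static bool example_remove_if_queued(spinlock_t *lock,
				     struct list_head *entry)
{
	bool removed = false;

	/* The membership test and the unlink must share one critical
	 * section, or another thread can requeue or unlink in between. */
	spin_lock(lock);
	if (!list_empty(entry)) {
		list_del_init(entry);
		removed = true;
	}
	spin_unlock(lock);

	/* Any reference tied to list membership (dlm_lockres_put() in
	 * the hunk above) is dropped only after the unlink. */
	return removed;
}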
*/ @@ -278,6 +284,12 @@ static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = { .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB, }; +static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = { + .check_downconvert = ocfs2_check_refcount_downconvert, + .downconvert_worker = ocfs2_refcount_convert_worker, + .flags = 0, +}; + static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) { return lockres->l_type == OCFS2_LOCK_TYPE_META || @@ -306,6 +318,12 @@ static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_re return (struct ocfs2_mem_dqinfo *)lockres->l_priv; } +static inline struct ocfs2_refcount_tree * +ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res) +{ + return container_of(res, struct ocfs2_refcount_tree, rf_lockres); +} + static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres) { if (lockres->l_ops->get_osb) @@ -693,6 +711,17 @@ void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres, info); } +void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_super *osb, u64 ref_blkno, + unsigned int generation) +{ + ocfs2_lock_res_init_once(lockres); + ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno, + generation, lockres->l_name); + ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT, + &ocfs2_refcount_block_lops, osb); +} + void ocfs2_lock_res_free(struct ocfs2_lock_res *res) { mlog_entry_void(); @@ -1548,8 +1577,10 @@ int ocfs2_rw_lock(struct inode *inode, int write) (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? "EXMODE" : "PRMODE"); - if (ocfs2_mount_local(osb)) + if (ocfs2_mount_local(osb)) { + mlog_exit(0); return 0; + } lockres = &OCFS2_I(inode)->ip_rw_lockres; @@ -2127,7 +2158,7 @@ static int ocfs2_inode_lock_update(struct inode *inode, /* This will discard any caching information we might have had * for the inode metadata. 
*/ - ocfs2_metadata_cache_purge(inode); + ocfs2_metadata_cache_purge(INODE_CACHE(inode)); ocfs2_extent_map_trunc(inode, 0); @@ -3009,6 +3040,7 @@ static void ocfs2_unlock_ast(void *opaque, int error) "unlock_action %d\n", error, lockres->l_name, lockres->l_unlock_action); spin_unlock_irqrestore(&lockres->l_lock, flags); + mlog_exit_void(); return; } @@ -3495,11 +3527,11 @@ out: return UNBLOCK_CONTINUE; } -static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, - int new_level) +static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci, + struct ocfs2_lock_res *lockres, + int new_level) { - struct inode *inode = ocfs2_lock_res_inode(lockres); - int checkpointed = ocfs2_inode_fully_checkpointed(inode); + int checkpointed = ocfs2_ci_fully_checkpointed(ci); BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR); BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed); @@ -3507,10 +3539,18 @@ static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, if (checkpointed) return 1; - ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb)); + ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci))); return 0; } +static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, + int new_level) +{ + struct inode *inode = ocfs2_lock_res_inode(lockres); + + return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level); +} + static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres) { struct inode *inode = ocfs2_lock_res_inode(lockres); @@ -3640,6 +3680,26 @@ static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres, return UNBLOCK_CONTINUE_POST; } +static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres, + int new_level) +{ + struct ocfs2_refcount_tree *tree = + ocfs2_lock_res_refcount_tree(lockres); + + return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level); +} + +static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres, + int blocking) +{ + struct ocfs2_refcount_tree *tree = + ocfs2_lock_res_refcount_tree(lockres); + + ocfs2_metadata_cache_purge(&tree->rf_ci); + + return UNBLOCK_CONTINUE; +} + static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) { struct ocfs2_qinfo_lvb *lvb; @@ -3752,6 +3812,37 @@ bail: return status; } +int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex) +{ + int status; + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres; + struct ocfs2_super *osb = lockres->l_priv; + + + if (ocfs2_is_hard_readonly(osb)) + return -EROFS; + + if (ocfs2_mount_local(osb)) + return 0; + + status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); + if (status < 0) + mlog_errno(status); + + return status; +} + +void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex) +{ + int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; + struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres; + struct ocfs2_super *osb = lockres->l_priv; + + if (!ocfs2_mount_local(osb)) + ocfs2_cluster_unlock(osb, lockres, level); +} + /* * This is the filesystem locking protocol. It provides the lock handling * hooks for the underlying DLM. It has a maximum version number. 
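ocfs2_refcount_lock()/ocfs2_refcount_unlock() give a refcount tree the same cluster-lock shape an inode's metadata lock has, including the checkpoint-before-downconvert rule enforced by ocfs2_check_refcount_downconvert(). A sketch of the expected usage; the function is illustrative and its update step a placeholder:

static int example_update_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	int ret;

	/* EX mode: before this node downconverts, the hooks above force
	 * tree->rf_ci to be fully checkpointed and purge its cache. */
	ret = ocfs2_refcount_lock(tree, 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	/* ... journal and modify refcount blocks through &tree->rf_ci ... */

	ocfs2_refcount_unlock(tree, 1);
	return 0;
}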
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 7553836..d1ce48e 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -101,6 +101,9 @@ void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_mem_dqinfo; void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres, struct ocfs2_mem_dqinfo *info); +void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres, + struct ocfs2_super *osb, u64 ref_blkno, + unsigned int generation); void ocfs2_lock_res_free(struct ocfs2_lock_res *res); int ocfs2_create_new_inode_locks(struct inode *inode); int ocfs2_drop_inode_locks(struct inode *inode); @@ -148,6 +151,9 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock); void ocfs2_file_unlock(struct file *file); int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex); void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex); +struct ocfs2_refcount_tree; +int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex); +void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex); void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres); diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index f2bb1a0..843db64 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -293,7 +293,7 @@ static int ocfs2_last_eb_is_empty(struct inode *inode, struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; - ret = ocfs2_read_extent_block(inode, last_eb_blk, &eb_bh); + ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -353,11 +353,11 @@ static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el, * eb_bh is NULL. Otherwise, eb_bh should point to the extent block * containing el. */ -static int ocfs2_figure_hole_clusters(struct inode *inode, - struct ocfs2_extent_list *el, - struct buffer_head *eb_bh, - u32 v_cluster, - u32 *num_clusters) +int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci, + struct ocfs2_extent_list *el, + struct buffer_head *eb_bh, + u32 v_cluster, + u32 *num_clusters) { int ret, i; struct buffer_head *next_eb_bh = NULL; @@ -375,7 +375,7 @@ static int ocfs2_figure_hole_clusters(struct inode *inode, if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) goto no_more_extents; - ret = ocfs2_read_extent_block(inode, + ret = ocfs2_read_extent_block(ci, le64_to_cpu(eb->h_next_leaf_blk), &next_eb_bh); if (ret) { @@ -428,7 +428,8 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, tree_height = le16_to_cpu(el->l_tree_depth); if (tree_height > 0) { - ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); + ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster, + &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -455,7 +456,8 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, * field. 
*/ if (hole_len) { - ret = ocfs2_figure_hole_clusters(inode, el, eb_bh, + ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode), + el, eb_bh, v_cluster, &len); if (ret) { mlog_errno(ret); @@ -539,7 +541,8 @@ static void ocfs2_relative_extent_offsets(struct super_block *sb, int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, - struct ocfs2_extent_list *el) + struct ocfs2_extent_list *el, + unsigned int *extent_flags) { int ret = 0, i; struct buffer_head *eb_bh = NULL; @@ -548,7 +551,8 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 coff; if (el->l_tree_depth) { - ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh); + ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster, + &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -590,6 +594,9 @@ int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, *p_cluster = *p_cluster + coff; if (num_clusters) *num_clusters = ocfs2_rec_clusters(el, rec) - coff; + + if (extent_flags) + *extent_flags = rec->e_flags; } out: if (eb_bh) @@ -862,8 +869,8 @@ int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, BUG_ON(bhs[done + i]->b_blocknr != (p_block + i)); } - rc = ocfs2_read_blocks(inode, p_block, count, bhs + done, - flags, validate); + rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count, + bhs + done, flags, validate); if (rc) { mlog_errno(rc); break; diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index b7dd973..e79d41c 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h @@ -55,12 +55,18 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, - struct ocfs2_extent_list *el); + struct ocfs2_extent_list *el, + unsigned int *extent_flags); int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, struct buffer_head *bh)); +int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci, + struct ocfs2_extent_list *el, + struct buffer_head *eb_bh, + u32 v_cluster, + u32 *num_clusters); static inline int ocfs2_read_virt_block(struct inode *inode, u64 v_block, struct buffer_head **bh, int (*validate)(struct super_block *sb, diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 221c5e9..89fc8ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -59,6 +59,7 @@ #include "xattr.h" #include "acl.h" #include "quota.h" +#include "refcounttree.h" #include "buffer_head_io.h" @@ -259,7 +260,7 @@ int ocfs2_update_inode_atime(struct inode *inode, goto out; } - ret = ocfs2_journal_access_di(handle, inode, bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -334,6 +335,39 @@ out: return ret; } +static int ocfs2_cow_file_pos(struct inode *inode, + struct buffer_head *fe_bh, + u64 offset) +{ + int status; + u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + + /* + * If the new offset is aligned to the range of the cluster, there is + * no space for ocfs2_zero_range_for_truncate to fill, so no need to + * CoW either. 
+ */ + if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0) + return 0; + + status = ocfs2_get_clusters(inode, cpos, &phys, + &num_clusters, &ext_flags); + if (status) { + mlog_errno(status); + goto out; + } + + if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) + goto out; + + return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); + +out: + return status; +} + static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh, @@ -346,6 +380,17 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, mlog_entry_void(); + /* + * We need to CoW the cluster that contains the offset if it is reflinked + * since we will call ocfs2_zero_range_for_truncate later which will + * write "0" from offset to the end of the cluster. + */ + status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size); + if (status) { + mlog_errno(status); + return status; + } + /* TODO: This needs to actually orphan the inode in this * transaction. */ @@ -356,7 +401,7 @@ goto out; } - status = ocfs2_journal_access_di(handle, inode, fe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -486,6 +531,8 @@ bail_unlock_sem: up_write(&OCFS2_I(inode)->ip_alloc_sem); bail: + if (!status && OCFS2_I(inode)->ip_clusters == 0) + status = ocfs2_try_remove_refcount_tree(inode, di_bh); mlog_exit(status); return status; @@ -515,11 +562,10 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb, int ret; struct ocfs2_extent_tree et; - ocfs2_init_dinode_extent_tree(&et, inode, fe_bh); - ret = ocfs2_add_clusters_in_btree(osb, inode, logical_offset, - clusters_to_add, mark_unwritten, - &et, handle, - data_ac, meta_ac, reason_ret); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh); + ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset, + clusters_to_add, mark_unwritten, + data_ac, meta_ac, reason_ret); return ret; } @@ -564,7 +610,7 @@ restart_all: (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(fe->i_clusters), clusters_to_add); - ocfs2_init_dinode_extent_tree(&et, inode, bh); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, &data_ac, &meta_ac); if (status) { @@ -593,7 +639,7 @@ restarted_transaction: /* reserve a write to the file entry early on - that way if we * run out of credits in the allocation path, we can still * update i_size.
*/ - status = ocfs2_journal_access_di(handle, inode, bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1131,7 +1177,7 @@ static int __ocfs2_write_remove_suid(struct inode *inode, goto out; } - ret = ocfs2_journal_access_di(handle, inode, bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -1395,7 +1441,7 @@ static int ocfs2_remove_inode_range(struct inode *inode, struct address_space *mapping = inode->i_mapping; struct ocfs2_extent_tree et; - ocfs2_init_dinode_extent_tree(&et, inode, di_bh); + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ocfs2_init_dealloc_ctxt(&dealloc); if (byte_len == 0) @@ -1657,6 +1703,70 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, OCFS2_IOC_RESVSP64, &sr, change_size); } +int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos, + size_t count) +{ + int ret = 0; + unsigned int extent_flags; + u32 cpos, clusters, extent_len, phys_cpos; + struct super_block *sb = inode->i_sb; + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) || + !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) + return 0; + + cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; + clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; + + while (clusters) { + ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len, + &extent_flags); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) { + ret = 1; + break; + } + + if (extent_len > clusters) + extent_len = clusters; + + clusters -= extent_len; + cpos += extent_len; + } +out: + return ret; +} + +static int ocfs2_prepare_inode_for_refcount(struct inode *inode, + loff_t pos, size_t count, + int *meta_level) +{ + int ret; + struct buffer_head *di_bh = NULL; + u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; + u32 clusters = + ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos; + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret) { + mlog_errno(ret); + goto out; + } + + *meta_level = 1; + + ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); + if (ret) + mlog_errno(ret); +out: + brelse(di_bh); + return ret; +} + static int ocfs2_prepare_inode_for_write(struct dentry *dentry, loff_t *ppos, size_t count, @@ -1713,6 +1823,22 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, end = saved_pos + count; + ret = ocfs2_check_range_for_refcount(inode, saved_pos, count); + if (ret == 1) { + ocfs2_inode_unlock(inode, meta_level); + meta_level = -1; + + ret = ocfs2_prepare_inode_for_refcount(inode, + saved_pos, + count, + &meta_level); + } + + if (ret < 0) { + mlog_errno(ret); + goto out_unlock; + } + /* * Skip the O_DIRECT checks if we don't need * them. 
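Together, ocfs2_check_range_for_refcount() and ocfs2_refcount_cow() implement the rule the write paths now follow: any byte range backed by a refcounted (shared) extent must be copied-on-write before it may be dirtied. Condensed into one illustrative helper (the real callers are ocfs2_write_begin_nolock() and ocfs2_prepare_inode_for_write() in the hunks above):

static int example_cow_before_write(struct inode *inode,
				    struct buffer_head *di_bh,
				    loff_t pos, size_t count)
{
	int ret;
	u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 clusters =
		ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

	ret = ocfs2_check_range_for_refcount(inode, pos, count);
	if (ret <= 0)
		return ret;	/* 0: nothing shared; < 0: error */

	/* UINT_MAX puts no upper bound on how far the CoW may extend. */
	return ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
}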
@@ -1759,7 +1885,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry, *ppos = saved_pos; out_unlock: - ocfs2_inode_unlock(inode, meta_level); + if (meta_level >= 0) + ocfs2_inode_unlock(inode, meta_level); out: return ret; diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 172f9fb..d66cf4f 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -69,4 +69,6 @@ int ocfs2_update_inode_atime(struct inode *inode, int ocfs2_change_file_space(struct file *file, unsigned int cmd, struct ocfs2_space_resv *sr); +int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos, + size_t count); #endif /* OCFS2_FILE_H */ diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 4dc8890..0297fb8 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -53,6 +53,7 @@ #include "sysfile.h" #include "uptodate.h" #include "xattr.h" +#include "refcounttree.h" #include "buffer_head_io.h" @@ -562,7 +563,8 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, goto out; } - status = ocfs2_journal_access_di(handle, inode, fe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), + fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -646,7 +648,7 @@ static int ocfs2_remove_inode(struct inode *inode, } /* set the inodes dtime */ - status = ocfs2_journal_access_di(handle, inode, di_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -662,7 +664,7 @@ static int ocfs2_remove_inode(struct inode *inode, goto bail_commit; } - ocfs2_remove_from_cache(inode, di_bh); + ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh); vfs_dq_free_inode(inode); status = ocfs2_free_dinode(handle, inode_alloc_inode, @@ -781,6 +783,12 @@ static int ocfs2_wipe_inode(struct inode *inode, goto bail_unlock_dir; } + status = ocfs2_remove_refcount_tree(inode, di_bh); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_dir; + } + status = ocfs2_remove_inode(inode, di_bh, orphan_dir_inode, orphan_dir_bh); if (status < 0) @@ -1112,13 +1120,14 @@ void ocfs2_clear_inode(struct inode *inode) ocfs2_lock_res_free(&oi->ip_inode_lockres); ocfs2_lock_res_free(&oi->ip_open_lockres); - ocfs2_metadata_cache_purge(inode); + ocfs2_metadata_cache_exit(INODE_CACHE(inode)); - mlog_bug_on_msg(oi->ip_metadata_cache.ci_num_cached, + mlog_bug_on_msg(INODE_CACHE(inode)->ci_num_cached, "Clear inode of %llu, inode has %u cache items\n", - (unsigned long long)oi->ip_blkno, oi->ip_metadata_cache.ci_num_cached); + (unsigned long long)oi->ip_blkno, + INODE_CACHE(inode)->ci_num_cached); - mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE), + mlog_bug_on_msg(!(INODE_CACHE(inode)->ci_flags & OCFS2_CACHE_FL_INLINE), "Clear inode of %llu, inode has a bad flag\n", (unsigned long long)oi->ip_blkno); @@ -1145,9 +1154,7 @@ void ocfs2_clear_inode(struct inode *inode) (unsigned long long)oi->ip_blkno, oi->ip_open_count); /* Clear all other flags. 
*/ - oi->ip_flags = OCFS2_INODE_CACHE_INLINE; - oi->ip_created_trans = 0; - oi->ip_last_trans = 0; + oi->ip_flags = 0; oi->ip_dir_start_lookup = 0; oi->ip_blkno = 0ULL; @@ -1239,7 +1246,7 @@ int ocfs2_mark_inode_dirty(handle_t *handle, mlog_entry("(inode %llu)\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); - status = ocfs2_journal_access_di(handle, inode, bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1380,8 +1387,8 @@ int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_blocks(inode, OCFS2_I(inode)->ip_blkno, 1, &tmp, - flags, ocfs2_validate_inode_block); + rc = ocfs2_read_blocks(INODE_CACHE(inode), OCFS2_I(inode)->ip_blkno, + 1, &tmp, flags, ocfs2_validate_inode_block); /* If ocfs2_read_blocks() got us a new bh, pass it up. */ if (!rc && !*bh) @@ -1394,3 +1401,56 @@ int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh) { return ocfs2_read_inode_block_full(inode, bh, 0); } + + +static u64 ocfs2_inode_cache_owner(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + return oi->ip_blkno; +} + +static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + return oi->vfs_inode.i_sb; +} + +static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + spin_lock(&oi->ip_lock); +} + +static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + spin_unlock(&oi->ip_lock); +} + +static void ocfs2_inode_cache_io_lock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + mutex_lock(&oi->ip_io_mutex); +} + +static void ocfs2_inode_cache_io_unlock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_inode_info *oi = cache_info_to_inode(ci); + + mutex_unlock(&oi->ip_io_mutex); +} + +const struct ocfs2_caching_operations ocfs2_inode_caching_ops = { + .co_owner = ocfs2_inode_cache_owner, + .co_get_super = ocfs2_inode_cache_get_super, + .co_cache_lock = ocfs2_inode_cache_lock, + .co_cache_unlock = ocfs2_inode_cache_unlock, + .co_io_lock = ocfs2_inode_cache_io_lock, + .co_io_unlock = ocfs2_inode_cache_io_unlock, +}; + diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index ea71525..ba4fe07 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -60,12 +60,6 @@ struct ocfs2_inode_info u32 ip_dir_start_lookup; - /* next two are protected by trans_inc_lock */ - /* which transaction were we created on? Zero if none. */ - unsigned long ip_created_trans; - /* last transaction we were a part of. */ - unsigned long ip_last_trans; - struct ocfs2_caching_info ip_metadata_cache; struct ocfs2_extent_map ip_extent_map; @@ -106,8 +100,6 @@ struct ocfs2_inode_info #define OCFS2_INODE_MAYBE_ORPHANED 0x00000020 /* Does someone have the file open O_DIRECT */ #define OCFS2_INODE_OPEN_DIRECT 0x00000040 -/* Indicates that the metadata cache should be used as an array. 
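ocfs2_inode_caching_ops above is the first implementation of the caching vtable; a second owner only has to embed a struct ocfs2_caching_info and supply the same six callbacks around its own locks, which is exactly what the refcount tree does later in this series. A hypothetical owner, with invented names, might look like:

struct example_owner {
	u64 eo_blkno;
	struct super_block *eo_sb;
	spinlock_t eo_lock;
	struct mutex eo_io_mutex;
	struct ocfs2_caching_info eo_caching_info;
};

static inline struct example_owner *EO(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct example_owner, eo_caching_info);
}

static u64 example_co_owner(struct ocfs2_caching_info *ci)
{
	return EO(ci)->eo_blkno;
}

static struct super_block *example_co_get_super(struct ocfs2_caching_info *ci)
{
	return EO(ci)->eo_sb;
}

static void example_co_cache_lock(struct ocfs2_caching_info *ci)
{
	spin_lock(&EO(ci)->eo_lock);
}

static void example_co_cache_unlock(struct ocfs2_caching_info *ci)
{
	spin_unlock(&EO(ci)->eo_lock);
}

static void example_co_io_lock(struct ocfs2_caching_info *ci)
{
	mutex_lock(&EO(ci)->eo_io_mutex);
}

static void example_co_io_unlock(struct ocfs2_caching_info *ci)
{
	mutex_unlock(&EO(ci)->eo_io_mutex);
}

static const struct ocfs2_caching_operations example_caching_ops = {
	.co_owner	= example_co_owner,
	.co_get_super	= example_co_get_super,
	.co_cache_lock	= example_co_cache_lock,
	.co_cache_unlock = example_co_cache_unlock,
	.co_io_lock	= example_co_io_lock,
	.co_io_unlock	= example_co_io_unlock,
};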
*/ -#define OCFS2_INODE_CACHE_INLINE 0x00000080 static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) { @@ -120,6 +112,12 @@ static inline struct ocfs2_inode_info *OCFS2_I(struct inode *inode) extern struct kmem_cache *ocfs2_inode_cache; extern const struct address_space_operations ocfs2_aops; +extern const struct ocfs2_caching_operations ocfs2_inode_caching_ops; + +static inline struct ocfs2_caching_info *INODE_CACHE(struct inode *inode) +{ + return &OCFS2_I(inode)->ip_metadata_cache; +} void ocfs2_clear_inode(struct inode *inode); void ocfs2_delete_inode(struct inode *inode); @@ -172,4 +170,10 @@ int ocfs2_read_inode_block(struct inode *inode, struct buffer_head **bh); /* The same, but can be passed OCFS2_BH_* flags */ int ocfs2_read_inode_block_full(struct inode *inode, struct buffer_head **bh, int flags); + +static inline struct ocfs2_inode_info *cache_info_to_inode(struct ocfs2_caching_info *ci) +{ + return container_of(ci, struct ocfs2_inode_info, ip_metadata_cache); +} + #endif /* OCFS2_INODE_H */ diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 467b413..31fbb06 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -21,6 +21,7 @@ #include "ocfs2_fs.h" #include "ioctl.h" #include "resize.h" +#include "refcounttree.h" #include <linux/ext2_fs.h> @@ -115,6 +116,9 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int status; struct ocfs2_space_resv sr; struct ocfs2_new_group_input input; + struct reflink_arguments args; + const char *old_path, *new_path; + bool preserve; switch (cmd) { case OCFS2_IOC_GETFLAGS: @@ -160,6 +164,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EFAULT; return ocfs2_group_add(inode, &input); + case OCFS2_IOC_REFLINK: + if (copy_from_user(&args, (struct reflink_arguments *)arg, + sizeof(args))) + return -EFAULT; + old_path = (const char *)(unsigned long)args.old_path; + new_path = (const char *)(unsigned long)args.new_path; + preserve = (args.preserve != 0); + + return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve); default: return -ENOTTY; } @@ -182,6 +195,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) case OCFS2_IOC_GROUP_EXTEND: case OCFS2_IOC_GROUP_ADD: case OCFS2_IOC_GROUP_ADD64: + case OCFS2_IOC_REFLINK: break; default: return -ENOIOCTLCMD; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index c48b93a..54c16b6 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -48,6 +48,7 @@ #include "slot_map.h" #include "super.h" #include "sysfile.h" +#include "uptodate.h" #include "quota.h" #include "buffer_head_io.h" @@ -554,6 +555,14 @@ static struct ocfs2_triggers eb_triggers = { .ot_offset = offsetof(struct ocfs2_extent_block, h_check), }; +static struct ocfs2_triggers rb_triggers = { + .ot_triggers = { + .t_commit = ocfs2_commit_trigger, + .t_abort = ocfs2_abort_trigger, + }, + .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check), +}; + static struct ocfs2_triggers gd_triggers = { .ot_triggers = { .t_commit = ocfs2_commit_trigger, @@ -601,14 +610,16 @@ static struct ocfs2_triggers dl_triggers = { }; static int __ocfs2_journal_access(handle_t *handle, - struct inode *inode, + struct ocfs2_caching_info *ci, struct buffer_head *bh, struct ocfs2_triggers *triggers, int type) { int status; + struct ocfs2_super *osb = + OCFS2_SB(ocfs2_metadata_cache_get_super(ci)); - BUG_ON(!inode); + BUG_ON(!ci || !ci->ci_ops); BUG_ON(!handle); BUG_ON(!bh); @@ -627,15 +638,15 @@ static int __ocfs2_journal_access(handle_t *handle, BUG(); 
} - /* Set the current transaction information on the inode so + /* Set the current transaction information on the ci so * that the locking code knows whether it can drop it's locks - * on this inode or not. We're protected from the commit + * on this ci or not. We're protected from the commit * thread updating the current transaction id until * ocfs2_commit_trans() because ocfs2_start_trans() took * j_trans_barrier for us. */ - ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode); + ocfs2_set_ci_lock_trans(osb->journal, ci); - mutex_lock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_lock(ci); switch (type) { case OCFS2_JOURNAL_ACCESS_CREATE: case OCFS2_JOURNAL_ACCESS_WRITE: @@ -650,9 +661,9 @@ static int __ocfs2_journal_access(handle_t *handle, status = -EINVAL; mlog(ML_ERROR, "Uknown access type!\n"); } - if (!status && ocfs2_meta_ecc(OCFS2_SB(inode->i_sb)) && triggers) + if (!status && ocfs2_meta_ecc(osb) && triggers) jbd2_journal_set_triggers(bh, &triggers->ot_triggers); - mutex_unlock(&OCFS2_I(inode)->ip_io_mutex); + ocfs2_metadata_cache_io_unlock(ci); if (status < 0) mlog(ML_ERROR, "Error %d getting %d access to buffer!\n", @@ -662,66 +673,65 @@ static int __ocfs2_journal_access(handle_t *handle, return status; } -int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, - struct buffer_head *bh, int type) +int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci, + struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &di_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type); } -int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &eb_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type); } -int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &gd_triggers, + return __ocfs2_journal_access(handle, ci, bh, &rb_triggers, type); } -int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &db_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type); } -int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &xb_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type); } -int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &dq_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type); } -int ocfs2_journal_access_dr(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &dr_triggers, - type); + return __ocfs2_journal_access(handle, ci, 
bh, &dq_triggers, type); } -int ocfs2_journal_access_dl(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, &dl_triggers, - type); + return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type); +} + +int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci, + struct buffer_head *bh, int type) +{ + return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type); } -int ocfs2_journal_access(handle_t *handle, struct inode *inode, +int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type) { - return __ocfs2_journal_access(handle, inode, bh, NULL, type); + return __ocfs2_journal_access(handle, ci, bh, NULL, type); } int ocfs2_journal_dirty(handle_t *handle, @@ -898,7 +908,7 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, ocfs2_bump_recovery_generation(fe); ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); - status = ocfs2_write_block(osb, bh, journal->j_inode); + status = ocfs2_write_block(osb, bh, INODE_CACHE(journal->j_inode)); if (status < 0) mlog_errno(status); @@ -1642,7 +1652,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, ocfs2_get_recovery_generation(fe); ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &fe->i_check); - status = ocfs2_write_block(osb, bh, inode); + status = ocfs2_write_block(osb, bh, INODE_CACHE(inode)); if (status < 0) mlog_errno(status); diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 2c3222a..3f74e09 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -90,56 +90,66 @@ static inline unsigned long ocfs2_inc_trans_id(struct ocfs2_journal *j) return old_id; } -static inline void ocfs2_set_inode_lock_trans(struct ocfs2_journal *journal, - struct inode *inode) +static inline void ocfs2_set_ci_lock_trans(struct ocfs2_journal *journal, + struct ocfs2_caching_info *ci) { spin_lock(&trans_inc_lock); - OCFS2_I(inode)->ip_last_trans = journal->j_trans_id; + ci->ci_last_trans = journal->j_trans_id; spin_unlock(&trans_inc_lock); } /* Used to figure out whether it's safe to drop a metadata lock on an - * inode. Returns true if all the inodes changes have been + * cached object. Returns true if all the object's changes have been * checkpointed to disk. You should be holding the spinlock on the * metadata lock while calling this to be sure that nobody can take * the lock and put it on another transaction. */ -static inline int ocfs2_inode_fully_checkpointed(struct inode *inode) +static inline int ocfs2_ci_fully_checkpointed(struct ocfs2_caching_info *ci) { int ret; - struct ocfs2_journal *journal = OCFS2_SB(inode->i_sb)->journal; + struct ocfs2_journal *journal = + OCFS2_SB(ocfs2_metadata_cache_get_super(ci))->journal; spin_lock(&trans_inc_lock); - ret = time_after(journal->j_trans_id, OCFS2_I(inode)->ip_last_trans); + ret = time_after(journal->j_trans_id, ci->ci_last_trans); spin_unlock(&trans_inc_lock); return ret; } -/* convenience function to check if an inode is still new (has never - * hit disk) Will do you a favor and set created_trans = 0 when you've - * been checkpointed. returns '1' if the inode is still new. */ -static inline int ocfs2_inode_is_new(struct inode *inode) +/* convenience function to check if an object backed by struct + * ocfs2_caching_info is still new (has never hit disk) Will do you a + * favor and set created_trans = 0 when you've + * been checkpointed. 
returns '1' if the ci is still new. */ +static inline int ocfs2_ci_is_new(struct ocfs2_caching_info *ci) { int ret; + struct ocfs2_journal *journal = + OCFS2_SB(ocfs2_metadata_cache_get_super(ci))->journal; + spin_lock(&trans_inc_lock); + ret = !(time_after(journal->j_trans_id, ci->ci_created_trans)); + if (!ret) + ci->ci_created_trans = 0; + spin_unlock(&trans_inc_lock); + return ret; +} + +/* Wrapper for inodes so we can check system files */ +static inline int ocfs2_inode_is_new(struct inode *inode) +{ /* System files are never "new" as they're written out by * mkfs. This helps us early during mount, before we have the * journal open and j_trans_id could be junk. */ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) return 0; - spin_lock(&trans_inc_lock); - ret = !(time_after(OCFS2_SB(inode->i_sb)->journal->j_trans_id, - OCFS2_I(inode)->ip_created_trans)); - if (!ret) - OCFS2_I(inode)->ip_created_trans = 0; - spin_unlock(&trans_inc_lock); - return ret; + + return ocfs2_ci_is_new(INODE_CACHE(inode)); } -static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, - struct inode *inode) +static inline void ocfs2_ci_set_new(struct ocfs2_super *osb, + struct ocfs2_caching_info *ci) { spin_lock(&trans_inc_lock); - OCFS2_I(inode)->ip_created_trans = osb->journal->j_trans_id; + ci->ci_created_trans = osb->journal->j_trans_id; spin_unlock(&trans_inc_lock); } @@ -200,7 +210,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode) if (ocfs2_mount_local(osb)) return; - if (!ocfs2_inode_fully_checkpointed(inode)) { + if (!ocfs2_ci_fully_checkpointed(INODE_CACHE(inode))) { /* WARNING: This only kicks off a single * checkpoint. If someone races you and adds more * metadata to the journal, you won't know, and will @@ -210,7 +220,7 @@ static inline void ocfs2_checkpoint_inode(struct inode *inode) ocfs2_start_checkpoint(osb); wait_event(osb->journal->j_checkpointed, - ocfs2_inode_fully_checkpointed(inode)); + ocfs2_ci_fully_checkpointed(INODE_CACHE(inode))); } } @@ -266,31 +276,34 @@ int ocfs2_extend_trans(handle_t *handle, int nblocks); /* ocfs2_inode */ -int ocfs2_journal_access_di(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* ocfs2_extent_block */ -int ocfs2_journal_access_eb(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci, + struct buffer_head *bh, int type); +/* ocfs2_refcount_block */ +int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* ocfs2_group_desc */ -int ocfs2_journal_access_gd(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* ocfs2_xattr_block */ -int ocfs2_journal_access_xb(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* quota blocks */ -int ocfs2_journal_access_dq(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* dirblock */ -int ocfs2_journal_access_db(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* ocfs2_dx_root_block */ -int ocfs2_journal_access_dr(handle_t *handle, struct inode *inode, +int 
ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* ocfs2_dx_leaf */ -int ocfs2_journal_access_dl(handle_t *handle, struct inode *inode, +int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* Anything that has no ecc */ -int ocfs2_journal_access(handle_t *handle, struct inode *inode, +int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); /* @@ -477,6 +490,23 @@ static inline int ocfs2_calc_dxi_expand_credits(struct super_block *sb) return credits; } +/* inode update, new refcount block and its allocation credits. */ +#define OCFS2_REFCOUNT_TREE_CREATE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1 \ + + OCFS2_SUBALLOC_ALLOC) + +/* inode and the refcount block update. */ +#define OCFS2_REFCOUNT_TREE_SET_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) + +/* + * inode and the refcount block update. + * It doesn't include the credits for sub alloc change. + * So if we need to free the bit, OCFS2_SUBALLOC_FREE needs to be added. + */ +#define OCFS2_REFCOUNT_TREE_REMOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1) + +/* 2 metadata alloc, 2 new blocks and root refcount block */ +#define OCFS2_EXPAND_REFCOUNT_TREE_CREDITS (OCFS2_SUBALLOC_ALLOC * 2 + 3) + /* * Please note that the caller must make sure that root_el is the root * of extent tree. So for an inode, it should be &fe->id2.i_list. Otherwise diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index bac7e6a..ac10f83 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -297,8 +297,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb) } memcpy(alloc_copy, alloc, bh->b_size); - status = ocfs2_journal_access_di(handle, local_alloc_inode, bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, INODE_CACHE(local_alloc_inode), + bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_commit; @@ -392,7 +392,7 @@ int ocfs2_begin_local_alloc_recovery(struct ocfs2_super *osb, ocfs2_clear_local_alloc(alloc); ocfs2_compute_meta_ecc(osb->sb, alloc_bh->b_data, &alloc->i_check); - status = ocfs2_write_block(osb, alloc_bh, inode); + status = ocfs2_write_block(osb, alloc_bh, INODE_CACHE(inode)); if (status < 0) mlog_errno(status); @@ -678,7 +678,8 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb, * delete bits from it! 
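The OCFS2_REFCOUNT_TREE_*_CREDITS macros added to journal.h above are plain sums of smaller journaling units, and the remove path grows its budget at runtime when the suballocator bit must also be freed. A small compilable illustration of that arithmetic; the base values here are invented stand-ins (the real OCFS2_INODE_UPDATE_CREDITS and OCFS2_SUBALLOC_* constants are defined elsewhere in journal.h and depend on the filesystem layout):

#include <stdio.h>

/* Hypothetical stand-in values; not the kernel's. */
#define OCFS2_INODE_UPDATE_CREDITS 3
#define OCFS2_SUBALLOC_ALLOC       4
#define OCFS2_SUBALLOC_FREE        4

/* Mirrors the macros added in this patch. */
#define OCFS2_REFCOUNT_TREE_CREATE_CREDITS \
        (OCFS2_INODE_UPDATE_CREDITS + 1 + OCFS2_SUBALLOC_ALLOC)
#define OCFS2_REFCOUNT_TREE_SET_CREDITS    (OCFS2_INODE_UPDATE_CREDITS + 1)
#define OCFS2_REFCOUNT_TREE_REMOVE_CREDITS (OCFS2_INODE_UPDATE_CREDITS + 1)

int main(void)
{
        /* ocfs2_remove_refcount_tree() adds OCFS2_SUBALLOC_FREE on top of
         * the REMOVE credits only when the last reference goes away and
         * the refcount block itself must be returned to the allocator. */
        int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
        int last_user = 1;

        if (last_user)
                credits += OCFS2_SUBALLOC_FREE;

        printf("create=%d set=%d remove(last user)=%d\n",
               OCFS2_REFCOUNT_TREE_CREATE_CREDITS,
               OCFS2_REFCOUNT_TREE_SET_CREDITS, credits);
        return 0;
}

Sizing the handle up front matters because jbd2 reserves that many blocks in the journal before the transaction starts; undershooting aborts the handle mid-update.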
*/ *num_bits = bits_wanted; - status = ocfs2_journal_access_di(handle, local_alloc_inode, + status = ocfs2_journal_access_di(handle, + INODE_CACHE(local_alloc_inode), osb->local_alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { @@ -1156,7 +1157,8 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb, } memcpy(alloc_copy, alloc, osb->local_alloc_bh->b_size); - status = ocfs2_journal_access_di(handle, local_alloc_inode, + status = ocfs2_journal_access_di(handle, + INODE_CACHE(local_alloc_inode), osb->local_alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 8601f93..f010b22 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -69,7 +69,6 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, struct inode *dir, struct inode *inode, - struct dentry *dentry, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, @@ -78,7 +77,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, struct inode **ret_orphan_dir, - struct inode *inode, + u64 blkno, char *name, struct ocfs2_dir_lookup_result *lookup); @@ -358,8 +357,12 @@ static int ocfs2_mknod(struct inode *dir, } did_quota_inode = 1; + mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, + inode->i_mode, (unsigned long)dev, dentry->d_name.len, + dentry->d_name.name); + /* do the real work now. */ - status = ocfs2_mknod_locked(osb, dir, inode, dentry, dev, + status = ocfs2_mknod_locked(osb, dir, inode, dev, &new_fe_bh, parent_fe_bh, handle, inode_ac); if (status < 0) { @@ -375,7 +378,8 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } - status = ocfs2_journal_access_di(handle, dir, parent_fe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(dir), + parent_fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -465,7 +469,6 @@ leave: static int ocfs2_mknod_locked(struct ocfs2_super *osb, struct inode *dir, struct inode *inode, - struct dentry *dentry, dev_t dev, struct buffer_head **new_fe_bh, struct buffer_head *parent_fe_bh, @@ -479,10 +482,6 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, u16 suballoc_bit; u16 feat; - mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, - inode->i_mode, (unsigned long)dev, dentry->d_name.len, - dentry->d_name.name); - *new_fe_bh = NULL; status = ocfs2_claim_new_inode(osb, handle, dir, parent_fe_bh, @@ -507,9 +506,10 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, mlog_errno(status); goto leave; } - ocfs2_set_new_buffer_uptodate(inode, *new_fe_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), *new_fe_bh); - status = ocfs2_journal_access_di(handle, inode, *new_fe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), + *new_fe_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -565,7 +565,7 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, } ocfs2_populate_inode(inode, fe, 1); - ocfs2_inode_set_new(osb, inode); + ocfs2_ci_set_new(osb, INODE_CACHE(inode)); if (!ocfs2_mount_local(osb)) { status = ocfs2_create_new_inode_locks(inode); if (status < 0) @@ -682,7 +682,7 @@ static int ocfs2_link(struct dentry *old_dentry, goto out_unlock_inode; } - err = ocfs2_journal_access_di(handle, inode, fe_bh, + err = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (err < 0) { mlog_errno(err); @@ -850,7 +850,8 @@ static int ocfs2_unlink(struct inode *dir, } if (inode_is_unlinkable(inode)) { - 
status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, inode, + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, + OCFS2_I(inode)->ip_blkno, orphan_name, &orphan_insert); if (status < 0) { mlog_errno(status); @@ -866,7 +867,7 @@ static int ocfs2_unlink(struct inode *dir, goto leave; } - status = ocfs2_journal_access_di(handle, inode, fe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1241,9 +1242,8 @@ static int ocfs2_rename(struct inode *old_dir, if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) { status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, - new_inode, - orphan_name, - &orphan_insert); + OCFS2_I(new_inode)->ip_blkno, + orphan_name, &orphan_insert); if (status < 0) { mlog_errno(status); goto bail; @@ -1284,7 +1284,8 @@ static int ocfs2_rename(struct inode *old_dir, goto bail; } } - status = ocfs2_journal_access_di(handle, new_inode, newfe_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(new_inode), + newfe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1331,7 +1332,8 @@ static int ocfs2_rename(struct inode *old_dir, old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); - status = ocfs2_journal_access_di(handle, old_inode, old_inode_bh, + status = ocfs2_journal_access_di(handle, INODE_CACHE(old_inode), + old_inode_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status >= 0) { old_di = (struct ocfs2_dinode *) old_inode_bh->b_data; @@ -1407,9 +1409,10 @@ static int ocfs2_rename(struct inode *old_dir, (int)old_dir_nlink, old_dir->i_nlink); } else { struct ocfs2_dinode *fe; - status = ocfs2_journal_access_di(handle, old_dir, - old_dir_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, + INODE_CACHE(old_dir), + old_dir_bh, + OCFS2_JOURNAL_ACCESS_WRITE); fe = (struct ocfs2_dinode *) old_dir_bh->b_data; ocfs2_set_links_count(fe, old_dir->i_nlink); status = ocfs2_journal_dirty(handle, old_dir_bh); @@ -1527,9 +1530,11 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, mlog_errno(status); goto bail; } - ocfs2_set_new_buffer_uptodate(inode, bhs[virtual]); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), + bhs[virtual]); - status = ocfs2_journal_access(handle, inode, bhs[virtual], + status = ocfs2_journal_access(handle, INODE_CACHE(inode), + bhs[virtual], OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1692,7 +1697,11 @@ static int ocfs2_symlink(struct inode *dir, } did_quota_inode = 1; - status = ocfs2_mknod_locked(osb, dir, inode, dentry, + mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, + inode->i_mode, dentry->d_name.len, + dentry->d_name.name); + + status = ocfs2_mknod_locked(osb, dir, inode, 0, &new_fe_bh, parent_fe_bh, handle, inode_ac); if (status < 0) { @@ -1842,7 +1851,7 @@ bail: static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, struct inode **ret_orphan_dir, - struct inode *inode, + u64 blkno, char *name, struct ocfs2_dir_lookup_result *lookup) { @@ -1850,7 +1859,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, struct buffer_head *orphan_dir_bh = NULL; int status = 0; - status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); + status = ocfs2_blkno_stringify(blkno, name); if (status < 0) { mlog_errno(status); return status; @@ -1917,7 +1926,9 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, goto leave; } - status = ocfs2_journal_access_di(handle, orphan_dir_inode, orphan_dir_bh, + status = ocfs2_journal_access_di(handle, + 
INODE_CACHE(orphan_dir_inode), + orphan_dir_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -2002,7 +2013,9 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, goto leave; } - status = ocfs2_journal_access_di(handle,orphan_dir_inode, orphan_dir_bh, + status = ocfs2_journal_access_di(handle, + INODE_CACHE(orphan_dir_inode), + orphan_dir_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -2028,6 +2041,274 @@ leave: return status; } +int ocfs2_create_inode_in_orphan(struct inode *dir, + int mode, + struct inode **new_inode) +{ + int status, did_quota_inode = 0; + struct inode *inode = NULL; + struct inode *orphan_dir = NULL; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + struct ocfs2_dinode *di = NULL; + handle_t *handle = NULL; + char orphan_name[OCFS2_ORPHAN_NAMELEN + 1]; + struct buffer_head *parent_di_bh = NULL; + struct buffer_head *new_di_bh = NULL; + struct ocfs2_alloc_context *inode_ac = NULL; + struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; + + status = ocfs2_inode_lock(dir, &parent_di_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + return status; + } + + /* + * We give the orphan dir the root blkno to fake an orphan name, + * and allocate enough space for our insertion. + */ + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, + osb->root_blkno, + orphan_name, &orphan_insert); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* reserve an inode spot */ + status = ocfs2_reserve_new_inode(osb, &inode_ac); + if (status < 0) { + if (status != -ENOSPC) + mlog_errno(status); + goto leave; + } + + inode = ocfs2_get_init_inode(dir, mode); + if (!inode) { + status = -ENOMEM; + mlog_errno(status); + goto leave; + } + + handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, 0, 0)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto leave; + } + + /* We don't use standard VFS wrapper because we don't want vfs_dq_init + * to be called. */ + if (sb_any_quota_active(osb->sb) && + osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { + status = -EDQUOT; + goto leave; + } + did_quota_inode = 1; + + /* do the real work now. */ + status = ocfs2_mknod_locked(osb, dir, inode, + 0, &new_di_bh, parent_di_bh, handle, + inode_ac); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, orphan_name); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + di = (struct ocfs2_dinode *)new_di_bh->b_data; + status = ocfs2_orphan_add(osb, handle, inode, di, orphan_name, + &orphan_insert, orphan_dir); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + /* get open lock so that only nodes can't remove it from orphan dir. 
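ocfs2_create_inode_in_orphan() above uses the unwind idiom that runs through this whole patch: take resources in a fixed order, funnel every failure to one label, and release in reverse guarded by did_* flags. A toy, compilable version of that shape, with invented resource names standing in for the dir lock, quota charge, and journal handle:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
        if (fail) {
                fprintf(stderr, "failed to get %s\n", what);
                return -1;
        }
        printf("got %s\n", what);
        return 0;
}

static void release(const char *what)
{
        printf("put %s\n", what);
}

static int do_op(int fail_at)
{
        int status, did_quota = 0, have_handle = 0;

        status = acquire("dir lock", fail_at == 1);
        if (status < 0)
                return status;          /* nothing to unwind yet */

        status = acquire("quota", fail_at == 2);
        if (status < 0)
                goto leave;
        did_quota = 1;

        status = acquire("transaction handle", fail_at == 3);
        if (status == 0)
                have_handle = 1;

leave:
        /* One exit path, like the function above: quota is undone only on
         * error, a started handle is always committed, and the lock taken
         * first is dropped last. */
        if (status < 0 && did_quota)
                release("quota");
        if (have_handle)
                release("transaction handle (commit)");
        release("dir lock");
        return status;
}

int main(void)
{
        return do_op(0) < 0 ? 1 : 0;
}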
*/ + status = ocfs2_open_lock(inode); + if (status < 0) + mlog_errno(status); + +leave: + if (status < 0 && did_quota_inode) + vfs_dq_free_inode(inode); + if (handle) + ocfs2_commit_trans(osb, handle); + + if (orphan_dir) { + /* This was locked for us in ocfs2_prepare_orphan_dir() */ + ocfs2_inode_unlock(orphan_dir, 1); + mutex_unlock(&orphan_dir->i_mutex); + iput(orphan_dir); + } + + if (status == -ENOSPC) + mlog(0, "Disk is full\n"); + + if ((status < 0) && inode) { + clear_nlink(inode); + iput(inode); + } + + if (inode_ac) + ocfs2_free_alloc_context(inode_ac); + + brelse(new_di_bh); + + if (!status) + *new_inode = inode; + + ocfs2_free_dir_lookup_result(&orphan_insert); + + ocfs2_inode_unlock(dir, 1); + brelse(parent_di_bh); + return status; +} + +int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, + struct inode *inode, + struct dentry *dentry) +{ + int status = 0; + struct buffer_head *parent_di_bh = NULL; + handle_t *handle = NULL; + struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); + struct ocfs2_dinode *dir_di, *di; + struct inode *orphan_dir_inode = NULL; + struct buffer_head *orphan_dir_bh = NULL; + struct buffer_head *di_bh = NULL; + struct ocfs2_dir_lookup_result lookup = { NULL, }; + + mlog_entry("(0x%p, 0x%p, %.*s')\n", dir, dentry, + dentry->d_name.len, dentry->d_name.name); + + status = ocfs2_inode_lock(dir, &parent_di_bh, 1); + if (status < 0) { + if (status != -ENOENT) + mlog_errno(status); + return status; + } + + dir_di = (struct ocfs2_dinode *) parent_di_bh->b_data; + if (!dir_di->i_links_count) { + /* can't make a file in a deleted directory. */ + status = -ENOENT; + goto leave; + } + + status = ocfs2_check_dir_for_entry(dir, dentry->d_name.name, + dentry->d_name.len); + if (status) + goto leave; + + /* get a spot inside the dir. 
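Further down, ocfs2_mv_orphaned_inode_to_new() clears the on-disk orphan flag with le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL), the kernel helper that read-modify-writes a little-endian field using host-order arithmetic. A self-contained approximation of those conversions; the flag value below is made up, only the helper shape matters:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Userspace stand-ins; the kernel's come from its byteorder headers. */
static uint32_t cpu_to_le32(uint32_t v)
{
        uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
        uint32_t r;

        memcpy(&r, b, 4);
        return r;
}

static uint32_t le32_to_cpu(uint32_t v)
{
        uint8_t b[4];

        memcpy(b, &v, 4);
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* Shape of the kernel's le32_add_cpu(): adjust an on-disk little-endian
 * word by a host-order delta, which also subtracts flag bits cleanly. */
static void le32_add_cpu(uint32_t *var, int32_t val)
{
        *var = cpu_to_le32(le32_to_cpu(*var) + (uint32_t)val);
}

#define TOY_ORPHANED_FL 0x0010u         /* invented flag bit */

int main(void)
{
        uint32_t i_flags = cpu_to_le32(TOY_ORPHANED_FL | 0x1);

        le32_add_cpu(&i_flags, -(int32_t)TOY_ORPHANED_FL);
        printf("flags now 0x%x\n", le32_to_cpu(i_flags));  /* prints 0x1 */
        return 0;
}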
*/ + status = ocfs2_prepare_dir_for_insert(osb, dir, parent_di_bh, + dentry->d_name.name, + dentry->d_name.len, &lookup); + if (status < 0) { + mlog_errno(status); + goto leave; + } + + orphan_dir_inode = ocfs2_get_system_file_inode(osb, + ORPHAN_DIR_SYSTEM_INODE, + osb->slot_num); + if (!orphan_dir_inode) { + status = -EEXIST; + mlog_errno(status); + goto leave; + } + + mutex_lock(&orphan_dir_inode->i_mutex); + + status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); + if (status < 0) { + mlog_errno(status); + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + goto leave; + } + + status = ocfs2_read_inode_block(inode, &di_bh); + if (status < 0) { + mlog_errno(status); + goto orphan_unlock; + } + + handle = ocfs2_start_trans(osb, ocfs2_rename_credits(osb->sb)); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + handle = NULL; + mlog_errno(status); + goto orphan_unlock; + } + + status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), + di_bh, OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode, + orphan_dir_bh); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + di = (struct ocfs2_dinode *)di_bh->b_data; + le32_add_cpu(&di->i_flags, -OCFS2_ORPHANED_FL); + di->i_orphaned_slot = 0; + ocfs2_journal_dirty(handle, di_bh); + + status = ocfs2_add_entry(handle, dentry, inode, + OCFS2_I(inode)->ip_blkno, parent_di_bh, + &lookup); + if (status < 0) { + mlog_errno(status); + goto out_commit; + } + + status = ocfs2_dentry_attach_lock(dentry, inode, + OCFS2_I(dir)->ip_blkno); + if (status) { + mlog_errno(status); + goto out_commit; + } + + insert_inode_hash(inode); + dentry->d_op = &ocfs2_dentry_ops; + d_instantiate(dentry, inode); + status = 0; +out_commit: + ocfs2_commit_trans(osb, handle); +orphan_unlock: + ocfs2_inode_unlock(orphan_dir_inode, 1); + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); +leave: + + ocfs2_inode_unlock(dir, 1); + + brelse(di_bh); + brelse(parent_di_bh); + brelse(orphan_dir_bh); + + ocfs2_free_dir_lookup_result(&lookup); + + mlog_exit(status); + + return status; +} + const struct inode_operations ocfs2_dir_iops = { .create = ocfs2_create, .lookup = ocfs2_lookup, diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h index 688aef6..e5d059d 100644 --- a/fs/ocfs2/namei.h +++ b/fs/ocfs2/namei.h @@ -35,5 +35,11 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, struct inode *orphan_dir_inode, struct inode *inode, struct buffer_head *orphan_dir_bh); +int ocfs2_create_inode_in_orphan(struct inode *dir, + int mode, + struct inode **new_inode); +int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, + struct inode *new_inode, + struct dentry *new_dentry); #endif /* OCFS2_NAMEI_H */ diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 39e1d5a..eae4046 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -51,20 +51,51 @@ /* For struct ocfs2_blockcheck_stats */ #include "blockcheck.h" + +/* Caching of metadata buffers */ + /* Most user visible OCFS2 inodes will have very few pieces of * metadata, but larger files (including bitmaps, etc) must be taken * into account when designing an access scheme. We allow a small * amount of inlined blocks to be stored on an array and grow the * structure into a rb tree when necessary. 
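The comment above describes the two-level cache design: a metadata cache starts as a fixed two-entry inline array (OCFS2_CACHE_INFO_MAX_ARRAY) and is promoted to an rb tree once a third block arrives. A minimal userspace model of that promotion, with a sorted linked list standing in for the kernel's rb tree:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ARRAY 2  /* mirrors OCFS2_CACHE_INFO_MAX_ARRAY */

struct node { unsigned long long blkno; struct node *next; };

struct cache {
        int inline_mode;                      /* like OCFS2_CACHE_FL_INLINE */
        unsigned int num_cached;
        unsigned long long array[MAX_ARRAY];  /* inline representation */
        struct node *list;                    /* stand-in for ci_tree */
};

static void tree_insert(struct cache *c, unsigned long long blkno)
{
        struct node **p = &c->list, *n = malloc(sizeof(*n));

        if (!n)
                abort();
        while (*p && (*p)->blkno < blkno)
                p = &(*p)->next;
        n->blkno = blkno;
        n->next = *p;
        *p = n;
}

static void cache_add(struct cache *c, unsigned long long blkno)
{
        if (c->inline_mode && c->num_cached < MAX_ARRAY) {
                c->array[c->num_cached++] = blkno;
                return;
        }
        if (c->inline_mode) {
                /* Promotion: migrate the inline entries, flip the flag. */
                for (unsigned int i = 0; i < c->num_cached; i++)
                        tree_insert(c, c->array[i]);
                c->inline_mode = 0;
        }
        tree_insert(c, blkno);
        c->num_cached++;
}

int main(void)
{
        struct cache c = { .inline_mode = 1 };

        for (unsigned long long b = 100; b < 105; b++)
                cache_add(&c, b);
        printf("cached %u blocks, inline=%d\n", c.num_cached, c.inline_mode);
        return 0;
}

The payoff is that ordinary inodes, which cache a block or two, never pay for tree nodes, while bitmaps and other large system files still get logarithmic lookup.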
*/ -#define OCFS2_INODE_MAX_CACHE_ARRAY 2 +#define OCFS2_CACHE_INFO_MAX_ARRAY 2 + +/* Flags for ocfs2_caching_info */ + +enum ocfs2_caching_info_flags { + /* Indicates that the metadata cache is using the inline array */ + OCFS2_CACHE_FL_INLINE = 1<<1, +}; +struct ocfs2_caching_operations; struct ocfs2_caching_info { + /* + * The parent structure provides the locks, but because the + * parent structure can differ, it provides locking operations + * to struct ocfs2_caching_info. + */ + const struct ocfs2_caching_operations *ci_ops; + + /* next two are protected by trans_inc_lock */ + /* which transaction were we created on? Zero if none. */ + unsigned long ci_created_trans; + /* last transaction we were a part of. */ + unsigned long ci_last_trans; + + /* Cache structures */ + unsigned int ci_flags; unsigned int ci_num_cached; union { - sector_t ci_array[OCFS2_INODE_MAX_CACHE_ARRAY]; + sector_t ci_array[OCFS2_CACHE_INFO_MAX_ARRAY]; struct rb_root ci_tree; } ci_cache; }; +/* + * Need this prototype here instead of in uptodate.h because journal.h + * uses it. + */ +struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci); /* this limits us to 256 nodes * if we need more, we can do a kmalloc for the map */ @@ -377,12 +408,17 @@ struct ocfs2_super /* the group we used to allocate inodes. */ u64 osb_inode_alloc_group; + + /* rb tree root for refcount lock. */ + struct rb_root osb_rf_lock_tree; + struct ocfs2_refcount_tree *osb_ref_tree_lru; }; #define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) /* Useful typedef for passing around journal access functions */ -typedef int (*ocfs2_journal_access_func)(handle_t *handle, struct inode *inode, +typedef int (*ocfs2_journal_access_func)(handle_t *handle, + struct ocfs2_caching_info *ci, struct buffer_head *bh, int type); static inline int ocfs2_should_order_data(struct inode *inode) @@ -480,6 +516,13 @@ static inline void ocfs2_add_links_count(struct ocfs2_dinode *di, int n) ocfs2_set_links_count(di, links); } +static inline int ocfs2_refcount_tree(struct ocfs2_super *osb) +{ + if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE) + return 1; + return 0; +} + /* set / clear functions because cluster events can make these happen * in parallel so we want the transitions to be atomic. 
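Stepping back to the ci_ops pointer introduced in struct ocfs2_caching_info above: the cache cannot own its locks because its embedding object differs (an inode versus a refcount tree), so the owner supplies locking and identity callbacks. A compilable sketch of the same vtable arrangement, with invented names loosely following the co_* operations that appear later in this patch:

#include <stdio.h>
#include <stddef.h>

struct caching_info;

struct caching_operations {
        unsigned long long (*co_owner)(struct caching_info *ci);
        void (*co_cache_lock)(struct caching_info *ci);
        void (*co_cache_unlock)(struct caching_info *ci);
};

struct caching_info {
        const struct caching_operations *ci_ops;
        unsigned int num_cached;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* One possible owner embeds the cache and recovers itself from it. */
struct toy_tree {
        unsigned long long blkno;
        struct caching_info ci;
};

static unsigned long long tree_owner(struct caching_info *ci)
{
        return container_of(ci, struct toy_tree, ci)->blkno;
}

static void tree_lock(struct caching_info *ci)   { (void)ci; /* e.g. spin_lock */ }
static void tree_unlock(struct caching_info *ci) { (void)ci; /* e.g. spin_unlock */ }

static const struct caching_operations tree_ops = {
        .co_owner        = tree_owner,
        .co_cache_lock   = tree_lock,
        .co_cache_unlock = tree_unlock,
};

/* Generic cache code calls only through ci_ops and never needs to know
 * whether an inode or a refcount tree owns the cache. */
static void cache_touch(struct caching_info *ci)
{
        ci->ci_ops->co_cache_lock(ci);
        ci->num_cached++;
        ci->ci_ops->co_cache_unlock(ci);
        printf("owner %llu now caches %u items\n",
               ci->ci_ops->co_owner(ci), ci->num_cached);
}

int main(void)
{
        struct toy_tree t = { .blkno = 42, .ci = { .ci_ops = &tree_ops } };

        cache_touch(&t.ci);
        return 0;
}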
this also * means that any future flags osb_flags must be protected by spinlock @@ -578,6 +621,9 @@ static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb) #define OCFS2_IS_VALID_DX_LEAF(ptr) \ (!strcmp((ptr)->dl_signature, OCFS2_DX_LEAF_SIGNATURE)) +#define OCFS2_IS_VALID_REFCOUNT_BLOCK(ptr) \ + (!strcmp((ptr)->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE)) + static inline unsigned long ino_from_blkno(struct super_block *sb, u64 blkno) { diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 7ab6e9e..e9431e4 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -68,6 +68,7 @@ #define OCFS2_DIR_TRAILER_SIGNATURE "DIRTRL1" #define OCFS2_DX_ROOT_SIGNATURE "DXDIR01" #define OCFS2_DX_LEAF_SIGNATURE "DXLEAF1" +#define OCFS2_REFCOUNT_BLOCK_SIGNATURE "REFCNT1" /* Compatibility flags */ #define OCFS2_HAS_COMPAT_FEATURE(sb,mask) \ @@ -98,7 +99,8 @@ | OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK \ | OCFS2_FEATURE_INCOMPAT_XATTR \ | OCFS2_FEATURE_INCOMPAT_META_ECC \ - | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS) + | OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS \ + | OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) @@ -160,6 +162,9 @@ /* Metadata checksum and error correction */ #define OCFS2_FEATURE_INCOMPAT_META_ECC 0x0800 +/* Refcount tree support */ +#define OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE 0x1000 + /* * backup superblock flag is used to indicate that this volume * has backup superblocks. @@ -223,6 +228,7 @@ #define OCFS2_HAS_XATTR_FL (0x0002) #define OCFS2_INLINE_XATTR_FL (0x0004) #define OCFS2_INDEXED_DIR_FL (0x0008) +#define OCFS2_HAS_REFCOUNT_FL (0x0010) /* Inode attributes, keep in sync with EXT2 */ #define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ @@ -241,8 +247,11 @@ /* * Extent record flags (e_node.leaf.flags) */ -#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but - * unwritten */ +#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but + * unwritten */ +#define OCFS2_EXT_REFCOUNTED (0x02) /* Extent is reference + * counted in an associated + * refcount tree */ /* * ioctl commands @@ -292,6 +301,15 @@ struct ocfs2_new_group_input { #define OCFS2_IOC_GROUP_ADD _IOW('o', 2,struct ocfs2_new_group_input) #define OCFS2_IOC_GROUP_ADD64 _IOW('o', 3,struct ocfs2_new_group_input) +/* Used to pass 2 file names to reflink. 
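The reflink_arguments structure defined just below packs two userspace path pointers and a preserve flag into fixed-width __u64 fields, so one layout serves 32- and 64-bit callers without a compat shim. A hedged sketch of how a userspace tool might drive the new ioctl; the struct and ioctl number mirror the patch, everything else (fd choice, error handling) is illustrative:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Mirrors the definition added to ocfs2_fs.h. */
struct reflink_arguments {
        uint64_t old_path;
        uint64_t new_path;
        uint64_t preserve;
};
#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments)

int main(int argc, char **argv)
{
        struct reflink_arguments args;
        int fd;

        if (argc != 3) {
                fprintf(stderr, "usage: %s <src> <dest>\n", argv[0]);
                return 1;
        }

        /* Pointers travel as u64 so the kernel side stays compat-clean. */
        args.old_path = (uint64_t)(uintptr_t)argv[1];
        args.new_path = (uint64_t)(uintptr_t)argv[2];
        args.preserve = 1;      /* keep attributes on the new file */

        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || ioctl(fd, OCFS2_IOC_REFLINK, &args) < 0) {
                perror("reflink");
                return 1;
        }
        close(fd);
        return 0;
}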
*/ +struct reflink_arguments { + __u64 old_path; + __u64 new_path; + __u64 preserve; +}; +#define OCFS2_IOC_REFLINK _IOW('o', 4, struct reflink_arguments) + + /* * Journal Flags (ocfs2_dinode.id1.journal1.i_flags) */ @@ -717,7 +735,8 @@ struct ocfs2_dinode { __le64 i_xattr_loc; /*80*/ struct ocfs2_block_check i_check; /* Error checking */ /*88*/ __le64 i_dx_root; /* Pointer to dir index root block */ - __le64 i_reserved2[5]; +/*90*/ __le64 i_refcount_loc; + __le64 i_reserved2[4]; /*B8*/ union { __le64 i_pad1; /* Generic way to refer to this 64bit union */ @@ -901,6 +920,60 @@ struct ocfs2_group_desc /*40*/ __u8 bg_bitmap[0]; }; +struct ocfs2_refcount_rec { +/*00*/ __le64 r_cpos; /* Physical offset, in clusters */ + __le32 r_clusters; /* Clusters covered by this extent */ + __le32 r_refcount; /* Reference count of this extent */ +/*10*/ +}; +#define OCFS2_32BIT_POS_MASK (0xffffffffULL) + +#define OCFS2_REFCOUNT_LEAF_FL (0x00000001) +#define OCFS2_REFCOUNT_TREE_FL (0x00000002) + +struct ocfs2_refcount_list { +/*00*/ __le16 rl_count; /* Maximum number of entries possible + in rl_records */ + __le16 rl_used; /* Current number of used records */ + __le32 rl_reserved2; + __le64 rl_reserved1; /* Pad to sizeof(ocfs2_refcount_record) */ +/*10*/ struct ocfs2_refcount_rec rl_recs[0]; /* Refcount records */ +}; + + +struct ocfs2_refcount_block { +/*00*/ __u8 rf_signature[8]; /* Signature for verification */ + __le16 rf_suballoc_slot; /* Slot suballocator this block + belongs to */ + __le16 rf_suballoc_bit; /* Bit offset in suballocator + block group */ + __le32 rf_fs_generation; /* Must match superblock */ +/*10*/ __le64 rf_blkno; /* Offset on disk, in blocks */ + __le64 rf_parent; /* Parent block, only valid if + OCFS2_REFCOUNT_LEAF_FL is set in + rf_flags */ +/*20*/ struct ocfs2_block_check rf_check; /* Error checking */ + __le64 rf_last_eb_blk; /* Pointer to last extent block */ +/*30*/ __le32 rf_count; /* Number of inodes sharing this + refcount tree */ + __le32 rf_flags; /* See the flags above */ + __le32 rf_clusters; /* clusters covered by refcount tree. */ + __le32 rf_cpos; /* cluster offset in refcount tree.*/ +/*40*/ __le32 rf_generation; /* generation number. all be the same + * for the same refcount tree. */ + __le32 rf_reserved0; + __le64 rf_reserved1[7]; +/*80*/ union { + struct ocfs2_refcount_list rf_records; /* List of refcount + records */ + struct ocfs2_extent_list rf_list; /* Extent record list, + only valid if + OCFS2_REFCOUNT_TREE_FL + is set in rf_flags */ + }; +/* Actual on-disk size is one block */ +}; + /* * On disk extended attribute structure for OCFS2. 
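The column comments in struct ocfs2_refcount_block above encode on-disk byte offsets, ending with the rf_records/rf_list union at 0x80. A standalone check of that arithmetic, using plain fixed-width stand-ins for the __le types; the 8-byte block_check stand-in is an assumption inferred from rf_check sitting at 0x20 and rf_last_eb_blk at 0x28, not the kernel's definition:

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Assumed 8-byte stand-in for struct ocfs2_block_check. */
struct block_check { uint32_t crc32e; uint16_t ecc; uint16_t pad; };

struct refcount_block {
        uint8_t  rf_signature[8];       /* 0x00 */
        uint16_t rf_suballoc_slot;      /* 0x08 */
        uint16_t rf_suballoc_bit;       /* 0x0a */
        uint32_t rf_fs_generation;      /* 0x0c */
        uint64_t rf_blkno;              /* 0x10 */
        uint64_t rf_parent;             /* 0x18 */
        struct block_check rf_check;    /* 0x20 */
        uint64_t rf_last_eb_blk;        /* 0x28 */
        uint32_t rf_count;              /* 0x30 */
        uint32_t rf_flags;              /* 0x34 */
        uint32_t rf_clusters;           /* 0x38 */
        uint32_t rf_cpos;               /* 0x3c */
        uint32_t rf_generation;         /* 0x40 */
        uint32_t rf_reserved0;          /* 0x44 */
        uint64_t rf_reserved1[7];       /* 0x48 .. 0x7f */
        uint8_t  rf_records[];          /* 0x80: the rf_records/rf_list union */
};

int main(void)
{
        assert(offsetof(struct refcount_block, rf_blkno)      == 0x10);
        assert(offsetof(struct refcount_block, rf_check)      == 0x20);
        assert(offsetof(struct refcount_block, rf_count)      == 0x30);
        assert(offsetof(struct refcount_block, rf_generation) == 0x40);
        assert(offsetof(struct refcount_block, rf_records)    == 0x80);
        return 0;
}

The rest of the block, out to s_blocksize, holds either refcount records (leaf) or an extent list (tree root), selected by the rf_flags bits defined above.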
*/ @@ -1312,6 +1385,32 @@ static inline u16 ocfs2_xattr_recs_per_xb(struct super_block *sb) return size / sizeof(struct ocfs2_extent_rec); } + +static inline u16 ocfs2_extent_recs_per_rb(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_refcount_block, rf_list.l_recs); + + return size / sizeof(struct ocfs2_extent_rec); +} + +static inline u16 ocfs2_refcount_recs_per_rb(struct super_block *sb) +{ + int size; + + size = sb->s_blocksize - + offsetof(struct ocfs2_refcount_block, rf_records.rl_recs); + + return size / sizeof(struct ocfs2_refcount_rec); +} + +static inline u32 +ocfs2_get_ref_rec_low_cpos(const struct ocfs2_refcount_rec *rec) +{ + return le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK; +} #else static inline int ocfs2_fast_symlink_chars(int blocksize) { diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index c212cf5..d277aab 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -49,6 +49,7 @@ enum ocfs2_lock_type { OCFS2_LOCK_TYPE_QINFO, OCFS2_LOCK_TYPE_NFS_SYNC, OCFS2_LOCK_TYPE_ORPHAN_SCAN, + OCFS2_LOCK_TYPE_REFCOUNT, OCFS2_NUM_LOCK_TYPES }; @@ -89,6 +90,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type) case OCFS2_LOCK_TYPE_ORPHAN_SCAN: c = 'P'; break; + case OCFS2_LOCK_TYPE_REFCOUNT: + c = 'T'; + break; default: c = '\0'; } @@ -110,6 +114,7 @@ static char *ocfs2_lock_type_strings[] = { [OCFS2_LOCK_TYPE_QINFO] = "Quota", [OCFS2_LOCK_TYPE_NFS_SYNC] = "NFSSync", [OCFS2_LOCK_TYPE_ORPHAN_SCAN] = "OrphanScan", + [OCFS2_LOCK_TYPE_REFCOUNT] = "Refcount", }; static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type) diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 3fb96fc..e5df9d1 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h @@ -109,7 +109,7 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex); int ocfs2_read_quota_block(struct inode *inode, u64 v_block, struct buffer_head **bh); -extern struct dquot_operations ocfs2_quota_operations; +extern const struct dquot_operations ocfs2_quota_operations; extern struct quota_format_type ocfs2_quota_format; int ocfs2_quota_setup(void); diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 44f2a5e..b437dc0 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -154,7 +154,7 @@ static int ocfs2_get_quota_block(struct inode *inode, int block, err = -EIO; mlog_errno(err); } - return err;; + return err; } /* Read data from global quotafile - avoid pagecache and such because we cannot @@ -253,8 +253,9 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type, flush_dcache_page(bh->b_page); set_buffer_uptodate(bh); unlock_buffer(bh); - ocfs2_set_buffer_uptodate(gqinode, bh); - err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type); + ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh); + err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh, + ja_type); if (err < 0) { brelse(bh); goto out; @@ -849,7 +850,7 @@ static void ocfs2_destroy_dquot(struct dquot *dquot) kmem_cache_free(ocfs2_dquot_cachep, dquot); } -struct dquot_operations ocfs2_quota_operations = { +const struct dquot_operations ocfs2_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index bdb09cb..1a2c50a 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -108,7 +108,7 @@ static int ocfs2_modify_bh(struct inode *inode, struct buffer_head *bh, mlog_errno(status); return 
status; } - status = ocfs2_journal_access_dq(handle, inode, bh, + status = ocfs2_journal_access_dq(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -510,7 +510,8 @@ static int ocfs2_recover_local_quota_file(struct inode *lqinode, goto out_commit; } /* Release local quota file entry */ - status = ocfs2_journal_access_dq(handle, lqinode, + status = ocfs2_journal_access_dq(handle, + INODE_CACHE(lqinode), qbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -619,7 +620,8 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, mlog_errno(status); goto out_bh; } - status = ocfs2_journal_access_dq(handle, lqinode, bh, + status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), + bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -993,8 +995,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( goto out_trans; } dchunk = (struct ocfs2_local_disk_chunk *)bh->b_data; - ocfs2_set_new_buffer_uptodate(lqinode, bh); - status = ocfs2_journal_access_dq(handle, lqinode, bh, + ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh); + status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1027,8 +1029,8 @@ static struct ocfs2_quota_chunk *ocfs2_local_quota_add_chunk( mlog_errno(status); goto out_trans; } - ocfs2_set_new_buffer_uptodate(lqinode, dbh); - status = ocfs2_journal_access_dq(handle, lqinode, dbh, + ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), dbh); + status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), dbh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1131,7 +1133,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( mlog_errno(status); goto out; } - ocfs2_set_new_buffer_uptodate(lqinode, bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(lqinode), bh); /* Local quota info, chunk header and the new block we initialize */ handle = ocfs2_start_trans(OCFS2_SB(sb), @@ -1143,7 +1145,7 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( goto out; } /* Zero created block */ - status = ocfs2_journal_access_dq(handle, lqinode, bh, + status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { mlog_errno(status); @@ -1158,7 +1160,8 @@ static struct ocfs2_quota_chunk *ocfs2_extend_local_quota_file( goto out_trans; } /* Update chunk header */ - status = ocfs2_journal_access_dq(handle, lqinode, chunk->qc_headerbh, + status = ocfs2_journal_access_dq(handle, INODE_CACHE(lqinode), + chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1292,7 +1295,8 @@ static int ocfs2_local_release_dquot(struct dquot *dquot) goto out; } - status = ocfs2_journal_access_dq(handle, sb_dqopt(sb)->files[type], + status = ocfs2_journal_access_dq(handle, + INODE_CACHE(sb_dqopt(sb)->files[type]), od->dq_chunk->qc_headerbh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c new file mode 100644 index 0000000..60287fc --- /dev/null +++ b/fs/ocfs2/refcounttree.c @@ -0,0 +1,4313 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * refcounttree.c + * + * Copyright (C) 2009 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/sort.h> +#define MLOG_MASK_PREFIX ML_REFCOUNT +#include <cluster/masklog.h> +#include "ocfs2.h" +#include "inode.h" +#include "alloc.h" +#include "suballoc.h" +#include "journal.h" +#include "uptodate.h" +#include "super.h" +#include "buffer_head_io.h" +#include "blockcheck.h" +#include "refcounttree.h" +#include "sysfile.h" +#include "dlmglue.h" +#include "extent_map.h" +#include "aops.h" +#include "xattr.h" +#include "namei.h" + +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/gfp.h> +#include <linux/slab.h> +#include <linux/writeback.h> +#include <linux/pagevec.h> +#include <linux/swap.h> +#include <linux/security.h> +#include <linux/fsnotify.h> +#include <linux/quotaops.h> +#include <linux/namei.h> +#include <linux/mount.h> + +struct ocfs2_cow_context { + struct inode *inode; + u32 cow_start; + u32 cow_len; + struct ocfs2_extent_tree data_et; + struct ocfs2_refcount_tree *ref_tree; + struct buffer_head *ref_root_bh; + struct ocfs2_alloc_context *meta_ac; + struct ocfs2_alloc_context *data_ac; + struct ocfs2_cached_dealloc_ctxt dealloc; + void *cow_object; + struct ocfs2_post_refcount *post_refcount; + int extra_credits; + int (*get_clusters)(struct ocfs2_cow_context *context, + u32 v_cluster, u32 *p_cluster, + u32 *num_clusters, + unsigned int *extent_flags); + int (*cow_duplicate_clusters)(handle_t *handle, + struct ocfs2_cow_context *context, + u32 cpos, u32 old_cluster, + u32 new_cluster, u32 new_len); +}; + +static inline struct ocfs2_refcount_tree * +cache_info_to_refcount(struct ocfs2_caching_info *ci) +{ + return container_of(ci, struct ocfs2_refcount_tree, rf_ci); +} + +static int ocfs2_validate_refcount_block(struct super_block *sb, + struct buffer_head *bh) +{ + int rc; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)bh->b_data; + + mlog(0, "Validating refcount block %llu\n", + (unsigned long long)bh->b_blocknr); + + BUG_ON(!buffer_uptodate(bh)); + + /* + * If the ecc fails, we return the error but otherwise + * leave the filesystem running. We know any error is + * local to this block. 
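After the ECC pass, ocfs2_validate_refcount_block() below makes three independent sanity checks: the signature identifies the block type, the echoed rf_blkno catches misdirected writes, and rf_fs_generation rejects blocks left over from an earlier mkfs. The same pattern in a self-contained form, with a toy structure and invented constants:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define TOY_SIGNATURE "REFCNT1"

struct toy_block {
        char     signature[8];
        uint64_t blkno;          /* block echoes its own disk location */
        uint32_t fs_generation;  /* must match the mounted filesystem */
};

static int validate_block(const struct toy_block *b, uint64_t read_from,
                          uint32_t fs_gen)
{
        if (strcmp(b->signature, TOY_SIGNATURE))
                return -1;      /* wrong block type entirely */
        if (b->blkno != read_from)
                return -1;      /* misdirected write or bad pointer */
        if (b->fs_generation != fs_gen)
                return -1;      /* stale block from a previous mkfs */
        return 0;
}

int main(void)
{
        struct toy_block b = { TOY_SIGNATURE, 1234, 77 };

        printf("good block: %d\n", validate_block(&b, 1234, 77));
        printf("stale generation: %d\n", validate_block(&b, 1234, 78));
        return 0;
}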
+ */ + rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check); + if (rc) { + mlog(ML_ERROR, "Checksum failed for refcount block %llu\n", + (unsigned long long)bh->b_blocknr); + return rc; + } + + + if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) { + ocfs2_error(sb, + "Refcount block #%llu has bad signature %.*s", + (unsigned long long)bh->b_blocknr, 7, + rb->rf_signature); + return -EINVAL; + } + + if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) { + ocfs2_error(sb, + "Refcount block #%llu has an invalid rf_blkno " + "of %llu", + (unsigned long long)bh->b_blocknr, + (unsigned long long)le64_to_cpu(rb->rf_blkno)); + return -EINVAL; + } + + if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) { + ocfs2_error(sb, + "Refcount block #%llu has an invalid " + "rf_fs_generation of #%u", + (unsigned long long)bh->b_blocknr, + le32_to_cpu(rb->rf_fs_generation)); + return -EINVAL; + } + + return 0; +} + +static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci, + u64 rb_blkno, + struct buffer_head **bh) +{ + int rc; + struct buffer_head *tmp = *bh; + + rc = ocfs2_read_block(ci, rb_blkno, &tmp, + ocfs2_validate_refcount_block); + + /* If ocfs2_read_block() got us a new bh, pass it up. */ + if (!rc && !*bh) + *bh = tmp; + + return rc; +} + +static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + return rf->rf_blkno; +} + +static struct super_block * +ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + return rf->rf_sb; +} + +static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + spin_lock(&rf->rf_lock); +} + +static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + spin_unlock(&rf->rf_lock); +} + +static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + mutex_lock(&rf->rf_io_mutex); +} + +static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci) +{ + struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci); + + mutex_unlock(&rf->rf_io_mutex); +} + +static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = { + .co_owner = ocfs2_refcount_cache_owner, + .co_get_super = ocfs2_refcount_cache_get_super, + .co_cache_lock = ocfs2_refcount_cache_lock, + .co_cache_unlock = ocfs2_refcount_cache_unlock, + .co_io_lock = ocfs2_refcount_cache_io_lock, + .co_io_unlock = ocfs2_refcount_cache_io_unlock, +}; + +static struct ocfs2_refcount_tree * +ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno) +{ + struct rb_node *n = osb->osb_rf_lock_tree.rb_node; + struct ocfs2_refcount_tree *tree = NULL; + + while (n) { + tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); + + if (blkno < tree->rf_blkno) + n = n->rb_left; + else if (blkno > tree->rf_blkno) + n = n->rb_right; + else + return tree; + } + + return NULL; +} + +/* osb_lock is already locked. 
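ocfs2_find_refcount_tree() above and ocfs2_insert_refcount_tree() below are the canonical kernel rb-tree walk: compare against rf_blkno, descend left or right, and on insert remember the parent link so the new node splices in place. A userspace equivalent with a plain binary search tree standing in for rb_node (no rebalancing, which the kernel's rb tree handles internally):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct tree_node {
        uint64_t blkno;
        struct tree_node *left, *right;
};

static struct tree_node *tree_find(struct tree_node *n, uint64_t blkno)
{
        while (n) {
                if (blkno < n->blkno)
                        n = n->left;
                else if (blkno > n->blkno)
                        n = n->right;
                else
                        return n;
        }
        return NULL;
}

/* Walk with a pointer-to-link so insertion happens in place, the same
 * shape as the rb_link_node() call in the patch. */
static void tree_insert(struct tree_node **root, struct tree_node *new)
{
        struct tree_node **p = root;

        while (*p) {
                if (new->blkno < (*p)->blkno)
                        p = &(*p)->left;
                else if (new->blkno > (*p)->blkno)
                        p = &(*p)->right;
                else
                        abort();  /* duplicate: "This should never happen!" */
        }
        new->left = new->right = NULL;
        *p = new;
}

int main(void)
{
        struct tree_node *root = NULL;
        struct tree_node nodes[3] = { { 30 }, { 10 }, { 20 } };

        for (int i = 0; i < 3; i++)
                tree_insert(&root, &nodes[i]);
        printf("found 20: %s\n", tree_find(root, 20) ? "yes" : "no");
        return 0;
}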
*/ +static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *new) +{ + u64 rf_blkno = new->rf_blkno; + struct rb_node *parent = NULL; + struct rb_node **p = &osb->osb_rf_lock_tree.rb_node; + struct ocfs2_refcount_tree *tmp; + + while (*p) { + parent = *p; + + tmp = rb_entry(parent, struct ocfs2_refcount_tree, + rf_node); + + if (rf_blkno < tmp->rf_blkno) + p = &(*p)->rb_left; + else if (rf_blkno > tmp->rf_blkno) + p = &(*p)->rb_right; + else { + /* This should never happen! */ + mlog(ML_ERROR, "Duplicate refcount block %llu found!\n", + (unsigned long long)rf_blkno); + BUG(); + } + } + + rb_link_node(&new->rf_node, parent, p); + rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree); +} + +static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree) +{ + ocfs2_metadata_cache_exit(&tree->rf_ci); + ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres); + ocfs2_lock_res_free(&tree->rf_lockres); + kfree(tree); +} + +static inline void +ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *tree) +{ + rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree); + if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree) + osb->osb_ref_tree_lru = NULL; +} + +static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *tree) +{ + spin_lock(&osb->osb_lock); + ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); + spin_unlock(&osb->osb_lock); +} + +void ocfs2_kref_remove_refcount_tree(struct kref *kref) +{ + struct ocfs2_refcount_tree *tree = + container_of(kref, struct ocfs2_refcount_tree, rf_getcnt); + + ocfs2_free_refcount_tree(tree); +} + +static inline void +ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree) +{ + kref_get(&tree->rf_getcnt); +} + +static inline void +ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree) +{ + kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree); +} + +static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new, + struct super_block *sb) +{ + ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops); + mutex_init(&new->rf_io_mutex); + new->rf_sb = sb; + spin_lock_init(&new->rf_lock); +} + +static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *new, + u64 rf_blkno, u32 generation) +{ + init_rwsem(&new->rf_sem); + ocfs2_refcount_lock_res_init(&new->rf_lockres, osb, + rf_blkno, generation); +} + +static struct ocfs2_refcount_tree* +ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno) +{ + struct ocfs2_refcount_tree *new; + + new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS); + if (!new) + return NULL; + + new->rf_blkno = rf_blkno; + kref_init(&new->rf_getcnt); + ocfs2_init_refcount_tree_ci(new, osb->sb); + + return new; +} + +static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno, + struct ocfs2_refcount_tree **ret_tree) +{ + int ret = 0; + struct ocfs2_refcount_tree *tree, *new = NULL; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_block *ref_rb; + + spin_lock(&osb->osb_lock); + if (osb->osb_ref_tree_lru && + osb->osb_ref_tree_lru->rf_blkno == rf_blkno) + tree = osb->osb_ref_tree_lru; + else + tree = ocfs2_find_refcount_tree(osb, rf_blkno); + if (tree) + goto out; + + spin_unlock(&osb->osb_lock); + + new = ocfs2_allocate_refcount_tree(osb, rf_blkno); + if (!new) { + ret = -ENOMEM; + mlog_errno(ret); + return ret; + } + /* + * We need the generation to create the refcount 
tree lock and since + * it isn't changed during the tree modification, we are safe here to + * read without protection. + * We also have to purge the cache after we create the lock since the + * refcount block may have the stale data. It can only be trusted when + * we hold the refcount lock. + */ + ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh); + if (ret) { + mlog_errno(ret); + ocfs2_metadata_cache_exit(&new->rf_ci); + kfree(new); + return ret; + } + + ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + new->rf_generation = le32_to_cpu(ref_rb->rf_generation); + ocfs2_init_refcount_tree_lock(osb, new, rf_blkno, + new->rf_generation); + ocfs2_metadata_cache_purge(&new->rf_ci); + + spin_lock(&osb->osb_lock); + tree = ocfs2_find_refcount_tree(osb, rf_blkno); + if (tree) + goto out; + + ocfs2_insert_refcount_tree(osb, new); + + tree = new; + new = NULL; + +out: + *ret_tree = tree; + + osb->osb_ref_tree_lru = tree; + + spin_unlock(&osb->osb_lock); + + if (new) + ocfs2_free_refcount_tree(new); + + brelse(ref_root_bh); + return ret; +} + +static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno) +{ + int ret; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di; + + ret = ocfs2_read_inode_block(inode, &di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); + + di = (struct ocfs2_dinode *)di_bh->b_data; + *ref_blkno = le64_to_cpu(di->i_refcount_loc); + brelse(di_bh); +out: + return ret; +} + +static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *tree, int rw) +{ + int ret; + + ret = ocfs2_refcount_lock(tree, rw); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (rw) + down_write(&tree->rf_sem); + else + down_read(&tree->rf_sem); + +out: + return ret; +} + +/* + * Lock the refcount tree pointed by ref_blkno and return the tree. + * In most case, we lock the tree and read the refcount block. + * So read it here if the caller really needs it. + * + * If the tree has been re-created by other node, it will free the + * old one and re-create it. + */ +int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, + u64 ref_blkno, int rw, + struct ocfs2_refcount_tree **ret_tree, + struct buffer_head **ref_bh) +{ + int ret, delete_tree = 0; + struct ocfs2_refcount_tree *tree = NULL; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_block *rb; + +again: + ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree); + if (ret) { + mlog_errno(ret); + return ret; + } + + ocfs2_refcount_tree_get(tree); + + ret = __ocfs2_lock_refcount_tree(osb, tree, rw); + if (ret) { + mlog_errno(ret); + ocfs2_refcount_tree_put(tree); + goto out; + } + + ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, + &ref_root_bh); + if (ret) { + mlog_errno(ret); + ocfs2_unlock_refcount_tree(osb, tree, rw); + ocfs2_refcount_tree_put(tree); + goto out; + } + + rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + /* + * If the refcount block has been freed and re-created, we may need + * to recreate the refcount tree also. + * + * Here we just remove the tree from the rb-tree, and the last + * kref holder will unlock and delete this refcount_tree. + * Then we goto "again" and ocfs2_get_refcount_tree will create + * the new refcount tree for us. 
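The comment above describes the stale-tree dance that follows: if the generation stored in the refcount block no longer matches the cached tree, the cached copy is unhooked from the rb tree, its extra reference is dropped, and the lookup restarts from scratch. A compact model of that compare-and-retry loop; it is single-threaded, so by construction it settles after one retry:

#include <stdio.h>

struct cached_tree { unsigned int generation; int removed; };

static unsigned int disk_generation = 2;        /* what the block says */
static struct cached_tree cache = { 1, 0 };     /* stale in-memory copy */

static struct cached_tree *get_tree(void)
{
        if (cache.removed)  /* recreate, as ocfs2_get_refcount_tree would */
                cache = (struct cached_tree){ disk_generation, 0 };
        return &cache;
}

int main(void)
{
        struct cached_tree *t;
        int retries = 0;

again:
        t = get_tree();
        if (t->generation != disk_generation) {
                /* Matches the patch: mark the stale tree removed, drop
                 * it, and take the lookup path again. */
                t->removed = 1;
                retries++;
                goto again;
        }
        printf("settled on generation %u after %d retry\n",
               t->generation, retries);
        return 0;
}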
+ */ + if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) { + if (!tree->rf_removed) { + ocfs2_erase_refcount_tree_from_list(osb, tree); + tree->rf_removed = 1; + delete_tree = 1; + } + + ocfs2_unlock_refcount_tree(osb, tree, rw); + /* + * We get an extra reference when we create the refcount + * tree, so another put will destroy it. + */ + if (delete_tree) + ocfs2_refcount_tree_put(tree); + brelse(ref_root_bh); + ref_root_bh = NULL; + goto again; + } + + *ret_tree = tree; + if (ref_bh) { + *ref_bh = ref_root_bh; + ref_root_bh = NULL; + } +out: + brelse(ref_root_bh); + return ret; +} + +int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw, + struct ocfs2_refcount_tree **ret_tree, + struct buffer_head **ref_bh) +{ + int ret; + u64 ref_blkno; + + ret = ocfs2_get_refcount_block(inode, &ref_blkno); + if (ret) { + mlog_errno(ret); + return ret; + } + + return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, + rw, ret_tree, ref_bh); +} + +void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *tree, int rw) +{ + if (rw) + up_write(&tree->rf_sem); + else + up_read(&tree->rf_sem); + + ocfs2_refcount_unlock(tree, rw); + ocfs2_refcount_tree_put(tree); +} + +void ocfs2_purge_refcount_trees(struct ocfs2_super *osb) +{ + struct rb_node *node; + struct ocfs2_refcount_tree *tree; + struct rb_root *root = &osb->osb_rf_lock_tree; + + while ((node = rb_last(root)) != NULL) { + tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); + + mlog(0, "Purge tree %llu\n", + (unsigned long long) tree->rf_blkno); + + rb_erase(&tree->rf_node, root); + ocfs2_free_refcount_tree(tree); + } +} + +/* + * Create a refcount tree for an inode. + * We take for granted that the inode is already locked. + */ +static int ocfs2_create_refcount_tree(struct inode *inode, + struct buffer_head *di_bh) +{ + int ret; + handle_t *handle = NULL; + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *new_bh = NULL; + struct ocfs2_refcount_block *rb; + struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL; + u16 suballoc_bit_start; + u32 num_got; + u64 first_blkno; + + BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); + + mlog(0, "create tree for inode %lu\n", inode->i_ino); + + ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + &suballoc_bit_start, &num_got, + &first_blkno); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno); + if (!new_tree) { + ret = -ENOMEM; + mlog_errno(ret); + goto out_commit; + } + + new_bh = sb_getblk(inode->i_sb, first_blkno); + ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh); + + ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + /* Initialize ocfs2_refcount_block. 
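The initialization below writes every multi-byte field through cpu_to_le16/32/64, so the block lands on disk byte-order independent. A self-contained imitation using glibc's htole* converters from <endian.h>; the field subset and values are illustrative, not the full on-disk layout:

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct toy_refcount_block {
        char     rf_signature[8];
        uint16_t rf_suballoc_slot;
        uint32_t rf_fs_generation;
        uint64_t rf_blkno;
        uint32_t rf_count;
};

static void init_block(struct toy_refcount_block *rb, uint64_t blkno,
                       uint16_t slot, uint32_t fs_gen)
{
        memset(rb, 0, sizeof(*rb));             /* zero everything first */
        strcpy(rb->rf_signature, "REFCNT1");    /* magic is a byte string */
        rb->rf_suballoc_slot = htole16(slot);
        rb->rf_fs_generation = htole32(fs_gen);
        rb->rf_blkno         = htole64(blkno);  /* block echoes its address */
        rb->rf_count         = htole32(1);      /* first and only owner */
}

int main(void)
{
        struct toy_refcount_block rb;

        init_block(&rb, 8193, 0, 0xdeadbeef);
        printf("count back in cpu order: %u\n",
               (unsigned int)le32toh(rb.rf_count));
        return 0;
}

Zeroing the whole block before filling it in matters on disk: reserved fields must read back as zero for future feature bits to be usable.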
*/ + rb = (struct ocfs2_refcount_block *)new_bh->b_data; + memset(rb, 0, inode->i_sb->s_blocksize); + strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); + rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num); + rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); + rb->rf_fs_generation = cpu_to_le32(osb->fs_generation); + rb->rf_blkno = cpu_to_le64(first_blkno); + rb->rf_count = cpu_to_le32(1); + rb->rf_records.rl_count = + cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb)); + spin_lock(&osb->osb_lock); + rb->rf_generation = osb->s_next_generation++; + spin_unlock(&osb->osb_lock); + + ocfs2_journal_dirty(handle, new_bh); + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + di->i_refcount_loc = cpu_to_le64(first_blkno); + spin_unlock(&oi->ip_lock); + + mlog(0, "created tree for inode %lu, refblock %llu\n", + inode->i_ino, (unsigned long long)first_blkno); + + ocfs2_journal_dirty(handle, di_bh); + + /* + * We have to init the tree lock here since it will use + * the generation number to create it. + */ + new_tree->rf_generation = le32_to_cpu(rb->rf_generation); + ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno, + new_tree->rf_generation); + + spin_lock(&osb->osb_lock); + tree = ocfs2_find_refcount_tree(osb, first_blkno); + + /* + * We've just created a new refcount tree in this block. If + * we found a refcount tree on the ocfs2_super, it must be + * one we just deleted. We free the old tree before + * inserting the new tree. + */ + BUG_ON(tree && tree->rf_generation == new_tree->rf_generation); + if (tree) + ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree); + ocfs2_insert_refcount_tree(osb, new_tree); + spin_unlock(&osb->osb_lock); + new_tree = NULL; + if (tree) + ocfs2_refcount_tree_put(tree); + +out_commit: + ocfs2_commit_trans(osb, handle); + +out: + if (new_tree) { + ocfs2_metadata_cache_exit(&new_tree->rf_ci); + kfree(new_tree); + } + + brelse(new_bh); + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + + return ret; +} + +static int ocfs2_set_refcount_tree(struct inode *inode, + struct buffer_head *di_bh, + u64 refcount_loc) +{ + int ret; + handle_t *handle = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_block *rb; + struct ocfs2_refcount_tree *ref_tree; + + BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL); + + ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1, + &ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + return ret; + } + + handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + le32_add_cpu(&rb->rf_count, 1); + + ocfs2_journal_dirty(handle, ref_root_bh); + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + di->i_refcount_loc = cpu_to_le64(refcount_loc); + spin_unlock(&oi->ip_lock); + ocfs2_journal_dirty(handle, di_bh); + +out_commit: 
+ ocfs2_commit_trans(osb, handle); +out: + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + brelse(ref_root_bh); + + return ret; +} + +int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh) +{ + int ret, delete_tree = 0; + handle_t *handle = NULL; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_refcount_block *rb; + struct inode *alloc_inode = NULL; + struct buffer_head *alloc_bh = NULL; + struct buffer_head *blk_bh = NULL; + struct ocfs2_refcount_tree *ref_tree; + int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS; + u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc); + u16 bit = 0; + + if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) + return 0; + + BUG_ON(!ref_blkno); + ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh); + if (ret) { + mlog_errno(ret); + return ret; + } + + rb = (struct ocfs2_refcount_block *)blk_bh->b_data; + + /* + * If we are the last user, we need to free the block. + * So lock the allocator ahead. + */ + if (le32_to_cpu(rb->rf_count) == 1) { + blk = le64_to_cpu(rb->rf_blkno); + bit = le16_to_cpu(rb->rf_suballoc_bit); + bg_blkno = ocfs2_which_suballoc_group(blk, bit); + + alloc_inode = ocfs2_get_system_file_inode(osb, + EXTENT_ALLOC_SYSTEM_INODE, + le16_to_cpu(rb->rf_suballoc_slot)); + if (!alloc_inode) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + mutex_lock(&alloc_inode->i_mutex); + + ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1); + if (ret) { + mlog_errno(ret); + goto out_mutex; + } + + credits += OCFS2_SUBALLOC_FREE; + } + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out_unlock; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + spin_lock(&oi->ip_lock); + oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL; + di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features); + di->i_refcount_loc = 0; + spin_unlock(&oi->ip_lock); + ocfs2_journal_dirty(handle, di_bh); + + le32_add_cpu(&rb->rf_count , -1); + ocfs2_journal_dirty(handle, blk_bh); + + if (!rb->rf_count) { + delete_tree = 1; + ocfs2_erase_refcount_tree_from_list(osb, ref_tree); + ret = ocfs2_free_suballoc_bits(handle, alloc_inode, + alloc_bh, bit, bg_blkno, 1); + if (ret) + mlog_errno(ret); + } + +out_commit: + ocfs2_commit_trans(osb, handle); +out_unlock: + if (alloc_inode) { + ocfs2_inode_unlock(alloc_inode, 1); + brelse(alloc_bh); + } +out_mutex: + if (alloc_inode) { + mutex_unlock(&alloc_inode->i_mutex); + iput(alloc_inode); + } +out: + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + if (delete_tree) + ocfs2_refcount_tree_put(ref_tree); + brelse(blk_bh); + + return ret; +} + +static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci, + struct buffer_head *ref_leaf_bh, + u64 cpos, unsigned int len, + struct ocfs2_refcount_rec *ret_rec, + int *index) +{ + int i = 0; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_rec *rec = NULL; + + for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) { + rec = &rb->rf_records.rl_recs[i]; + + if (le64_to_cpu(rec->r_cpos) + + le32_to_cpu(rec->r_clusters) <= cpos) + 
continue; + else if (le64_to_cpu(rec->r_cpos) > cpos) + break; + + /* ok, cpos fail in this rec. Just return. */ + if (ret_rec) + *ret_rec = *rec; + goto out; + } + + if (ret_rec) { + /* We meet with a hole here, so fake the rec. */ + ret_rec->r_cpos = cpu_to_le64(cpos); + ret_rec->r_refcount = 0; + if (i < le16_to_cpu(rb->rf_records.rl_used) && + le64_to_cpu(rec->r_cpos) < cpos + len) + ret_rec->r_clusters = + cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos); + else + ret_rec->r_clusters = cpu_to_le32(len); + } + +out: + *index = i; +} + +/* + * Try to remove refcount tree. The mechanism is: + * 1) Check whether i_clusters == 0, if no, exit. + * 2) check whether we have i_xattr_loc in dinode. if yes, exit. + * 3) Check whether we have inline xattr stored outside, if yes, exit. + * 4) Remove the tree. + */ +int ocfs2_try_remove_refcount_tree(struct inode *inode, + struct buffer_head *di_bh) +{ + int ret; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + + down_write(&oi->ip_xattr_sem); + down_write(&oi->ip_alloc_sem); + + if (oi->ip_clusters) + goto out; + + if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc) + goto out; + + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL && + ocfs2_has_inline_xattr_value_outside(inode, di)) + goto out; + + ret = ocfs2_remove_refcount_tree(inode, di_bh); + if (ret) + mlog_errno(ret); +out: + up_write(&oi->ip_alloc_sem); + up_write(&oi->ip_xattr_sem); + return 0; +} + +/* + * Given a cpos and len, try to find the refcount record which contains cpos. + * 1. If cpos can be found in one refcount record, return the record. + * 2. If cpos can't be found, return a fake record which start from cpos + * and end at a small value between cpos+len and start of the next record. + * This fake record has r_refcount = 0. + */ +static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + u64 cpos, unsigned int len, + struct ocfs2_refcount_rec *ret_rec, + int *index, + struct buffer_head **ret_bh) +{ + int ret = 0, i, found; + u32 low_cpos; + struct ocfs2_extent_list *el; + struct ocfs2_extent_rec *tmp, *rec = NULL; + struct ocfs2_extent_block *eb; + struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_root_bh->b_data; + + if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) { + ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len, + ret_rec, index); + *ret_bh = ref_root_bh; + get_bh(ref_root_bh); + return 0; + } + + el = &rb->rf_list; + low_cpos = cpos & OCFS2_32BIT_POS_MASK; + + if (el->l_tree_depth) { + ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (el->l_tree_depth) { + ocfs2_error(sb, + "refcount tree %llu has non zero tree " + "depth in leaf btree tree block %llu\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + } + + found = 0; + for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { + rec = &el->l_recs[i]; + + if (le32_to_cpu(rec->e_cpos) <= low_cpos) { + found = 1; + break; + } + } + + /* adjust len when we have ocfs2_extent_rec after it. 
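For intuition, the hole case handled by ocfs2_find_refcount_rec_in_rl() above can be modelled outside the kernel. A minimal sketch in plain C, with the little-endian on-disk fields simplified to host-endian integers and a hypothetical fake_hole_rec() helper (none of this is part of the patch):

	#include <stdint.h>

	/* Host-endian stand-in for struct ocfs2_refcount_rec. */
	struct rec {
		uint64_t r_cpos;
		uint32_t r_clusters;
		uint32_t r_refcount;
	};

	/*
	 * Fake a record for a hole at cpos, as the tail of
	 * ocfs2_find_refcount_rec_in_rl() does: the fake carries
	 * r_refcount = 0 and ends either after len clusters or at the
	 * next real record, whichever comes first.
	 */
	static void fake_hole_rec(struct rec *out, uint64_t cpos, uint32_t len,
				  const struct rec *next /* NULL if none */)
	{
		out->r_cpos = cpos;
		out->r_refcount = 0;
		if (next && next->r_cpos < cpos + len)
			out->r_clusters = (uint32_t)(next->r_cpos - cpos);
		else
			out->r_clusters = len;
	}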
*/ + if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) { + tmp = &el->l_recs[i+1]; + + if (le32_to_cpu(tmp->e_cpos) < cpos + len) + len = le32_to_cpu(tmp->e_cpos) - cpos; + } + + ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno), + &ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len, + ret_rec, index); + *ret_bh = ref_leaf_bh; +out: + brelse(eb_bh); + return ret; +} + +enum ocfs2_ref_rec_contig { + REF_CONTIG_NONE = 0, + REF_CONTIG_LEFT, + REF_CONTIG_RIGHT, + REF_CONTIG_LEFTRIGHT, +}; + +static enum ocfs2_ref_rec_contig + ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb, + int index) +{ + if ((rb->rf_records.rl_recs[index].r_refcount == + rb->rf_records.rl_recs[index + 1].r_refcount) && + (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) + + le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) == + le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos))) + return REF_CONTIG_RIGHT; + + return REF_CONTIG_NONE; +} + +static enum ocfs2_ref_rec_contig + ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb, + int index) +{ + enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE; + + if (index < le16_to_cpu(rb->rf_records.rl_used) - 1) + ret = ocfs2_refcount_rec_adjacent(rb, index); + + if (index > 0) { + enum ocfs2_ref_rec_contig tmp; + + tmp = ocfs2_refcount_rec_adjacent(rb, index - 1); + + if (tmp == REF_CONTIG_RIGHT) { + if (ret == REF_CONTIG_RIGHT) + ret = REF_CONTIG_LEFTRIGHT; + else + ret = REF_CONTIG_LEFT; + } + } + + return ret; +} + +static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb, + int index) +{ + BUG_ON(rb->rf_records.rl_recs[index].r_refcount != + rb->rf_records.rl_recs[index+1].r_refcount); + + le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters, + le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters)); + + if (index < le16_to_cpu(rb->rf_records.rl_used) - 2) + memmove(&rb->rf_records.rl_recs[index + 1], + &rb->rf_records.rl_recs[index + 2], + sizeof(struct ocfs2_refcount_rec) * + (le16_to_cpu(rb->rf_records.rl_used) - index - 2)); + + memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1], + 0, sizeof(struct ocfs2_refcount_rec)); + le16_add_cpu(&rb->rf_records.rl_used, -1); +} + +/* + * Merge the refcount rec if we are contiguous with the adjacent recs. + */ +static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb, + int index) +{ + enum ocfs2_ref_rec_contig contig = + ocfs2_refcount_rec_contig(rb, index); + + if (contig == REF_CONTIG_NONE) + return; + + if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) { + BUG_ON(index == 0); + index--; + } + + ocfs2_rotate_refcount_rec_left(rb, index); + + if (contig == REF_CONTIG_LEFTRIGHT) + ocfs2_rotate_refcount_rec_left(rb, index); +} + +/* + * Change the refcount indexed by "index" in ref_bh. + * If refcount reaches 0, remove it. 
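Both ocfs2_rotate_refcount_rec_left() above and the zero-count removal in ocfs2_change_refcount_rec() below compact rl_recs with the same memmove-and-clear pattern. A standalone sketch of that pattern, using a host-endian stand-in for the record type (illustration only, not part of the patch):

	#include <stdint.h>
	#include <string.h>

	struct rec { uint64_t r_cpos; uint32_t r_clusters; uint32_t r_refcount; };

	/* Drop entry 'index' from a packed list of 'used' records. */
	static uint16_t delete_rec(struct rec *recs, uint16_t used, int index)
	{
		if (index != used - 1)
			memmove(&recs[index], &recs[index + 1],
				(used - index - 1) * sizeof(struct rec));
		memset(&recs[used - 1], 0, sizeof(struct rec));
		return used - 1;	/* the new rl_used */
	}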
+ */ +static int ocfs2_change_refcount_rec(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_leaf_bh, + int index, int merge, int change) +{ + int ret; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_list *rl = &rb->rf_records; + struct ocfs2_refcount_rec *rec = &rl->rl_recs[index]; + + ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "change index %d, old count %u, change %d\n", index, + le32_to_cpu(rec->r_refcount), change); + le32_add_cpu(&rec->r_refcount, change); + + if (!rec->r_refcount) { + if (index != le16_to_cpu(rl->rl_used) - 1) { + memmove(rec, rec + 1, + (le16_to_cpu(rl->rl_used) - index - 1) * + sizeof(struct ocfs2_refcount_rec)); + memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1], + 0, sizeof(struct ocfs2_refcount_rec)); + } + + le16_add_cpu(&rl->rl_used, -1); + } else if (merge) + ocfs2_refcount_rec_merge(rb, index); + + ret = ocfs2_journal_dirty(handle, ref_leaf_bh); + if (ret) + mlog_errno(ret); +out: + return ret; +} + +static int ocfs2_expand_inline_ref_root(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head **ref_leaf_bh, + struct ocfs2_alloc_context *meta_ac) +{ + int ret; + u16 suballoc_bit_start; + u32 num_got; + u64 blkno; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct buffer_head *new_bh = NULL; + struct ocfs2_refcount_block *new_rb; + struct ocfs2_refcount_block *root_rb = + (struct ocfs2_refcount_block *)ref_root_bh->b_data; + + ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1, + &suballoc_bit_start, &num_got, + &blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + + new_bh = sb_getblk(sb, blkno); + if (new_bh == NULL) { + ret = -EIO; + mlog_errno(ret); + goto out; + } + ocfs2_set_new_buffer_uptodate(ci, new_bh); + + ret = ocfs2_journal_access_rb(handle, ci, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * Initialize ocfs2_refcount_block. + * It should contain the same information as the old root. + * so just memcpy it and change the corresponding field. + */ + memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize); + + new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; + new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num); + new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); + new_rb->rf_blkno = cpu_to_le64(blkno); + new_rb->rf_cpos = cpu_to_le32(0); + new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); + new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); + ocfs2_journal_dirty(handle, new_bh); + + /* Now change the root. 
*/ + memset(&root_rb->rf_list, 0, sb->s_blocksize - + offsetof(struct ocfs2_refcount_block, rf_list)); + root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb)); + root_rb->rf_clusters = cpu_to_le32(1); + root_rb->rf_list.l_next_free_rec = cpu_to_le16(1); + root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno); + root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1); + root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL); + + ocfs2_journal_dirty(handle, ref_root_bh); + + mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno, + le16_to_cpu(new_rb->rf_records.rl_used)); + + *ref_leaf_bh = new_bh; + new_bh = NULL; +out: + brelse(new_bh); + return ret; +} + +static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev, + struct ocfs2_refcount_rec *next) +{ + if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <= + ocfs2_get_ref_rec_low_cpos(next)) + return 1; + + return 0; +} + +static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b) +{ + const struct ocfs2_refcount_rec *l = a, *r = b; + u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l); + u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r); + + if (l_cpos > r_cpos) + return 1; + if (l_cpos < r_cpos) + return -1; + return 0; +} + +static int cmp_refcount_rec_by_cpos(const void *a, const void *b) +{ + const struct ocfs2_refcount_rec *l = a, *r = b; + u64 l_cpos = le64_to_cpu(l->r_cpos); + u64 r_cpos = le64_to_cpu(r->r_cpos); + + if (l_cpos > r_cpos) + return 1; + if (l_cpos < r_cpos) + return -1; + return 0; +} + +static void swap_refcount_rec(void *a, void *b, int size) +{ + struct ocfs2_refcount_rec *l = a, *r = b, tmp; + + tmp = *(struct ocfs2_refcount_rec *)l; + *(struct ocfs2_refcount_rec *)l = + *(struct ocfs2_refcount_rec *)r; + *(struct ocfs2_refcount_rec *)r = tmp; +} + +/* + * The refcount cpos are ordered by their 64bit cpos, + * But we will use the low 32 bit to be the e_cpos in the b-tree. + * So we need to make sure that this pos isn't intersected with others. + * + * Note: The refcount block is already sorted by their low 32 bit cpos, + * So just try the middle pos first, and we will exit when we find + * the good position. 
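The point of the note above is that only the low 32 bits of r_cpos become the b-tree key. A quick host-side check of that keying, with 0xffffffff assumed as the value of OCFS2_32BIT_POS_MASK (illustration only):

	#include <assert.h>
	#include <stdint.h>

	#define POS_MASK 0xffffffffULL	/* stand-in for OCFS2_32BIT_POS_MASK */

	int main(void)
	{
		/* Two 64-bit cpos values that differ in the high word... */
		uint64_t a_cpos = 0x100000010ULL, b_cpos = 0x200000030ULL;
		uint32_t a_clusters = 0x20;

		/* ...key as 0x10 and 0x30 in the refcount b-tree. */
		assert((a_cpos & POS_MASK) == 0x10);
		assert((b_cpos & POS_MASK) == 0x30);

		/* No intersection: a's low key + clusters <= b's low key. */
		assert((a_cpos & POS_MASK) + a_clusters <= (b_cpos & POS_MASK));
		return 0;
	}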
+ */ +static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl, + u32 *split_pos, int *split_index) +{ + int num_used = le16_to_cpu(rl->rl_used); + int delta, middle = num_used / 2; + + for (delta = 0; delta < middle; delta++) { + /* Let's check delta earlier than middle */ + if (ocfs2_refcount_rec_no_intersect( + &rl->rl_recs[middle - delta - 1], + &rl->rl_recs[middle - delta])) { + *split_index = middle - delta; + break; + } + + /* For even counts, don't walk off the end */ + if ((middle + delta + 1) == num_used) + continue; + + /* Now try delta past middle */ + if (ocfs2_refcount_rec_no_intersect( + &rl->rl_recs[middle + delta], + &rl->rl_recs[middle + delta + 1])) { + *split_index = middle + delta + 1; + break; + } + } + + if (delta >= middle) + return -ENOSPC; + + *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]); + return 0; +} + +static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh, + struct buffer_head *new_bh, + u32 *split_cpos) +{ + int split_index = 0, num_moved, ret; + u32 cpos = 0; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_list *rl = &rb->rf_records; + struct ocfs2_refcount_block *new_rb = + (struct ocfs2_refcount_block *)new_bh->b_data; + struct ocfs2_refcount_list *new_rl = &new_rb->rf_records; + + mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n", + (unsigned long long)ref_leaf_bh->b_blocknr, + le32_to_cpu(rl->rl_count), le32_to_cpu(rl->rl_used)); + + /* + * XXX: Improvement later. + * If we know all the high 32 bit cpos is the same, no need to sort. + * + * In order to make the whole process safe, we do: + * 1. sort the entries by their low 32 bit cpos first so that we can + * find the split cpos easily. + * 2. call ocfs2_insert_extent to insert the new refcount block. + * 3. move the refcount rec to the new block. + * 4. sort the entries by their 64 bit cpos. + * 5. dirty the new_rb and rb. + */ + sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), + sizeof(struct ocfs2_refcount_rec), + cmp_refcount_rec_by_low_cpos, swap_refcount_rec); + + ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index); + if (ret) { + mlog_errno(ret); + return ret; + } + + new_rb->rf_cpos = cpu_to_le32(cpos); + + /* move refcount records starting from split_index to the new block. */ + num_moved = le16_to_cpu(rl->rl_used) - split_index; + memcpy(new_rl->rl_recs, &rl->rl_recs[split_index], + num_moved * sizeof(struct ocfs2_refcount_rec)); + + /*ok, remove the entries we just moved over to the other block. */ + memset(&rl->rl_recs[split_index], 0, + num_moved * sizeof(struct ocfs2_refcount_rec)); + + /* change old and new rl_used accordingly. 
*/ + le16_add_cpu(&rl->rl_used, -num_moved); + new_rl->rl_used = cpu_to_le32(num_moved); + + sort(&rl->rl_recs, le16_to_cpu(rl->rl_used), + sizeof(struct ocfs2_refcount_rec), + cmp_refcount_rec_by_cpos, swap_refcount_rec); + + sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used), + sizeof(struct ocfs2_refcount_rec), + cmp_refcount_rec_by_cpos, swap_refcount_rec); + + *split_cpos = cpos; + return 0; +} + +static int ocfs2_new_leaf_refcount_block(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_alloc_context *meta_ac) +{ + int ret; + u16 suballoc_bit_start; + u32 num_got, new_cpos; + u64 blkno; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct ocfs2_refcount_block *root_rb = + (struct ocfs2_refcount_block *)ref_root_bh->b_data; + struct buffer_head *new_bh = NULL; + struct ocfs2_refcount_block *new_rb; + struct ocfs2_extent_tree ref_et; + + BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)); + + ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1, + &suballoc_bit_start, &num_got, + &blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + + new_bh = sb_getblk(sb, blkno); + if (new_bh == NULL) { + ret = -EIO; + mlog_errno(ret); + goto out; + } + ocfs2_set_new_buffer_uptodate(ci, new_bh); + + ret = ocfs2_journal_access_rb(handle, ci, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* Initialize ocfs2_refcount_block. */ + new_rb = (struct ocfs2_refcount_block *)new_bh->b_data; + memset(new_rb, 0, sb->s_blocksize); + strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE); + new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num); + new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start); + new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation); + new_rb->rf_blkno = cpu_to_le64(blkno); + new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr); + new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL); + new_rb->rf_records.rl_count = + cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); + new_rb->rf_generation = root_rb->rf_generation; + + ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_journal_dirty(handle, ref_leaf_bh); + ocfs2_journal_dirty(handle, new_bh); + + ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh); + + mlog(0, "insert new leaf block %llu at %u\n", + (unsigned long long)new_bh->b_blocknr, new_cpos); + + /* Insert the new leaf block with the specific offset cpos. */ + ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr, + 1, 0, meta_ac); + if (ret) + mlog_errno(ret); + +out: + brelse(new_bh); + return ret; +} + +static int ocfs2_expand_refcount_tree(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_alloc_context *meta_ac) +{ + int ret; + struct buffer_head *expand_bh = NULL; + + if (ref_root_bh == ref_leaf_bh) { + /* + * the old root bh hasn't been expanded to a b-tree, + * so expand it first. 
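The split-point search performed by ocfs2_find_refcount_split_pos() above is self-contained enough to mimic in isolation. A host-endian sketch of the outward-from-the-middle walk, with plain integers in place of the __le fields (not part of the patch):

	#include <stdint.h>

	struct rec { uint64_t r_cpos; uint32_t r_clusters; };

	/* The no-intersect test, on low-32-bit keys. */
	static int no_intersect(const struct rec *prev, const struct rec *next)
	{
		return (uint32_t)prev->r_cpos + prev->r_clusters <=
		       (uint32_t)next->r_cpos;
	}

	/*
	 * Walk outward from the middle and return the first index where
	 * the sorted list can be cut cleanly, or -1 when every
	 * neighbouring pair overlaps in the low 32 bits (the kernel maps
	 * that to -ENOSPC).
	 */
	static int find_split_index(const struct rec *recs, int used)
	{
		int delta, middle = used / 2;

		for (delta = 0; delta < middle; delta++) {
			if (no_intersect(&recs[middle - delta - 1],
					 &recs[middle - delta]))
				return middle - delta;

			/* For even counts, don't walk off the end. */
			if (middle + delta + 1 == used)
				continue;

			if (no_intersect(&recs[middle + delta],
					 &recs[middle + delta + 1]))
				return middle + delta + 1;
		}
		return -1;
	}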
+ */ + ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh, + &expand_bh, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } else { + expand_bh = ref_leaf_bh; + get_bh(expand_bh); + } + + + /* Now add a new refcount block into the tree.*/ + ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh, + expand_bh, meta_ac); + if (ret) + mlog_errno(ret); +out: + brelse(expand_bh); + return ret; +} + +/* + * Adjust the extent rec in b-tree representing ref_leaf_bh. + * + * Only called when we have inserted a new refcount rec at index 0 + * which means ocfs2_extent_rec.e_cpos may need some change. + */ +static int ocfs2_adjust_refcount_rec(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_refcount_rec *rec) +{ + int ret = 0, i; + u32 new_cpos, old_cpos; + struct ocfs2_path *path = NULL; + struct ocfs2_extent_tree et; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_root_bh->b_data; + struct ocfs2_extent_list *el; + + if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) + goto out; + + rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + old_cpos = le32_to_cpu(rb->rf_cpos); + new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK; + if (old_cpos <= new_cpos) + goto out; + + ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); + + path = ocfs2_new_path_from_et(&et); + if (!path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(ci, path, old_cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * 2 more credits, one for the leaf refcount block, one for + * the extent block contains the extent rec. + */ + ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path), + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + /* change the leaf extent block first. */ + el = path_leaf_el(path); + + for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) + if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos) + break; + + BUG_ON(i == le16_to_cpu(el->l_next_free_rec)); + + el->l_recs[i].e_cpos = cpu_to_le32(new_cpos); + + /* change the r_cpos in the leaf block. 
*/ + rb->rf_cpos = cpu_to_le32(new_cpos); + + ocfs2_journal_dirty(handle, path_leaf_bh(path)); + ocfs2_journal_dirty(handle, ref_leaf_bh); + +out: + ocfs2_free_path(path); + return ret; +} + +static int ocfs2_insert_refcount_rec(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_refcount_rec *rec, + int index, int merge, + struct ocfs2_alloc_context *meta_ac) +{ + int ret; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_list *rf_list = &rb->rf_records; + struct buffer_head *new_bh = NULL; + + BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); + + if (rf_list->rl_used == rf_list->rl_count) { + u64 cpos = le64_to_cpu(rec->r_cpos); + u32 len = le32_to_cpu(rec->r_clusters); + + ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh, + ref_leaf_bh, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_get_refcount_rec(ci, ref_root_bh, + cpos, len, NULL, &index, + &new_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ref_leaf_bh = new_bh; + rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + rf_list = &rb->rf_records; + } + + ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (index < le16_to_cpu(rf_list->rl_used)) + memmove(&rf_list->rl_recs[index + 1], + &rf_list->rl_recs[index], + (le16_to_cpu(rf_list->rl_used) - index) * + sizeof(struct ocfs2_refcount_rec)); + + mlog(0, "insert refcount record start %llu, len %u, count %u " + "to leaf block %llu at index %d\n", + (unsigned long long)le64_to_cpu(rec->r_cpos), + le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount), + (unsigned long long)ref_leaf_bh->b_blocknr, index); + + rf_list->rl_recs[index] = *rec; + + le16_add_cpu(&rf_list->rl_used, 1); + + if (merge) + ocfs2_refcount_rec_merge(rb, index); + + ret = ocfs2_journal_dirty(handle, ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (index == 0) { + ret = ocfs2_adjust_refcount_rec(handle, ci, + ref_root_bh, + ref_leaf_bh, rec); + if (ret) + mlog_errno(ret); + } +out: + brelse(new_bh); + return ret; +} + +/* + * Split the refcount_rec indexed by "index" in ref_leaf_bh. + * This is much simple than our b-tree code. + * split_rec is the new refcount rec we want to insert. + * If split_rec->r_refcount > 0, we are changing the refcount(in case we + * increase refcount or decrease a refcount to non-zero). + * If split_rec->r_refcount == 0, we are punching a hole in current refcount + * rec( in case we decrease a refcount to zero). 
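The first thing the function below derives from this distinction is how many extra records the split consumes. A compact host-endian restatement of that case analysis (illustration only):

	#include <stdint.h>

	/*
	 * Extra records needed by a split, per the reasoning in
	 * ocfs2_split_refcount_rec(): a hole punched at an edge reuses
	 * the original record, an interior change needs one extra
	 * record, and an interior non-hole needs two (the original
	 * becomes the head; a middle and a tail record are added).
	 */
	static int split_recs_needed(uint64_t orig_cpos, uint32_t orig_len,
				     uint64_t split_cpos, uint32_t split_len,
				     uint32_t split_refcount)
	{
		int head = (split_cpos == orig_cpos);
		int tail = (split_cpos + split_len == orig_cpos + orig_len);

		if (!split_refcount && (head || tail))
			return 0;
		if (split_refcount && !head && !tail)
			return 2;
		return 1;
	}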
+ */ +static int ocfs2_split_refcount_rec(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_refcount_rec *split_rec, + int index, int merge, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret, recs_need; + u32 len; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_list *rf_list = &rb->rf_records; + struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index]; + struct ocfs2_refcount_rec *tail_rec = NULL; + struct buffer_head *new_bh = NULL; + + BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL); + + mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n", + le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters), + le64_to_cpu(split_rec->r_cpos), + le32_to_cpu(split_rec->r_clusters)); + + /* + * If we just need to split the header or tail clusters, + * no more recs are needed, just split is OK. + * Otherwise we at least need one new recs. + */ + if (!split_rec->r_refcount && + (split_rec->r_cpos == orig_rec->r_cpos || + le64_to_cpu(split_rec->r_cpos) + + le32_to_cpu(split_rec->r_clusters) == + le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters))) + recs_need = 0; + else + recs_need = 1; + + /* + * We need one more rec if we split in the middle and the new rec have + * some refcount in it. + */ + if (split_rec->r_refcount && + (split_rec->r_cpos != orig_rec->r_cpos && + le64_to_cpu(split_rec->r_cpos) + + le32_to_cpu(split_rec->r_clusters) != + le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters))) + recs_need++; + + /* If the leaf block don't have enough record, expand it. */ + if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) { + struct ocfs2_refcount_rec tmp_rec; + u64 cpos = le64_to_cpu(orig_rec->r_cpos); + len = le32_to_cpu(orig_rec->r_clusters); + ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh, + ref_leaf_bh, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * We have to re-get it since now cpos may be moved to + * another leaf block. + */ + ret = ocfs2_get_refcount_rec(ci, ref_root_bh, + cpos, len, &tmp_rec, &index, + &new_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ref_leaf_bh = new_bh; + rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + rf_list = &rb->rf_records; + orig_rec = &rf_list->rl_recs[index]; + } + + ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * We have calculated out how many new records we need and store + * in recs_need, so spare enough space first by moving the records + * after "index" to the end. + */ + if (index != le16_to_cpu(rf_list->rl_used) - 1) + memmove(&rf_list->rl_recs[index + 1 + recs_need], + &rf_list->rl_recs[index + 1], + (le16_to_cpu(rf_list->rl_used) - index - 1) * + sizeof(struct ocfs2_refcount_rec)); + + len = (le64_to_cpu(orig_rec->r_cpos) + + le32_to_cpu(orig_rec->r_clusters)) - + (le64_to_cpu(split_rec->r_cpos) + + le32_to_cpu(split_rec->r_clusters)); + + /* + * If we have "len", the we will split in the tail and move it + * to the end of the space we have just spared. 
+	 */
+	if (len) {
+		tail_rec = &rf_list->rl_recs[index + recs_need];
+
+		memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
+		le64_add_cpu(&tail_rec->r_cpos,
+			     le32_to_cpu(tail_rec->r_clusters) - len);
+		tail_rec->r_clusters = cpu_to_le32(len);
+	}
+
+	/*
+	 * If the split pos isn't the same as the original one, we need to
+	 * split in the head.
+	 *
+	 * Note: we may have split_rec.r_refcount = 0, recs_need = 0 and
+	 * len > 0, which means we just cut the head off orig_rec. In that
+	 * case orig_rec was already modified above, so the r_cpos check
+	 * alone would be fooled; hence the extra tail_rec != orig_rec test.
+	 */
+	if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
+		len = le64_to_cpu(split_rec->r_cpos) -
+		      le64_to_cpu(orig_rec->r_cpos);
+		orig_rec->r_clusters = cpu_to_le32(len);
+		index++;
+	}
+
+	le16_add_cpu(&rf_list->rl_used, recs_need);
+
+	if (split_rec->r_refcount) {
+		rf_list->rl_recs[index] = *split_rec;
+		mlog(0, "insert refcount record start %llu, len %u, count %u "
+		     "to leaf block %llu at index %d\n",
+		     (unsigned long long)le64_to_cpu(split_rec->r_cpos),
+		     le32_to_cpu(split_rec->r_clusters),
+		     le32_to_cpu(split_rec->r_refcount),
+		     (unsigned long long)ref_leaf_bh->b_blocknr, index);
+
+		if (merge)
+			ocfs2_refcount_rec_merge(rb, index);
+	}
+
+	ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
+	if (ret)
+		mlog_errno(ret);
+
+out:
+	brelse(new_bh);
+	return ret;
+}
+
+static int __ocfs2_increase_refcount(handle_t *handle,
+				     struct ocfs2_caching_info *ci,
+				     struct buffer_head *ref_root_bh,
+				     u64 cpos, u32 len, int merge,
+				     struct ocfs2_alloc_context *meta_ac,
+				     struct ocfs2_cached_dealloc_ctxt *dealloc)
+{
+	int ret = 0, index;
+	struct buffer_head *ref_leaf_bh = NULL;
+	struct ocfs2_refcount_rec rec;
+	unsigned int set_len = 0;
+
+	mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
+	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
+	     (unsigned long long)cpos, len);
+
+	while (len) {
+		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
+					     cpos, len, &rec, &index,
+					     &ref_leaf_bh);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+
+		set_len = le32_to_cpu(rec.r_clusters);
+
+		/*
+		 * Here we may meet three situations:
+		 *
+		 * 1. If we find an existing record that starts at cpos
+		 *    and fits within the range, just increase its
+		 *    r_refcount.
+		 * 2. If we find a hole, insert a record with
+		 *    r_refcount = 1.
+		 * 3. If we land in the middle of an extent record, split
+		 *    it.
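Case 3 below clamps the working range to the overlap between the request and the record it landed in. The arithmetic, worked with concrete numbers (host-endian, illustration only):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cpos = 10, r_cpos = 4;		/* record covers [4, 20) */
		uint32_t len = 20, r_clusters = 16;
		uint64_t rec_end = r_cpos + r_clusters;

		/* set_len = min(cpos + len, r_cpos + r_clusters) - cpos */
		uint64_t end = (cpos + len < rec_end) ? cpos + len : rec_end;
		uint32_t set_len = (uint32_t)(end - cpos);

		/* Only [10, 20) of the record overlaps the request. */
		assert(set_len == 10);
		return 0;
	}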
+ */ + if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos && + set_len <= len) { + mlog(0, "increase refcount rec, start %llu, len %u, " + "count %u\n", (unsigned long long)cpos, set_len, + le32_to_cpu(rec.r_refcount)); + ret = ocfs2_change_refcount_rec(handle, ci, + ref_leaf_bh, index, + merge, 1); + if (ret) { + mlog_errno(ret); + goto out; + } + } else if (!rec.r_refcount) { + rec.r_refcount = cpu_to_le32(1); + + mlog(0, "insert refcount rec, start %llu, len %u\n", + (unsigned long long)le64_to_cpu(rec.r_cpos), + set_len); + ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh, + ref_leaf_bh, + &rec, index, + merge, meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } else { + set_len = min((u64)(cpos + len), + le64_to_cpu(rec.r_cpos) + set_len) - cpos; + rec.r_cpos = cpu_to_le64(cpos); + rec.r_clusters = cpu_to_le32(set_len); + le32_add_cpu(&rec.r_refcount, 1); + + mlog(0, "split refcount rec, start %llu, " + "len %u, count %u\n", + (unsigned long long)le64_to_cpu(rec.r_cpos), + set_len, le32_to_cpu(rec.r_refcount)); + ret = ocfs2_split_refcount_rec(handle, ci, + ref_root_bh, ref_leaf_bh, + &rec, index, merge, + meta_ac, dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + cpos += set_len; + len -= set_len; + brelse(ref_leaf_bh); + ref_leaf_bh = NULL; + } + +out: + brelse(ref_leaf_bh); + return ret; +} + +static int ocfs2_remove_refcount_extent(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_extent_tree et; + + BUG_ON(rb->rf_records.rl_used); + + ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh); + ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos), + 1, meta_ac, dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_remove_from_cache(ci, ref_leaf_bh); + + /* + * add the freed block to the dealloc so that it will be freed + * when we run dealloc. + */ + ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE, + le16_to_cpu(rb->rf_suballoc_slot), + le64_to_cpu(rb->rf_blkno), + le16_to_cpu(rb->rf_suballoc_bit)); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out; + } + + rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + + le32_add_cpu(&rb->rf_clusters, -1); + + /* + * check whether we need to restore the root refcount block if + * there is no leaf extent block at atll. 
+ */ + if (!rb->rf_list.l_next_free_rec) { + BUG_ON(rb->rf_clusters); + + mlog(0, "reset refcount tree root %llu to be a record block.\n", + (unsigned long long)ref_root_bh->b_blocknr); + + rb->rf_flags = 0; + rb->rf_parent = 0; + rb->rf_cpos = 0; + memset(&rb->rf_records, 0, sb->s_blocksize - + offsetof(struct ocfs2_refcount_block, rf_records)); + rb->rf_records.rl_count = + cpu_to_le16(ocfs2_refcount_recs_per_rb(sb)); + } + + ocfs2_journal_dirty(handle, ref_root_bh); + +out: + return ret; +} + +int ocfs2_increase_refcount(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + u64 cpos, u32 len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + return __ocfs2_increase_refcount(handle, ci, ref_root_bh, + cpos, len, 1, + meta_ac, dealloc); +} + +static int ocfs2_decrease_refcount_rec(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + struct buffer_head *ref_leaf_bh, + int index, u64 cpos, unsigned int len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index]; + + BUG_ON(cpos < le64_to_cpu(rec->r_cpos)); + BUG_ON(cpos + len > + le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters)); + + if (cpos == le64_to_cpu(rec->r_cpos) && + len == le32_to_cpu(rec->r_clusters)) + ret = ocfs2_change_refcount_rec(handle, ci, + ref_leaf_bh, index, 1, -1); + else { + struct ocfs2_refcount_rec split = *rec; + split.r_cpos = cpu_to_le64(cpos); + split.r_clusters = cpu_to_le32(len); + + le32_add_cpu(&split.r_refcount, -1); + + mlog(0, "split refcount rec, start %llu, " + "len %u, count %u, original start %llu, len %u\n", + (unsigned long long)le64_to_cpu(split.r_cpos), + len, le32_to_cpu(split.r_refcount), + (unsigned long long)le64_to_cpu(rec->r_cpos), + le32_to_cpu(rec->r_clusters)); + ret = ocfs2_split_refcount_rec(handle, ci, + ref_root_bh, ref_leaf_bh, + &split, index, 1, + meta_ac, dealloc); + } + + if (ret) { + mlog_errno(ret); + goto out; + } + + /* Remove the leaf refcount block if it contains no refcount record. 
*/ + if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) { + ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh, + ref_leaf_bh, meta_ac, + dealloc); + if (ret) + mlog_errno(ret); + } + +out: + return ret; +} + +static int __ocfs2_decrease_refcount(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + u64 cpos, u32 len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int delete) +{ + int ret = 0, index = 0; + struct ocfs2_refcount_rec rec; + unsigned int r_count = 0, r_len; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + struct buffer_head *ref_leaf_bh = NULL; + + mlog(0, "Tree owner %llu, decrease refcount start %llu, " + "len %u, delete %u\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + (unsigned long long)cpos, len, delete); + + while (len) { + ret = ocfs2_get_refcount_rec(ci, ref_root_bh, + cpos, len, &rec, &index, + &ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + r_count = le32_to_cpu(rec.r_refcount); + BUG_ON(r_count == 0); + if (!delete) + BUG_ON(r_count > 1); + + r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) + + le32_to_cpu(rec.r_clusters)) - cpos; + + ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh, + ref_leaf_bh, index, + cpos, r_len, + meta_ac, dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (le32_to_cpu(rec.r_refcount) == 1 && delete) { + ret = ocfs2_cache_cluster_dealloc(dealloc, + ocfs2_clusters_to_blocks(sb, cpos), + r_len); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + cpos += r_len; + len -= r_len; + brelse(ref_leaf_bh); + ref_leaf_bh = NULL; + } + +out: + brelse(ref_leaf_bh); + return ret; +} + +/* Caller must hold refcount tree lock. */ +int ocfs2_decrease_refcount(struct inode *inode, + handle_t *handle, u32 cpos, u32 len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int delete) +{ + int ret; + u64 ref_blkno; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_tree *tree; + + BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); + + ret = ocfs2_get_refcount_block(inode, &ref_blkno); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno, + &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh, + cpos, len, meta_ac, dealloc, delete); + if (ret) + mlog_errno(ret); +out: + brelse(ref_root_bh); + return ret; +} + +/* + * Mark the already-existing extent at cpos as refcounted for len clusters. + * This adds the refcount extent flag. + * + * If the existing extent is larger than the request, initiate a + * split. An attempt will be made at merging with adjacent extents. + * + * The caller is responsible for passing down meta_ac if we'll need it. 
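Underneath it all, "refcounted" is a single flag bit on the extent record: ocfs2_mark_extent_refcounted() below sets it via ocfs2_change_extent_flag(), and the CoW path clears it again. A trivial host-side illustration, with a hypothetical value standing in for the on-disk flag definition:

	#include <assert.h>
	#include <stdint.h>

	#define EXT_REFCOUNTED 0x02	/* stand-in for OCFS2_EXT_REFCOUNTED */

	int main(void)
	{
		uint8_t e_flags = 0;

		e_flags |= EXT_REFCOUNTED;	/* mark: clusters are shared */
		assert(e_flags & EXT_REFCOUNTED);

		e_flags &= ~EXT_REFCOUNTED;	/* clear: writable after CoW */
		assert(!(e_flags & EXT_REFCOUNTED));
		return 0;
	}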
+ */ +static int ocfs2_mark_extent_refcounted(struct inode *inode, + struct ocfs2_extent_tree *et, + handle_t *handle, u32 cpos, + u32 len, u32 phys, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + + mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n", + inode->i_ino, cpos, len, phys); + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { + ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " + "tree, but the feature bit is not set in the " + "super block.", inode->i_ino); + ret = -EROFS; + goto out; + } + + ret = ocfs2_change_extent_flag(handle, et, cpos, + len, phys, meta_ac, dealloc, + OCFS2_EXT_REFCOUNTED, 0); + if (ret) + mlog_errno(ret); + +out: + return ret; +} + +/* + * Given some contiguous physical clusters, calculate what we need + * for modifying their refcount. + */ +static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + u64 start_cpos, + u32 clusters, + int *meta_add, + int *credits) +{ + int ret = 0, index, ref_blocks = 0, recs_add = 0; + u64 cpos = start_cpos; + struct ocfs2_refcount_block *rb; + struct ocfs2_refcount_rec rec; + struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL; + u32 len; + + mlog(0, "start_cpos %llu, clusters %u\n", + (unsigned long long)start_cpos, clusters); + while (clusters) { + ret = ocfs2_get_refcount_rec(ci, ref_root_bh, + cpos, clusters, &rec, + &index, &ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (ref_leaf_bh != prev_bh) { + /* + * Now we encounter a new leaf block, so calculate + * whether we need to extend the old leaf. + */ + if (prev_bh) { + rb = (struct ocfs2_refcount_block *) + prev_bh->b_data; + + if (le64_to_cpu(rb->rf_records.rl_used) + + recs_add > + le16_to_cpu(rb->rf_records.rl_count)) + ref_blocks++; + } + + recs_add = 0; + *credits += 1; + brelse(prev_bh); + prev_bh = ref_leaf_bh; + get_bh(prev_bh); + } + + rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + + mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu," + "rec->r_clusters %u, rec->r_refcount %u, index %d\n", + recs_add, (unsigned long long)cpos, clusters, + (unsigned long long)le64_to_cpu(rec.r_cpos), + le32_to_cpu(rec.r_clusters), + le32_to_cpu(rec.r_refcount), index); + + len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + + le32_to_cpu(rec.r_clusters)) - cpos; + /* + * If the refcount rec already exist, cool. We just need + * to check whether there is a split. Otherwise we just need + * to increase the refcount. + * If we will insert one, increases recs_add. + * + * We record all the records which will be inserted to the + * same refcount block, so that we can tell exactly whether + * we need a new refcount block or not. + */ + if (rec.r_refcount) { + /* Check whether we need a split at the beginning. */ + if (cpos == start_cpos && + cpos != le64_to_cpu(rec.r_cpos)) + recs_add++; + + /* Check whether we need a split in the end. 
+			 */
+			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
+			    le32_to_cpu(rec.r_clusters))
+				recs_add++;
+		} else
+			recs_add++;
+
+		brelse(ref_leaf_bh);
+		ref_leaf_bh = NULL;
+		clusters -= len;
+		cpos += len;
+	}
+
+	if (prev_bh) {
+		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
+
+		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
+		    le16_to_cpu(rb->rf_records.rl_count))
+			ref_blocks++;
+
+		*credits += 1;
+	}
+
+	if (!ref_blocks)
+		goto out;
+
+	mlog(0, "we need ref_blocks %d\n", ref_blocks);
+	*meta_add += ref_blocks;
+	*credits += ref_blocks;
+
+	/*
+	 * So we may need ref_blocks to insert into the tree.
+	 * That also means we need to change the b-tree and add that number
+	 * of records, since we never merge them.
+	 * We need one more block for expansion, since the newly created
+	 * leaf block will also be full and need a split.
+	 */
+	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
+	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
+		struct ocfs2_extent_tree et;
+
+		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
+		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
+		*credits += ocfs2_calc_extend_credits(sb,
+						      et.et_root_el,
+						      ref_blocks);
+	} else {
+		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+		*meta_add += 1;
+	}
+
+out:
+	brelse(ref_leaf_bh);
+	brelse(prev_bh);
+	return ret;
+}
+
+/*
+ * When we want to decrease the refcount of some contiguous clusters,
+ * walk the refcount records to see how many blocks we are going to
+ * touch and whether we need to create new blocks.
+ *
+ * Normally the refcount blocks storing these refcounts are contiguous
+ * as well, so we can get the number easily.
+ * As for meta_ac, a split will add at most 2 refcount records and
+ * 2 more refcount blocks, so just check it in a rough way.
+ *
+ * Caller must hold refcount tree lock.
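The counting loop above defers the capacity check: recs_add accumulates while the lookups stay within one leaf, and is only compared against that leaf's free space when the walk moves on. A host-side model of that bookkeeping, assuming a hypothetical flat array of per-lookup results (illustration only):

	#include <stdint.h>

	struct hit { int leaf_id; uint16_t rl_used, rl_count; int recs_add; };

	/* Count how many leaves overflow, mirroring the prev_bh logic. */
	static int count_new_ref_blocks(const struct hit *hits, int n)
	{
		int i, ref_blocks = 0, recs_add = 0, prev = -1;
		uint16_t used = 0, count = 0;

		for (i = 0; i < n; i++) {
			if (hits[i].leaf_id != prev) {
				if (prev >= 0 && used + recs_add > count)
					ref_blocks++;	/* old leaf splits */
				recs_add = 0;
				prev = hits[i].leaf_id;
				used = hits[i].rl_used;
				count = hits[i].rl_count;
			}
			recs_add += hits[i].recs_add;
		}
		if (prev >= 0 && used + recs_add > count)
			ref_blocks++;		/* flush the last leaf */
		return ref_blocks;
	}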
+ */ +int ocfs2_prepare_refcount_change_for_del(struct inode *inode, + struct buffer_head *di_bh, + u64 phys_blkno, + u32 clusters, + int *credits, + struct ocfs2_alloc_context **meta_ac) +{ + int ret, ref_blocks = 0; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_tree *tree; + u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno); + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { + ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " + "tree, but the feature bit is not set in the " + "super block.", inode->i_ino); + ret = -EROFS; + goto out; + } + + BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); + + ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), + le64_to_cpu(di->i_refcount_loc), &tree); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_read_refcount_block(&tree->rf_ci, + le64_to_cpu(di->i_refcount_loc), + &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, + &tree->rf_ci, + ref_root_bh, + start_cpos, clusters, + &ref_blocks, credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "reserve new metadata %d, credits = %d\n", + ref_blocks, *credits); + + if (ref_blocks) { + ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), + ref_blocks, meta_ac); + if (ret) + mlog_errno(ret); + } + +out: + brelse(ref_root_bh); + return ret; +} + +#define MAX_CONTIG_BYTES 1048576 + +static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb) +{ + return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES); +} + +static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb) +{ + return ~(ocfs2_cow_contig_clusters(sb) - 1); +} + +/* + * Given an extent that starts at 'start' and an I/O that starts at 'cpos', + * find an offset (start + (n * contig_clusters)) that is closest to cpos + * while still being less than or equal to it. + * + * The goal is to break the extent at a multiple of contig_clusters. + */ +static inline unsigned int ocfs2_cow_align_start(struct super_block *sb, + unsigned int start, + unsigned int cpos) +{ + BUG_ON(start > cpos); + + return start + ((cpos - start) & ocfs2_cow_contig_mask(sb)); +} + +/* + * Given a cluster count of len, pad it out so that it is a multiple + * of contig_clusters. + */ +static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, + unsigned int len) +{ + unsigned int padded = + (len + (ocfs2_cow_contig_clusters(sb) - 1)) & + ocfs2_cow_contig_mask(sb); + + /* Did we wrap? */ + if (padded < len) + padded = UINT_MAX; + + return padded; +} + +/* + * Calculate out the start and number of virtual clusters we need to to CoW. + * + * cpos is vitual start cluster position we want to do CoW in a + * file and write_len is the cluster length. + * max_cpos is the place where we want to stop CoW intentionally. + * + * Normal we will start CoW from the beginning of extent record cotaining cpos. + * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we + * get good I/O from the resulting extent tree. 
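Assuming 4KB clusters, MAX_CONTIG_BYTES gives contig_clusters = 256, and the two helpers above reduce to power-of-two mask arithmetic. A standalone check of the numbers (the 4KB cluster size is an assumption for the example):

	#include <assert.h>
	#include <limits.h>

	#define CONTIG 256u		/* 1MB of assumed 4KB clusters */
	#define CONTIG_MASK (~(CONTIG - 1))

	static unsigned int align_start(unsigned int start, unsigned int cpos)
	{
		/* Largest start + n*CONTIG that is still <= cpos. */
		return start + ((cpos - start) & CONTIG_MASK);
	}

	static unsigned int align_length(unsigned int len)
	{
		unsigned int padded = (len + CONTIG - 1) & CONTIG_MASK;
		return padded < len ? UINT_MAX : padded;	/* wrap guard */
	}

	int main(void)
	{
		/* 900 clusters past 100 round down to 768 = 3 * 256. */
		assert(align_start(100, 1000) == 100 + 768);
		/* 300 clusters pad up to two contig chunks. */
		assert(align_length(300) == 512);
		return 0;
	}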
+ */ +static int ocfs2_refcount_cal_cow_clusters(struct inode *inode, + struct ocfs2_extent_list *el, + u32 cpos, + u32 write_len, + u32 max_cpos, + u32 *cow_start, + u32 *cow_len) +{ + int ret = 0; + int tree_height = le16_to_cpu(el->l_tree_depth), i; + struct buffer_head *eb_bh = NULL; + struct ocfs2_extent_block *eb = NULL; + struct ocfs2_extent_rec *rec; + unsigned int want_clusters, rec_end = 0; + int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb); + int leaf_clusters; + + BUG_ON(cpos + write_len > max_cpos); + + if (tree_height > 0) { + ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + eb = (struct ocfs2_extent_block *) eb_bh->b_data; + el = &eb->h_list; + + if (el->l_tree_depth) { + ocfs2_error(inode->i_sb, + "Inode %lu has non zero tree depth in " + "leaf block %llu\n", inode->i_ino, + (unsigned long long)eb_bh->b_blocknr); + ret = -EROFS; + goto out; + } + } + + *cow_len = 0; + for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { + rec = &el->l_recs[i]; + + if (ocfs2_is_empty_extent(rec)) { + mlog_bug_on_msg(i != 0, "Inode %lu has empty record in " + "index %d\n", inode->i_ino, i); + continue; + } + + if (le32_to_cpu(rec->e_cpos) + + le16_to_cpu(rec->e_leaf_clusters) <= cpos) + continue; + + if (*cow_len == 0) { + /* + * We should find a refcounted record in the + * first pass. + */ + BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED)); + *cow_start = le32_to_cpu(rec->e_cpos); + } + + /* + * If we encounter a hole, a non-refcounted record or + * pass the max_cpos, stop the search. + */ + if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) || + (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) || + (max_cpos <= le32_to_cpu(rec->e_cpos))) + break; + + leaf_clusters = le16_to_cpu(rec->e_leaf_clusters); + rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters; + if (rec_end > max_cpos) { + rec_end = max_cpos; + leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos); + } + + /* + * How many clusters do we actually need from + * this extent? First we see how many we actually + * need to complete the write. If that's smaller + * than contig_clusters, we try for contig_clusters. + */ + if (!*cow_len) + want_clusters = write_len; + else + want_clusters = (cpos + write_len) - + (*cow_start + *cow_len); + if (want_clusters < contig_clusters) + want_clusters = contig_clusters; + + /* + * If the write does not cover the whole extent, we + * need to calculate how we're going to split the extent. + * We try to do it on contig_clusters boundaries. + * + * Any extent smaller than contig_clusters will be + * CoWed in its entirety. + */ + if (leaf_clusters <= contig_clusters) + *cow_len += leaf_clusters; + else if (*cow_len || (*cow_start == cpos)) { + /* + * This extent needs to be CoW'd from its + * beginning, so all we have to do is compute + * how many clusters to grab. We align + * want_clusters to the edge of contig_clusters + * to get better I/O. + */ + want_clusters = ocfs2_cow_align_length(inode->i_sb, + want_clusters); + + if (leaf_clusters < want_clusters) + *cow_len += leaf_clusters; + else + *cow_len += want_clusters; + } else if ((*cow_start + contig_clusters) >= + (cpos + write_len)) { + /* + * Breaking off contig_clusters at the front + * of the extent will cover our write. That's + * easy. + */ + *cow_len = contig_clusters; + } else if ((rec_end - cpos) <= contig_clusters) { + /* + * Breaking off contig_clusters at the tail of + * this extent will cover cpos. 
+			 */
+			*cow_start = rec_end - contig_clusters;
+			*cow_len = contig_clusters;
+		} else if ((rec_end - cpos) <= want_clusters) {
+			/*
+			 * While we can't fit the entire write in this
+			 * extent, we know that the write goes from cpos
+			 * to the end of the extent.  Break that off.
+			 * We try to break it at some multiple of
+			 * contig_clusters from the front of the extent.
+			 * Failing that (i.e., cpos is within
+			 * contig_clusters of the front), we'll CoW the
+			 * entire extent.
+			 */
+			*cow_start = ocfs2_cow_align_start(inode->i_sb,
+							   *cow_start, cpos);
+			*cow_len = rec_end - *cow_start;
+		} else {
+			/*
+			 * Ok, the entire write lives in the middle of
+			 * this extent.  Let's try to slice the extent up
+			 * nicely.  Optimally, our CoW region starts at
+			 * m*contig_clusters from the beginning of the
+			 * extent and goes for n*contig_clusters,
+			 * covering the entire write.
+			 */
+			*cow_start = ocfs2_cow_align_start(inode->i_sb,
+							   *cow_start, cpos);
+
+			want_clusters = (cpos + write_len) - *cow_start;
+			want_clusters = ocfs2_cow_align_length(inode->i_sb,
+							       want_clusters);
+			if (*cow_start + want_clusters <= rec_end)
+				*cow_len = want_clusters;
+			else
+				*cow_len = rec_end - *cow_start;
+		}
+
+		/* Have we covered our entire write yet? */
+		if ((*cow_start + *cow_len) >= (cpos + write_len))
+			break;
+
+		/*
+		 * If we reach the end of the extent block and don't get enough
+		 * clusters, continue with the next extent block if possible.
+		 */
+		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
+		    eb && eb->h_next_leaf_blk) {
+			brelse(eb_bh);
+			eb_bh = NULL;
+
+			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
+					le64_to_cpu(eb->h_next_leaf_blk),
+					&eb_bh);
+			if (ret) {
+				mlog_errno(ret);
+				goto out;
+			}
+
+			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
+			el = &eb->h_list;
+			i = -1;
+		}
+	}
+
+out:
+	brelse(eb_bh);
+	return ret;
+}
+
+/*
+ * Prepare meta_ac, data_ac and calculate credits when we want to add
+ * num_clusters of new clusters in the data tree "et" and change the
+ * refcount for the old clusters (starting from p_cluster) in the
+ * refcount tree.
+ *
+ * Note:
+ * 1. Since we may split the old tree, we will need at most
+ *    num_clusters + 2 more new leaf records.
+ * 2. In some cases (e.g. reflink) we may not need to reserve new
+ *    clusters, so just pass data_ac = NULL.
+ */ +static int ocfs2_lock_refcount_allocators(struct super_block *sb, + u32 p_cluster, u32 num_clusters, + struct ocfs2_extent_tree *et, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_alloc_context **meta_ac, + struct ocfs2_alloc_context **data_ac, + int *credits) +{ + int ret = 0, meta_add = 0; + int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et); + + if (num_free_extents < 0) { + ret = num_free_extents; + mlog_errno(ret); + goto out; + } + + if (num_free_extents < num_clusters + 2) + meta_add = + ocfs2_extend_meta_needed(et->et_root_el); + + *credits += ocfs2_calc_extend_credits(sb, et->et_root_el, + num_clusters + 2); + + ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh, + p_cluster, num_clusters, + &meta_add, credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n", + meta_add, num_clusters, *credits); + ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add, + meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (data_ac) { + ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters, + data_ac); + if (ret) + mlog_errno(ret); + } + +out: + if (ret) { + if (*meta_ac) { + ocfs2_free_alloc_context(*meta_ac); + *meta_ac = NULL; + } + } + + return ret; +} + +static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) +{ + BUG_ON(buffer_dirty(bh)); + + clear_buffer_mapped(bh); + + return 0; +} + +static int ocfs2_duplicate_clusters_by_page(handle_t *handle, + struct ocfs2_cow_context *context, + u32 cpos, u32 old_cluster, + u32 new_cluster, u32 new_len) +{ + int ret = 0, partial; + struct ocfs2_caching_info *ci = context->data_et.et_ci; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); + struct page *page; + pgoff_t page_index; + unsigned int from, to; + loff_t offset, end, map_end; + struct address_space *mapping = context->inode->i_mapping; + + mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster, + new_cluster, new_len, cpos); + + offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; + end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits); + + while (offset < end) { + page_index = offset >> PAGE_CACHE_SHIFT; + map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + if (map_end > end) + map_end = end; + + /* from, to is the offset within the page. */ + from = offset & (PAGE_CACHE_SIZE - 1); + to = PAGE_CACHE_SIZE; + if (map_end & (PAGE_CACHE_SIZE - 1)) + to = map_end & (PAGE_CACHE_SIZE - 1); + + page = grab_cache_page(mapping, page_index); + + /* This page can't be dirtied before we CoW it out. 
*/ + BUG_ON(PageDirty(page)); + + if (!PageUptodate(page)) { + ret = block_read_full_page(page, ocfs2_get_block); + if (ret) { + mlog_errno(ret); + goto unlock; + } + lock_page(page); + } + + if (page_has_buffers(page)) { + ret = walk_page_buffers(handle, page_buffers(page), + from, to, &partial, + ocfs2_clear_cow_buffer); + if (ret) { + mlog_errno(ret); + goto unlock; + } + } + + ocfs2_map_and_dirty_page(context->inode, + handle, from, to, + page, 0, &new_block); + mark_page_accessed(page); +unlock: + unlock_page(page); + page_cache_release(page); + page = NULL; + offset = map_end; + if (ret) + break; + } + + return ret; +} + +static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, + struct ocfs2_cow_context *context, + u32 cpos, u32 old_cluster, + u32 new_cluster, u32 new_len) +{ + int ret = 0; + struct super_block *sb = context->inode->i_sb; + struct ocfs2_caching_info *ci = context->data_et.et_ci; + int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); + u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); + u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); + struct ocfs2_super *osb = OCFS2_SB(sb); + struct buffer_head *old_bh = NULL; + struct buffer_head *new_bh = NULL; + + mlog(0, "old_cluster %u, new %u, len %u\n", old_cluster, + new_cluster, new_len); + + for (i = 0; i < blocks; i++, old_block++, new_block++) { + new_bh = sb_getblk(osb->sb, new_block); + if (new_bh == NULL) { + ret = -EIO; + mlog_errno(ret); + break; + } + + ocfs2_set_new_buffer_uptodate(ci, new_bh); + + ret = ocfs2_read_block(ci, old_block, &old_bh, NULL); + if (ret) { + mlog_errno(ret); + break; + } + + ret = ocfs2_journal_access(handle, ci, new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret) { + mlog_errno(ret); + break; + } + + memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize); + ret = ocfs2_journal_dirty(handle, new_bh); + if (ret) { + mlog_errno(ret); + break; + } + + brelse(new_bh); + brelse(old_bh); + new_bh = NULL; + old_bh = NULL; + } + + brelse(new_bh); + brelse(old_bh); + return ret; +} + +static int ocfs2_clear_ext_refcount(handle_t *handle, + struct ocfs2_extent_tree *et, + u32 cpos, u32 p_cluster, u32 len, + unsigned int ext_flags, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret, index; + struct ocfs2_extent_rec replace_rec; + struct ocfs2_path *path = NULL; + struct ocfs2_extent_list *el; + struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); + u64 ino = ocfs2_metadata_cache_owner(et->et_ci); + + mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n", + (unsigned long long)ino, cpos, len, p_cluster, ext_flags); + + memset(&replace_rec, 0, sizeof(replace_rec)); + replace_rec.e_cpos = cpu_to_le32(cpos); + replace_rec.e_leaf_clusters = cpu_to_le16(len); + replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb, + p_cluster)); + replace_rec.e_flags = ext_flags; + replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED; + + path = ocfs2_new_path_from_et(et); + if (!path) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_find_path(et->et_ci, path, cpos); + if (ret) { + mlog_errno(ret); + goto out; + } + + el = path_leaf_el(path); + + index = ocfs2_search_extent_list(el, cpos); + if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { + ocfs2_error(sb, + "Inode %llu has an extent at cpos %u which can no " + "longer be found.\n", + (unsigned long long)ino, cpos); + ret = -EROFS; + goto out; + } + + ret = ocfs2_split_extent(handle, et, path, index, + &replace_rec, meta_ac, dealloc); + 
if (ret) + mlog_errno(ret); + +out: + ocfs2_free_path(path); + return ret; +} + +static int ocfs2_replace_clusters(handle_t *handle, + struct ocfs2_cow_context *context, + u32 cpos, u32 old, + u32 new, u32 len, + unsigned int ext_flags) +{ + int ret; + struct ocfs2_caching_info *ci = context->data_et.et_ci; + u64 ino = ocfs2_metadata_cache_owner(ci); + + mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n", + (unsigned long long)ino, cpos, old, new, len, ext_flags); + + /*If the old clusters is unwritten, no need to duplicate. */ + if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { + ret = context->cow_duplicate_clusters(handle, context, cpos, + old, new, len); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + ret = ocfs2_clear_ext_refcount(handle, &context->data_et, + cpos, new, len, ext_flags, + context->meta_ac, &context->dealloc); + if (ret) + mlog_errno(ret); +out: + return ret; +} + +static int ocfs2_cow_sync_writeback(struct super_block *sb, + struct ocfs2_cow_context *context, + u32 cpos, u32 num_clusters) +{ + int ret = 0; + loff_t offset, end, map_end; + pgoff_t page_index; + struct page *page; + + if (ocfs2_should_order_data(context->inode)) + return 0; + + offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; + end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); + + ret = filemap_fdatawrite_range(context->inode->i_mapping, + offset, end - 1); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + while (offset < end) { + page_index = offset >> PAGE_CACHE_SHIFT; + map_end = (page_index + 1) << PAGE_CACHE_SHIFT; + if (map_end > end) + map_end = end; + + page = grab_cache_page(context->inode->i_mapping, page_index); + BUG_ON(!page); + + wait_on_page_writeback(page); + if (PageError(page)) { + ret = -EIO; + mlog_errno(ret); + } else + mark_page_accessed(page); + + unlock_page(page); + page_cache_release(page); + page = NULL; + offset = map_end; + if (ret) + break; + } + + return ret; +} + +static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context, + u32 v_cluster, u32 *p_cluster, + u32 *num_clusters, + unsigned int *extent_flags) +{ + return ocfs2_get_clusters(context->inode, v_cluster, p_cluster, + num_clusters, extent_flags); +} + +static int ocfs2_make_clusters_writable(struct super_block *sb, + struct ocfs2_cow_context *context, + u32 cpos, u32 p_cluster, + u32 num_clusters, unsigned int e_flags) +{ + int ret, delete, index, credits = 0; + u32 new_bit, new_len; + unsigned int set_len; + struct ocfs2_super *osb = OCFS2_SB(sb); + handle_t *handle; + struct buffer_head *ref_leaf_bh = NULL; + struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci; + struct ocfs2_refcount_rec rec; + + mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n", + cpos, p_cluster, num_clusters, e_flags); + + ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters, + &context->data_et, + ref_ci, + context->ref_root_bh, + &context->meta_ac, + &context->data_ac, &credits); + if (ret) { + mlog_errno(ret); + return ret; + } + + if (context->post_refcount) + credits += context->post_refcount->credits; + + credits += context->extra_credits; + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + while (num_clusters) { + ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, + p_cluster, num_clusters, + &rec, &index, &ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + BUG_ON(!rec.r_refcount); + set_len = min((u64)p_cluster + num_clusters, + 
le64_to_cpu(rec.r_cpos) + + le32_to_cpu(rec.r_clusters)) - p_cluster; + + /* + * There are many different situation here. + * 1. If refcount == 1, remove the flag and don't COW. + * 2. If refcount > 1, allocate clusters. + * Here we may not allocate r_len once at a time, so continue + * until we reach num_clusters. + */ + if (le32_to_cpu(rec.r_refcount) == 1) { + delete = 0; + ret = ocfs2_clear_ext_refcount(handle, + &context->data_et, + cpos, p_cluster, + set_len, e_flags, + context->meta_ac, + &context->dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + } else { + delete = 1; + + ret = __ocfs2_claim_clusters(osb, handle, + context->data_ac, + 1, set_len, + &new_bit, &new_len); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_replace_clusters(handle, context, + cpos, p_cluster, new_bit, + new_len, e_flags); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + set_len = new_len; + } + + ret = __ocfs2_decrease_refcount(handle, ref_ci, + context->ref_root_bh, + p_cluster, set_len, + context->meta_ac, + &context->dealloc, delete); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + cpos += set_len; + p_cluster += set_len; + num_clusters -= set_len; + brelse(ref_leaf_bh); + ref_leaf_bh = NULL; + } + + /* handle any post_cow action. */ + if (context->post_refcount && context->post_refcount->func) { + ret = context->post_refcount->func(context->inode, handle, + context->post_refcount->para); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + } + + /* + * Here we should write the new page out first if we are + * in write-back mode. + */ + if (context->get_clusters == ocfs2_di_get_clusters) { + ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + if (ret) + mlog_errno(ret); + } + +out_commit: + ocfs2_commit_trans(osb, handle); + +out: + if (context->data_ac) { + ocfs2_free_alloc_context(context->data_ac); + context->data_ac = NULL; + } + if (context->meta_ac) { + ocfs2_free_alloc_context(context->meta_ac); + context->meta_ac = NULL; + } + brelse(ref_leaf_bh); + + return ret; +} + +static int ocfs2_replace_cow(struct ocfs2_cow_context *context) +{ + int ret = 0; + struct inode *inode = context->inode; + u32 cow_start = context->cow_start, cow_len = context->cow_len; + u32 p_cluster, num_clusters; + unsigned int ext_flags; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) { + ocfs2_error(inode->i_sb, "Inode %lu want to use refcount " + "tree, but the feature bit is not set in the " + "super block.", inode->i_ino); + return -EROFS; + } + + ocfs2_init_dealloc_ctxt(&context->dealloc); + + while (cow_len) { + ret = context->get_clusters(context, cow_start, &p_cluster, + &num_clusters, &ext_flags); + if (ret) { + mlog_errno(ret); + break; + } + + BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED)); + + if (cow_len < num_clusters) + num_clusters = cow_len; + + ret = ocfs2_make_clusters_writable(inode->i_sb, context, + cow_start, p_cluster, + num_clusters, ext_flags); + if (ret) { + mlog_errno(ret); + break; + } + + cow_len -= num_clusters; + cow_start += num_clusters; + } + + if (ocfs2_dealloc_has_cluster(&context->dealloc)) { + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &context->dealloc); + } + + return ret; +} + +/* + * Starting at cpos, try to CoW write_len clusters. Don't CoW + * past max_cpos. This will stop when it runs into a hole or an + * unrefcounted extent. 
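The refcount == 1 versus refcount > 1 branch in ocfs2_make_clusters_writable() above is the core CoW decision: a privately owned record only needs its REFCOUNTED flag cleared, while a shared record needs fresh clusters, a copy, and a refcount decrement, advancing by set_len per iteration until the request is consumed. A compressed user-space sketch of that control flow, with hypothetical types and all on-disk and journalling detail dropped:

#include <stdint.h>
#include <stdio.h>

struct rec { uint32_t refcount; uint32_t clusters; };

/* Hypothetical model of the per-record loop: each pass handles 'set_len'
 * clusters, clamped to the record covering the current position. */
static void make_writable(struct rec *recs, unsigned nrecs,
                          uint32_t p_cluster, uint32_t num_clusters)
{
        unsigned i = 0;

        while (num_clusters && i < nrecs) {
                uint32_t set_len = recs[i].clusters;

                if (set_len > num_clusters)
                        set_len = num_clusters;

                if (recs[i].refcount == 1)
                        printf("clear flag on %u clusters\n", set_len);
                else
                        printf("copy %u clusters, then decref\n", set_len);

                recs[i].refcount--;     /* __ocfs2_decrease_refcount() */
                p_cluster += set_len;
                num_clusters -= set_len;
                i++;
        }
}

int main(void)
{
        struct rec recs[] = { { 1, 4 }, { 3, 8 } };
        make_writable(recs, 2, 0, 10);
        return 0;
}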
+ */ +static int ocfs2_refcount_cow_hunk(struct inode *inode, + struct buffer_head *di_bh, + u32 cpos, u32 write_len, u32 max_cpos) +{ + int ret; + u32 cow_start = 0, cow_len = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_refcount_tree *ref_tree; + struct ocfs2_cow_context *context = NULL; + + BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); + + ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list, + cpos, write_len, max_cpos, + &cow_start, &cow_len); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, " + "cow_len %u\n", inode->i_ino, + cpos, write_len, cow_start, cow_len); + + BUG_ON(cow_len == 0); + + context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); + if (!context) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), + 1, &ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + context->inode = inode; + context->cow_start = cow_start; + context->cow_len = cow_len; + context->ref_tree = ref_tree; + context->ref_root_bh = ref_root_bh; + context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; + context->get_clusters = ocfs2_di_get_clusters; + + ocfs2_init_dinode_extent_tree(&context->data_et, + INODE_CACHE(inode), di_bh); + + ret = ocfs2_replace_cow(context); + if (ret) + mlog_errno(ret); + + /* + * truncate the extent map here since no matter whether we meet with + * any error during the action, we shouldn't trust cached extent map + * any more. + */ + ocfs2_extent_map_trunc(inode, cow_start); + + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + brelse(ref_root_bh); +out: + kfree(context); + return ret; +} + +/* + * CoW any and all clusters between cpos and cpos+write_len. + * Don't CoW past max_cpos. If this returns successfully, all + * clusters between cpos and cpos+write_len are safe to modify. + */ +int ocfs2_refcount_cow(struct inode *inode, + struct buffer_head *di_bh, + u32 cpos, u32 write_len, u32 max_cpos) +{ + int ret = 0; + u32 p_cluster, num_clusters; + unsigned int ext_flags; + + while (write_len) { + ret = ocfs2_get_clusters(inode, cpos, &p_cluster, + &num_clusters, &ext_flags); + if (ret) { + mlog_errno(ret); + break; + } + + if (write_len < num_clusters) + num_clusters = write_len; + + if (ext_flags & OCFS2_EXT_REFCOUNTED) { + ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, + num_clusters, max_cpos); + if (ret) { + mlog_errno(ret); + break; + } + } + + write_len -= num_clusters; + cpos += num_clusters; + } + + return ret; +} + +static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context, + u32 v_cluster, u32 *p_cluster, + u32 *num_clusters, + unsigned int *extent_flags) +{ + struct inode *inode = context->inode; + struct ocfs2_xattr_value_root *xv = context->cow_object; + + return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster, + num_clusters, &xv->xr_list, + extent_flags); +} + +/* + * Given a xattr value root, calculate the most meta/credits we need for + * refcount tree change if we truncate it to 0. 
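ocfs2_refcount_cow() above is the outer driver for this machinery: it walks the logical range extent by extent, clamps each step to the remaining write length, and only descends into the CoW hunk path for refcounted pieces. A hypothetical sketch of just that hunk-splitting loop, with a flat array standing in for the extent map:

#include <stdint.h>
#include <stdio.h>

struct ext { uint32_t len; int refcounted; };

/* Hypothetical reduction of the loop in ocfs2_refcount_cow(). */
static void cow_range(const struct ext *map, unsigned n,
                      uint32_t cpos, uint32_t write_len)
{
        unsigned i = 0;

        while (write_len && i < n) {
                uint32_t num = map[i].len;

                if (num > write_len)
                        num = write_len;        /* clamp to the write */

                if (map[i].refcounted)
                        printf("CoW hunk at %u, len %u\n", cpos, num);

                write_len -= num;
                cpos += num;
                i++;
        }
}

int main(void)
{
        struct ext map[] = { { 3, 0 }, { 5, 1 }, { 4, 0 } };
        cow_range(map, 3, 0, 10);
        return 0;
}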
*/ +int ocfs2_refcounted_xattr_delete_need(struct inode *inode, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_xattr_value_root *xv, + int *meta_add, int *credits) +{ + int ret = 0, index, ref_blocks = 0; + u32 p_cluster, num_clusters; + u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters); + struct ocfs2_refcount_block *rb; + struct ocfs2_refcount_rec rec; + struct buffer_head *ref_leaf_bh = NULL; + + while (cpos < clusters) { + ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, + &num_clusters, &xv->xr_list, + NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + cpos += num_clusters; + + while (num_clusters) { + ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh, + p_cluster, num_clusters, + &rec, &index, + &ref_leaf_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + BUG_ON(!rec.r_refcount); + + rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data; + + /* + * We really don't know whether the other clusters are in + * this refcount block or not, so just take the worst + * case: assume all the clusters are in this block and each + * one will split a refcount rec, so in total we need + * clusters * 2 new refcount recs. + */ + if (le64_to_cpu(rb->rf_records.rl_used) + clusters * 2 > + le16_to_cpu(rb->rf_records.rl_count)) + ref_blocks++; + + *credits += 1; + brelse(ref_leaf_bh); + ref_leaf_bh = NULL; + + if (num_clusters <= le32_to_cpu(rec.r_clusters)) + break; + else + num_clusters -= le32_to_cpu(rec.r_clusters); + p_cluster += num_clusters; + } + } + + *meta_add += ref_blocks; + if (!ref_blocks) + goto out; + + rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) + *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; + else { + struct ocfs2_extent_tree et; + + ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh); + *credits += ocfs2_calc_extend_credits(inode->i_sb, + et.et_root_el, + ref_blocks); + } + +out: + brelse(ref_leaf_bh); + return ret; +} + +/* + * Do CoW for xattr. + */ +int ocfs2_refcount_cow_xattr(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_value_buf *vb, + struct ocfs2_refcount_tree *ref_tree, + struct buffer_head *ref_root_bh, + u32 cpos, u32 write_len, + struct ocfs2_post_refcount *post) +{ + int ret; + struct ocfs2_xattr_value_root *xv = vb->vb_xv; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_cow_context *context = NULL; + u32 cow_start, cow_len; + + BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)); + + ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list, + cpos, write_len, UINT_MAX, + &cow_start, &cow_len); + if (ret) { + mlog_errno(ret); + goto out; + } + + BUG_ON(cow_len == 0); + + context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); + if (!context) { + ret = -ENOMEM; + mlog_errno(ret); + goto out; + } + + context->inode = inode; + context->cow_start = cow_start; + context->cow_len = cow_len; + context->ref_tree = ref_tree; + context->ref_root_bh = ref_root_bh; + context->cow_object = xv; + + context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd; + /* We need the extra credits for duplicate_clusters by jbd.
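The worst-case reservation heuristic above is easier to see with numbers: if a leaf already holds rl_used records out of rl_count slots, deleting N clusters may, at worst, split one record per cluster and so add N * 2 records. A toy check mirroring that comparison; the 126-slot figure is purely illustrative, not the on-disk constant:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical worst-case test: does the leaf overflow if every
 * deleted cluster splits a record (clusters * 2 new recs)? */
static int leaf_may_overflow(uint16_t rl_used, uint16_t rl_count,
                             uint32_t clusters)
{
        return rl_used + clusters * 2 > rl_count;
}

int main(void)
{
        /* e.g. 100 of 126 record slots used, deleting 20 clusters:
         * 100 + 40 > 126, so one more refcount block is reserved. */
        printf("%d\n", leaf_may_overflow(100, 126, 20));
        return 0;
}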
*/ + context->extra_credits = + ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len; + context->get_clusters = ocfs2_xattr_value_get_clusters; + context->post_refcount = post; + + ocfs2_init_xattr_value_extent_tree(&context->data_et, + INODE_CACHE(inode), vb); + + ret = ocfs2_replace_cow(context); + if (ret) + mlog_errno(ret); + +out: + kfree(context); + return ret; +} + +/* + * Insert a new extent into refcount tree and mark a extent rec + * as refcounted in the dinode tree. + */ +int ocfs2_add_refcount_flag(struct inode *inode, + struct ocfs2_extent_tree *data_et, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + u32 cpos, u32 p_cluster, u32 num_clusters, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_post_refcount *post) +{ + int ret; + handle_t *handle; + int credits = 1, ref_blocks = 0; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_alloc_context *meta_ac = NULL; + + ret = ocfs2_calc_refcount_meta_credits(inode->i_sb, + ref_ci, ref_root_bh, + p_cluster, num_clusters, + &ref_blocks, &credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + mlog(0, "reserve new metadata %d, credits = %d\n", + ref_blocks, credits); + + if (ref_blocks) { + ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), + ref_blocks, &meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + if (post) + credits += post->credits; + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_mark_extent_refcounted(inode, data_et, handle, + cpos, num_clusters, p_cluster, + meta_ac, dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, + p_cluster, num_clusters, 0, + meta_ac, dealloc); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + if (post && post->func) { + ret = post->func(inode, handle, post->para); + if (ret) + mlog_errno(ret); + } + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + return ret; +} + +static int ocfs2_change_ctime(struct inode *inode, + struct buffer_head *di_bh) +{ + int ret; + handle_t *handle; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + + handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb), + OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + inode->i_ctime = CURRENT_TIME; + di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); + di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); + + ocfs2_journal_dirty(handle, di_bh); + +out_commit: + ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); +out: + return ret; +} + +static int ocfs2_attach_refcount_tree(struct inode *inode, + struct buffer_head *di_bh) +{ + int ret, data_changed = 0; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_refcount_tree *ref_tree; + unsigned int ext_flags; + loff_t size; + u32 cpos, num_clusters, clusters, p_cluster; + struct ocfs2_cached_dealloc_ctxt dealloc; + struct ocfs2_extent_tree di_et; + + ocfs2_init_dealloc_ctxt(&dealloc); + + if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) { + ret = 
ocfs2_create_refcount_tree(inode, di_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + BUG_ON(!di->i_refcount_loc); + ret = ocfs2_lock_refcount_tree(osb, + le64_to_cpu(di->i_refcount_loc), 1, + &ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); + + size = i_size_read(inode); + clusters = ocfs2_clusters_for_bytes(inode->i_sb, size); + + cpos = 0; + while (cpos < clusters) { + ret = ocfs2_get_clusters(inode, cpos, &p_cluster, + &num_clusters, &ext_flags); + + if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) { + ret = ocfs2_add_refcount_flag(inode, &di_et, + &ref_tree->rf_ci, + ref_root_bh, cpos, + p_cluster, num_clusters, + &dealloc, NULL); + if (ret) { + mlog_errno(ret); + goto unlock; + } + + data_changed = 1; + } + cpos += num_clusters; + } + + if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { + ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, + &ref_tree->rf_ci, + ref_root_bh, + &dealloc); + if (ret) { + mlog_errno(ret); + goto unlock; + } + } + + if (data_changed) { + ret = ocfs2_change_ctime(inode, di_bh); + if (ret) + mlog_errno(ret); + } + +unlock: + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + brelse(ref_root_bh); + + if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) { + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &dealloc); + } +out: + /* + * Empty the extent map so that we may get the right extent + * record from the disk. + */ + ocfs2_extent_map_trunc(inode, 0); + + return ret; +} + +static int ocfs2_add_refcounted_extent(struct inode *inode, + struct ocfs2_extent_tree *et, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + u32 cpos, u32 p_cluster, u32 num_clusters, + unsigned int ext_flags, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret; + handle_t *handle; + int credits = 0; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_alloc_context *meta_ac = NULL; + + ret = ocfs2_lock_refcount_allocators(inode->i_sb, + p_cluster, num_clusters, + et, ref_ci, + ref_root_bh, &meta_ac, + NULL, &credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_insert_extent(handle, et, cpos, + cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, + p_cluster)), + num_clusters, ext_flags, meta_ac); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh, + p_cluster, num_clusters, + meta_ac, dealloc); + if (ret) + mlog_errno(ret); + +out_commit: + ocfs2_commit_trans(osb, handle); +out: + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + return ret; +} + +static int ocfs2_duplicate_extent_list(struct inode *s_inode, + struct inode *t_inode, + struct buffer_head *t_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret = 0; + u32 p_cluster, num_clusters, clusters, cpos; + loff_t size; + unsigned int ext_flags; + struct ocfs2_extent_tree et; + + ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh); + + size = i_size_read(s_inode); + clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size); + + cpos = 0; + while (cpos < clusters) { + ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster, + &num_clusters, &ext_flags); + + if (p_cluster) { + ret = ocfs2_add_refcounted_extent(t_inode, &et, + ref_ci, ref_root_bh, + cpos, p_cluster, + 
num_clusters, + ext_flags, + dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + cpos += num_clusters; + } + +out: + return ret; +} + +/* + * change the new file's attributes to the src. + * + * reflink creates a snapshot of a file, that means the attributes + * must be identical except for three exceptions - nlink, ino, and ctime. + */ +static int ocfs2_complete_reflink(struct inode *s_inode, + struct buffer_head *s_bh, + struct inode *t_inode, + struct buffer_head *t_bh, + bool preserve) +{ + int ret; + handle_t *handle; + struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data; + loff_t size = i_size_read(s_inode); + + handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb), + OCFS2_INODE_UPDATE_CREDITS); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + return ret; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + spin_lock(&OCFS2_I(t_inode)->ip_lock); + OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters; + OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr; + OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features; + spin_unlock(&OCFS2_I(t_inode)->ip_lock); + i_size_write(t_inode, size); + + di->i_xattr_inline_size = s_di->i_xattr_inline_size; + di->i_clusters = s_di->i_clusters; + di->i_size = s_di->i_size; + di->i_dyn_features = s_di->i_dyn_features; + di->i_attr = s_di->i_attr; + + if (preserve) { + di->i_uid = s_di->i_uid; + di->i_gid = s_di->i_gid; + di->i_mode = s_di->i_mode; + + /* + * update time. + * we want mtime to appear identical to the source and + * update ctime. + */ + t_inode->i_ctime = CURRENT_TIME; + + di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec); + di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec); + + t_inode->i_mtime = s_inode->i_mtime; + di->i_mtime = s_di->i_mtime; + di->i_mtime_nsec = s_di->i_mtime_nsec; + } + + ocfs2_journal_dirty(handle, t_bh); + +out_commit: + ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle); + return ret; +} + +static int ocfs2_create_reflink_node(struct inode *s_inode, + struct buffer_head *s_bh, + struct inode *t_inode, + struct buffer_head *t_bh, + bool preserve) +{ + int ret; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_cached_dealloc_ctxt dealloc; + struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb); + struct ocfs2_refcount_block *rb; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data; + struct ocfs2_refcount_tree *ref_tree; + + ocfs2_init_dealloc_ctxt(&dealloc); + + ret = ocfs2_set_refcount_tree(t_inode, t_bh, + le64_to_cpu(di->i_refcount_loc)); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), + 1, &ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data; + + ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh, + &ref_tree->rf_ci, ref_root_bh, + &dealloc); + if (ret) { + mlog_errno(ret); + goto out_unlock_refcount; + } + + ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve); + if (ret) + mlog_errno(ret); + +out_unlock_refcount: + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); + brelse(ref_root_bh); +out: + if (ocfs2_dealloc_has_cluster(&dealloc)) { + ocfs2_schedule_truncate_log_flush(osb, 1); + ocfs2_run_deallocs(osb, &dealloc); + } + + return ret; +} + +static int __ocfs2_reflink(struct 
dentry *old_dentry, + struct buffer_head *old_bh, + struct inode *new_inode, + bool preserve) +{ + int ret; + struct inode *inode = old_dentry->d_inode; + struct buffer_head *new_bh = NULL; + + ret = filemap_fdatawrite(inode->i_mapping); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_attach_refcount_tree(inode, old_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + mutex_lock(&new_inode->i_mutex); + ret = ocfs2_inode_lock(new_inode, &new_bh, 1); + if (ret) { + mlog_errno(ret); + goto out_unlock; + } + + ret = ocfs2_create_reflink_node(inode, old_bh, + new_inode, new_bh, preserve); + if (ret) { + mlog_errno(ret); + goto inode_unlock; + } + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) { + ret = ocfs2_reflink_xattrs(inode, old_bh, + new_inode, new_bh, + preserve); + if (ret) + mlog_errno(ret); + } +inode_unlock: + ocfs2_inode_unlock(new_inode, 1); + brelse(new_bh); +out_unlock: + mutex_unlock(&new_inode->i_mutex); +out: + if (!ret) { + ret = filemap_fdatawait(inode->i_mapping); + if (ret) + mlog_errno(ret); + } + return ret; +} + +static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry, bool preserve) +{ + int error; + struct inode *inode = old_dentry->d_inode; + struct buffer_head *old_bh = NULL; + struct inode *new_orphan_inode = NULL; + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) + return -EOPNOTSUPP; + + error = ocfs2_create_inode_in_orphan(dir, inode->i_mode, + &new_orphan_inode); + if (error) { + mlog_errno(error); + goto out; + } + + error = ocfs2_inode_lock(inode, &old_bh, 1); + if (error) { + mlog_errno(error); + goto out; + } + + down_write(&OCFS2_I(inode)->ip_xattr_sem); + down_write(&OCFS2_I(inode)->ip_alloc_sem); + error = __ocfs2_reflink(old_dentry, old_bh, + new_orphan_inode, preserve); + up_write(&OCFS2_I(inode)->ip_alloc_sem); + up_write(&OCFS2_I(inode)->ip_xattr_sem); + + ocfs2_inode_unlock(inode, 1); + brelse(old_bh); + + if (error) { + mlog_errno(error); + goto out; + } + + /* If the security isn't preserved, we need to re-initialize them. */ + if (!preserve) { + error = ocfs2_init_security_and_acl(dir, new_orphan_inode); + if (error) + mlog_errno(error); + } +out: + if (!error) { + error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, + new_dentry); + if (error) + mlog_errno(error); + } + + if (new_orphan_inode) { + /* + * We need to open_unlock the inode no matter whether we + * succeed or not, so that other nodes can delete it later. + */ + ocfs2_open_unlock(new_orphan_inode); + if (error) + iput(new_orphan_inode); + } + + return error; +} + +/* + * Below here are the bits used by OCFS2_IOC_REFLINK() to fake + * sys_reflink(). This will go away when vfs_reflink() exists in + * fs/namei.c. + */ + +/* copied from may_create in VFS. */ +static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) +{ + if (child->d_inode) + return -EEXIST; + if (IS_DEADDIR(dir)) + return -ENOENT; + return inode_permission(dir, MAY_WRITE | MAY_EXEC); +} + +/* copied from user_path_parent. 
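ocfs2_reflink() above relies on ordering rather than rollback: the target inode is created invisible in the orphan directory, populated under cluster locks, and moved into the namespace only once everything has succeeded; on failure the orphan is simply released so the orphan-directory scan can delete it later. A hypothetical stub-level outline of that ordering, with the ocfs2 helpers reduced to trivial functions:

#include <stdio.h>

/* Stubs standing in for the ocfs2 helpers used by ocfs2_reflink(). */
static int create_in_orphan(void)  { return 0; }
static int populate_target(void)   { return 0; }  /* __ocfs2_reflink() */
static int move_to_namespace(void) { return 0; }

static int reflink_sketch(void)
{
        int err;

        err = create_in_orphan();       /* target exists but is unlinked */
        if (err)
                return err;

        err = populate_target();        /* copy extents under cluster locks */

        if (!err)
                err = move_to_namespace(); /* only now becomes visible */

        /* On error the orphan inode is just released; the orphan scan
         * reclaims it later, so a half-built file is never reachable. */
        return err;
}

int main(void)
{
        printf("%d\n", reflink_sketch());
        return 0;
}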
*/ +static int ocfs2_user_path_parent(const char __user *path, + struct nameidata *nd, char **name) +{ + char *s = getname(path); + int error; + + if (IS_ERR(s)) + return PTR_ERR(s); + + error = path_lookup(s, LOOKUP_PARENT, nd); + if (error) + putname(s); + else + *name = s; + + return error; +} + +/** + * ocfs2_vfs_reflink - Create a reference-counted link + * + * @old_dentry: source dentry + inode + * @dir: directory to create the target + * @new_dentry: target dentry + * @preserve: if true, preserve all file attributes + */ +int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir, + struct dentry *new_dentry, bool preserve) +{ + struct inode *inode = old_dentry->d_inode; + int error; + + if (!inode) + return -ENOENT; + + error = ocfs2_may_create(dir, new_dentry); + if (error) + return error; + + if (dir->i_sb != inode->i_sb) + return -EXDEV; + + /* + * A reflink to an append-only or immutable file cannot be created. + */ + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) + return -EPERM; + + /* Only regular files can be reflinked. */ + if (!S_ISREG(inode->i_mode)) + return -EPERM; + + /* + * If the caller wants to preserve ownership, they require the + * rights to do so. + */ + if (preserve) { + if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN)) + return -EPERM; + if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN)) + return -EPERM; + } + + /* + * If the caller is modifying any aspect of the attributes, they + * are not creating a snapshot. They need read permission on the + * file. + */ + if (!preserve) { + error = inode_permission(inode, MAY_READ); + if (error) + return error; + } + + mutex_lock(&inode->i_mutex); + vfs_dq_init(dir); + error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); + mutex_unlock(&inode->i_mutex); + if (!error) + fsnotify_create(dir, new_dentry); + return error; +} +/* + * Most codes are copied from sys_linkat. + */ +int ocfs2_reflink_ioctl(struct inode *inode, + const char __user *oldname, + const char __user *newname, + bool preserve) +{ + struct dentry *new_dentry; + struct nameidata nd; + struct path old_path; + int error; + char *to = NULL; + + if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) + return -EOPNOTSUPP; + + error = user_path_at(AT_FDCWD, oldname, 0, &old_path); + if (error) { + mlog_errno(error); + return error; + } + + error = ocfs2_user_path_parent(newname, &nd, &to); + if (error) { + mlog_errno(error); + goto out; + } + + error = -EXDEV; + if (old_path.mnt != nd.path.mnt) + goto out_release; + new_dentry = lookup_create(&nd, 0); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) { + mlog_errno(error); + goto out_unlock; + } + + error = mnt_want_write(nd.path.mnt); + if (error) { + mlog_errno(error); + goto out_dput; + } + + error = ocfs2_vfs_reflink(old_path.dentry, + nd.path.dentry->d_inode, + new_dentry, preserve); + mnt_drop_write(nd.path.mnt); +out_dput: + dput(new_dentry); +out_unlock: + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); +out_release: + path_put(&nd.path); + putname(to); +out: + path_put(&old_path); + + return error; +} diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h new file mode 100644 index 0000000..c1d19b1 --- /dev/null +++ b/fs/ocfs2/refcounttree.h @@ -0,0 +1,106 @@ +/* -*- mode: c; c-basic-offset: 8; -*- + * vim: noexpandtab sw=8 ts=8 sts=0: + * + * refcounttree.h + * + * Copyright (C) 2009 Oracle. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#ifndef OCFS2_REFCOUNTTREE_H +#define OCFS2_REFCOUNTTREE_H + +struct ocfs2_refcount_tree { + struct rb_node rf_node; + u64 rf_blkno; + u32 rf_generation; + struct rw_semaphore rf_sem; + struct ocfs2_lock_res rf_lockres; + struct kref rf_getcnt; + int rf_removed; + + /* the following 4 fields are used by caching_info. */ + struct ocfs2_caching_info rf_ci; + spinlock_t rf_lock; + struct mutex rf_io_mutex; + struct super_block *rf_sb; +}; + +void ocfs2_purge_refcount_trees(struct ocfs2_super *osb); +int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, u64 ref_blkno, int rw, + struct ocfs2_refcount_tree **tree, + struct buffer_head **ref_bh); +void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb, + struct ocfs2_refcount_tree *tree, + int rw); + +int ocfs2_decrease_refcount(struct inode *inode, + handle_t *handle, u32 cpos, u32 len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc, + int delete); +int ocfs2_prepare_refcount_change_for_del(struct inode *inode, + struct buffer_head *di_bh, + u64 phys_blkno, + u32 clusters, + int *credits, + struct ocfs2_alloc_context **meta_ac); +int ocfs2_refcount_cow(struct inode *inode, struct buffer_head *di_bh, + u32 cpos, u32 write_len, u32 max_cpos); + +typedef int (ocfs2_post_refcount_func)(struct inode *inode, + handle_t *handle, + void *para); +/* + * Some refcount callers need to do more work after we modify the data b-tree + * during a refcount operation (including CoW and adding the refcount flag), + * but before the transaction completes. They must give us this structure so + * that we can do the work within our transaction. + * + */ +struct ocfs2_post_refcount { + int credits; /* credits needed for the journal. */ + ocfs2_post_refcount_func *func; /* real function.
*/ + void *para; +}; + +int ocfs2_refcounted_xattr_delete_need(struct inode *inode, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_xattr_value_root *xv, + int *meta_add, int *credits); +int ocfs2_refcount_cow_xattr(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_value_buf *vb, + struct ocfs2_refcount_tree *ref_tree, + struct buffer_head *ref_root_bh, + u32 cpos, u32 write_len, + struct ocfs2_post_refcount *post); +int ocfs2_add_refcount_flag(struct inode *inode, + struct ocfs2_extent_tree *data_et, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + u32 cpos, u32 p_cluster, u32 num_clusters, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_post_refcount *post); +int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh); +int ocfs2_try_remove_refcount_tree(struct inode *inode, + struct buffer_head *di_bh); +int ocfs2_increase_refcount(handle_t *handle, + struct ocfs2_caching_info *ci, + struct buffer_head *ref_root_bh, + u64 cpos, u32 len, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_cached_dealloc_ctxt *dealloc); +int ocfs2_reflink_ioctl(struct inode *inode, + const char __user *oldname, + const char __user *newname, + bool preserve); +#endif /* OCFS2_REFCOUNTTREE_H */ diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index 424adaa..3c3d673 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -106,8 +106,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle, mlog_entry("(new_clusters=%d, first_new_cluster = %u)\n", new_clusters, first_new_cluster); - ret = ocfs2_journal_access_gd(handle, bm_inode, group_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode), + group_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out; @@ -141,7 +141,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle, } /* update the inode accordingly. */ - ret = ocfs2_journal_access_di(handle, bm_inode, bm_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -514,7 +514,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) goto out_unlock; } - ocfs2_set_new_buffer_uptodate(inode, group_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh); ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh); if (ret) { @@ -536,8 +536,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) cl = &fe->id2.i_chain; cr = &cl->cl_recs[input->chain]; - ret = ocfs2_journal_access_gd(handle, main_bm_inode, group_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode), + group_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_commit; @@ -552,8 +552,8 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input) goto out_commit; } - ret = ocfs2_journal_access_di(handle, main_bm_inode, main_bm_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode), + main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_commit; diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 40661e7..bfbd7e9 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -150,8 +150,8 @@ int ocfs2_refresh_slot_info(struct ocfs2_super *osb) * be !NULL. Thus, ocfs2_read_blocks() will ignore blocknr. 
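The ocfs2_post_refcount structure defined above is a small callback bundle: journal credits so the callee can size its transaction, plus a function and an opaque argument to run before commit. A generic user-space sketch of the same pattern, with hypothetical names and a printf standing in for the journal work:

#include <stdio.h>

/* Hypothetical mirror of struct ocfs2_post_refcount: the caller hands
 * over a callback, its argument, and the credits it will consume, so
 * the refcount code can extend its transaction and invoke the callback
 * before committing. */
struct post_op {
        int credits;
        int (*func)(void *para);
        void *para;
};

static int run_with_post(struct post_op *post)
{
        int credits = 10;               /* base cost of the refcount work */

        if (post)
                credits += post->credits; /* reserve for the callback too */

        printf("start transaction with %d credits\n", credits);
        /* ... modify the refcount tree ... */

        if (post && post->func)
                return post->func(post->para); /* still in the transaction */
        return 0;
}

static int touch_xattr(void *para)
{
        printf("post-op on %s\n", (const char *)para);
        return 0;
}

int main(void)
{
        struct post_op post = { 2, touch_xattr, "xattr bucket" };
        return run_with_post(&post);
}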
If * this is not true, the read of -1 (UINT64_MAX) will fail. */ - ret = ocfs2_read_blocks(si->si_inode, -1, si->si_blocks, si->si_bh, - OCFS2_BH_IGNORE_CACHE, NULL); + ret = ocfs2_read_blocks(INODE_CACHE(si->si_inode), -1, si->si_blocks, + si->si_bh, OCFS2_BH_IGNORE_CACHE, NULL); if (ret == 0) { spin_lock(&osb->osb_lock); ocfs2_update_slot_info(si); @@ -213,7 +213,7 @@ static int ocfs2_update_disk_slot(struct ocfs2_super *osb, ocfs2_update_disk_slot_old(si, slot_num, &bh); spin_unlock(&osb->osb_lock); - status = ocfs2_write_block(osb, bh, si->si_inode); + status = ocfs2_write_block(osb, bh, INODE_CACHE(si->si_inode)); if (status < 0) mlog_errno(status); @@ -404,8 +404,8 @@ static int ocfs2_map_slot_buffers(struct ocfs2_super *osb, (unsigned long long)blkno); bh = NULL; /* Acquire a fresh bh */ - status = ocfs2_read_blocks(si->si_inode, blkno, 1, &bh, - OCFS2_BH_IGNORE_CACHE, NULL); + status = ocfs2_read_blocks(INODE_CACHE(si->si_inode), blkno, + 1, &bh, OCFS2_BH_IGNORE_CACHE, NULL); if (status < 0) { mlog_errno(status); goto bail; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 73a16d4..c30b644 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -310,7 +310,7 @@ int ocfs2_read_group_descriptor(struct inode *inode, struct ocfs2_dinode *di, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, gd_blkno, &tmp, + rc = ocfs2_read_block(INODE_CACHE(inode), gd_blkno, &tmp, ocfs2_validate_group_descriptor); if (rc) goto out; @@ -352,7 +352,7 @@ static int ocfs2_block_group_fill(handle_t *handle, } status = ocfs2_journal_access_gd(handle, - alloc_inode, + INODE_CACHE(alloc_inode), bg_bh, OCFS2_JOURNAL_ACCESS_CREATE); if (status < 0) { @@ -476,7 +476,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, mlog_errno(status); goto bail; } - ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(alloc_inode), bg_bh); status = ocfs2_block_group_fill(handle, alloc_inode, @@ -491,7 +491,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb, bg = (struct ocfs2_group_desc *) bg_bh->b_data; - status = ocfs2_journal_access_di(handle, alloc_inode, + status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1033,7 +1033,7 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle, journal_type = OCFS2_JOURNAL_ACCESS_UNDO; status = ocfs2_journal_access_gd(handle, - alloc_inode, + INODE_CACHE(alloc_inode), group_bh, journal_type); if (status < 0) { @@ -1106,7 +1106,8 @@ static int ocfs2_relink_block_group(handle_t *handle, bg_ptr = le64_to_cpu(bg->bg_next_group); prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group); - status = ocfs2_journal_access_gd(handle, alloc_inode, prev_bg_bh, + status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), + prev_bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -1121,8 +1122,8 @@ static int ocfs2_relink_block_group(handle_t *handle, goto out_rollback; } - status = ocfs2_journal_access_gd(handle, alloc_inode, bg_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), + bg_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_rollback; @@ -1136,8 +1137,8 @@ static int ocfs2_relink_block_group(handle_t *handle, goto out_rollback; } - status = ocfs2_journal_access_di(handle, alloc_inode, fe_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, 
INODE_CACHE(alloc_inode), + fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_rollback; @@ -1288,7 +1289,7 @@ static int ocfs2_alloc_dinode_update_counts(struct inode *inode, struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain; - ret = ocfs2_journal_access_di(handle, inode, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -1461,7 +1462,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac, /* Ok, claim our bits now: set the info on dinode, chainlist * and then the group */ status = ocfs2_journal_access_di(handle, - alloc_inode, + INODE_CACHE(alloc_inode), ac->ac_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { @@ -1907,8 +1908,8 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle, if (ocfs2_is_cluster_bitmap(alloc_inode)) journal_type = OCFS2_JOURNAL_ACCESS_UNDO; - status = ocfs2_journal_access_gd(handle, alloc_inode, group_bh, - journal_type); + status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode), + group_bh, journal_type); if (status < 0) { mlog_errno(status); goto bail; @@ -1993,8 +1994,8 @@ int ocfs2_free_suballoc_bits(handle_t *handle, goto bail; } - status = ocfs2_journal_access_di(handle, alloc_inode, alloc_bh, - OCFS2_JOURNAL_ACCESS_WRITE); + status = ocfs2_journal_access_di(handle, INODE_CACHE(alloc_inode), + alloc_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto bail; @@ -2151,7 +2152,7 @@ int ocfs2_lock_allocators(struct inode *inode, BUG_ON(clusters_to_add != 0 && data_ac == NULL); - num_free_extents = ocfs2_num_free_extents(osb, inode, et); + num_free_extents = ocfs2_num_free_extents(osb, et); if (num_free_extents < 0) { ret = num_free_extents; mlog_errno(ret); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index a3f8871..4cc3c89 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -28,7 +28,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> -#include <linux/utsname.h> #include <linux/init.h> #include <linux/random.h> #include <linux/statfs.h> @@ -69,6 +68,7 @@ #include "ver.h" #include "xattr.h" #include "quota.h" +#include "refcounttree.h" #include "buffer_head_io.h" @@ -965,7 +965,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type, int remount) return vfs_quota_disable(sb, type, DQUOT_LIMITS_ENABLED); } -static struct quotactl_ops ocfs2_quotactl_ops = { +static const struct quotactl_ops ocfs2_quotactl_ops = { .quota_on = ocfs2_quota_on, .quota_off = ocfs2_quota_off, .quota_sync = vfs_quota_sync, @@ -1668,8 +1668,6 @@ static void ocfs2_inode_init_once(void *data) spin_lock_init(&oi->ip_lock); ocfs2_extent_map_init(&oi->vfs_inode); INIT_LIST_HEAD(&oi->ip_io_markers); - oi->ip_created_trans = 0; - oi->ip_last_trans = 0; oi->ip_dir_start_lookup = 0; init_rwsem(&oi->ip_alloc_sem); @@ -1683,7 +1681,8 @@ static void ocfs2_inode_init_once(void *data) ocfs2_lock_res_init_once(&oi->ip_inode_lockres); ocfs2_lock_res_init_once(&oi->ip_open_lockres); - ocfs2_metadata_cache_init(&oi->vfs_inode); + ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode), + &ocfs2_inode_caching_ops); inode_init_once(&oi->vfs_inode); } @@ -1859,6 +1858,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) ocfs2_sync_blockdev(sb); + ocfs2_purge_refcount_trees(osb); + /* No cluster connection means we've failed during mount, so skip * all the steps which depended on 
that to complete. */ if (osb->cconn) { @@ -2065,6 +2066,8 @@ static int ocfs2_initialize_super(struct super_block *sb, goto bail; } + osb->osb_rf_lock_tree = RB_ROOT; + osb->s_feature_compat = le32_to_cpu(OCFS2_RAW_SB(di)->s_feature_compat); osb->s_feature_ro_compat = @@ -2490,7 +2493,8 @@ void __ocfs2_abort(struct super_block* sb, /* Force a panic(). This stinks, but it's better than letting * things continue without having a proper hard readonly * here. */ - OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; + if (!ocfs2_mount_local(OCFS2_SB(sb))) + OCFS2_SB(sb)->s_mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; ocfs2_handle_error(sb); } diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 579dd1b..e342103 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -38,7 +38,6 @@ #include <linux/types.h> #include <linux/slab.h> #include <linux/pagemap.h> -#include <linux/utsname.h> #include <linux/namei.h> #define MLOG_MASK_PREFIX ML_NAMEI diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 187b99f..b6284f2 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c @@ -75,15 +75,77 @@ struct ocfs2_meta_cache_item { static struct kmem_cache *ocfs2_uptodate_cachep = NULL; -void ocfs2_metadata_cache_init(struct inode *inode) +u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci) { - struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; + BUG_ON(!ci || !ci->ci_ops); - oi->ip_flags |= OCFS2_INODE_CACHE_INLINE; + return ci->ci_ops->co_owner(ci); +} + +struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci) +{ + BUG_ON(!ci || !ci->ci_ops); + + return ci->ci_ops->co_get_super(ci); +} + +static void ocfs2_metadata_cache_lock(struct ocfs2_caching_info *ci) +{ + BUG_ON(!ci || !ci->ci_ops); + + ci->ci_ops->co_cache_lock(ci); +} + +static void ocfs2_metadata_cache_unlock(struct ocfs2_caching_info *ci) +{ + BUG_ON(!ci || !ci->ci_ops); + + ci->ci_ops->co_cache_unlock(ci); +} + +void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci) +{ + BUG_ON(!ci || !ci->ci_ops); + + ci->ci_ops->co_io_lock(ci); +} + +void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci) +{ + BUG_ON(!ci || !ci->ci_ops); + + ci->ci_ops->co_io_unlock(ci); +} + + +static void ocfs2_metadata_cache_reset(struct ocfs2_caching_info *ci, + int clear) +{ + ci->ci_flags |= OCFS2_CACHE_FL_INLINE; ci->ci_num_cached = 0; + + if (clear) { + ci->ci_created_trans = 0; + ci->ci_last_trans = 0; + } +} + +void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci, + const struct ocfs2_caching_operations *ops) +{ + BUG_ON(!ops); + + ci->ci_ops = ops; + ocfs2_metadata_cache_reset(ci, 1); } +void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci) +{ + ocfs2_metadata_cache_purge(ci); + ocfs2_metadata_cache_reset(ci, 1); +} + + /* No lock taken here as 'root' is not expected to be visible to other * processes. 
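The uptodate.c rewrite that begins here replaces direct use of inode fields with calls through ocfs2_caching_operations, so the same cache code can serve both inodes and refcount trees. A hypothetical user-space reduction of that vtable indirection, with toy lock callbacks and an assert in place of the patch's BUG_ON:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct cache_info;

/* Hypothetical miniature of ocfs2_caching_operations. */
struct cache_ops {
        uint64_t (*co_owner)(struct cache_info *ci);
        void (*co_cache_lock)(struct cache_info *ci);
        void (*co_cache_unlock)(struct cache_info *ci);
};

struct cache_info {
        const struct cache_ops *ci_ops;
        uint64_t blkno;                 /* owner id for this toy backend */
};

static uint64_t toy_owner(struct cache_info *ci) { return ci->blkno; }
static void toy_lock(struct cache_info *ci)   { (void)ci; /* spin_lock */ }
static void toy_unlock(struct cache_info *ci) { (void)ci; }

static const struct cache_ops toy_ops = { toy_owner, toy_lock, toy_unlock };

static uint64_t cache_owner(struct cache_info *ci)
{
        assert(ci && ci->ci_ops);       /* BUG_ON() in the patch */
        return ci->ci_ops->co_owner(ci);
}

int main(void)
{
        struct cache_info ci = { &toy_ops, 1234 };
        printf("owner %llu\n", (unsigned long long)cache_owner(&ci));
        return 0;
}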
*/ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root) @@ -112,19 +174,20 @@ static unsigned int ocfs2_purge_copied_metadata_tree(struct rb_root *root) * This function is a few more lines longer than necessary due to some * accounting done here, but I think it's worth tracking down those * bugs sooner -- Mark */ -void ocfs2_metadata_cache_purge(struct inode *inode) +void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci) { - struct ocfs2_inode_info *oi = OCFS2_I(inode); unsigned int tree, to_purge, purged; - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; struct rb_root root = RB_ROOT; - spin_lock(&oi->ip_lock); - tree = !(oi->ip_flags & OCFS2_INODE_CACHE_INLINE); + BUG_ON(!ci || !ci->ci_ops); + + ocfs2_metadata_cache_lock(ci); + tree = !(ci->ci_flags & OCFS2_CACHE_FL_INLINE); to_purge = ci->ci_num_cached; - mlog(0, "Purge %u %s items from Inode %llu\n", to_purge, - tree ? "array" : "tree", (unsigned long long)oi->ip_blkno); + mlog(0, "Purge %u %s items from Owner %llu\n", to_purge, + tree ? "array" : "tree", + (unsigned long long)ocfs2_metadata_cache_owner(ci)); /* If we're a tree, save off the root so that we can safely * initialize the cache. We do the work to free tree members @@ -132,16 +195,17 @@ void ocfs2_metadata_cache_purge(struct inode *inode) if (tree) root = ci->ci_cache.ci_tree; - ocfs2_metadata_cache_init(inode); - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_reset(ci, 0); + ocfs2_metadata_cache_unlock(ci); purged = ocfs2_purge_copied_metadata_tree(&root); /* If possible, track the number wiped so that we can more * easily detect counting errors. Unfortunately, this is only * meaningful for trees. */ if (tree && purged != to_purge) - mlog(ML_ERROR, "Inode %llu, count = %u, purged = %u\n", - (unsigned long long)oi->ip_blkno, to_purge, purged); + mlog(ML_ERROR, "Owner %llu, count = %u, purged = %u\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + to_purge, purged); } /* Returns the index in the cache array, -1 if not found. 
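The purge path above uses a classic detach-then-free shape: save off the tree root and reset the cache while the owner's lock is held, then free the detached items with no lock held, counting them to catch accounting bugs. A minimal sketch with a singly linked list standing in for the rb-tree and the locking left as comments:

#include <stdio.h>
#include <stdlib.h>

struct item { struct item *next; };

struct cache { struct item *head; /* plus a lock in real code */ };

/* Hypothetical shape of the purge: detach everything under the
 * (imaginary) lock, then free outside it so freeing can take time
 * without stalling other cache users. */
static unsigned purge(struct cache *c)
{
        struct item *detached, *next;
        unsigned n = 0;

        /* lock(c) */
        detached = c->head;             /* save off the root */
        c->head = NULL;                 /* cache is reusable immediately */
        /* unlock(c) */

        while (detached) {
                next = detached->next;
                free(detached);
                n++;
                detached = next;
        }
        return n;                       /* compare with the expected count */
}

int main(void)
{
        struct cache c = { NULL };
        struct item *it = calloc(1, sizeof(*it));

        if (!it)
                return 1;
        c.head = it;
        printf("purged %u\n", purge(&c));
        return 0;
}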
@@ -182,27 +246,25 @@ ocfs2_search_cache_tree(struct ocfs2_caching_info *ci, return NULL; } -static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi, +static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci, struct buffer_head *bh) { int index = -1; struct ocfs2_meta_cache_item *item = NULL; - spin_lock(&oi->ip_lock); + ocfs2_metadata_cache_lock(ci); - mlog(0, "Inode %llu, query block %llu (inline = %u)\n", - (unsigned long long)oi->ip_blkno, + mlog(0, "Owner %llu, query block %llu (inline = %u)\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long) bh->b_blocknr, - !!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE)); + !!(ci->ci_flags & OCFS2_CACHE_FL_INLINE)); - if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) - index = ocfs2_search_cache_array(&oi->ip_metadata_cache, - bh->b_blocknr); + if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) + index = ocfs2_search_cache_array(ci, bh->b_blocknr); else - item = ocfs2_search_cache_tree(&oi->ip_metadata_cache, - bh->b_blocknr); + item = ocfs2_search_cache_tree(ci, bh->b_blocknr); - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); mlog(0, "index = %d, item = %p\n", index, item); @@ -214,7 +276,7 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi, * * This can be called under lock_buffer() */ -int ocfs2_buffer_uptodate(struct inode *inode, +int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { /* Doesn't matter if the bh is in our cache or not -- if it's @@ -230,24 +292,24 @@ int ocfs2_buffer_uptodate(struct inode *inode, /* Ok, locally the buffer is marked as up to date, now search * our cache to see if we can trust that. */ - return ocfs2_buffer_cached(OCFS2_I(inode), bh); + return ocfs2_buffer_cached(ci, bh); } -/* +/* * Determine whether a buffer is currently out on a read-ahead request. - * ip_io_sem should be held to serialize submitters with the logic here. + * ci_io_sem should be held to serialize submitters with the logic here. */ -int ocfs2_buffer_read_ahead(struct inode *inode, +int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci, struct buffer_head *bh) { - return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh); + return buffer_locked(bh) && ocfs2_buffer_cached(ci, bh); } /* Requires ip_lock */ static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci, sector_t block) { - BUG_ON(ci->ci_num_cached >= OCFS2_INODE_MAX_CACHE_ARRAY); + BUG_ON(ci->ci_num_cached >= OCFS2_CACHE_INFO_MAX_ARRAY); mlog(0, "block %llu takes position %u\n", (unsigned long long) block, ci->ci_num_cached); @@ -292,66 +354,64 @@ static void __ocfs2_insert_cache_tree(struct ocfs2_caching_info *ci, ci->ci_num_cached++; } -static inline int ocfs2_insert_can_use_array(struct ocfs2_inode_info *oi, - struct ocfs2_caching_info *ci) +/* co_cache_lock() must be held */ +static inline int ocfs2_insert_can_use_array(struct ocfs2_caching_info *ci) { - assert_spin_locked(&oi->ip_lock); - - return (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) && - (ci->ci_num_cached < OCFS2_INODE_MAX_CACHE_ARRAY); + return (ci->ci_flags & OCFS2_CACHE_FL_INLINE) && + (ci->ci_num_cached < OCFS2_CACHE_INFO_MAX_ARRAY); } -/* tree should be exactly OCFS2_INODE_MAX_CACHE_ARRAY wide. NULL the +/* tree should be exactly OCFS2_CACHE_INFO_MAX_ARRAY wide. NULL the * pointers in tree after we use them - this allows caller to detect - * when to free in case of error. */ -static void ocfs2_expand_cache(struct ocfs2_inode_info *oi, + * when to free in case of error. + * + * The co_cache_lock() must be held. 
*/ +static void ocfs2_expand_cache(struct ocfs2_caching_info *ci, struct ocfs2_meta_cache_item **tree) { int i; - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; - mlog_bug_on_msg(ci->ci_num_cached != OCFS2_INODE_MAX_CACHE_ARRAY, - "Inode %llu, num cached = %u, should be %u\n", - (unsigned long long)oi->ip_blkno, ci->ci_num_cached, - OCFS2_INODE_MAX_CACHE_ARRAY); - mlog_bug_on_msg(!(oi->ip_flags & OCFS2_INODE_CACHE_INLINE), - "Inode %llu not marked as inline anymore!\n", - (unsigned long long)oi->ip_blkno); - assert_spin_locked(&oi->ip_lock); + mlog_bug_on_msg(ci->ci_num_cached != OCFS2_CACHE_INFO_MAX_ARRAY, + "Owner %llu, num cached = %u, should be %u\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + ci->ci_num_cached, OCFS2_CACHE_INFO_MAX_ARRAY); + mlog_bug_on_msg(!(ci->ci_flags & OCFS2_CACHE_FL_INLINE), + "Owner %llu not marked as inline anymore!\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci)); /* Be careful to initialize the tree members *first* because * once the ci_tree is used, the array is junk... */ - for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) + for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) tree[i]->c_block = ci->ci_cache.ci_array[i]; - oi->ip_flags &= ~OCFS2_INODE_CACHE_INLINE; + ci->ci_flags &= ~OCFS2_CACHE_FL_INLINE; ci->ci_cache.ci_tree = RB_ROOT; /* this will be set again by __ocfs2_insert_cache_tree */ ci->ci_num_cached = 0; - for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { + for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { __ocfs2_insert_cache_tree(ci, tree[i]); tree[i] = NULL; } mlog(0, "Expanded %llu to a tree cache: flags 0x%x, num = %u\n", - (unsigned long long)oi->ip_blkno, oi->ip_flags, ci->ci_num_cached); + (unsigned long long)ocfs2_metadata_cache_owner(ci), + ci->ci_flags, ci->ci_num_cached); } /* Slow path function - memory allocation is necessary. See the * comment above ocfs2_set_buffer_uptodate for more information. */ -static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, +static void __ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, sector_t block, int expand_tree) { int i; - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; struct ocfs2_meta_cache_item *new = NULL; - struct ocfs2_meta_cache_item *tree[OCFS2_INODE_MAX_CACHE_ARRAY] = + struct ocfs2_meta_cache_item *tree[OCFS2_CACHE_INFO_MAX_ARRAY] = { NULL, }; - mlog(0, "Inode %llu, block %llu, expand = %d\n", - (unsigned long long)oi->ip_blkno, + mlog(0, "Owner %llu, block %llu, expand = %d\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)block, expand_tree); new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); @@ -364,7 +424,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, if (expand_tree) { /* Do *not* allocate an array here - the removal code * has no way of tracking that. */ - for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { + for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) { tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); if (!tree[i]) { @@ -376,21 +436,21 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, } } - spin_lock(&oi->ip_lock); - if (ocfs2_insert_can_use_array(oi, ci)) { + ocfs2_metadata_cache_lock(ci); + if (ocfs2_insert_can_use_array(ci)) { mlog(0, "Someone cleared the tree underneath us\n"); /* Ok, items were removed from the cache in between * locks. 
Detect this and revert back to the fast path */ ocfs2_append_cache_array(ci, block); - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); goto out_free; } if (expand_tree) - ocfs2_expand_cache(oi, tree); + ocfs2_expand_cache(ci, tree); __ocfs2_insert_cache_tree(ci, new); - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); new = NULL; out_free: @@ -400,14 +460,14 @@ out_free: /* If these were used, then ocfs2_expand_cache re-set them to * NULL for us. */ if (tree[0]) { - for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) + for (i = 0; i < OCFS2_CACHE_INFO_MAX_ARRAY; i++) if (tree[i]) kmem_cache_free(ocfs2_uptodate_cachep, tree[i]); } } -/* Item insertion is guarded by ip_io_mutex, so the insertion path takes +/* Item insertion is guarded by co_io_lock(), so the insertion path takes * advantage of this by not rechecking for a duplicate insert during * the slow case. Additionally, if the cache needs to be bumped up to * a tree, the code will not recheck after acquiring the lock -- @@ -425,59 +485,55 @@ out_free: * Readahead buffers can be passed in here before the I/O request is * completed. */ -void ocfs2_set_buffer_uptodate(struct inode *inode, +void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { int expand; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; /* The block may very well exist in our cache already, so avoid * doing any more work in that case. */ - if (ocfs2_buffer_cached(oi, bh)) + if (ocfs2_buffer_cached(ci, bh)) return; - mlog(0, "Inode %llu, inserting block %llu\n", - (unsigned long long)oi->ip_blkno, + mlog(0, "Owner %llu, inserting block %llu\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long)bh->b_blocknr); /* No need to recheck under spinlock - insertion is guarded by - * ip_io_mutex */ - spin_lock(&oi->ip_lock); - if (ocfs2_insert_can_use_array(oi, ci)) { + * co_io_lock() */ + ocfs2_metadata_cache_lock(ci); + if (ocfs2_insert_can_use_array(ci)) { /* Fast case - it's an array and there's a free * spot. */ ocfs2_append_cache_array(ci, bh->b_blocknr); - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); return; } expand = 0; - if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) { + if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { /* We need to bump things up to a tree. */ expand = 1; } - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); - __ocfs2_set_buffer_uptodate(oi, bh->b_blocknr, expand); + __ocfs2_set_buffer_uptodate(ci, bh->b_blocknr, expand); } /* Called against a newly allocated buffer. Most likely nobody should * be able to read this sort of metadata while it's still being - * allocated, but this is careful to take ip_io_mutex anyway. */ -void ocfs2_set_new_buffer_uptodate(struct inode *inode, + * allocated, but this is careful to take co_io_lock() anyway. */ +void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh) { - struct ocfs2_inode_info *oi = OCFS2_I(inode); - /* This should definitely *not* exist in our cache */ - BUG_ON(ocfs2_buffer_cached(oi, bh)); + BUG_ON(ocfs2_buffer_cached(ci, bh)); set_buffer_uptodate(bh); - mutex_lock(&oi->ip_io_mutex); - ocfs2_set_buffer_uptodate(inode, bh); - mutex_unlock(&oi->ip_io_mutex); + ocfs2_metadata_cache_io_lock(ci); + ocfs2_set_buffer_uptodate(ci, bh); + ocfs2_metadata_cache_io_unlock(ci); } /* Requires ip_lock. 
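The slow path above demonstrates the allocate-outside-the-lock idiom: memory is allocated first, because a spinlock holder must not sleep in the allocator, then the lock is retaken and the fast-path condition rechecked, since another thread may have changed the cache in between; an allocation that turns out to be unneeded is simply freed. A hypothetical user-space sketch of that dance:

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_MAX 2

struct cache {
        int n;                          /* items in the inline array */
        /* plus a spinlock in real code */
};

/* Hypothetical slow path mirroring __ocfs2_set_buffer_uptodate(): the
 * allocation happens with no lock held, then the fast-path condition is
 * rechecked because another thread may have emptied the array. */
static void insert_slow(struct cache *c)
{
        void *new = malloc(32);         /* kmem_cache_alloc(..., GFP_NOFS) */

        if (!new)
                return;                 /* cache inserts are best-effort */

        /* lock(c) */
        if (c->n < ARRAY_MAX) {
                /* Someone cleared the array underneath us; take the
                 * fast path after all and drop the unused allocation. */
                c->n++;
                /* unlock(c) */
                free(new);
                return;
        }
        /* ... insert 'new' into the tree (the tree keeps it) ... */
        /* unlock(c) */
        free(new);                      /* demo only; real code keeps it */
}

int main(void)
{
        struct cache c = { ARRAY_MAX };
        insert_slow(&c);
        printf("done\n");
        return 0;
}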
*/ @@ -487,7 +543,7 @@ static void ocfs2_remove_metadata_array(struct ocfs2_caching_info *ci, sector_t *array = ci->ci_cache.ci_array; int bytes; - BUG_ON(index < 0 || index >= OCFS2_INODE_MAX_CACHE_ARRAY); + BUG_ON(index < 0 || index >= OCFS2_CACHE_INFO_MAX_ARRAY); BUG_ON(index >= ci->ci_num_cached); BUG_ON(!ci->ci_num_cached); @@ -515,21 +571,19 @@ static void ocfs2_remove_metadata_tree(struct ocfs2_caching_info *ci, ci->ci_num_cached--; } -static void ocfs2_remove_block_from_cache(struct inode *inode, +static void ocfs2_remove_block_from_cache(struct ocfs2_caching_info *ci, sector_t block) { int index; struct ocfs2_meta_cache_item *item = NULL; - struct ocfs2_inode_info *oi = OCFS2_I(inode); - struct ocfs2_caching_info *ci = &oi->ip_metadata_cache; - spin_lock(&oi->ip_lock); - mlog(0, "Inode %llu, remove %llu, items = %u, array = %u\n", - (unsigned long long)oi->ip_blkno, + ocfs2_metadata_cache_lock(ci); + mlog(0, "Owner %llu, remove %llu, items = %u, array = %u\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), (unsigned long long) block, ci->ci_num_cached, - oi->ip_flags & OCFS2_INODE_CACHE_INLINE); + ci->ci_flags & OCFS2_CACHE_FL_INLINE); - if (oi->ip_flags & OCFS2_INODE_CACHE_INLINE) { + if (ci->ci_flags & OCFS2_CACHE_FL_INLINE) { index = ocfs2_search_cache_array(ci, block); if (index != -1) ocfs2_remove_metadata_array(ci, index); @@ -538,7 +592,7 @@ static void ocfs2_remove_block_from_cache(struct inode *inode, if (item) ocfs2_remove_metadata_tree(ci, item); } - spin_unlock(&oi->ip_lock); + ocfs2_metadata_cache_unlock(ci); if (item) kmem_cache_free(ocfs2_uptodate_cachep, item); @@ -549,23 +603,24 @@ static void ocfs2_remove_block_from_cache(struct inode *inode, * bother reverting things to an inlined array in the case of a remove * which moves us back under the limit. */ -void ocfs2_remove_from_cache(struct inode *inode, +void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci, struct buffer_head *bh) { sector_t block = bh->b_blocknr; - ocfs2_remove_block_from_cache(inode, block); + ocfs2_remove_block_from_cache(ci, block); } /* Called when we remove xattr clusters from an inode. */ -void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode, +void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci, sector_t block, u32 c_len) { - unsigned int i, b_len = ocfs2_clusters_to_blocks(inode->i_sb, 1) * c_len; + struct super_block *sb = ocfs2_metadata_cache_get_super(ci); + unsigned int i, b_len = ocfs2_clusters_to_blocks(sb, 1) * c_len; for (i = 0; i < b_len; i++, block++) - ocfs2_remove_block_from_cache(inode, block); + ocfs2_remove_block_from_cache(ci, block); } int __init init_ocfs2_uptodate_cache(void) @@ -577,7 +632,7 @@ int __init init_ocfs2_uptodate_cache(void) return -ENOMEM; mlog(0, "%u inlined cache items per inode.\n", - OCFS2_INODE_MAX_CACHE_ARRAY); + OCFS2_CACHE_INFO_MAX_ARRAY); return 0; } diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h index 531b4b3..0d826fe 100644 --- a/fs/ocfs2/uptodate.h +++ b/fs/ocfs2/uptodate.h @@ -26,24 +26,59 @@ #ifndef OCFS2_UPTODATE_H #define OCFS2_UPTODATE_H +/* + * The caching code relies on locking provided by the user of + * struct ocfs2_caching_info. These operations connect that up. + */ +struct ocfs2_caching_operations { + /* + * A u64 representing the owning structure. Usually this + * is the block number (i_blkno or whatnot). This is used so + * that caching log messages can identify the owning structure. 
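
/*
 * [Editorial sketch] To make the contract of these callbacks concrete,
 * here is a plausible inode-side provider: co_owner reports the inode's
 * block number purely so log lines can say which object the cache
 * belongs to, the cache lock is the non-sleeping ip_lock and the I/O
 * lock the sleeping ip_io_mutex.  Names and layout are invented for the
 * sketch; in the kernel the callbacks receive a caching_info pointer and
 * would use container_of() to reach the inode.
 */
#include <pthread.h>

struct toy_inode {
	unsigned long long	ip_blkno;
	pthread_mutex_t		ip_lock;	/* models the spinlock */
	pthread_mutex_t		ip_io_mutex;	/* models the I/O mutex */
};

static unsigned long long toy_co_owner(struct toy_inode *oi)
{
	return oi->ip_blkno;	/* the "Owner %llu" in the mlog lines */
}

static void toy_co_cache_lock(struct toy_inode *oi)
{
	pthread_mutex_lock(&oi->ip_lock);	/* must not sleep */
}

static void toy_co_io_lock(struct toy_inode *oi)
{
	pthread_mutex_lock(&oi->ip_io_mutex);	/* may sleep */
}
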
+ */ + u64 (*co_owner)(struct ocfs2_caching_info *ci); + + /* The superblock is needed during I/O. */ + struct super_block *(*co_get_super)(struct ocfs2_caching_info *ci); + /* + * Lock and unlock the caching data. These will not sleep, and + * should probably be spinlocks. + */ + void (*co_cache_lock)(struct ocfs2_caching_info *ci); + void (*co_cache_unlock)(struct ocfs2_caching_info *ci); + + /* + * Lock and unlock for disk I/O. These will sleep, and should + * be mutexes. + */ + void (*co_io_lock)(struct ocfs2_caching_info *ci); + void (*co_io_unlock)(struct ocfs2_caching_info *ci); +}; + int __init init_ocfs2_uptodate_cache(void); void exit_ocfs2_uptodate_cache(void); -void ocfs2_metadata_cache_init(struct inode *inode); -void ocfs2_metadata_cache_purge(struct inode *inode); +void ocfs2_metadata_cache_init(struct ocfs2_caching_info *ci, + const struct ocfs2_caching_operations *ops); +void ocfs2_metadata_cache_purge(struct ocfs2_caching_info *ci); +void ocfs2_metadata_cache_exit(struct ocfs2_caching_info *ci); + +u64 ocfs2_metadata_cache_owner(struct ocfs2_caching_info *ci); +void ocfs2_metadata_cache_io_lock(struct ocfs2_caching_info *ci); +void ocfs2_metadata_cache_io_unlock(struct ocfs2_caching_info *ci); -int ocfs2_buffer_uptodate(struct inode *inode, +int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh); -void ocfs2_set_buffer_uptodate(struct inode *inode, +void ocfs2_set_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh); -void ocfs2_set_new_buffer_uptodate(struct inode *inode, +void ocfs2_set_new_buffer_uptodate(struct ocfs2_caching_info *ci, struct buffer_head *bh); -void ocfs2_remove_from_cache(struct inode *inode, +void ocfs2_remove_from_cache(struct ocfs2_caching_info *ci, struct buffer_head *bh); -void ocfs2_remove_xattr_clusters_from_cache(struct inode *inode, +void ocfs2_remove_xattr_clusters_from_cache(struct ocfs2_caching_info *ci, sector_t block, u32 c_len); -int ocfs2_buffer_read_ahead(struct inode *inode, +int ocfs2_buffer_read_ahead(struct ocfs2_caching_info *ci, struct buffer_head *bh); #endif /* OCFS2_UPTODATE_H */ diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index d1a27cd..fe34190 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -55,7 +55,8 @@ #include "buffer_head_io.h" #include "super.h" #include "xattr.h" - +#include "refcounttree.h" +#include "acl.h" struct ocfs2_xattr_def_value_root { struct ocfs2_xattr_value_root xv; @@ -140,7 +141,7 @@ struct ocfs2_xattr_search { int not_found; }; -static int ocfs2_xattr_bucket_get_name_value(struct inode *inode, +static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, int *block_off, @@ -157,7 +158,7 @@ static int ocfs2_xattr_index_block_find(struct inode *inode, struct ocfs2_xattr_search *xs); static int ocfs2_xattr_tree_list_index_block(struct inode *inode, - struct ocfs2_xattr_tree_root *xt, + struct buffer_head *blk_bh, char *buffer, size_t buffer_size); @@ -170,12 +171,42 @@ static int ocfs2_xattr_set_entry_index_block(struct inode *inode, struct ocfs2_xattr_search *xs, struct ocfs2_xattr_set_ctxt *ctxt); -static int ocfs2_delete_xattr_index_block(struct inode *inode, - struct buffer_head *xb_bh); +typedef int (xattr_tree_rec_func)(struct inode *inode, + struct buffer_head *root_bh, + u64 blkno, u32 cpos, u32 len, void *para); +static int ocfs2_iterate_xattr_index_block(struct inode *inode, + struct buffer_head *root_bh, + xattr_tree_rec_func *rec_func, + void *para); +static int 
ocfs2_delete_xattr_in_bucket(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + void *para); +static int ocfs2_rm_xattr_cluster(struct inode *inode, + struct buffer_head *root_bh, + u64 blkno, + u32 cpos, + u32 len, + void *para); + static int ocfs2_mv_xattr_buckets(struct inode *inode, handle_t *handle, u64 src_blk, u64 last_blk, u64 to_blk, unsigned int start_bucket, u32 *first_hash); +static int ocfs2_prepare_refcount_xattr(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xis, + struct ocfs2_xattr_search *xbs, + struct ocfs2_refcount_tree **ref_tree, + int *meta_need, + int *credits); +static int ocfs2_get_xattr_tree_value_root(struct super_block *sb, + struct ocfs2_xattr_bucket *bucket, + int offset, + struct ocfs2_xattr_value_root **xv, + struct buffer_head **bh); +static int ocfs2_xattr_security_set(struct inode *inode, const char *name, + const void *value, size_t size, int flags); static inline u16 ocfs2_xattr_buckets_per_cluster(struct ocfs2_super *osb) { @@ -254,9 +285,9 @@ static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, break; } - if (!ocfs2_buffer_uptodate(bucket->bu_inode, + if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i])) - ocfs2_set_new_buffer_uptodate(bucket->bu_inode, + ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i]); } @@ -271,7 +302,7 @@ static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, { int rc; - rc = ocfs2_read_blocks(bucket->bu_inode, xb_blkno, + rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno, bucket->bu_blocks, bucket->bu_bhs, 0, NULL); if (!rc) { @@ -297,7 +328,8 @@ static int ocfs2_xattr_bucket_journal_access(handle_t *handle, int i, rc = 0; for (i = 0; i < bucket->bu_blocks; i++) { - rc = ocfs2_journal_access(handle, bucket->bu_inode, + rc = ocfs2_journal_access(handle, + INODE_CACHE(bucket->bu_inode), bucket->bu_bhs[i], type); if (rc) { mlog_errno(rc); @@ -399,7 +431,7 @@ static int ocfs2_read_xattr_block(struct inode *inode, u64 xb_blkno, int rc; struct buffer_head *tmp = *bh; - rc = ocfs2_read_block(inode, xb_blkno, &tmp, + rc = ocfs2_read_block(INODE_CACHE(inode), xb_blkno, &tmp, ocfs2_validate_xattr_block); /* If ocfs2_read_block() got us a new bh, pass it up. 
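
/*
 * [Editorial sketch] Every call site in this hunk trades a struct inode *
 * for INODE_CACHE(inode).  The accessor's definition sits outside this
 * part of the diff; a reasonable model is "return the caching_info
 * embedded in the ocfs2 inode", roughly as below.  The payoff is that
 * the read/journal helpers no longer need to know what an inode is, so
 * other metadata owners can reuse them.
 */
struct toy_ci {
	int unused;		/* stands in for ocfs2_caching_info */
};

struct toy_ocfs2_inode {
	unsigned long long	ip_blkno;
	struct toy_ci		ip_metadata_cache;	/* embedded */
};

/* Model of INODE_CACHE(): hand the generic layer only the cache. */
#define TOY_INODE_CACHE(oi)	(&(oi)->ip_metadata_cache)
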
*/ @@ -596,15 +628,14 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, int status = 0; handle_t *handle = ctxt->handle; enum ocfs2_alloc_restarted why; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); u32 prev_clusters, logical_start = le32_to_cpu(vb->vb_xv->xr_clusters); struct ocfs2_extent_tree et; mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add); - ocfs2_init_xattr_value_extent_tree(&et, inode, vb); + ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); - status = vb->vb_access(handle, inode, vb->vb_bh, + status = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); @@ -612,13 +643,11 @@ static int ocfs2_xattr_extend_allocation(struct inode *inode, } prev_clusters = le32_to_cpu(vb->vb_xv->xr_clusters); - status = ocfs2_add_clusters_in_btree(osb, - inode, + status = ocfs2_add_clusters_in_btree(handle, + &et, &logical_start, clusters_to_add, 0, - &et, - handle, ctxt->data_ac, ctxt->meta_ac, &why); @@ -649,6 +678,7 @@ leave: static int __ocfs2_remove_xattr_range(struct inode *inode, struct ocfs2_xattr_value_buf *vb, u32 cpos, u32 phys_cpos, u32 len, + unsigned int ext_flags, struct ocfs2_xattr_set_ctxt *ctxt) { int ret; @@ -656,16 +686,16 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, handle_t *handle = ctxt->handle; struct ocfs2_extent_tree et; - ocfs2_init_xattr_value_extent_tree(&et, inode, vb); + ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); - ret = vb->vb_access(handle, inode, vb->vb_bh, + ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out; } - ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, ctxt->meta_ac, + ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac, &ctxt->dealloc); if (ret) { mlog_errno(ret); @@ -680,7 +710,14 @@ static int __ocfs2_remove_xattr_range(struct inode *inode, goto out; } - ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, phys_blkno, len); + if (ext_flags & OCFS2_EXT_REFCOUNTED) + ret = ocfs2_decrease_refcount(inode, handle, + ocfs2_blocks_to_clusters(inode->i_sb, + phys_blkno), + len, ctxt->meta_ac, &ctxt->dealloc, 1); + else + ret = ocfs2_cache_cluster_dealloc(&ctxt->dealloc, + phys_blkno, len); if (ret) mlog_errno(ret); @@ -695,6 +732,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, struct ocfs2_xattr_set_ctxt *ctxt) { int ret = 0; + unsigned int ext_flags; u32 trunc_len, cpos, phys_cpos, alloc_size; u64 block; @@ -706,7 +744,7 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, while (trunc_len) { ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos, &alloc_size, - &vb->vb_xv->xr_list); + &vb->vb_xv->xr_list, &ext_flags); if (ret) { mlog_errno(ret); goto out; @@ -717,15 +755,15 @@ static int ocfs2_xattr_shrink_size(struct inode *inode, ret = __ocfs2_remove_xattr_range(inode, vb, cpos, phys_cpos, alloc_size, - ctxt); + ext_flags, ctxt); if (ret) { mlog_errno(ret); goto out; } block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); - ocfs2_remove_xattr_clusters_from_cache(inode, block, - alloc_size); + ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), + block, alloc_size); cpos += alloc_size; trunc_len -= alloc_size; } @@ -810,6 +848,23 @@ static int ocfs2_xattr_list_entries(struct inode *inode, return result; } +int ocfs2_has_inline_xattr_value_outside(struct inode *inode, + struct ocfs2_dinode *di) +{ + struct ocfs2_xattr_header *xh; + int i; + + xh = (struct ocfs2_xattr_header *) + ((void 
*)di + inode->i_sb->s_blocksize - + le16_to_cpu(di->i_xattr_inline_size)); + + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) + if (!ocfs2_xattr_is_local(&xh->xh_entries[i])) + return 1; + + return 0; +} + static int ocfs2_xattr_ibody_list(struct inode *inode, struct ocfs2_dinode *di, char *buffer, @@ -855,11 +910,9 @@ static int ocfs2_xattr_block_list(struct inode *inode, struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size); - } else { - struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root; - ret = ocfs2_xattr_tree_list_index_block(inode, xt, + } else + ret = ocfs2_xattr_tree_list_index_block(inode, blk_bh, buffer, buffer_size); - } brelse(blk_bh); @@ -961,7 +1014,7 @@ static int ocfs2_xattr_get_value_outside(struct inode *inode, cpos = 0; while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, - &num_clusters, el); + &num_clusters, el, NULL); if (ret) { mlog_errno(ret); goto out; @@ -970,7 +1023,8 @@ static int ocfs2_xattr_get_value_outside(struct inode *inode, blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); /* Copy ocfs2_xattr_value */ for (i = 0; i < num_clusters * bpc; i++, blkno++) { - ret = ocfs2_read_block(inode, blkno, &bh, NULL); + ret = ocfs2_read_block(INODE_CACHE(inode), blkno, + &bh, NULL); if (ret) { mlog_errno(ret); goto out; @@ -1085,7 +1139,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, i = xs->here - xs->header->xh_entries; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { - ret = ocfs2_xattr_bucket_get_name_value(inode, + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(xs->bucket), i, &block_off, @@ -1183,7 +1237,7 @@ static int ocfs2_xattr_get(struct inode *inode, static int __ocfs2_xattr_set_value_outside(struct inode *inode, handle_t *handle, - struct ocfs2_xattr_value_root *xv, + struct ocfs2_xattr_value_buf *vb, const void *value, int value_len) { @@ -1194,28 +1248,34 @@ static int __ocfs2_xattr_set_value_outside(struct inode *inode, u32 clusters = ocfs2_clusters_for_bytes(inode->i_sb, value_len); u64 blkno; struct buffer_head *bh = NULL; + unsigned int ext_flags; + struct ocfs2_xattr_value_root *xv = vb->vb_xv; BUG_ON(clusters > le32_to_cpu(xv->xr_clusters)); while (cpos < clusters) { ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, - &num_clusters, &xv->xr_list); + &num_clusters, &xv->xr_list, + &ext_flags); if (ret) { mlog_errno(ret); goto out; } + BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); + blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster); for (i = 0; i < num_clusters * bpc; i++, blkno++) { - ret = ocfs2_read_block(inode, blkno, &bh, NULL); + ret = ocfs2_read_block(INODE_CACHE(inode), blkno, + &bh, NULL); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access(handle, - inode, + INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { @@ -1266,7 +1326,7 @@ static int ocfs2_xattr_cleanup(struct inode *inode, void *val = xs->base + offs; size_t size = OCFS2_XATTR_SIZE(name_len) + OCFS2_XATTR_ROOT_SIZE; - ret = vb->vb_access(handle, inode, vb->vb_bh, + ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1294,7 +1354,7 @@ static int ocfs2_xattr_update_entry(struct inode *inode, { int ret; - ret = vb->vb_access(handle, inode, vb->vb_bh, + ret = vb->vb_access(handle, INODE_CACHE(inode), vb->vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1355,7 +1415,7 @@ static int 
ocfs2_xattr_set_value_outside(struct inode *inode, mlog_errno(ret); return ret; } - ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb->vb_xv, + ret = __ocfs2_xattr_set_value_outside(inode, ctxt->handle, vb, xi->value, xi->value_len); if (ret < 0) mlog_errno(ret); @@ -1594,7 +1654,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, ret = __ocfs2_xattr_set_value_outside(inode, handle, - vb.vb_xv, + &vb, xi->value, xi->value_len); if (ret < 0) @@ -1615,7 +1675,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, } } - ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), xs->inode_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1623,7 +1683,7 @@ static int ocfs2_xattr_set_entry(struct inode *inode, } if (!(flag & OCFS2_INLINE_XATTR_FL)) { - ret = vb.vb_access(handle, inode, vb.vb_bh, + ret = vb.vb_access(handle, INODE_CACHE(inode), vb.vb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1700,51 +1760,112 @@ out: return ret; } +/* + * In xattr remove, if it is stored outside and refcounted, we may have + * the chance to split the refcount tree. So need the allocators. + */ +static int ocfs2_lock_xattr_remove_allocators(struct inode *inode, + struct ocfs2_xattr_value_root *xv, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_alloc_context **meta_ac, + int *ref_credits) +{ + int ret, meta_add = 0; + u32 p_cluster, num_clusters; + unsigned int ext_flags; + + *ref_credits = 0; + ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster, + &num_clusters, + &xv->xr_list, + &ext_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) + goto out; + + ret = ocfs2_refcounted_xattr_delete_need(inode, ref_ci, + ref_root_bh, xv, + &meta_add, ref_credits); + if (ret) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb), + meta_add, meta_ac); + if (ret) + mlog_errno(ret); + +out: + return ret; +} + static int ocfs2_remove_value_outside(struct inode*inode, struct ocfs2_xattr_value_buf *vb, - struct ocfs2_xattr_header *header) + struct ocfs2_xattr_header *header, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh) { - int ret = 0, i; + int ret = 0, i, ref_credits; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; + void *val; ocfs2_init_dealloc_ctxt(&ctxt.dealloc); - ctxt.handle = ocfs2_start_trans(osb, - ocfs2_remove_extent_credits(osb->sb)); - if (IS_ERR(ctxt.handle)) { - ret = PTR_ERR(ctxt.handle); - mlog_errno(ret); - goto out; - } - for (i = 0; i < le16_to_cpu(header->xh_count); i++) { struct ocfs2_xattr_entry *entry = &header->xh_entries[i]; - if (!ocfs2_xattr_is_local(entry)) { - void *val; + if (ocfs2_xattr_is_local(entry)) + continue; - val = (void *)header + - le16_to_cpu(entry->xe_name_offset); - vb->vb_xv = (struct ocfs2_xattr_value_root *) - (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); - ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); - if (ret < 0) { - mlog_errno(ret); - break; - } + val = (void *)header + + le16_to_cpu(entry->xe_name_offset); + vb->vb_xv = (struct ocfs2_xattr_value_root *) + (val + OCFS2_XATTR_SIZE(entry->xe_name_len)); + + ret = ocfs2_lock_xattr_remove_allocators(inode, vb->vb_xv, + ref_ci, ref_root_bh, + &ctxt.meta_ac, + &ref_credits); + + ctxt.handle = ocfs2_start_trans(osb, ref_credits + + ocfs2_remove_extent_credits(osb->sb)); + if 
(IS_ERR(ctxt.handle)) { + ret = PTR_ERR(ctxt.handle); + mlog_errno(ret); + break; + } + + ret = ocfs2_xattr_value_truncate(inode, vb, 0, &ctxt); + if (ret < 0) { + mlog_errno(ret); + break; + } + + ocfs2_commit_trans(osb, ctxt.handle); + if (ctxt.meta_ac) { + ocfs2_free_alloc_context(ctxt.meta_ac); + ctxt.meta_ac = NULL; } } - ocfs2_commit_trans(osb, ctxt.handle); + if (ctxt.meta_ac) + ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); -out: return ret; } static int ocfs2_xattr_ibody_remove(struct inode *inode, - struct buffer_head *di_bh) + struct buffer_head *di_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; @@ -1759,13 +1880,21 @@ static int ocfs2_xattr_ibody_remove(struct inode *inode, ((void *)di + inode->i_sb->s_blocksize - le16_to_cpu(di->i_xattr_inline_size)); - ret = ocfs2_remove_value_outside(inode, &vb, header); + ret = ocfs2_remove_value_outside(inode, &vb, header, + ref_ci, ref_root_bh); return ret; } +struct ocfs2_rm_xattr_bucket_para { + struct ocfs2_caching_info *ref_ci; + struct buffer_head *ref_root_bh; +}; + static int ocfs2_xattr_block_remove(struct inode *inode, - struct buffer_head *blk_bh) + struct buffer_head *blk_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh) { struct ocfs2_xattr_block *xb; int ret = 0; @@ -1773,19 +1902,29 @@ static int ocfs2_xattr_block_remove(struct inode *inode, .vb_bh = blk_bh, .vb_access = ocfs2_journal_access_xb, }; + struct ocfs2_rm_xattr_bucket_para args = { + .ref_ci = ref_ci, + .ref_root_bh = ref_root_bh, + }; xb = (struct ocfs2_xattr_block *)blk_bh->b_data; if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { struct ocfs2_xattr_header *header = &(xb->xb_attrs.xb_header); - ret = ocfs2_remove_value_outside(inode, &vb, header); + ret = ocfs2_remove_value_outside(inode, &vb, header, + ref_ci, ref_root_bh); } else - ret = ocfs2_delete_xattr_index_block(inode, blk_bh); + ret = ocfs2_iterate_xattr_index_block(inode, + blk_bh, + ocfs2_rm_xattr_cluster, + &args); return ret; } static int ocfs2_xattr_free_block(struct inode *inode, - u64 block) + u64 block, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh) { struct inode *xb_alloc_inode; struct buffer_head *xb_alloc_bh = NULL; @@ -1803,7 +1942,7 @@ static int ocfs2_xattr_free_block(struct inode *inode, goto out; } - ret = ocfs2_xattr_block_remove(inode, blk_bh); + ret = ocfs2_xattr_block_remove(inode, blk_bh, ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -1863,6 +2002,9 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; + struct ocfs2_refcount_tree *ref_tree = NULL; + struct buffer_head *ref_root_bh = NULL; + struct ocfs2_caching_info *ref_ci = NULL; handle_t *handle; int ret; @@ -1872,8 +2014,21 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) if (!(oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) return 0; + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) { + ret = ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), + le64_to_cpu(di->i_refcount_loc), + 1, &ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + ref_ci = &ref_tree->rf_ci; + + } + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { - ret = ocfs2_xattr_ibody_remove(inode, di_bh); + ret = 
ocfs2_xattr_ibody_remove(inode, di_bh, + ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -1882,7 +2037,8 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) if (di->i_xattr_loc) { ret = ocfs2_xattr_free_block(inode, - le64_to_cpu(di->i_xattr_loc)); + le64_to_cpu(di->i_xattr_loc), + ref_ci, ref_root_bh); if (ret < 0) { mlog_errno(ret); goto out; @@ -1896,7 +2052,7 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) mlog_errno(ret); goto out; } - ret = ocfs2_journal_access_di(handle, inode, di_bh, + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -1916,6 +2072,9 @@ int ocfs2_xattr_remove(struct inode *inode, struct buffer_head *di_bh) out_commit: ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: + if (ref_tree) + ocfs2_unlock_refcount_tree(OCFS2_SB(inode->i_sb), ref_tree, 1); + brelse(ref_root_bh); return ret; } @@ -2083,6 +2242,84 @@ cleanup: return ret; } +static int ocfs2_create_xattr_block(handle_t *handle, + struct inode *inode, + struct buffer_head *inode_bh, + struct ocfs2_alloc_context *meta_ac, + struct buffer_head **ret_bh, + int indexed) +{ + int ret; + u16 suballoc_bit_start; + u32 num_got; + u64 first_blkno; + struct ocfs2_dinode *di = (struct ocfs2_dinode *)inode_bh->b_data; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *new_bh = NULL; + struct ocfs2_xattr_block *xblk; + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), inode_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret < 0) { + mlog_errno(ret); + goto end; + } + + ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1, + &suballoc_bit_start, &num_got, + &first_blkno); + if (ret < 0) { + mlog_errno(ret); + goto end; + } + + new_bh = sb_getblk(inode->i_sb, first_blkno); + ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), new_bh); + + ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), + new_bh, + OCFS2_JOURNAL_ACCESS_CREATE); + if (ret < 0) { + mlog_errno(ret); + goto end; + } + + /* Initialize ocfs2_xattr_block */ + xblk = (struct ocfs2_xattr_block *)new_bh->b_data; + memset(xblk, 0, inode->i_sb->s_blocksize); + strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); + xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); + xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); + xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); + xblk->xb_blkno = cpu_to_le64(first_blkno); + + if (indexed) { + struct ocfs2_xattr_tree_root *xr = &xblk->xb_attrs.xb_root; + xr->xt_clusters = cpu_to_le32(1); + xr->xt_last_eb_blk = 0; + xr->xt_list.l_tree_depth = 0; + xr->xt_list.l_count = cpu_to_le16( + ocfs2_xattr_recs_per_xb(inode->i_sb)); + xr->xt_list.l_next_free_rec = cpu_to_le16(1); + xblk->xb_flags = cpu_to_le16(OCFS2_XATTR_INDEXED); + } + + ret = ocfs2_journal_dirty(handle, new_bh); + if (ret < 0) { + mlog_errno(ret); + goto end; + } + di->i_xattr_loc = cpu_to_le64(first_blkno); + ocfs2_journal_dirty(handle, inode_bh); + + *ret_bh = new_bh; + new_bh = NULL; + +end: + brelse(new_bh); + return ret; +} + /* * ocfs2_xattr_block_set() * @@ -2095,63 +2332,24 @@ static int ocfs2_xattr_block_set(struct inode *inode, struct ocfs2_xattr_set_ctxt *ctxt) { struct buffer_head *new_bh = NULL; - struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct ocfs2_dinode *di = (struct ocfs2_dinode *)xs->inode_bh->b_data; handle_t *handle = ctxt->handle; struct ocfs2_xattr_block *xblk = NULL; - u16 suballoc_bit_start; - u32 num_got; - u64 first_blkno; int ret; if 
(!xs->xattr_bh) { - ret = ocfs2_journal_access_di(handle, inode, xs->inode_bh, - OCFS2_JOURNAL_ACCESS_CREATE); - if (ret < 0) { - mlog_errno(ret); - goto end; - } - - ret = ocfs2_claim_metadata(osb, handle, ctxt->meta_ac, 1, - &suballoc_bit_start, &num_got, - &first_blkno); - if (ret < 0) { - mlog_errno(ret); - goto end; - } - - new_bh = sb_getblk(inode->i_sb, first_blkno); - ocfs2_set_new_buffer_uptodate(inode, new_bh); - - ret = ocfs2_journal_access_xb(handle, inode, new_bh, - OCFS2_JOURNAL_ACCESS_CREATE); - if (ret < 0) { + ret = ocfs2_create_xattr_block(handle, inode, xs->inode_bh, + ctxt->meta_ac, &new_bh, 0); + if (ret) { mlog_errno(ret); goto end; } - /* Initialize ocfs2_xattr_block */ xs->xattr_bh = new_bh; - xblk = (struct ocfs2_xattr_block *)new_bh->b_data; - memset(xblk, 0, inode->i_sb->s_blocksize); - strcpy((void *)xblk, OCFS2_XATTR_BLOCK_SIGNATURE); - xblk->xb_suballoc_slot = cpu_to_le16(osb->slot_num); - xblk->xb_suballoc_bit = cpu_to_le16(suballoc_bit_start); - xblk->xb_fs_generation = cpu_to_le32(osb->fs_generation); - xblk->xb_blkno = cpu_to_le64(first_blkno); - + xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; xs->header = &xblk->xb_attrs.xb_header; xs->base = (void *)xs->header; xs->end = (void *)xblk + inode->i_sb->s_blocksize; xs->here = xs->header->xh_entries; - - ret = ocfs2_journal_dirty(handle, new_bh); - if (ret < 0) { - mlog_errno(ret); - goto end; - } - di->i_xattr_loc = cpu_to_le64(first_blkno); - ocfs2_journal_dirty(handle, xs->inode_bh); } else xblk = (struct ocfs2_xattr_block *)xs->xattr_bh->b_data; @@ -2273,7 +2471,7 @@ static int ocfs2_calc_xattr_set_need(struct inode *inode, old_in_xb = 1; if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { - ret = ocfs2_xattr_bucket_get_name_value(inode, + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(xbs->bucket), i, &block_off, &name_offset); @@ -2428,6 +2626,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, struct ocfs2_xattr_search *xis, struct ocfs2_xattr_search *xbs, struct ocfs2_xattr_set_ctxt *ctxt, + int extra_meta, int *credits) { int clusters_add, meta_add, ret; @@ -2444,6 +2643,7 @@ static int ocfs2_init_xattr_set_ctxt(struct inode *inode, return ret; } + meta_add += extra_meta; mlog(0, "Set xattr %s, reserve meta blocks = %d, clusters = %d, " "credits = %d\n", xi->name, meta_add, clusters_add, *credits); @@ -2598,7 +2798,7 @@ static int __ocfs2_xattr_set_handle(struct inode *inode, if (!ret) { /* Update inode ctime. */ - ret = ocfs2_journal_access_di(ctxt->handle, inode, + ret = ocfs2_journal_access_di(ctxt->handle, INODE_CACHE(inode), xis->inode_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { @@ -2711,10 +2911,11 @@ int ocfs2_xattr_set(struct inode *inode, { struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; - int ret, credits; + int ret, credits, ref_meta = 0, ref_credits = 0; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct inode *tl_inode = osb->osb_tl_inode; struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, }; + struct ocfs2_refcount_tree *ref_tree = NULL; struct ocfs2_xattr_info xi = { .name_index = name_index, @@ -2779,6 +2980,17 @@ int ocfs2_xattr_set(struct inode *inode, goto cleanup; } + /* Check whether the value is refcounted and do some prepartion. 
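
/*
 * [Editorial sketch] The flow that follows sizes a single journal handle
 * for both workloads: the ordinary xattr credits plus whatever the
 * refcount preparation reported.  Order matters -- everything is
 * reserved before ocfs2_start_trans(), never inside a running handle:
 *
 *   1. ocfs2_prepare_refcount_xattr()  -> ref_meta, ref_credits
 *   2. ocfs2_init_xattr_set_ctxt(..., ref_meta, &credits)
 *   3. ocfs2_start_trans(osb, credits + ref_credits)
 *
 * A toy summary of the arithmetic (constants invented):
 */
static int toy_xattr_set_credits(int base_credits, int ref_credits)
{
	int credits = base_credits + 1;	/* +1 models the ctime update */

	return credits + ref_credits;	/* what start_trans() receives */
}
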
*/ + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL && + (!xis.not_found || !xbs.not_found)) { + ret = ocfs2_prepare_refcount_xattr(inode, di, &xi, + &xis, &xbs, &ref_tree, + &ref_meta, &ref_credits); + if (ret) { + mlog_errno(ret); + goto cleanup; + } + } mutex_lock(&tl_inode->i_mutex); @@ -2793,7 +3005,7 @@ int ocfs2_xattr_set(struct inode *inode, mutex_unlock(&tl_inode->i_mutex); ret = ocfs2_init_xattr_set_ctxt(inode, di, &xi, &xis, - &xbs, &ctxt, &credits); + &xbs, &ctxt, ref_meta, &credits); if (ret) { mlog_errno(ret); goto cleanup; @@ -2801,7 +3013,7 @@ int ocfs2_xattr_set(struct inode *inode, /* we need to update inode's ctime field, so add credit for it. */ credits += OCFS2_INODE_UPDATE_CREDITS; - ctxt.handle = ocfs2_start_trans(osb, credits); + ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); @@ -2819,8 +3031,16 @@ int ocfs2_xattr_set(struct inode *inode, if (ocfs2_dealloc_has_cluster(&ctxt.dealloc)) ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); + cleanup: + if (ref_tree) + ocfs2_unlock_refcount_tree(osb, ref_tree, 1); up_write(&OCFS2_I(inode)->ip_xattr_sem); + if (!value && !ret) { + ret = ocfs2_try_remove_refcount_tree(inode, di_bh); + if (ret) + mlog_errno(ret); + } ocfs2_inode_unlock(inode, 1); cleanup_nolock: brelse(di_bh); @@ -2849,7 +3069,8 @@ static int ocfs2_xattr_get_rec(struct inode *inode, u64 e_blkno = 0; if (el->l_tree_depth) { - ret = ocfs2_find_leaf(inode, el, name_hash, &eb_bh); + ret = ocfs2_find_leaf(INODE_CACHE(inode), el, name_hash, + &eb_bh); if (ret) { mlog_errno(ret); goto out; @@ -2931,7 +3152,7 @@ static int ocfs2_find_xe_in_bucket(struct inode *inode, if (cmp) continue; - ret = ocfs2_xattr_bucket_get_name_value(inode, + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh, i, &block_off, @@ -3175,7 +3396,7 @@ struct ocfs2_xattr_tree_list { size_t result; }; -static int ocfs2_xattr_bucket_get_name_value(struct inode *inode, +static int ocfs2_xattr_bucket_get_name_value(struct super_block *sb, struct ocfs2_xattr_header *xh, int index, int *block_off, @@ -3188,8 +3409,8 @@ static int ocfs2_xattr_bucket_get_name_value(struct inode *inode, name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset); - *block_off = name_offset >> inode->i_sb->s_blocksize_bits; - *new_offset = name_offset % inode->i_sb->s_blocksize; + *block_off = name_offset >> sb->s_blocksize_bits; + *new_offset = name_offset % sb->s_blocksize; return 0; } @@ -3209,7 +3430,7 @@ static int ocfs2_list_xattr_bucket(struct inode *inode, prefix = ocfs2_xattr_prefix(type); if (prefix) { - ret = ocfs2_xattr_bucket_get_name_value(inode, + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, bucket_xh(bucket), i, &block_off, @@ -3232,22 +3453,19 @@ static int ocfs2_list_xattr_bucket(struct inode *inode, return ret; } -static int ocfs2_xattr_tree_list_index_block(struct inode *inode, - struct ocfs2_xattr_tree_root *xt, - char *buffer, - size_t buffer_size) +static int ocfs2_iterate_xattr_index_block(struct inode *inode, + struct buffer_head *blk_bh, + xattr_tree_rec_func *rec_func, + void *para) { - struct ocfs2_extent_list *el = &xt->xt_list; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)blk_bh->b_data; + struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; int ret = 0; u32 name_hash = UINT_MAX, e_cpos = 0, num_clusters = 0; u64 p_blkno = 0; - struct ocfs2_xattr_tree_list xl = { - .buffer = buffer, - .buffer_size = buffer_size, - .result 
= 0, - }; - if (le16_to_cpu(el->l_next_free_rec) == 0) + if (!el->l_next_free_rec || !rec_func) return 0; while (name_hash > 0) { @@ -3255,16 +3473,15 @@ static int ocfs2_xattr_tree_list_index_block(struct inode *inode, &e_cpos, &num_clusters, el); if (ret) { mlog_errno(ret); - goto out; + break; } - ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters, - ocfs2_list_xattr_bucket, - &xl); + ret = rec_func(inode, blk_bh, p_blkno, e_cpos, + num_clusters, para); if (ret) { if (ret != -ERANGE) mlog_errno(ret); - goto out; + break; } if (e_cpos == 0) @@ -3273,6 +3490,37 @@ static int ocfs2_xattr_tree_list_index_block(struct inode *inode, name_hash = e_cpos - 1; } + return ret; + +} + +static int ocfs2_list_xattr_tree_rec(struct inode *inode, + struct buffer_head *root_bh, + u64 blkno, u32 cpos, u32 len, void *para) +{ + return ocfs2_iterate_xattr_buckets(inode, blkno, len, + ocfs2_list_xattr_bucket, para); +} + +static int ocfs2_xattr_tree_list_index_block(struct inode *inode, + struct buffer_head *blk_bh, + char *buffer, + size_t buffer_size) +{ + int ret; + struct ocfs2_xattr_tree_list xl = { + .buffer = buffer, + .buffer_size = buffer_size, + .result = 0, + }; + + ret = ocfs2_iterate_xattr_index_block(inode, blk_bh, + ocfs2_list_xattr_tree_rec, &xl); + if (ret) { + mlog_errno(ret); + goto out; + } + ret = xl.result; out: return ret; @@ -3426,7 +3674,7 @@ static int ocfs2_xattr_create_index_block(struct inode *inode, */ down_write(&oi->ip_alloc_sem); - ret = ocfs2_journal_access_xb(handle, inode, xb_bh, + ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), xb_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); @@ -4263,9 +4511,9 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, (unsigned long long)OCFS2_I(inode)->ip_blkno, prev_cpos, (unsigned long long)bucket_blkno(first)); - ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); + ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); - ret = ocfs2_journal_access_xb(handle, inode, root_bh, + ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); @@ -4319,7 +4567,7 @@ static int ocfs2_add_new_xattr_cluster(struct inode *inode, mlog(0, "Insert %u clusters at block %llu for xattr at %u\n", num_bits, (unsigned long long)block, v_start); - ret = ocfs2_insert_extent(osb, handle, inode, &et, v_start, block, + ret = ocfs2_insert_extent(handle, &et, v_start, block, num_bits, 0, ctxt->meta_ac); if (ret < 0) { mlog_errno(ret); @@ -4798,10 +5046,13 @@ static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, struct ocfs2_xattr_entry *xe = xs->here; struct ocfs2_xattr_header *xh = bucket_xh(xs->bucket); void *base; + struct ocfs2_xattr_value_buf vb = { + .vb_access = ocfs2_journal_access, + }; BUG_ON(!xs->base || !xe || ocfs2_xattr_is_local(xe)); - ret = ocfs2_xattr_bucket_get_name_value(inode, xh, + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, xh, xe - xh->xh_entries, &block_off, &offset); @@ -4814,8 +5065,10 @@ static int ocfs2_xattr_bucket_set_value_outside(struct inode *inode, xv = (struct ocfs2_xattr_value_root *)(base + offset + OCFS2_XATTR_SIZE(xe->xe_name_len)); + vb.vb_xv = xv; + vb.vb_bh = xs->bucket->bu_bhs[block_off]; ret = __ocfs2_xattr_set_value_outside(inode, handle, - xv, val, value_len); + &vb, val, value_len); if (ret) mlog_errno(ret); out: @@ -4826,7 +5079,8 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, struct buffer_head *root_bh, u64 blkno, u32 cpos, - u32 len) + u32 len, + void 
*para) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -4838,14 +5092,22 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; - ocfs2_init_xattr_tree_extent_tree(&et, inode, root_bh); + ret = ocfs2_iterate_xattr_buckets(inode, blkno, len, + ocfs2_delete_xattr_in_bucket, para); + if (ret) { + mlog_errno(ret); + return ret; + } + + ocfs2_init_xattr_tree_extent_tree(&et, INODE_CACHE(inode), root_bh); ocfs2_init_dealloc_ctxt(&dealloc); mlog(0, "rm xattr extent rec at %u len = %u, start from %llu\n", cpos, len, (unsigned long long)blkno); - ocfs2_remove_xattr_clusters_from_cache(inode, blkno, len); + ocfs2_remove_xattr_clusters_from_cache(INODE_CACHE(inode), blkno, + len); ret = ocfs2_lock_allocators(inode, &et, 0, 1, NULL, &meta_ac); if (ret) { @@ -4870,14 +5132,14 @@ static int ocfs2_rm_xattr_cluster(struct inode *inode, goto out; } - ret = ocfs2_journal_access_xb(handle, inode, root_bh, + ret = ocfs2_journal_access_xb(handle, INODE_CACHE(inode), root_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } - ret = ocfs2_remove_extent(inode, &et, cpos, len, handle, meta_ac, + ret = ocfs2_remove_extent(handle, &et, cpos, len, meta_ac, &dealloc); if (ret) { mlog_errno(ret); @@ -5220,7 +5482,7 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) { - int ret = 0; + int ret = 0, ref_credits; struct ocfs2_xattr_header *xh = bucket_xh(bucket); u16 i; struct ocfs2_xattr_entry *xe; @@ -5228,7 +5490,9 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_set_ctxt ctxt = {NULL, NULL,}; int credits = ocfs2_remove_extent_credits(osb->sb) + ocfs2_blocks_per_xattr_bucket(inode->i_sb); - + struct ocfs2_xattr_value_root *xv; + struct ocfs2_rm_xattr_bucket_para *args = + (struct ocfs2_rm_xattr_bucket_para *)para; ocfs2_init_dealloc_ctxt(&ctxt.dealloc); @@ -5237,7 +5501,16 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, if (ocfs2_xattr_is_local(xe)) continue; - ctxt.handle = ocfs2_start_trans(osb, credits); + ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, + i, &xv, NULL); + + ret = ocfs2_lock_xattr_remove_allocators(inode, xv, + args->ref_ci, + args->ref_root_bh, + &ctxt.meta_ac, + &ref_credits); + + ctxt.handle = ocfs2_start_trans(osb, credits + ref_credits); if (IS_ERR(ctxt.handle)) { ret = PTR_ERR(ctxt.handle); mlog_errno(ret); @@ -5248,57 +5521,1439 @@ static int ocfs2_delete_xattr_in_bucket(struct inode *inode, i, 0, &ctxt); ocfs2_commit_trans(osb, ctxt.handle); + if (ctxt.meta_ac) { + ocfs2_free_alloc_context(ctxt.meta_ac); + ctxt.meta_ac = NULL; + } if (ret) { mlog_errno(ret); break; } } + if (ctxt.meta_ac) + ocfs2_free_alloc_context(ctxt.meta_ac); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &ctxt.dealloc); return ret; } -static int ocfs2_delete_xattr_index_block(struct inode *inode, - struct buffer_head *xb_bh) +/* + * Whenever we modify a xattr value root in the bucket(e.g, CoW + * or change the extent record flag), we need to recalculate + * the metaecc for the whole bucket. So it is done here. + * + * Note: + * We have to give the extra credits for the caller. 
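
/*
 * [Editorial sketch] ocfs2_post_refcount, used just below, is a "call me
 * back after you touch the refcount tree" hook: the caller donates extra
 * journal credits up front plus a callback that re-dirties the bucket so
 * its metaecc is recomputed.  A generic model of the shape; the hunk
 * below sets .credits, .para and .func, though the real callback also
 * receives the inode and the journal handle.
 */
typedef int (toy_post_fn)(void *para);

struct toy_post_refcount {
	int		credits;	/* extra credits the hook consumes */
	toy_post_fn	*func;		/* e.g. journal-dirty the bucket */
	void		*para;		/* e.g. the ocfs2_xattr_bucket */
};

static int toy_change_refcount(int base_credits,
			       struct toy_post_refcount *post)
{
	int credits = base_credits + (post ? post->credits : 0);

	/* ... start a handle sized by `credits`, adjust refcounts ... */
	(void)credits;
	return post ? post->func(post->para) : 0;
}
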
+ */ +static int ocfs2_xattr_bucket_post_refcount(struct inode *inode, + handle_t *handle, + void *para) +{ + int ret; + struct ocfs2_xattr_bucket *bucket = + (struct ocfs2_xattr_bucket *)para; + + ret = ocfs2_xattr_bucket_journal_access(handle, bucket, + OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + return ret; + } + + ocfs2_xattr_bucket_journal_dirty(handle, bucket); + + return 0; +} + +/* + * Special action we need if the xattr value is refcounted. + * + * 1. If the xattr is refcounted, lock the tree. + * 2. CoW the xattr if we are setting the new value and the value + * will be stored outside. + * 3. In other case, decrease_refcount will work for us, so just + * lock the refcount tree, calculate the meta and credits is OK. + * + * We have to do CoW before ocfs2_init_xattr_set_ctxt since + * currently CoW is a completed transaction, while this function + * will also lock the allocators and let us deadlock. So we will + * CoW the whole xattr value. + */ +static int ocfs2_prepare_refcount_xattr(struct inode *inode, + struct ocfs2_dinode *di, + struct ocfs2_xattr_info *xi, + struct ocfs2_xattr_search *xis, + struct ocfs2_xattr_search *xbs, + struct ocfs2_refcount_tree **ref_tree, + int *meta_add, + int *credits) { - struct ocfs2_xattr_block *xb = - (struct ocfs2_xattr_block *)xb_bh->b_data; - struct ocfs2_extent_list *el = &xb->xb_attrs.xb_root.xt_list; int ret = 0; - u32 name_hash = UINT_MAX, e_cpos, num_clusters; - u64 p_blkno; + struct ocfs2_xattr_block *xb; + struct ocfs2_xattr_entry *xe; + char *base; + u32 p_cluster, num_clusters; + unsigned int ext_flags; + int name_offset, name_len; + struct ocfs2_xattr_value_buf vb; + struct ocfs2_xattr_bucket *bucket = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct ocfs2_post_refcount refcount; + struct ocfs2_post_refcount *p = NULL; + struct buffer_head *ref_root_bh = NULL; - if (le16_to_cpu(el->l_next_free_rec) == 0) - return 0; + if (!xis->not_found) { + xe = xis->here; + name_offset = le16_to_cpu(xe->xe_name_offset); + name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + base = xis->base; + vb.vb_bh = xis->inode_bh; + vb.vb_access = ocfs2_journal_access_di; + } else { + int i, block_off = 0; + xb = (struct ocfs2_xattr_block *)xbs->xattr_bh->b_data; + xe = xbs->here; + name_offset = le16_to_cpu(xe->xe_name_offset); + name_len = OCFS2_XATTR_SIZE(xe->xe_name_len); + i = xbs->here - xbs->header->xh_entries; - while (name_hash > 0) { - ret = ocfs2_xattr_get_rec(inode, name_hash, &p_blkno, - &e_cpos, &num_clusters, el); + if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED) { + ret = ocfs2_xattr_bucket_get_name_value(inode->i_sb, + bucket_xh(xbs->bucket), + i, &block_off, + &name_offset); + if (ret) { + mlog_errno(ret); + goto out; + } + base = bucket_block(xbs->bucket, block_off); + vb.vb_bh = xbs->bucket->bu_bhs[block_off]; + vb.vb_access = ocfs2_journal_access; + + if (ocfs2_meta_ecc(osb)) { + /*create parameters for ocfs2_post_refcount. 
*/ + bucket = xbs->bucket; + refcount.credits = bucket->bu_blocks; + refcount.para = bucket; + refcount.func = + ocfs2_xattr_bucket_post_refcount; + p = &refcount; + } + } else { + base = xbs->base; + vb.vb_bh = xbs->xattr_bh; + vb.vb_access = ocfs2_journal_access_xb; + } + } + + if (ocfs2_xattr_is_local(xe)) + goto out; + + vb.vb_xv = (struct ocfs2_xattr_value_root *) + (base + name_offset + name_len); + + ret = ocfs2_xattr_get_clusters(inode, 0, &p_cluster, + &num_clusters, &vb.vb_xv->xr_list, + &ext_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * We just need to check the 1st extent record, since we always + * CoW the whole xattr. So there shouldn't be a xattr with + * some REFCOUNT extent recs after the 1st one. + */ + if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) + goto out; + + ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), + 1, ref_tree, &ref_root_bh); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * If we are deleting the xattr or the new size will be stored inside, + * cool, leave it there, the xattr truncate process will remove them + * for us(it still needs the refcount tree lock and the meta, credits). + * And the worse case is that every cluster truncate will split the + * refcount tree, and make the original extent become 3. So we will need + * 2 * cluster more extent recs at most. + */ + if (!xi->value || xi->value_len <= OCFS2_XATTR_INLINE_SIZE) { + + ret = ocfs2_refcounted_xattr_delete_need(inode, + &(*ref_tree)->rf_ci, + ref_root_bh, vb.vb_xv, + meta_add, credits); + if (ret) + mlog_errno(ret); + goto out; + } + + ret = ocfs2_refcount_cow_xattr(inode, di, &vb, + *ref_tree, ref_root_bh, 0, + le32_to_cpu(vb.vb_xv->xr_clusters), p); + if (ret) + mlog_errno(ret); + +out: + brelse(ref_root_bh); + return ret; +} + +/* + * Add the REFCOUNTED flags for all the extent rec in ocfs2_xattr_value_root. + * The physical clusters will be added to refcount tree. + */ +static int ocfs2_xattr_value_attach_refcount(struct inode *inode, + struct ocfs2_xattr_value_root *xv, + struct ocfs2_extent_tree *value_et, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc, + struct ocfs2_post_refcount *refcount) +{ + int ret = 0; + u32 clusters = le32_to_cpu(xv->xr_clusters); + u32 cpos, p_cluster, num_clusters; + struct ocfs2_extent_list *el = &xv->xr_list; + unsigned int ext_flags; + + cpos = 0; + while (cpos < clusters) { + ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster, + &num_clusters, el, &ext_flags); + + cpos += num_clusters; + if ((ext_flags & OCFS2_EXT_REFCOUNTED)) + continue; + + BUG_ON(!p_cluster); + + ret = ocfs2_add_refcount_flag(inode, value_et, + ref_ci, ref_root_bh, + cpos - num_clusters, + p_cluster, num_clusters, + dealloc, refcount); + if (ret) { + mlog_errno(ret); + break; + } + } + + return ret; +} + +/* + * Given a normal ocfs2_xattr_header, refcount all the entries which + * have value stored outside. + * Used for xattrs stored in inode and ocfs2_xattr_block. 
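
/*
 * [Editorial sketch] The helper below walks xh_entries[], skips values
 * stored "local" (inline with the name) and derives each outside value
 * root by pointer arithmetic: container base + name offset + padded name
 * length.  A standalone model of that lookup; the 4-byte name padding
 * mirrors what OCFS2_XATTR_SIZE() appears to do and is an assumption.
 */
#include <stddef.h>
#include <stdint.h>

#define TOY_XATTR_PAD(len)	(((len) + 3u) & ~3u)

struct toy_xattr_entry {
	uint16_t	xe_name_offset;	/* from the container's base */
	uint8_t		xe_name_len;
	uint8_t		xe_local;	/* 1: value inline, no value root */
};

static void *toy_value_root(void *base, const struct toy_xattr_entry *xe)
{
	if (xe->xe_local)
		return NULL;	/* nothing stored outside to refcount */

	return (char *)base + xe->xe_name_offset +
	       TOY_XATTR_PAD(xe->xe_name_len);
}
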
+ */ +static int ocfs2_xattr_attach_refcount_normal(struct inode *inode, + struct ocfs2_xattr_value_buf *vb, + struct ocfs2_xattr_header *header, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + + struct ocfs2_xattr_entry *xe; + struct ocfs2_xattr_value_root *xv; + struct ocfs2_extent_tree et; + int i, ret = 0; + + for (i = 0; i < le16_to_cpu(header->xh_count); i++) { + xe = &header->xh_entries[i]; + + if (ocfs2_xattr_is_local(xe)) + continue; + + xv = (struct ocfs2_xattr_value_root *)((void *)header + + le16_to_cpu(xe->xe_name_offset) + + OCFS2_XATTR_SIZE(xe->xe_name_len)); + + vb->vb_xv = xv; + ocfs2_init_xattr_value_extent_tree(&et, INODE_CACHE(inode), vb); + + ret = ocfs2_xattr_value_attach_refcount(inode, xv, &et, + ref_ci, ref_root_bh, + dealloc, NULL); + if (ret) { + mlog_errno(ret); + break; + } + } + + return ret; +} + +static int ocfs2_xattr_inline_attach_refcount(struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; + struct ocfs2_xattr_header *header = (struct ocfs2_xattr_header *) + (fe_bh->b_data + inode->i_sb->s_blocksize - + le16_to_cpu(di->i_xattr_inline_size)); + struct ocfs2_xattr_value_buf vb = { + .vb_bh = fe_bh, + .vb_access = ocfs2_journal_access_di, + }; + + return ocfs2_xattr_attach_refcount_normal(inode, &vb, header, + ref_ci, ref_root_bh, dealloc); +} + +struct ocfs2_xattr_tree_value_refcount_para { + struct ocfs2_caching_info *ref_ci; + struct buffer_head *ref_root_bh; + struct ocfs2_cached_dealloc_ctxt *dealloc; +}; + +static int ocfs2_get_xattr_tree_value_root(struct super_block *sb, + struct ocfs2_xattr_bucket *bucket, + int offset, + struct ocfs2_xattr_value_root **xv, + struct buffer_head **bh) +{ + int ret, block_off, name_offset; + struct ocfs2_xattr_header *xh = bucket_xh(bucket); + struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset]; + void *base; + + ret = ocfs2_xattr_bucket_get_name_value(sb, + bucket_xh(bucket), + offset, + &block_off, + &name_offset); + if (ret) { + mlog_errno(ret); + goto out; + } + + base = bucket_block(bucket, block_off); + + *xv = (struct ocfs2_xattr_value_root *)(base + name_offset + + OCFS2_XATTR_SIZE(xe->xe_name_len)); + + if (bh) + *bh = bucket->bu_bhs[block_off]; +out: + return ret; +} + +/* + * For a given xattr bucket, refcount all the entries which + * have value stored outside. + */ +static int ocfs2_xattr_bucket_value_refcount(struct inode *inode, + struct ocfs2_xattr_bucket *bucket, + void *para) +{ + int i, ret = 0; + struct ocfs2_extent_tree et; + struct ocfs2_xattr_tree_value_refcount_para *ref = + (struct ocfs2_xattr_tree_value_refcount_para *)para; + struct ocfs2_xattr_header *xh = + (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data; + struct ocfs2_xattr_entry *xe; + struct ocfs2_xattr_value_buf vb = { + .vb_access = ocfs2_journal_access, + }; + struct ocfs2_post_refcount refcount = { + .credits = bucket->bu_blocks, + .para = bucket, + .func = ocfs2_xattr_bucket_post_refcount, + }; + struct ocfs2_post_refcount *p = NULL; + + /* We only need post_refcount if we support metaecc. 
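
/*
 * [Editorial sketch] ocfs2_get_xattr_tree_value_root() above resolves an
 * entry inside a multi-block bucket in two steps, the same shift/modulo
 * split ocfs2_xattr_bucket_get_name_value() performs: which block of the
 * bucket holds the name, and where inside that block it starts.
 */
static void toy_locate(unsigned int name_offset,
		       unsigned int blocksize_bits,
		       int *block_off, int *in_block)
{
	*block_off = name_offset >> blocksize_bits;
	*in_block = name_offset & ((1u << blocksize_bits) - 1);
}

/*
 * Example: name_offset = 5000 with 4K blocks (blocksize_bits = 12) lands
 * in bucket block 1 at byte 904, since 5000 = 4096 + 904.
 */
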
*/ + if (ocfs2_meta_ecc(OCFS2_SB(inode->i_sb))) + p = &refcount; + + mlog(0, "refcount bucket %llu, count = %u\n", + (unsigned long long)bucket_blkno(bucket), + le16_to_cpu(xh->xh_count)); + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { + xe = &xh->xh_entries[i]; + + if (ocfs2_xattr_is_local(xe)) + continue; + + ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i, + &vb.vb_xv, &vb.vb_bh); + if (ret) { + mlog_errno(ret); + break; + } + + ocfs2_init_xattr_value_extent_tree(&et, + INODE_CACHE(inode), &vb); + + ret = ocfs2_xattr_value_attach_refcount(inode, vb.vb_xv, + &et, ref->ref_ci, + ref->ref_root_bh, + ref->dealloc, p); + if (ret) { + mlog_errno(ret); + break; + } + } + + return ret; + +} + +static int ocfs2_refcount_xattr_tree_rec(struct inode *inode, + struct buffer_head *root_bh, + u64 blkno, u32 cpos, u32 len, void *para) +{ + return ocfs2_iterate_xattr_buckets(inode, blkno, len, + ocfs2_xattr_bucket_value_refcount, + para); +} + +static int ocfs2_xattr_block_attach_refcount(struct inode *inode, + struct buffer_head *blk_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret = 0; + struct ocfs2_xattr_block *xb = + (struct ocfs2_xattr_block *)blk_bh->b_data; + + if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)) { + struct ocfs2_xattr_header *header = &xb->xb_attrs.xb_header; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = blk_bh, + .vb_access = ocfs2_journal_access_xb, + }; + + ret = ocfs2_xattr_attach_refcount_normal(inode, &vb, header, + ref_ci, ref_root_bh, + dealloc); + } else { + struct ocfs2_xattr_tree_value_refcount_para para = { + .ref_ci = ref_ci, + .ref_root_bh = ref_root_bh, + .dealloc = dealloc, + }; + + ret = ocfs2_iterate_xattr_index_block(inode, blk_bh, + ocfs2_refcount_xattr_tree_rec, + ¶); + } + + return ret; +} + +int ocfs2_xattr_attach_refcount_tree(struct inode *inode, + struct buffer_head *fe_bh, + struct ocfs2_caching_info *ref_ci, + struct buffer_head *ref_root_bh, + struct ocfs2_cached_dealloc_ctxt *dealloc) +{ + int ret = 0; + struct ocfs2_inode_info *oi = OCFS2_I(inode); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data; + struct buffer_head *blk_bh = NULL; + + if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) { + ret = ocfs2_xattr_inline_attach_refcount(inode, fe_bh, + ref_ci, ref_root_bh, + dealloc); if (ret) { mlog_errno(ret); goto out; } + } + + if (!di->i_xattr_loc) + goto out; + + ret = ocfs2_read_xattr_block(inode, le64_to_cpu(di->i_xattr_loc), + &blk_bh); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + + ret = ocfs2_xattr_block_attach_refcount(inode, blk_bh, ref_ci, + ref_root_bh, dealloc); + if (ret) + mlog_errno(ret); + + brelse(blk_bh); +out: + + return ret; +} + +typedef int (should_xattr_reflinked)(struct ocfs2_xattr_entry *xe); +/* + * Store the information we need in xattr reflink. + * old_bh and new_bh are inode bh for the old and new inode. + */ +struct ocfs2_xattr_reflink { + struct inode *old_inode; + struct inode *new_inode; + struct buffer_head *old_bh; + struct buffer_head *new_bh; + struct ocfs2_caching_info *ref_ci; + struct buffer_head *ref_root_bh; + struct ocfs2_cached_dealloc_ctxt *dealloc; + should_xattr_reflinked *xattr_reflinked; +}; + +/* + * Given a xattr header and xe offset, + * return the proper xv and the corresponding bh. + * xattr in inode, block and xattr tree have different implementaions. 
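
/*
 * [Editorial sketch] The typedef that follows is a strategy hook: inode,
 * block and bucket containers each know how to locate a value root, so
 * ocfs2_value_metas_in_xattr_header() takes the lookup as a function
 * pointer and stays container-agnostic.  A minimal model of the shape:
 */
typedef int (toy_get_root_fn)(void *container, int offset, void **root);

static int toy_count_outside_values(void *container, int count,
				    toy_get_root_fn *get_root)
{
	void *root;
	int i, n = 0;

	for (i = 0; i < count; i++) {
		if (get_root(container, i, &root))
			return -1;	/* propagate lookup failure */
		if (root)		/* NULL models a local value */
			n++;
	}
	return n;
}
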
+ */ +typedef int (get_xattr_value_root)(struct super_block *sb, + struct buffer_head *bh, + struct ocfs2_xattr_header *xh, + int offset, + struct ocfs2_xattr_value_root **xv, + struct buffer_head **ret_bh, + void *para); + +/* + * Calculate all the xattr value root metadata stored in this xattr header and + * credits we need if we create them from the scratch. + * We use get_xattr_value_root so that all types of xattr container can use it. + */ +static int ocfs2_value_metas_in_xattr_header(struct super_block *sb, + struct buffer_head *bh, + struct ocfs2_xattr_header *xh, + int *metas, int *credits, + int *num_recs, + get_xattr_value_root *func, + void *para) +{ + int i, ret = 0; + struct ocfs2_xattr_value_root *xv; + struct ocfs2_xattr_entry *xe; + + for (i = 0; i < le16_to_cpu(xh->xh_count); i++) { + xe = &xh->xh_entries[i]; + if (ocfs2_xattr_is_local(xe)) + continue; + + ret = func(sb, bh, xh, i, &xv, NULL, para); + if (ret) { + mlog_errno(ret); + break; + } + + *metas += le16_to_cpu(xv->xr_list.l_tree_depth) * + le16_to_cpu(xv->xr_list.l_next_free_rec); + + *credits += ocfs2_calc_extend_credits(sb, + &def_xv.xv.xr_list, + le32_to_cpu(xv->xr_clusters)); + + /* + * If the value is a tree with depth > 1, We don't go deep + * to the extent block, so just calculate a maximum record num. + */ + if (!xv->xr_list.l_tree_depth) + *num_recs += xv->xr_list.l_next_free_rec; + else + *num_recs += ocfs2_clusters_for_bytes(sb, + XATTR_SIZE_MAX); + } + + return ret; +} + +/* Used by xattr inode and block to return the right xv and buffer_head. */ +static int ocfs2_get_xattr_value_root(struct super_block *sb, + struct buffer_head *bh, + struct ocfs2_xattr_header *xh, + int offset, + struct ocfs2_xattr_value_root **xv, + struct buffer_head **ret_bh, + void *para) +{ + struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset]; + + *xv = (struct ocfs2_xattr_value_root *)((void *)xh + + le16_to_cpu(xe->xe_name_offset) + + OCFS2_XATTR_SIZE(xe->xe_name_len)); + + if (ret_bh) + *ret_bh = bh; + + return 0; +} + +/* + * Lock the meta_ac and caculate how much credits we need for reflink xattrs. + * It is only used for inline xattr and xattr block. + */ +static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb, + struct ocfs2_xattr_header *xh, + struct buffer_head *ref_root_bh, + int *credits, + struct ocfs2_alloc_context **meta_ac) +{ + int ret, meta_add = 0, num_recs = 0; + struct ocfs2_refcount_block *rb = + (struct ocfs2_refcount_block *)ref_root_bh->b_data; + + *credits = 0; + + ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh, + &meta_add, credits, &num_recs, + ocfs2_get_xattr_value_root, + NULL); + if (ret) { + mlog_errno(ret); + goto out; + } + + /* + * We need to add/modify num_recs in refcount tree, so just calculate + * an approximate number we need for refcount tree change. + * Sometimes we need to split the tree, and after split, half recs + * will be moved to the new block, and a new block can only provide + * half number of recs. So we multiple new blocks by 2. 
+ */ + num_recs = num_recs / ocfs2_refcount_recs_per_rb(osb->sb) * 2; + meta_add += num_recs; + *credits += num_recs + num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS; + if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) + *credits += le16_to_cpu(rb->rf_list.l_tree_depth) * + le16_to_cpu(rb->rf_list.l_next_free_rec) + 1; + else + *credits += 1; + + ret = ocfs2_reserve_new_metadata_blocks(osb, meta_add, meta_ac); + if (ret) + mlog_errno(ret); + +out: + return ret; +} - ret = ocfs2_iterate_xattr_buckets(inode, p_blkno, num_clusters, - ocfs2_delete_xattr_in_bucket, - NULL); +/* + * Given a xattr header, reflink all the xattrs in this container. + * It can be used for inode, block and bucket. + * + * NOTE: + * Before we call this function, the caller has memcpy the xattr in + * old_xh to the new_xh. + * + * If args.xattr_reflinked is set, call it to decide whether the xe should + * be reflinked or not. If not, remove it from the new xattr header. + */ +static int ocfs2_reflink_xattr_header(handle_t *handle, + struct ocfs2_xattr_reflink *args, + struct buffer_head *old_bh, + struct ocfs2_xattr_header *xh, + struct buffer_head *new_bh, + struct ocfs2_xattr_header *new_xh, + struct ocfs2_xattr_value_buf *vb, + struct ocfs2_alloc_context *meta_ac, + get_xattr_value_root *func, + void *para) +{ + int ret = 0, i, j; + struct super_block *sb = args->old_inode->i_sb; + struct buffer_head *value_bh; + struct ocfs2_xattr_entry *xe, *last; + struct ocfs2_xattr_value_root *xv, *new_xv; + struct ocfs2_extent_tree data_et; + u32 clusters, cpos, p_cluster, num_clusters; + unsigned int ext_flags = 0; + + mlog(0, "reflink xattr in container %llu, count = %u\n", + (unsigned long long)old_bh->b_blocknr, le16_to_cpu(xh->xh_count)); + + last = &new_xh->xh_entries[le16_to_cpu(new_xh->xh_count)]; + for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) { + xe = &xh->xh_entries[i]; + + if (args->xattr_reflinked && !args->xattr_reflinked(xe)) { + xe = &new_xh->xh_entries[j]; + + le16_add_cpu(&new_xh->xh_count, -1); + if (new_xh->xh_count) { + memmove(xe, xe + 1, + (void *)last - (void *)xe); + memset(last, 0, + sizeof(struct ocfs2_xattr_entry)); + } + + /* + * We don't want j to increase in the next round since + * it is already moved ahead. + */ + j--; + continue; + } + + if (ocfs2_xattr_is_local(xe)) + continue; + + ret = func(sb, old_bh, xh, i, &xv, NULL, para); + if (ret) { + mlog_errno(ret); + break; + } + + ret = func(sb, new_bh, new_xh, j, &new_xv, &value_bh, para); + if (ret) { + mlog_errno(ret); + break; + } + + /* + * For the xattr which has l_tree_depth = 0, all the extent + * recs have already be copied to the new xh with the + * propriate OCFS2_EXT_REFCOUNTED flag we just need to + * increase the refount count int the refcount tree. + * + * For the xattr which has l_tree_depth > 0, we need + * to initialize it to the empty default value root, + * and then insert the extents one by one. 
+ */ + if (xv->xr_list.l_tree_depth) { + memcpy(new_xv, &def_xv, sizeof(def_xv)); + vb->vb_xv = new_xv; + vb->vb_bh = value_bh; + ocfs2_init_xattr_value_extent_tree(&data_et, + INODE_CACHE(args->new_inode), vb); + } + + clusters = le32_to_cpu(xv->xr_clusters); + cpos = 0; + while (cpos < clusters) { + ret = ocfs2_xattr_get_clusters(args->old_inode, + cpos, + &p_cluster, + &num_clusters, + &xv->xr_list, + &ext_flags); + if (ret) { + mlog_errno(ret); + goto out; + } + + BUG_ON(!p_cluster); + + if (xv->xr_list.l_tree_depth) { + ret = ocfs2_insert_extent(handle, + &data_et, cpos, + ocfs2_clusters_to_blocks( + args->old_inode->i_sb, + p_cluster), + num_clusters, ext_flags, + meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + } + + ret = ocfs2_increase_refcount(handle, args->ref_ci, + args->ref_root_bh, + p_cluster, num_clusters, + meta_ac, args->dealloc); + if (ret) { + mlog_errno(ret); + goto out; + } + + cpos += num_clusters; + } + } + +out: + return ret; +} + +static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) +{ + int ret = 0, credits = 0; + handle_t *handle; + struct ocfs2_super *osb = OCFS2_SB(args->old_inode->i_sb); + struct ocfs2_dinode *di = (struct ocfs2_dinode *)args->old_bh->b_data; + int inline_size = le16_to_cpu(di->i_xattr_inline_size); + int header_off = osb->sb->s_blocksize - inline_size; + struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *) + (args->old_bh->b_data + header_off); + struct ocfs2_xattr_header *new_xh = (struct ocfs2_xattr_header *) + (args->new_bh->b_data + header_off); + struct ocfs2_alloc_context *meta_ac = NULL; + struct ocfs2_inode_info *new_oi; + struct ocfs2_dinode *new_di; + struct ocfs2_xattr_value_buf vb = { + .vb_bh = args->new_bh, + .vb_access = ocfs2_journal_access_di, + }; + + ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh, + &credits, &meta_ac); + if (ret) { + mlog_errno(ret); + goto out; + } + + handle = ocfs2_start_trans(osb, credits); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + mlog_errno(ret); + goto out; + } + + ret = ocfs2_journal_access_di(handle, INODE_CACHE(args->new_inode), + args->new_bh, OCFS2_JOURNAL_ACCESS_WRITE); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + memcpy(args->new_bh->b_data + header_off, + args->old_bh->b_data + header_off, inline_size); + + new_di = (struct ocfs2_dinode *)args->new_bh->b_data; + new_di->i_xattr_inline_size = cpu_to_le16(inline_size); + + ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh, + args->new_bh, new_xh, &vb, meta_ac, + ocfs2_get_xattr_value_root, NULL); + if (ret) { + mlog_errno(ret); + goto out_commit; + } + + new_oi = OCFS2_I(args->new_inode); + spin_lock(&new_oi->ip_lock); + new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; + new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); + spin_unlock(&new_oi->ip_lock); + + ocfs2_journal_dirty(handle, args->new_bh); + +out_commit: + ocfs2_commit_trans(osb, handle); + +out: + if (meta_ac) + ocfs2_free_alloc_context(meta_ac); + return ret; +} + +static int ocfs2_create_empty_xattr_block(struct inode *inode, + struct buffer_head *fe_bh, + struct buffer_head **ret_bh, + int indexed) +{ + int ret; + handle_t *handle; + struct ocfs2_alloc_context *meta_ac; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + + ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + handle = ocfs2_start_trans(osb, OCFS2_XATTR_BLOCK_CREATE_CREDITS); + if (IS_ERR(handle)) { + ret = 
PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	mlog(0, "create new xattr block for inode %llu, index = %d\n",
+	     (unsigned long long)fe_bh->b_blocknr, indexed);
+	ret = ocfs2_create_xattr_block(handle, inode, fe_bh,
+				       meta_ac, ret_bh, indexed);
+	if (ret)
+		mlog_errno(ret);
+
+	ocfs2_commit_trans(osb, handle);
+out:
+	ocfs2_free_alloc_context(meta_ac);
+	return ret;
+}
+
+static int ocfs2_reflink_xattr_block(struct ocfs2_xattr_reflink *args,
+				     struct buffer_head *blk_bh,
+				     struct buffer_head *new_blk_bh)
+{
+	int ret = 0, credits = 0;
+	handle_t *handle;
+	struct ocfs2_inode_info *new_oi = OCFS2_I(args->new_inode);
+	struct ocfs2_dinode *new_di;
+	struct ocfs2_super *osb = OCFS2_SB(args->new_inode->i_sb);
+	int header_off = offsetof(struct ocfs2_xattr_block, xb_attrs.xb_header);
+	struct ocfs2_xattr_block *xb =
+			(struct ocfs2_xattr_block *)blk_bh->b_data;
+	struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
+	struct ocfs2_xattr_block *new_xb =
+			(struct ocfs2_xattr_block *)new_blk_bh->b_data;
+	struct ocfs2_xattr_header *new_xh = &new_xb->xb_attrs.xb_header;
+	struct ocfs2_alloc_context *meta_ac;
+	struct ocfs2_xattr_value_buf vb = {
+		.vb_bh = new_blk_bh,
+		.vb_access = ocfs2_journal_access_xb,
+	};
+
+	ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
+						  &credits, &meta_ac);
+	if (ret) {
+		mlog_errno(ret);
+		return ret;
+	}
+
+	/* One more credit in case we need to add xattr flags in the new inode. */
+	handle = ocfs2_start_trans(osb, credits + 1);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
+		ret = ocfs2_journal_access_di(handle,
+					      INODE_CACHE(args->new_inode),
+					      args->new_bh,
+					      OCFS2_JOURNAL_ACCESS_WRITE);
+		if (ret) {
+			mlog_errno(ret);
+			goto out_commit;
+		}
+	}
+
+	ret = ocfs2_journal_access_xb(handle, INODE_CACHE(args->new_inode),
+				      new_blk_bh, OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	memcpy(new_blk_bh->b_data + header_off, blk_bh->b_data + header_off,
+	       osb->sb->s_blocksize - header_off);
+
+	ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
+					 new_blk_bh, new_xh, &vb, meta_ac,
+					 ocfs2_get_xattr_value_root, NULL);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	ocfs2_journal_dirty(handle, new_blk_bh);
+
+	if (!(new_oi->ip_dyn_features & OCFS2_HAS_XATTR_FL)) {
+		new_di = (struct ocfs2_dinode *)args->new_bh->b_data;
+		spin_lock(&new_oi->ip_lock);
+		new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL;
+		new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features);
+		spin_unlock(&new_oi->ip_lock);
+
+		ocfs2_journal_dirty(handle, args->new_bh);
+	}
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+
+out:
+	ocfs2_free_alloc_context(meta_ac);
+	return ret;
+}
+
+struct ocfs2_reflink_xattr_tree_args {
+	struct ocfs2_xattr_reflink *reflink;
+	struct buffer_head *old_blk_bh;
+	struct buffer_head *new_blk_bh;
+	struct ocfs2_xattr_bucket *old_bucket;
+	struct ocfs2_xattr_bucket *new_bucket;
+};
+
+/*
+ * NOTE:
+ * We have to handle the case that both the old bucket and the new bucket
+ * will call this function to get the right ret_bh.
+ * So the caller must give us the right bh.
+ */
+static int ocfs2_get_reflink_xattr_value_root(struct super_block *sb,
+					struct buffer_head *bh,
+					struct ocfs2_xattr_header *xh,
+					int offset,
+					struct ocfs2_xattr_value_root **xv,
+					struct buffer_head **ret_bh,
+					void *para)
+{
+	struct ocfs2_reflink_xattr_tree_args *args =
+			(struct ocfs2_reflink_xattr_tree_args *)para;
+	struct ocfs2_xattr_bucket *bucket;
+
+	if (bh == args->old_bucket->bu_bhs[0])
+		bucket = args->old_bucket;
+	else
+		bucket = args->new_bucket;
+
+	return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
+					       xv, ret_bh);
+}
+
+struct ocfs2_value_tree_metas {
+	int num_metas;
+	int credits;
+	int num_recs;
+};
+
+static int ocfs2_value_tree_metas_in_bucket(struct super_block *sb,
+					struct buffer_head *bh,
+					struct ocfs2_xattr_header *xh,
+					int offset,
+					struct ocfs2_xattr_value_root **xv,
+					struct buffer_head **ret_bh,
+					void *para)
+{
+	struct ocfs2_xattr_bucket *bucket =
+			(struct ocfs2_xattr_bucket *)para;
+
+	return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
+					       xv, ret_bh);
+}
+
+static int ocfs2_calc_value_tree_metas(struct inode *inode,
+				       struct ocfs2_xattr_bucket *bucket,
+				       void *para)
+{
+	struct ocfs2_value_tree_metas *metas =
+			(struct ocfs2_value_tree_metas *)para;
+	struct ocfs2_xattr_header *xh =
+			(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
+
+	/* Add the credits for this bucket first. */
+	metas->credits += bucket->bu_blocks;
+	return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
+					xh, &metas->num_metas,
+					&metas->credits, &metas->num_recs,
+					ocfs2_value_tree_metas_in_bucket,
+					bucket);
+}
+
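Aside (illustration only, not part of the patch): ocfs2_get_xattr_value_root(), ocfs2_value_tree_metas_in_bucket() and ocfs2_get_reflink_xattr_value_root() all match the get_xattr_value_root typedef, which is what lets one iterator serve inline, block, and bucket containers alike. A toy standalone sketch of that callback pattern, with invented names:

#include <stdio.h>

struct toy_entry { int local; int value; };

/* same style of function typedef as get_xattr_value_root above */
typedef int (toy_get_value)(struct toy_entry *e, int *out, void *para);

static int toy_get_direct(struct toy_entry *e, int *out, void *para)
{
	*out = e->value;	/* one container's way of finding the value */
	return 0;
}

/* container-agnostic iterator, like ocfs2_value_metas_in_xattr_header() */
static int toy_iterate(struct toy_entry *ents, int n,
		       toy_get_value *func, void *para)
{
	int i, v, sum = 0;

	for (i = 0; i < n; i++) {
		if (ents[i].local)
			continue;	/* local values carry no remote metadata */
		if (func(&ents[i], &v, para))
			return -1;
		sum += v;
	}
	return sum;
}

int main(void)
{
	struct toy_entry ents[] = { {1, 3}, {0, 7}, {0, 5} };

	printf("%d\n", toy_iterate(ents, 3, toy_get_direct, NULL));	/* 12 */
	return 0;
}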
+/*
+ * Given an xattr extent rec starting from blkno and having len clusters,
+ * iterate over all the buckets, calculate how much metadata we need for
+ * reflinking all the ocfs2_xattr_value_roots, and lock the allocators
+ * accordingly.
+ */
+static int ocfs2_lock_reflink_xattr_rec_allocators(
+				struct ocfs2_reflink_xattr_tree_args *args,
+				struct ocfs2_extent_tree *xt_et,
+				u64 blkno, u32 len, int *credits,
+				struct ocfs2_alloc_context **meta_ac,
+				struct ocfs2_alloc_context **data_ac)
+{
+	int ret, num_free_extents;
+	struct ocfs2_value_tree_metas metas;
+	struct ocfs2_super *osb = OCFS2_SB(args->reflink->old_inode->i_sb);
+	struct ocfs2_refcount_block *rb;
+
+	memset(&metas, 0, sizeof(metas));
+
+	ret = ocfs2_iterate_xattr_buckets(args->reflink->old_inode, blkno, len,
+					  ocfs2_calc_value_tree_metas, &metas);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	*credits = metas.credits;
+
+	/*
+	 * Calculate the credits we need for the refcount tree change.
+	 *
+	 * We need to add/modify num_recs in the refcount tree, so just
+	 * calculate an approximate number we need for the refcount tree
+	 * change.  Sometimes we need to split the tree, and after the split,
+	 * half of the recs will be moved to the new block, and a new block
+	 * can only provide half the number of recs.  So we multiply the new
+	 * blocks by 2.  In the end, we have to add the credits for modifying
+	 * the already existing refcount block.
+	 */
+	rb = (struct ocfs2_refcount_block *)args->reflink->ref_root_bh->b_data;
+	metas.num_recs =
+		(metas.num_recs + ocfs2_refcount_recs_per_rb(osb->sb) - 1) /
+		 ocfs2_refcount_recs_per_rb(osb->sb) * 2;
+	metas.num_metas += metas.num_recs;
+	*credits += metas.num_recs +
+		    metas.num_recs * OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
+	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
+		*credits += le16_to_cpu(rb->rf_list.l_tree_depth) *
+			    le16_to_cpu(rb->rf_list.l_next_free_rec) + 1;
+	else
+		*credits += 1;
+
+	/* count in the xattr tree change. */
+	num_free_extents = ocfs2_num_free_extents(osb, xt_et);
+	if (num_free_extents < 0) {
+		ret = num_free_extents;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (num_free_extents < len)
+		metas.num_metas += ocfs2_extend_meta_needed(xt_et->et_root_el);
+
+	*credits += ocfs2_calc_extend_credits(osb->sb,
+					      xt_et->et_root_el, len);
+
+	if (metas.num_metas) {
+		ret = ocfs2_reserve_new_metadata_blocks(osb, metas.num_metas,
+							meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
+	}
-	ret = ocfs2_rm_xattr_cluster(inode, xb_bh,
-				     p_blkno, e_cpos, num_clusters);
+	if (len) {
+		ret = ocfs2_reserve_clusters(osb, len, data_ac);
+		if (ret)
+			mlog_errno(ret);
+	}
+out:
+	if (ret) {
+		if (*meta_ac) {
+			ocfs2_free_alloc_context(*meta_ac);
+			*meta_ac = NULL;
+		}
+	}
+
+	return ret;
+}
+
+static int ocfs2_reflink_xattr_buckets(handle_t *handle,
+				u64 blkno, u64 new_blkno, u32 clusters,
+				struct ocfs2_alloc_context *meta_ac,
+				struct ocfs2_alloc_context *data_ac,
+				struct ocfs2_reflink_xattr_tree_args *args)
+{
+	int i, j, ret = 0;
+	struct super_block *sb = args->reflink->old_inode->i_sb;
+	u32 bpc = ocfs2_xattr_buckets_per_cluster(OCFS2_SB(sb));
+	u32 num_buckets = clusters * bpc;
+	int bpb = args->old_bucket->bu_blocks;
+	struct ocfs2_xattr_value_buf vb = {
+		.vb_access = ocfs2_journal_access,
+	};
+
+	for (i = 0; i < num_buckets; i++, blkno += bpb, new_blkno += bpb) {
+		ret = ocfs2_read_xattr_bucket(args->old_bucket, blkno);
		if (ret) {
			mlog_errno(ret);
			break;
		}
-		if (e_cpos == 0)
+		ret = ocfs2_init_xattr_bucket(args->new_bucket, new_blkno);
+		if (ret) {
+			mlog_errno(ret);
			break;
+		}
-		name_hash = e_cpos - 1;
+		/*
+		 * The real bucket num in this series of blocks is stored
+		 * in the 1st bucket.
+		 */
+		if (i == 0)
+			num_buckets = le16_to_cpu(
+				bucket_xh(args->old_bucket)->xh_num_buckets);
+
+		ret = ocfs2_xattr_bucket_journal_access(handle,
+						args->new_bucket,
+						OCFS2_JOURNAL_ACCESS_CREATE);
+		if (ret) {
+			mlog_errno(ret);
+			break;
+		}
+
+		for (j = 0; j < bpb; j++)
+			memcpy(bucket_block(args->new_bucket, j),
+			       bucket_block(args->old_bucket, j),
+			       sb->s_blocksize);
+
+		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+
+		ret = ocfs2_reflink_xattr_header(handle, args->reflink,
+					args->old_bucket->bu_bhs[0],
+					bucket_xh(args->old_bucket),
+					args->new_bucket->bu_bhs[0],
+					bucket_xh(args->new_bucket),
+					&vb, meta_ac,
+					ocfs2_get_reflink_xattr_value_root,
+					args);
+		if (ret) {
+			mlog_errno(ret);
+			break;
+		}
+
+		/*
+		 * Re-access and dirty the bucket to calculate the metaecc.
+		 * We may have extended the transaction in
+		 * ocfs2_reflink_xattr_header, which drops the journal access
+		 * we already had on the block.
+		 */
+		ret = ocfs2_xattr_bucket_journal_access(handle,
+						args->new_bucket,
+						OCFS2_JOURNAL_ACCESS_WRITE);
+		if (ret) {
+			mlog_errno(ret);
+			break;
+		}
+
+		ocfs2_xattr_bucket_journal_dirty(handle, args->new_bucket);
+		ocfs2_xattr_bucket_relse(args->old_bucket);
+		ocfs2_xattr_bucket_relse(args->new_bucket);
+	}
+
+	ocfs2_xattr_bucket_relse(args->old_bucket);
+	ocfs2_xattr_bucket_relse(args->new_bucket);
+	return ret;
+}
+/*
+ * Create the same xattr extent record in the new inode's xattr tree.
+ */
+static int ocfs2_reflink_xattr_rec(struct inode *inode,
+				   struct buffer_head *root_bh,
+				   u64 blkno,
+				   u32 cpos,
+				   u32 len,
+				   void *para)
+{
+	int ret, credits = 0;
+	u32 p_cluster, num_clusters;
+	u64 new_blkno;
+	handle_t *handle;
+	struct ocfs2_reflink_xattr_tree_args *args =
+			(struct ocfs2_reflink_xattr_tree_args *)para;
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_alloc_context *meta_ac = NULL;
+	struct ocfs2_alloc_context *data_ac = NULL;
+	struct ocfs2_extent_tree et;
+
+	ocfs2_init_xattr_tree_extent_tree(&et,
+					  INODE_CACHE(args->reflink->new_inode),
+					  args->new_blk_bh);
+
+	ret = ocfs2_lock_reflink_xattr_rec_allocators(args, &et, blkno,
+						      len, &credits,
+						      &meta_ac, &data_ac);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	handle = ocfs2_start_trans(osb, credits);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_claim_clusters(osb, handle, data_ac,
+				   len, &p_cluster, &num_clusters);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	new_blkno = ocfs2_clusters_to_blocks(osb->sb, p_cluster);
+
+	mlog(0, "reflink xattr buckets %llu to %llu, len %u\n",
+	     (unsigned long long)blkno, (unsigned long long)new_blkno, len);
+	ret = ocfs2_reflink_xattr_buckets(handle, blkno, new_blkno, len,
+					  meta_ac, data_ac, args);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_commit;
+	}
+
+	mlog(0, "insert new xattr extent rec start %llu len %u to %u\n",
+	     (unsigned long long)new_blkno, len, cpos);
+	ret = ocfs2_insert_extent(handle, &et, cpos, new_blkno,
+				  len, 0, meta_ac);
+	if (ret)
+		mlog_errno(ret);
+
+out_commit:
+	ocfs2_commit_trans(osb, handle);
+
+out:
+	if (meta_ac)
+		ocfs2_free_alloc_context(meta_ac);
+	if (data_ac)
+		ocfs2_free_alloc_context(data_ac);
+	return ret;
+}
+
+/*
+ * Create reflinked xattr buckets.
+ * We will add buckets one by one, and refcount all the xattrs in each bucket
+ * if they are stored outside.
+ */
+static int ocfs2_reflink_xattr_tree(struct ocfs2_xattr_reflink *args,
+				    struct buffer_head *blk_bh,
+				    struct buffer_head *new_blk_bh)
+{
+	int ret;
+	struct ocfs2_reflink_xattr_tree_args para;
+
+	memset(&para, 0, sizeof(para));
+	para.reflink = args;
+	para.old_blk_bh = blk_bh;
+	para.new_blk_bh = new_blk_bh;
+
+	para.old_bucket = ocfs2_xattr_bucket_new(args->old_inode);
+	if (!para.old_bucket) {
+		mlog_errno(-ENOMEM);
+		return -ENOMEM;
+	}
+
+	para.new_bucket = ocfs2_xattr_bucket_new(args->new_inode);
+	if (!para.new_bucket) {
+		ret = -ENOMEM;
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ret = ocfs2_iterate_xattr_index_block(args->old_inode, blk_bh,
+					      ocfs2_reflink_xattr_rec,
+					      &para);
+	if (ret)
+		mlog_errno(ret);
+
+out:
+	ocfs2_xattr_bucket_free(para.old_bucket);
+	ocfs2_xattr_bucket_free(para.new_bucket);
+	return ret;
+}
+
+static int ocfs2_reflink_xattr_in_block(struct ocfs2_xattr_reflink *args,
+					struct buffer_head *blk_bh)
+{
+	int ret, indexed = 0;
+	struct buffer_head *new_blk_bh = NULL;
+	struct ocfs2_xattr_block *xb =
+			(struct ocfs2_xattr_block *)blk_bh->b_data;
+
+	if (le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED)
+		indexed = 1;
+
+	ret = ocfs2_create_empty_xattr_block(args->new_inode, args->new_bh,
+					     &new_blk_bh, indexed);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (!(le16_to_cpu(xb->xb_flags) & OCFS2_XATTR_INDEXED))
+		ret = ocfs2_reflink_xattr_block(args, blk_bh, new_blk_bh);
+	else
+		ret = ocfs2_reflink_xattr_tree(args, blk_bh, new_blk_bh);
+	if (ret)
+		mlog_errno(ret);
+
+out:
+	brelse(new_blk_bh);
+	return ret;
+}
+
+static int ocfs2_reflink_xattr_no_security(struct ocfs2_xattr_entry *xe)
+{
+	int type = ocfs2_xattr_get_type(xe);
+
+	return type != OCFS2_XATTR_INDEX_SECURITY &&
+	       type != OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS &&
+	       type != OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT;
+}
+
+int ocfs2_reflink_xattrs(struct inode *old_inode,
+			 struct buffer_head *old_bh,
+			 struct inode *new_inode,
+			 struct buffer_head *new_bh,
+			 bool preserve_security)
+{
+	int ret;
+	struct ocfs2_xattr_reflink args;
+	struct ocfs2_inode_info *oi = OCFS2_I(old_inode);
+	struct ocfs2_dinode *di = (struct ocfs2_dinode *)old_bh->b_data;
+	struct buffer_head *blk_bh = NULL;
+	struct ocfs2_cached_dealloc_ctxt dealloc;
+	struct ocfs2_refcount_tree *ref_tree;
+	struct buffer_head *ref_root_bh = NULL;
+
+	ret = ocfs2_lock_refcount_tree(OCFS2_SB(old_inode->i_sb),
+				       le64_to_cpu(di->i_refcount_loc),
+				       1, &ref_tree, &ref_root_bh);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	ocfs2_init_dealloc_ctxt(&dealloc);
+
+	args.old_inode = old_inode;
+	args.new_inode = new_inode;
+	args.old_bh = old_bh;
+	args.new_bh = new_bh;
+	args.ref_ci = &ref_tree->rf_ci;
+	args.ref_root_bh = ref_root_bh;
+	args.dealloc = &dealloc;
+	if (preserve_security)
+		args.xattr_reflinked = NULL;
+	else
+		args.xattr_reflinked = ocfs2_reflink_xattr_no_security;
+
+	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL) {
+		ret = ocfs2_reflink_xattr_inline(&args);
+		if (ret) {
+			mlog_errno(ret);
+			goto out_unlock;
+		}
+	}
+
+	if (!di->i_xattr_loc)
+		goto out_unlock;
+
+	ret = ocfs2_read_xattr_block(old_inode, le64_to_cpu(di->i_xattr_loc),
+				     &blk_bh);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out_unlock;
+	}
+
+	ret = ocfs2_reflink_xattr_in_block(&args, blk_bh);
+	if (ret)
+		mlog_errno(ret);
+
+	brelse(blk_bh);
+
+out_unlock:
+	ocfs2_unlock_refcount_tree(OCFS2_SB(old_inode->i_sb),
+				   ref_tree, 1);
+	brelse(ref_root_bh);
+
+	if (ocfs2_dealloc_has_cluster(&dealloc)) {
+		ocfs2_schedule_truncate_log_flush(OCFS2_SB(old_inode->i_sb), 1);
+		ocfs2_run_deallocs(OCFS2_SB(old_inode->i_sb), &dealloc);
+	}
+
out:
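Aside (illustration only, not part of the patch): a hedged sketch of the calling sequence a reflink implementation would be expected to use for the two entry points this patch exports; the wrapper name is invented and error handling is trimmed:

static int example_reflink_xattrs(struct inode *dir, struct inode *old_inode,
				  struct buffer_head *old_bh,
				  struct inode *new_inode,
				  struct buffer_head *new_bh, bool preserve)
{
	int ret;

	/* Copy every xattr and bump refcounts on shared value clusters;
	 * security/ACL xattrs are filtered out when preserve == false. */
	ret = ocfs2_reflink_xattrs(old_inode, old_bh, new_inode, new_bh,
				   preserve);
	if (ret)
		return ret;

	/* A non-preserving reflink gets freshly initialized security
	 * and ACL state from its parent directory instead. */
	if (!preserve)
		ret = ocfs2_init_security_and_acl(dir, new_inode);

	return ret;
}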
@@ -5306,6 +6961,51 @@ out:
 }
 
 /*
+ * Initialize security and acl for an already created inode.
+ * Used for reflinking a non-preserve-security file.
+ *
+ * It uses common APIs like ocfs2_xattr_set, so the caller
+ * must not hold any lock except i_mutex.
+ */
+int ocfs2_init_security_and_acl(struct inode *dir,
+				struct inode *inode)
+{
+	int ret = 0;
+	struct buffer_head *dir_bh = NULL;
+	struct ocfs2_security_xattr_info si = {
+		.enable = 1,
+	};
+
+	ret = ocfs2_init_security_get(inode, dir, &si);
+	if (!ret) {
+		ret = ocfs2_xattr_security_set(inode, si.name,
+					       si.value, si.value_len,
+					       XATTR_CREATE);
+		if (ret) {
+			mlog_errno(ret);
+			goto leave;
+		}
+	} else if (ret != -EOPNOTSUPP) {
+		mlog_errno(ret);
+		goto leave;
+	}
+
+	ret = ocfs2_inode_lock(dir, &dir_bh, 0);
+	if (ret) {
+		mlog_errno(ret);
+		goto leave;
+	}
+
+	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+	if (ret)
+		mlog_errno(ret);
+
+	ocfs2_inode_unlock(dir, 0);
+	brelse(dir_bh);
+leave:
+	return ret;
+}
+/*
  * 'security' attributes support
  */
 static size_t ocfs2_xattr_security_list(struct inode *inode, char *list,
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 1ca7e9a..08e3638 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -55,6 +55,8 @@ int ocfs2_xattr_set_handle(handle_t *, struct inode *, struct buffer_head *,
 			   int, const char *, const void *, size_t, int,
 			   struct ocfs2_alloc_context *,
 			   struct ocfs2_alloc_context *);
+int ocfs2_has_inline_xattr_value_outside(struct inode *inode,
+					 struct ocfs2_dinode *di);
 int ocfs2_xattr_remove(struct inode *, struct buffer_head *);
 int ocfs2_init_security_get(struct inode *, struct inode *,
 			    struct ocfs2_security_xattr_info *);
@@ -83,5 +85,16 @@ struct ocfs2_xattr_value_buf {
 	struct ocfs2_xattr_value_root	*vb_xv;
 };
 
-
+int ocfs2_xattr_attach_refcount_tree(struct inode *inode,
+				     struct buffer_head *fe_bh,
+				     struct ocfs2_caching_info *ref_ci,
+				     struct buffer_head *ref_root_bh,
+				     struct ocfs2_cached_dealloc_ctxt *dealloc);
+int ocfs2_reflink_xattrs(struct inode *old_inode,
+			 struct buffer_head *old_bh,
+			 struct inode *new_inode,
+			 struct buffer_head *new_bh,
+			 bool preserve_security);
+int ocfs2_init_security_and_acl(struct inode *dir,
+				struct inode *inode);
 #endif /* OCFS2_XATTR_H */
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index c7275cf..3680bae 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -489,7 +489,7 @@ out:
 	return ret;
 }
 
-struct inode_operations omfs_dir_inops = {
+const struct inode_operations omfs_dir_inops = {
 	.lookup = omfs_lookup,
 	.mkdir = omfs_mkdir,
 	.rename = omfs_rename,
diff --git a/fs/omfs/file.c b/fs/omfs/file.c
index d17e774..4845fbb 100644
--- a/fs/omfs/file.c
+++ b/fs/omfs/file.c
@@ -333,11 +333,11 @@ struct file_operations omfs_file_operations = {
 	.splice_read = generic_file_splice_read,
 };
 
-struct inode_operations omfs_file_inops = {
+const struct inode_operations omfs_file_inops = {
 	.truncate = omfs_truncate
 };
 
-struct address_space_operations omfs_aops = {
+const struct address_space_operations omfs_aops = {
 	.readpage = omfs_readpage,
 	.readpages = omfs_readpages,
 	.writepage = omfs_writepage,
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 379ae5f..f3b7c15 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -278,7 +278,7 @@ static int omfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	return 0;
 }
 
-static struct super_operations omfs_sops = {
+static const struct super_operations omfs_sops = {
 	.write_inode = omfs_write_inode,
 	.delete_inode =
omfs_delete_inode, .put_super = omfs_put_super, diff --git a/fs/omfs/omfs.h b/fs/omfs/omfs.h index 2bc0f06..df71039 100644 --- a/fs/omfs/omfs.h +++ b/fs/omfs/omfs.h @@ -45,15 +45,15 @@ extern int omfs_clear_range(struct super_block *sb, u64 block, int count); /* dir.c */ extern struct file_operations omfs_dir_operations; -extern struct inode_operations omfs_dir_inops; +extern const struct inode_operations omfs_dir_inops; extern int omfs_make_empty(struct inode *inode, struct super_block *sb); extern int omfs_is_bad(struct omfs_sb_info *sbi, struct omfs_header *header, u64 fsblock); /* file.c */ extern struct file_operations omfs_file_operations; -extern struct inode_operations omfs_file_inops; -extern struct address_space_operations omfs_aops; +extern const struct inode_operations omfs_file_inops; +extern const struct address_space_operations omfs_aops; extern void omfs_make_empty_table(struct buffer_head *bh, int offset); extern int omfs_shrink_inode(struct inode *inode); @@ -290,10 +290,9 @@ out: return error; } -SYSCALL_DEFINE2(truncate, const char __user *, path, unsigned long, length) +SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) { - /* on 32-bit boxen it will cut the range 2^31--2^32-1 off */ - return do_sys_truncate(path, (long)length); + return do_sys_truncate(path, length); } static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 619ba99..7b685e1 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -312,7 +312,7 @@ static struct attribute_group part_attr_group = { .attrs = part_attrs, }; -static struct attribute_group *part_attr_groups[] = { +static const struct attribute_group *part_attr_groups[] = { &part_attr_group, #ifdef CONFIG_BLK_DEV_IO_TRACE &blk_trace_attr_group, @@ -581,7 +581,7 @@ try_scan: } if (from + size > get_capacity(disk)) { - struct block_device_operations *bdops = disk->fops; + const struct block_device_operations *bdops = disk->fops; unsigned long long capacity; printk(KERN_WARNING diff --git a/fs/proc/array.c b/fs/proc/array.c index 725a650..0c6bc60 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -82,6 +82,7 @@ #include <linux/pid_namespace.h> #include <linux/ptrace.h> #include <linux/tracehook.h> +#include <linux/swapops.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -321,6 +322,87 @@ static inline void task_context_switch_counts(struct seq_file *m, p->nivcsw); } +struct stack_stats { + struct vm_area_struct *vma; + unsigned long startpage; + unsigned long usage; +}; + +static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct stack_stats *ss = walk->private; + struct vm_area_struct *vma = ss->vma; + pte_t *pte, ptent; + spinlock_t *ptl; + int ret = 0; + + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + +#ifdef CONFIG_STACK_GROWSUP + if (pte_present(ptent) || is_swap_pte(ptent)) + ss->usage = addr - ss->startpage + PAGE_SIZE; +#else + if (pte_present(ptent) || is_swap_pte(ptent)) { + ss->usage = ss->startpage - addr + PAGE_SIZE; + pte++; + ret = 1; + break; + } +#endif + } + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + return ret; +} + +static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma, + struct task_struct *task) +{ + struct stack_stats ss; + struct mm_walk stack_walk = { + .pmd_entry = stack_usage_pte_range, + .mm = vma->vm_mm, + .private = &ss, + }; + 
+ if (!vma->vm_mm || is_vm_hugetlb_page(vma)) + return 0; + + ss.vma = vma; + ss.startpage = task->stack_start & PAGE_MASK; + ss.usage = 0; + +#ifdef CONFIG_STACK_GROWSUP + walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end, + &stack_walk); +#else + walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE, + &stack_walk); +#endif + return ss.usage; +} + +static inline void task_show_stack_usage(struct seq_file *m, + struct task_struct *task) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = get_task_mm(task); + + if (mm) { + down_read(&mm->mmap_sem); + vma = find_vma(mm, task->stack_start); + if (vma) + seq_printf(m, "Stack usage:\t%lu kB\n", + get_stack_usage_in_bytes(vma, task) >> 10); + + up_read(&mm->mmap_sem); + mmput(mm); + } +} + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { @@ -340,6 +422,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_show_regs(m, task); #endif task_context_switch_counts(m, task); + task_show_stack_usage(m, task); return 0; } @@ -481,7 +564,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, rsslim, mm ? mm->start_code : 0, mm ? mm->end_code : 0, - (permitted && mm) ? mm->start_stack : 0, + (permitted) ? task->stack_start : 0, esp, eip, /* The signal information here is obsolete. diff --git a/fs/proc/base.c b/fs/proc/base.c index 6f742f6..837469a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -447,7 +447,7 @@ static int proc_oom_score(struct task_struct *task, char *buffer) do_posix_clock_monotonic_gettime(&uptime); read_lock(&tasklist_lock); - points = badness(task, uptime.tv_sec); + points = badness(task->group_leader, uptime.tv_sec); read_unlock(&tasklist_lock); return sprintf(buffer, "%lu\n", points); } @@ -458,7 +458,7 @@ struct limit_names { }; static const struct limit_names lnames[RLIM_NLIMITS] = { - [RLIMIT_CPU] = {"Max cpu time", "ms"}, + [RLIMIT_CPU] = {"Max cpu time", "seconds"}, [RLIMIT_FSIZE] = {"Max file size", "bytes"}, [RLIMIT_DATA] = {"Max data size", "bytes"}, [RLIMIT_STACK] = {"Max stack size", "bytes"}, @@ -999,11 +999,17 @@ static ssize_t oom_adjust_read(struct file *file, char __user *buf, struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); char buffer[PROC_NUMBUF]; size_t len; - int oom_adjust; + int oom_adjust = OOM_DISABLE; + unsigned long flags; if (!task) return -ESRCH; - oom_adjust = task->oomkilladj; + + if (lock_task_sighand(task, &flags)) { + oom_adjust = task->signal->oom_adj; + unlock_task_sighand(task, &flags); + } + put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); @@ -1015,32 +1021,44 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; - char buffer[PROC_NUMBUF], *end; - int oom_adjust; + char buffer[PROC_NUMBUF]; + long oom_adjust; + unsigned long flags; + int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; - oom_adjust = simple_strtol(buffer, &end, 0); + + err = strict_strtol(strstrip(buffer), 0, &oom_adjust); + if (err) + return -EINVAL; if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && oom_adjust != OOM_DISABLE) return -EINVAL; - if (*end == '\n') - end++; + task = get_proc_task(file->f_path.dentry->d_inode); if (!task) return -ESRCH; - if (oom_adjust < task->oomkilladj && !capable(CAP_SYS_RESOURCE)) { + if 
(!lock_task_sighand(task, &flags)) { + put_task_struct(task); + return -ESRCH; + } + + if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) { + unlock_task_sighand(task, &flags); put_task_struct(task); return -EACCES; } - task->oomkilladj = oom_adjust; + + task->signal->oom_adj = oom_adjust; + + unlock_task_sighand(task, &flags); put_task_struct(task); - if (end - buffer == 0) - return -EIO; - return end - buffer; + + return count; } static const struct file_operations proc_oom_adjust_operations = { @@ -1169,17 +1187,16 @@ static ssize_t proc_fault_inject_write(struct file * file, count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; - make_it_fail = simple_strtol(buffer, &end, 0); - if (*end == '\n') - end++; + make_it_fail = simple_strtol(strstrip(buffer), &end, 0); + if (*end) + return -EINVAL; task = get_proc_task(file->f_dentry->d_inode); if (!task) return -ESRCH; task->make_it_fail = make_it_fail; put_task_struct(task); - if (end - buffer == 0) - return -EIO; - return end - buffer; + + return count; } static const struct file_operations proc_fault_inject_operations = { @@ -2586,9 +2603,6 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid) dput(dentry); } - if (tgid == 0) - goto out; - name.name = buf; name.len = snprintf(buf, sizeof(buf), "%d", tgid); leader = d_hash_and_lookup(mnt->mnt_root, &name); @@ -2645,17 +2659,16 @@ out: void proc_flush_task(struct task_struct *task) { int i; - struct pid *pid, *tgid = NULL; + struct pid *pid, *tgid; struct upid *upid; pid = task_pid(task); - if (thread_group_leader(task)) - tgid = task_tgid(task); + tgid = task_tgid(task); for (i = 0; i <= pid->level; i++) { upid = &pid->numbers[i]; proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr, - tgid ? tgid->numbers[i].nr : 0); + tgid->numbers[i].nr); } upid = &pid->numbers[pid->level]; diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 59b43a0..5601337 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -17,9 +17,15 @@ #include <linux/elfcore.h> #include <linux/vmalloc.h> #include <linux/highmem.h> +#include <linux/bootmem.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/io.h> +#include <linux/list.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/memory.h> +#include <asm/sections.h> #define CORE_STR "CORE" @@ -29,17 +35,6 @@ static struct proc_dir_entry *proc_root_kcore; -static int open_kcore(struct inode * inode, struct file * filp) -{ - return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; -} - -static ssize_t read_kcore(struct file *, char __user *, size_t, loff_t *); - -static const struct file_operations proc_kcore_operations = { - .read = read_kcore, - .open = open_kcore, -}; #ifndef kc_vaddr_to_offset #define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET) @@ -57,18 +52,19 @@ struct memelfnote void *data; }; -static struct kcore_list *kclist; +static LIST_HEAD(kclist_head); static DEFINE_RWLOCK(kclist_lock); +static int kcore_need_update = 1; void -kclist_add(struct kcore_list *new, void *addr, size_t size) +kclist_add(struct kcore_list *new, void *addr, size_t size, int type) { new->addr = (unsigned long)addr; new->size = size; + new->type = type; write_lock(&kclist_lock); - new->next = kclist; - kclist = new; + list_add_tail(&new->list, &kclist_head); write_unlock(&kclist_lock); } @@ -80,7 +76,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen) *nphdr = 1; /* PT_NOTE */ size = 0; - for (m=kclist; m; m=m->next) { + list_for_each_entry(m, &kclist_head, list) { try = kc_vaddr_to_offset((size_t)m->addr + m->size); if (try > size) size = try; @@ -97,6 +93,177 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen) return size + *elf_buflen; } +static void free_kclist_ents(struct list_head *head) +{ + struct kcore_list *tmp, *pos; + + list_for_each_entry_safe(pos, tmp, head, list) { + list_del(&pos->list); + kfree(pos); + } +} +/* + * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list. + */ +static void __kcore_update_ram(struct list_head *list) +{ + int nphdr; + size_t size; + struct kcore_list *tmp, *pos; + LIST_HEAD(garbage); + + write_lock(&kclist_lock); + if (kcore_need_update) { + list_for_each_entry_safe(pos, tmp, &kclist_head, list) { + if (pos->type == KCORE_RAM + || pos->type == KCORE_VMEMMAP) + list_move(&pos->list, &garbage); + } + list_splice_tail(list, &kclist_head); + } else + list_splice(list, &garbage); + kcore_need_update = 0; + proc_root_kcore->size = get_kcore_size(&nphdr, &size); + write_unlock(&kclist_lock); + + free_kclist_ents(&garbage); +} + + +#ifdef CONFIG_HIGHMEM +/* + * If no highmem, we can assume [0...max_low_pfn) continuous range of memory + * because memory hole is not as big as !HIGHMEM case. + * (HIGHMEM is special because part of memory is _invisible_ from the kernel.) 
+ */
+static int kcore_update_ram(void)
+{
+	LIST_HEAD(head);
+	struct kcore_list *ent;
+	int ret = 0;
+
+	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+	if (!ent)
+		return -ENOMEM;
+	ent->addr = (unsigned long)__va(0);
+	ent->size = max_low_pfn << PAGE_SHIFT;
+	ent->type = KCORE_RAM;
+	list_add(&ent->list, &head);
+	__kcore_update_ram(&head);
+	return ret;
+}
+
+#else /* !CONFIG_HIGHMEM */
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/* calculate vmemmap's address from given system ram pfn and register it */
+int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
+{
+	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
+	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
+	unsigned long start, end;
+	struct kcore_list *vmm, *tmp;
+
+	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
+	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
+	end = ALIGN(end, PAGE_SIZE);
+	/* overlap check (because we have to align to the page boundary) */
+	list_for_each_entry(tmp, head, list) {
+		if (tmp->type != KCORE_VMEMMAP)
+			continue;
+		if (start < tmp->addr + tmp->size)
+			if (end > tmp->addr)
+				end = tmp->addr;
+	}
+	if (start < end) {
+		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
+		if (!vmm)
+			return 0;
+		vmm->addr = start;
+		vmm->size = end - start;
+		vmm->type = KCORE_VMEMMAP;
+		list_add_tail(&vmm->list, head);
+	}
+	return 1;
+}
+#else
+int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
+{
+	return 1;
+}
+
+#endif
+
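Aside (illustration only, not part of the patch): every region registered through kclist_add() becomes a PT_LOAD program header of the ELF core image that /proc/kcore presents, so the list rework above is observable from userspace. A minimal reader, assuming a 64-bit kernel and root privileges:

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	Elf64_Ehdr eh;
	FILE *f = fopen("/proc/kcore", "rb");

	if (!f || fread(&eh, sizeof(eh), 1, f) != 1 ||
	    memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0) {
		perror("/proc/kcore");
		return 1;
	}
	/* e_type should be ET_CORE; e_phnum counts the PT_NOTE segment
	 * plus one PT_LOAD per registered kcore_list entry. */
	printf("type=%u (ET_CORE=%u), phnum=%u\n",
	       eh.e_type, ET_CORE, eh.e_phnum);
	fclose(f);
	return 0;
}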
+static int
+kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
+{
+	struct list_head *head = (struct list_head *)arg;
+	struct kcore_list *ent;
+
+	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+	if (!ent)
+		return -ENOMEM;
+	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
+	ent->size = nr_pages << PAGE_SHIFT;
+
+	/* Sanity check: Can happen on a 32bit arch...maybe */
+	if (ent->addr < (unsigned long) __va(0))
+		goto free_out;
+
+	/* cut the not-mapped area (adapted from ppc-32 code) */
+	if (ULONG_MAX - ent->addr < ent->size)
+		ent->size = ULONG_MAX - ent->addr;
+
+	/* cut when vmalloc() area is higher than direct-map area */
+	if (VMALLOC_START > (unsigned long)__va(0)) {
+		if (ent->addr > VMALLOC_START)
+			goto free_out;
+		if (VMALLOC_START - ent->addr < ent->size)
+			ent->size = VMALLOC_START - ent->addr;
+	}
+
+	ent->type = KCORE_RAM;
+	list_add_tail(&ent->list, head);
+
+	if (!get_sparsemem_vmemmap_info(ent, head)) {
+		list_del(&ent->list);
+		goto free_out;
+	}
+
+	return 0;
+free_out:
+	kfree(ent);
+	return 1;
+}
+
+static int kcore_update_ram(void)
+{
+	int nid, ret;
+	unsigned long end_pfn;
+	LIST_HEAD(head);
+
+	/* Not initialized....update now */
+	/* find out "max pfn" */
+	end_pfn = 0;
+	for_each_node_state(nid, N_HIGH_MEMORY) {
+		unsigned long node_end;
+		node_end = NODE_DATA(nid)->node_start_pfn +
+			   NODE_DATA(nid)->node_spanned_pages;
+		if (end_pfn < node_end)
+			end_pfn = node_end;
+	}
+	/* scan 0 to max_pfn */
+	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
+	if (ret) {
+		free_kclist_ents(&head);
+		return -ENOMEM;
+	}
+	__kcore_update_ram(&head);
+	return ret;
+}
+#endif /* CONFIG_HIGHMEM */
 
 /*****************************************************************************/
 /*
@@ -192,7 +359,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
 	nhdr->p_align	= 0;
 
 	/* setup ELF PT_LOAD program header for every area */
-	for (m=kclist; m; m=m->next) {
+	list_for_each_entry(m, &kclist_head, list) {
 		phdr = (struct elf_phdr *) bufp;
 		bufp += sizeof(struct elf_phdr);
 		offset += sizeof(struct elf_phdr);
@@ -265,7 +432,8 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 	unsigned long start;
 
 	read_lock(&kclist_lock);
-	proc_root_kcore->size = size = get_kcore_size(&nphdr, &elf_buflen);
+	size = get_kcore_size(&nphdr, &elf_buflen);
+
 	if (buflen == 0 || *fpos >= size) {
 		read_unlock(&kclist_lock);
 		return 0;
@@ -317,7 +485,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		struct kcore_list *m;
 
 		read_lock(&kclist_lock);
-		for (m=kclist; m; m=m->next) {
+		list_for_each_entry(m, &kclist_head, list) {
 			if (start >= m->addr && start < (m->addr+m->size))
 				break;
 		}
@@ -326,45 +494,14 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
 		if (m == NULL) {
 			if (clear_user(buffer, tsz))
 				return -EFAULT;
-		} else if (is_vmalloc_addr((void *)start)) {
+		} else if (is_vmalloc_or_module_addr((void *)start)) {
 			char * elf_buf;
-			struct vm_struct *m;
-			unsigned long curstart = start;
-			unsigned long cursize = tsz;
 
 			elf_buf = kzalloc(tsz, GFP_KERNEL);
 			if (!elf_buf)
 				return -ENOMEM;
-
-			read_lock(&vmlist_lock);
-			for (m=vmlist; m && cursize; m=m->next) {
-				unsigned long vmstart;
-				unsigned long vmsize;
-				unsigned long msize = m->size - PAGE_SIZE;
-
-				if (((unsigned long)m->addr + msize) <
-								curstart)
-					continue;
-				if ((unsigned long)m->addr > (curstart +
-								cursize))
-					break;
-				vmstart = (curstart < (unsigned long)m->addr ?
-					(unsigned long)m->addr : curstart);
-				if (((unsigned long)m->addr + msize) >
-							(curstart + cursize))
-					vmsize = curstart + cursize - vmstart;
-				else
-					vmsize = (unsigned long)m->addr +
-						msize - vmstart;
-				curstart = vmstart + vmsize;
-				cursize -= vmsize;
-				/* don't dump ioremap'd stuff!
(TA) */ - if (m->flags & VM_IOREMAP) - continue; - memcpy(elf_buf + (vmstart - start), - (char *)vmstart, vmsize); - } - read_unlock(&vmlist_lock); + vread(elf_buf, (char *)start, tsz); + /* we have to zero-fill user buffer even if no read */ if (copy_to_user(buffer, elf_buf, tsz)) { kfree(elf_buf); return -EFAULT; @@ -402,12 +539,96 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) return acc; } + +static int open_kcore(struct inode *inode, struct file *filp) +{ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (kcore_need_update) + kcore_update_ram(); + if (i_size_read(inode) != proc_root_kcore->size) { + mutex_lock(&inode->i_mutex); + i_size_write(inode, proc_root_kcore->size); + mutex_unlock(&inode->i_mutex); + } + return 0; +} + + +static const struct file_operations proc_kcore_operations = { + .read = read_kcore, + .open = open_kcore, +}; + +#ifdef CONFIG_MEMORY_HOTPLUG +/* just remember that we have to update kcore */ +static int __meminit kcore_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + switch (action) { + case MEM_ONLINE: + case MEM_OFFLINE: + write_lock(&kclist_lock); + kcore_need_update = 1; + write_unlock(&kclist_lock); + } + return NOTIFY_OK; +} +#endif + + +static struct kcore_list kcore_vmalloc; + +#ifdef CONFIG_ARCH_PROC_KCORE_TEXT +static struct kcore_list kcore_text; +/* + * If defined, special segment is used for mapping kernel text instead of + * direct-map area. We need to create special TEXT section. + */ +static void __init proc_kcore_text_init(void) +{ + kclist_add(&kcore_text, _stext, _end - _stext, KCORE_TEXT); +} +#else +static void __init proc_kcore_text_init(void) +{ +} +#endif + +#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) +/* + * MODULES_VADDR has no intersection with VMALLOC_ADDR. + */ +struct kcore_list kcore_modules; +static void __init add_modules_range(void) +{ + kclist_add(&kcore_modules, (void *)MODULES_VADDR, + MODULES_END - MODULES_VADDR, KCORE_VMALLOC); +} +#else +static void __init add_modules_range(void) +{ +} +#endif + static int __init proc_kcore_init(void) { - proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations); - if (proc_root_kcore) - proc_root_kcore->size = - (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; + proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, + &proc_kcore_operations); + if (!proc_root_kcore) { + printk(KERN_ERR "couldn't create /proc/kcore\n"); + return 0; /* Always returns 0. 
*/ + } + /* Store text area if it's special */ + proc_kcore_text_init(); + /* Store vmalloc area */ + kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, + VMALLOC_END - VMALLOC_START, KCORE_VMALLOC); + add_modules_range(); + /* Store direct-map area from physical memory map */ + kcore_update_ram(); + hotplug_memory_notifier(kcore_callback, 0); + return 0; } module_init(proc_kcore_init); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 78faedc..c7bff4f 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -81,9 +81,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "Writeback: %8lu kB\n" "AnonPages: %8lu kB\n" "Mapped: %8lu kB\n" + "Shmem: %8lu kB\n" "Slab: %8lu kB\n" "SReclaimable: %8lu kB\n" "SUnreclaim: %8lu kB\n" + "KernelStack: %8lu kB\n" "PageTables: %8lu kB\n" #ifdef CONFIG_QUICKLIST "Quicklists: %8lu kB\n" @@ -128,10 +130,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(global_page_state(NR_WRITEBACK)), K(global_page_state(NR_ANON_PAGES)), K(global_page_state(NR_FILE_MAPPED)), + K(global_page_state(NR_SHMEM)), K(global_page_state(NR_SLAB_RECLAIMABLE) + global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_SLAB_RECLAIMABLE)), K(global_page_state(NR_SLAB_UNRECLAIMABLE)), + global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, K(global_page_state(NR_PAGETABLE)), #ifdef CONFIG_QUICKLIST K(quicklist_total_size()), diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 7e14d1a..9fe7d7e 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c @@ -109,7 +109,7 @@ static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos) return rb_next((struct rb_node *) v); } -static struct seq_operations proc_nommu_region_list_seqop = { +static const struct seq_operations proc_nommu_region_list_seqop = { .start = nommu_region_list_start, .next = nommu_region_list_next, .stop = nommu_region_list_stop, diff --git a/fs/proc/page.c b/fs/proc/page.c index 2707c6c..2281c2c 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -2,6 +2,7 @@ #include <linux/compiler.h> #include <linux/fs.h> #include <linux/init.h> +#include <linux/ksm.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/proc_fs.h> @@ -95,6 +96,8 @@ static const struct file_operations proc_kpagecount_operations = { #define KPF_UNEVICTABLE 18 #define KPF_NOPAGE 20 +#define KPF_KSM 21 + /* kernel hacking assistances * WARNING: subject to change, never rely on them! 
*/ @@ -137,6 +140,8 @@ static u64 get_uflags(struct page *page) u |= 1 << KPF_MMAP; if (PageAnon(page)) u |= 1 << KPF_ANON; + if (PageKsm(page)) + u |= 1 << KPF_KSM; /* * compound pages: export both head/tail info diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 9b1e4e9..f667e8a 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -153,7 +153,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, /* careful: calling conventions are nasty here */ res = count; - error = table->proc_handler(table, write, filp, buf, &res, ppos); + error = table->proc_handler(table, write, buf, &res, ppos); if (!error) error = res; out: diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9bd8be1..2a1bef9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -243,6 +243,25 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { name = "[stack]"; + } else { + unsigned long stack_start; + struct proc_maps_private *pmp; + + pmp = m->private; + stack_start = pmp->task->stack_start; + + if (vma->vm_start <= stack_start && + vma->vm_end >= stack_start) { + pad_len_spaces(m, len); + seq_printf(m, + "[threadstack:%08lx]", +#ifdef CONFIG_STACK_GROWSUP + vma->vm_end - stack_start +#else + stack_start - vma->vm_start +#endif + ); + } } } else { name = "[vdso]"; @@ -465,23 +484,28 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, return 0; } +#define CLEAR_REFS_ALL 1 +#define CLEAR_REFS_ANON 2 +#define CLEAR_REFS_MAPPED 3 + static ssize_t clear_refs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; - char buffer[PROC_NUMBUF], *end; + char buffer[PROC_NUMBUF]; struct mm_struct *mm; struct vm_area_struct *vma; + long type; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; - if (!simple_strtol(buffer, &end, 0)) + if (strict_strtol(strstrip(buffer), 10, &type)) + return -EINVAL; + if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) return -EINVAL; - if (*end == '\n') - end++; task = get_proc_task(file->f_path.dentry->d_inode); if (!task) return -ESRCH; @@ -494,18 +518,31 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, down_read(&mm->mmap_sem); for (vma = mm->mmap; vma; vma = vma->vm_next) { clear_refs_walk.private = vma; - if (!is_vm_hugetlb_page(vma)) - walk_page_range(vma->vm_start, vma->vm_end, - &clear_refs_walk); + if (is_vm_hugetlb_page(vma)) + continue; + /* + * Writing 1 to /proc/pid/clear_refs affects all pages. + * + * Writing 2 to /proc/pid/clear_refs only affects + * Anonymous pages. + * + * Writing 3 to /proc/pid/clear_refs only affects file + * mapped pages. + */ + if (type == CLEAR_REFS_ANON && vma->vm_file) + continue; + if (type == CLEAR_REFS_MAPPED && !vma->vm_file) + continue; + walk_page_range(vma->vm_start, vma->vm_end, + &clear_refs_walk); } flush_tlb_mm(mm); up_read(&mm->mmap_sem); mmput(mm); } put_task_struct(task); - if (end - buffer == 0) - return -EIO; - return end - buffer; + + return count; } const struct file_operations proc_clear_refs_operations = { diff --git a/fs/qnx4/Kconfig b/fs/qnx4/Kconfig index be8e0e1..5f60899 100644 --- a/fs/qnx4/Kconfig +++ b/fs/qnx4/Kconfig @@ -6,20 +6,9 @@ config QNX4FS_FS QNX 4 and QNX 6 (the latter is also called QNX RTP). Further information is available at <http://www.qnx.com/>. 
Say Y if you intend to mount QNX hard disks or floppies. - Unless you say Y to "QNX4FS read-write support" below, you will - only be able to read these file systems. To compile this file system support as a module, choose M here: the module will be called qnx4. If you don't know whether you need it, then you don't need it: answer N. - -config QNX4FS_RW - bool "QNX4FS write support (DANGEROUS)" - depends on QNX4FS_FS && EXPERIMENTAL && BROKEN - help - Say Y if you want to test write support for QNX4 file systems. - - It's currently broken, so for now: - answer N. diff --git a/fs/qnx4/Makefile b/fs/qnx4/Makefile index e4d408c..4a283b3 100644 --- a/fs/qnx4/Makefile +++ b/fs/qnx4/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_QNX4FS_FS) += qnx4.o -qnx4-objs := inode.o dir.o namei.o file.o bitmap.o truncate.o +qnx4-objs := inode.o dir.o namei.o bitmap.o diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c index e1cd061..0afba06 100644 --- a/fs/qnx4/bitmap.c +++ b/fs/qnx4/bitmap.c @@ -78,84 +78,3 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb) return total_free; } - -#ifdef CONFIG_QNX4FS_RW - -int qnx4_is_free(struct super_block *sb, long block) -{ - int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1; - int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size); - struct buffer_head *bh; - const char *g; - int ret = -EIO; - - start += block / (QNX4_BLOCK_SIZE * 8); - QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n", - (unsigned long) block, (unsigned long) start)); - (void) size; /* CHECKME */ - bh = sb_bread(sb, start); - if (bh == NULL) { - return -EIO; - } - g = bh->b_data + (block % QNX4_BLOCK_SIZE); - if (((*g) & (1 << (block % 8))) == 0) { - QNX4DEBUG(("qnx4: is_free -> block is free\n")); - ret = 1; - } else { - QNX4DEBUG(("qnx4: is_free -> block is busy\n")); - ret = 0; - } - brelse(bh); - - return ret; -} - -int qnx4_set_bitmap(struct super_block *sb, long block, int busy) -{ - int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1; - int size = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size); - struct buffer_head *bh; - char *g; - - start += block / (QNX4_BLOCK_SIZE * 8); - QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n", - (unsigned long) block, (unsigned long) start)); - (void) size; /* CHECKME */ - bh = sb_bread(sb, start); - if (bh == NULL) { - return -EIO; - } - g = bh->b_data + (block % QNX4_BLOCK_SIZE); - if (busy == 0) { - (*g) &= ~(1 << (block % 8)); - } else { - (*g) |= (1 << (block % 8)); - } - mark_buffer_dirty(bh); - brelse(bh); - - return 0; -} - -static void qnx4_clear_inode(struct inode *inode) -{ - struct qnx4_inode_entry *qnx4_ino = qnx4_raw_inode(inode); - /* What for? 
*/ - memset(qnx4_ino->di_fname, 0, sizeof qnx4_ino->di_fname); - qnx4_ino->di_size = 0; - qnx4_ino->di_num_xtnts = 0; - qnx4_ino->di_mode = 0; - qnx4_ino->di_status = 0; -} - -void qnx4_free_inode(struct inode *inode) -{ - if (inode->i_ino < 1) { - printk("free_inode: inode 0 or nonexistent inode\n"); - return; - } - qnx4_clear_inode(inode); - clear_inode(inode); -} - -#endif diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index 003c68f..86cc39c 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -85,9 +85,4 @@ const struct file_operations qnx4_dir_operations = const struct inode_operations qnx4_dir_inode_operations = { .lookup = qnx4_lookup, -#ifdef CONFIG_QNX4FS_RW - .create = qnx4_create, - .unlink = qnx4_unlink, - .rmdir = qnx4_rmdir, -#endif }; diff --git a/fs/qnx4/file.c b/fs/qnx4/file.c deleted file mode 100644 index 09b170a..0000000 --- a/fs/qnx4/file.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * QNX4 file system, Linux implementation. - * - * Version : 0.2.1 - * - * Using parts of the xiafs filesystem. - * - * History : - * - * 25-05-1998 by Richard Frowijn : first release. - * 21-06-1998 by Frank Denis : wrote qnx4_readpage to use generic_file_read. - * 27-06-1998 by Frank Denis : file overwriting. - */ - -#include "qnx4.h" - -/* - * We have mostly NULL's here: the current defaults are ok for - * the qnx4 filesystem. - */ -const struct file_operations qnx4_file_operations = -{ - .llseek = generic_file_llseek, - .read = do_sync_read, - .aio_read = generic_file_aio_read, - .mmap = generic_file_mmap, - .splice_read = generic_file_splice_read, -#ifdef CONFIG_QNX4FS_RW - .write = do_sync_write, - .aio_write = generic_file_aio_write, - .fsync = simple_fsync, -#endif -}; - -const struct inode_operations qnx4_file_inode_operations = -{ -#ifdef CONFIG_QNX4FS_RW - .truncate = qnx4_truncate, -#endif -}; diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 681df5f..d2cd179 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c @@ -28,73 +28,6 @@ static const struct super_operations qnx4_sops; -#ifdef CONFIG_QNX4FS_RW - -static void qnx4_delete_inode(struct inode *inode) -{ - QNX4DEBUG(("qnx4: deleting inode [%lu]\n", (unsigned long) inode->i_ino)); - truncate_inode_pages(&inode->i_data, 0); - inode->i_size = 0; - qnx4_truncate(inode); - lock_kernel(); - qnx4_free_inode(inode); - unlock_kernel(); -} - -static int qnx4_write_inode(struct inode *inode, int do_sync) -{ - struct qnx4_inode_entry *raw_inode; - int block, ino; - struct buffer_head *bh; - ino = inode->i_ino; - - QNX4DEBUG(("qnx4: write inode 1.\n")); - if (inode->i_nlink == 0) { - return 0; - } - if (!ino) { - printk("qnx4: bad inode number on dev %s: %d is out of range\n", - inode->i_sb->s_id, ino); - return -EIO; - } - QNX4DEBUG(("qnx4: write inode 2.\n")); - block = ino / QNX4_INODES_PER_BLOCK; - lock_kernel(); - if (!(bh = sb_bread(inode->i_sb, block))) { - printk("qnx4: major problem: unable to read inode from dev " - "%s\n", inode->i_sb->s_id); - unlock_kernel(); - return -EIO; - } - raw_inode = ((struct qnx4_inode_entry *) bh->b_data) + - (ino % QNX4_INODES_PER_BLOCK); - raw_inode->di_mode = cpu_to_le16(inode->i_mode); - raw_inode->di_uid = cpu_to_le16(fs_high2lowuid(inode->i_uid)); - raw_inode->di_gid = cpu_to_le16(fs_high2lowgid(inode->i_gid)); - raw_inode->di_nlink = cpu_to_le16(inode->i_nlink); - raw_inode->di_size = cpu_to_le32(inode->i_size); - raw_inode->di_mtime = cpu_to_le32(inode->i_mtime.tv_sec); - raw_inode->di_atime = cpu_to_le32(inode->i_atime.tv_sec); - raw_inode->di_ctime = cpu_to_le32(inode->i_ctime.tv_sec); - 
raw_inode->di_first_xtnt.xtnt_size = cpu_to_le32(inode->i_blocks); - mark_buffer_dirty(bh); - if (do_sync) { - sync_dirty_buffer(bh); - if (buffer_req(bh) && !buffer_uptodate(bh)) { - printk("qnx4: IO error syncing inode [%s:%08x]\n", - inode->i_sb->s_id, ino); - brelse(bh); - unlock_kernel(); - return -EIO; - } - } - brelse(bh); - unlock_kernel(); - return 0; -} - -#endif - static void qnx4_put_super(struct super_block *sb); static struct inode *qnx4_alloc_inode(struct super_block *sb); static void qnx4_destroy_inode(struct inode *inode); @@ -108,10 +41,6 @@ static const struct super_operations qnx4_sops = .put_super = qnx4_put_super, .statfs = qnx4_statfs, .remount_fs = qnx4_remount, -#ifdef CONFIG_QNX4FS_RW - .write_inode = qnx4_write_inode, - .delete_inode = qnx4_delete_inode, -#endif }; static int qnx4_remount(struct super_block *sb, int *flags, char *data) @@ -120,15 +49,7 @@ static int qnx4_remount(struct super_block *sb, int *flags, char *data) qs = qnx4_sb(sb); qs->Version = QNX4_VERSION; -#ifndef CONFIG_QNX4FS_RW *flags |= MS_RDONLY; -#endif - if (*flags & MS_RDONLY) { - return 0; - } - - mark_buffer_dirty(qs->sb_buf); - return 0; } @@ -354,9 +275,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) } s->s_op = &qnx4_sops; s->s_magic = QNX4_SUPER_MAGIC; -#ifndef CONFIG_QNX4FS_RW s->s_flags |= MS_RDONLY; /* Yup, read-only yet */ -#endif qnx4_sb(s)->sb_buf = bh; qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data; @@ -489,8 +408,7 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino) memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE); if (S_ISREG(inode->i_mode)) { - inode->i_op = &qnx4_file_inode_operations; - inode->i_fop = &qnx4_file_operations; + inode->i_fop = &generic_ro_fops; inode->i_mapping->a_ops = &qnx4_aops; qnx4_i(inode)->mmu_private = inode->i_size; } else if (S_ISDIR(inode->i_mode)) { diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c index 5972ed2..ae1e7ed 100644 --- a/fs/qnx4/namei.c +++ b/fs/qnx4/namei.c @@ -134,108 +134,3 @@ out: return NULL; } - -#ifdef CONFIG_QNX4FS_RW -int qnx4_create(struct inode *dir, struct dentry *dentry, int mode, - struct nameidata *nd) -{ - QNX4DEBUG(("qnx4: qnx4_create\n")); - if (dir == NULL) { - return -ENOENT; - } - return -ENOSPC; -} - -int qnx4_rmdir(struct inode *dir, struct dentry *dentry) -{ - struct buffer_head *bh; - struct qnx4_inode_entry *de; - struct inode *inode; - int retval; - int ino; - - QNX4DEBUG(("qnx4: qnx4_rmdir [%s]\n", dentry->d_name.name)); - lock_kernel(); - bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name, - &de, &ino); - if (bh == NULL) { - unlock_kernel(); - return -ENOENT; - } - inode = dentry->d_inode; - if (inode->i_ino != ino) { - retval = -EIO; - goto end_rmdir; - } -#if 0 - if (!empty_dir(inode)) { - retval = -ENOTEMPTY; - goto end_rmdir; - } -#endif - if (inode->i_nlink != 2) { - QNX4DEBUG(("empty directory has nlink!=2 (%d)\n", inode->i_nlink)); - } - QNX4DEBUG(("qnx4: deleting directory\n")); - de->di_status = 0; - memset(de->di_fname, 0, sizeof de->di_fname); - de->di_mode = 0; - mark_buffer_dirty_inode(bh, dir); - clear_nlink(inode); - mark_inode_dirty(inode); - inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; - inode_dec_link_count(dir); - retval = 0; - - end_rmdir: - brelse(bh); - - unlock_kernel(); - return retval; -} - -int qnx4_unlink(struct inode *dir, struct dentry *dentry) -{ - struct buffer_head *bh; - struct qnx4_inode_entry *de; - struct inode *inode; - int retval; - int ino; - - QNX4DEBUG(("qnx4: qnx4_unlink 
[%s]\n", dentry->d_name.name)); - lock_kernel(); - bh = qnx4_find_entry(dentry->d_name.len, dir, dentry->d_name.name, - &de, &ino); - if (bh == NULL) { - unlock_kernel(); - return -ENOENT; - } - inode = dentry->d_inode; - if (inode->i_ino != ino) { - retval = -EIO; - goto end_unlink; - } - retval = -EPERM; - if (!inode->i_nlink) { - QNX4DEBUG(("Deleting nonexistent file (%s:%lu), %d\n", - inode->i_sb->s_id, - inode->i_ino, inode->i_nlink)); - inode->i_nlink = 1; - } - de->di_status = 0; - memset(de->di_fname, 0, sizeof de->di_fname); - de->di_mode = 0; - mark_buffer_dirty_inode(bh, dir); - dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC; - mark_inode_dirty(dir); - inode->i_ctime = dir->i_ctime; - inode_dec_link_count(inode); - retval = 0; - -end_unlink: - unlock_kernel(); - brelse(bh); - - return retval; -} -#endif diff --git a/fs/qnx4/qnx4.h b/fs/qnx4/qnx4.h index 9efc089..33a6085 100644 --- a/fs/qnx4/qnx4.h +++ b/fs/qnx4/qnx4.h @@ -29,17 +29,9 @@ extern unsigned long qnx4_block_map(struct inode *inode, long iblock); extern struct buffer_head *qnx4_bread(struct inode *, int, int); -extern const struct inode_operations qnx4_file_inode_operations; extern const struct inode_operations qnx4_dir_inode_operations; -extern const struct file_operations qnx4_file_operations; extern const struct file_operations qnx4_dir_operations; extern int qnx4_is_free(struct super_block *sb, long block); -extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); -extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); -extern void qnx4_truncate(struct inode *inode); -extern void qnx4_free_inode(struct inode *inode); -extern int qnx4_unlink(struct inode *dir, struct dentry *dentry); -extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry); static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb) { diff --git a/fs/qnx4/truncate.c b/fs/qnx4/truncate.c deleted file mode 100644 index d94d9ee..0000000 --- a/fs/qnx4/truncate.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - * QNX4 file system, Linux implementation. - * - * Version : 0.1 - * - * Using parts of the xiafs filesystem. - * - * History : - * - * 30-06-1998 by Frank DENIS : ugly filler. - */ - -#include <linux/smp_lock.h> -#include "qnx4.h" - -#ifdef CONFIG_QNX4FS_RW - -void qnx4_truncate(struct inode *inode) -{ - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode))) { - return; - } - lock_kernel(); - if (!(S_ISDIR(inode->i_mode))) { - /* TODO */ - } - QNX4DEBUG(("qnx4: qnx4_truncate called\n")); - inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - unlock_kernel(); -} - -#endif diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 38f7bd5..39b49c4 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -1839,7 +1839,7 @@ EXPORT_SYMBOL(dquot_commit_info); /* * Definitions of diskquota operations. 
*/ -struct dquot_operations dquot_operations = { +const struct dquot_operations dquot_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, @@ -2461,7 +2461,7 @@ out: } EXPORT_SYMBOL(vfs_set_dqinfo); -struct quotactl_ops vfs_quotactl_ops = { +const struct quotactl_ops vfs_quotactl_ops = { .quota_on = vfs_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index a7f0110..a6090aa 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -34,12 +34,10 @@ #include <linux/ramfs.h> #include <linux/sched.h> #include <linux/parser.h> +#include <linux/magic.h> #include <asm/uaccess.h> #include "internal.h" -/* some random number */ -#define RAMFS_MAGIC 0x858458f6 - #define RAMFS_DEFAULT_MODE 0755 static const struct super_operations ramfs_ops; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 7adea74..f0ad05f 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -612,7 +612,7 @@ static int reiserfs_mark_dquot_dirty(struct dquot *); static int reiserfs_write_info(struct super_block *, int); static int reiserfs_quota_on(struct super_block *, int, int, char *, int); -static struct dquot_operations reiserfs_quota_operations = { +static const struct dquot_operations reiserfs_quota_operations = { .initialize = dquot_initialize, .drop = dquot_drop, .alloc_space = dquot_alloc_space, @@ -629,7 +629,7 @@ static struct dquot_operations reiserfs_quota_operations = { .destroy_dquot = dquot_destroy, }; -static struct quotactl_ops reiserfs_qctl_operations = { +static const struct quotactl_ops reiserfs_qctl_operations = { .quota_on = reiserfs_quota_on, .quota_off = vfs_quota_off, .quota_sync = vfs_quota_sync, diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 4ab3c03..c117fa8 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -284,7 +284,7 @@ static const struct file_operations romfs_dir_operations = { .readdir = romfs_readdir, }; -static struct inode_operations romfs_dir_inode_operations = { +static const struct inode_operations romfs_dir_inode_operations = { .lookup = romfs_lookup, }; @@ -528,7 +528,7 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) pos = (ROMFH_SIZE + len + 1 + ROMFH_PAD) & ROMFH_MASK; root = romfs_iget(sb, pos); - if (!root) + if (IS_ERR(root)) goto error; sb->s_root = d_alloc_root(root); diff --git a/fs/select.c b/fs/select.c index 8084834..a201fc3 100644 --- a/fs/select.c +++ b/fs/select.c @@ -41,22 +41,28 @@ * better solutions.. */ +#define MAX_SLACK (100 * NSEC_PER_MSEC) + static long __estimate_accuracy(struct timespec *tv) { long slack; int divfactor = 1000; + if (tv->tv_sec < 0) + return 0; + if (task_nice(current) > 0) divfactor = divfactor / 5; + if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor)) + return MAX_SLACK; + slack = tv->tv_nsec / divfactor; slack += tv->tv_sec * (NSEC_PER_SEC/divfactor); - if (slack > 100 * NSEC_PER_MSEC) - slack = 100 * NSEC_PER_MSEC; + if (slack > MAX_SLACK) + return MAX_SLACK; - if (slack < 0) - slack = 0; return slack; } diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c index 9468168..71c29b6 100644 --- a/fs/smbfs/proc.c +++ b/fs/smbfs/proc.c @@ -509,7 +509,7 @@ date_unix2dos(struct smb_sb_info *server, month = 2; } else { nl_day = (year & 3) || day <= 59 ? 
day : day - 1; - for (month = 0; month < 12; month++) + for (month = 1; month < 12; month++) if (day_n[month] > nl_day) break; } diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index cb5fc57..6c197ef 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -44,7 +44,7 @@ #include "squashfs.h" static struct file_system_type squashfs_fs_type; -static struct super_operations squashfs_super_ops; +static const struct super_operations squashfs_super_ops; static int supported_squashfs_filesystem(short major, short minor, short comp) { @@ -444,7 +444,7 @@ static struct file_system_type squashfs_fs_type = { .fs_flags = FS_REQUIRES_DEV }; -static struct super_operations squashfs_super_ops = { +static const struct super_operations squashfs_super_ops = { .alloc_inode = squashfs_alloc_inode, .destroy_inode = squashfs_destroy_inode, .statfs = squashfs_statfs, diff --git a/fs/super.c b/fs/super.c --- a/fs/super.c +++ b/fs/super.c @@ -54,7 +54,7 @@ DEFINE_SPINLOCK(sb_lock); static struct super_block *alloc_super(struct file_system_type *type) { struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER); - static struct super_operations default_op; + static const struct super_operations default_op; if (s) { if (security_sb_alloc(s)) { @@ -707,6 +707,12 @@ static int set_bdev_super(struct super_block *s, void *data) { s->s_bdev = data; s->s_dev = s->s_bdev->bd_dev; + + /* + * We set the bdi here to the queue backing, file systems can + * overwrite this in ->fill_super() + */ + s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info; return 0; } diff --git a/fs/sync.c b/fs/sync.c --- a/fs/sync.c +++ b/fs/sync.c @@ -27,6 +27,13 @@ */ static int __sync_filesystem(struct super_block *sb, int wait) { + /* + * This should be safe, as we require bdi backing to actually + * write out data in the first place + */ + if (!sb->s_bdi) + return 0; + /* Avoid doing twice syncing and cache pruning for quota sync */ if (!wait) { writeout_quota_sb(sb, -1); @@ -101,7 +108,7 @@ restart: spin_unlock(&sb_lock); down_read(&sb->s_umount); - if (!(sb->s_flags & MS_RDONLY) && sb->s_root) + if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi) __sync_filesystem(sb, wait); up_read(&sb->s_umount); @@ -176,6 +183,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync) ret = err; return ret; } +EXPORT_SYMBOL(file_fsync); /** * vfs_fsync_range - helper to sync a range of data & metadata to disk diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 1c8991b..076ca50 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -54,29 +54,15 @@ * @nr_to_write: how many dirty pages to write-back * * This function shrinks UBIFS liability by means of writing back some amount - * of dirty inodes and their pages. Returns the amount of pages which were - * written back. The returned value does not include dirty inodes which were - * synchronized. + * of dirty inodes and their pages. * * Note, this function synchronizes even VFS inodes which are locked * (@i_mutex) by the caller of the budgeting function, because write-back does * not touch @i_mutex. */ -static int shrink_liability(struct ubifs_info *c, int nr_to_write) +static void shrink_liability(struct ubifs_info *c, int nr_to_write) { - int nr_written; - - nr_written = writeback_inodes_sb(c->vfs_sb); - if (!nr_written) { - /* - * Re-try again but wait on pages/inodes which are being - * written-back concurrently (e.g., by pdflush).
- */ - nr_written = sync_inodes_sb(c->vfs_sb); - } - - dbg_budg("%d pages were written back", nr_written); - return nr_written; + writeback_inodes_sb(c->vfs_sb); } /** @@ -729,7 +715,7 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c) * ubifs_get_free_space - return amount of free space. * @c: UBIFS file-system description object * - * This function calculates and retuns amount of free space to report to + * This function calculates and returns amount of free space to report to * user-space. */ long long ubifs_get_free_space(struct ubifs_info *c) diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index f3a7945..4775af4 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -510,7 +510,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt; int first = 1, iip; struct ubifs_debug_info *d = c->dbg; - union ubifs_key lower_key, upper_key, l_key, u_key; + union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key; unsigned long long uninitialized_var(last_sqnum); struct ubifs_idx_node *idx; struct list_head list; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index ce2cd83..dbc093a 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -210,6 +210,20 @@ const char *dbg_cstate(int cmt_state) } } +const char *dbg_jhead(int jhead) +{ + switch (jhead) { + case GCHD: + return "0 (GC)"; + case BASEHD: + return "1 (base)"; + case DATAHD: + return "2 (data)"; + default: + return "unknown journal head"; + } +} + static void dump_ch(const struct ubifs_ch *ch) { printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic)); @@ -623,8 +637,9 @@ void dbg_dump_budg(struct ubifs_info *c) /* If we are in R/O mode, journal heads do not exist */ if (c->jheads) for (i = 0; i < c->jhead_cnt; i++) - printk(KERN_DEBUG "\tjhead %d\t LEB %d\n", - c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum); + printk(KERN_DEBUG "\tjhead %s\t LEB %d\n", + dbg_jhead(c->jheads[i].wbuf.jhead), + c->jheads[i].wbuf.lnum); for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) { bud = rb_entry(rb, struct ubifs_bud, rb); printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum); @@ -648,9 +663,90 @@ void dbg_dump_budg(struct ubifs_info *c) void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp) { - printk(KERN_DEBUG "LEB %d lprops: free %d, dirty %d (used %d), " - "flags %#x\n", lp->lnum, lp->free, lp->dirty, - c->leb_size - lp->free - lp->dirty, lp->flags); + int i, spc, dark = 0, dead = 0; + struct rb_node *rb; + struct ubifs_bud *bud; + + spc = lp->free + lp->dirty; + if (spc < c->dead_wm) + dead = spc; + else + dark = ubifs_calc_dark(c, spc); + + if (lp->flags & LPROPS_INDEX) + printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d " + "free + dirty %-8d flags %#x (", lp->lnum, lp->free, + lp->dirty, c->leb_size - spc, spc, lp->flags); + else + printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d " + "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d " + "flags %#-4x (", lp->lnum, lp->free, lp->dirty, + c->leb_size - spc, spc, dark, dead, + (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags); + + if (lp->flags & LPROPS_TAKEN) { + if (lp->flags & LPROPS_INDEX) + printk(KERN_CONT "index, taken"); + else + printk(KERN_CONT "taken"); + } else { + const char *s; + + if (lp->flags & LPROPS_INDEX) { + switch (lp->flags & LPROPS_CAT_MASK) { + case LPROPS_DIRTY_IDX: + s = "dirty index"; + break; + case LPROPS_FRDI_IDX: + s = "freeable index"; + break; + default: + s = "index"; + } + } else { + switch 
(lp->flags & LPROPS_CAT_MASK) { + case LPROPS_UNCAT: + s = "not categorized"; + break; + case LPROPS_DIRTY: + s = "dirty"; + break; + case LPROPS_FREE: + s = "free"; + break; + case LPROPS_EMPTY: + s = "empty"; + break; + case LPROPS_FREEABLE: + s = "freeable"; + break; + default: + s = NULL; + break; + } + } + printk(KERN_CONT "%s", s); + } + + for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) { + bud = rb_entry(rb, struct ubifs_bud, rb); + if (bud->lnum == lp->lnum) { + int head = 0; + for (i = 0; i < c->jhead_cnt; i++) { + if (lp->lnum == c->jheads[i].wbuf.lnum) { + printk(KERN_CONT ", jhead %s", + dbg_jhead(i)); + head = 1; + } + } + if (!head) + printk(KERN_CONT ", bud of jhead %s", + dbg_jhead(bud->jhead)); + } + } + if (lp->lnum == c->gc_lnum) + printk(KERN_CONT ", GC LEB"); + printk(KERN_CONT ")\n"); } void dbg_dump_lprops(struct ubifs_info *c) @@ -724,7 +820,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", current->pid, lnum); - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0); if (IS_ERR(sleb)) { ubifs_err("scan error %d", (int)PTR_ERR(sleb)); return; @@ -909,8 +1005,10 @@ out: ubifs_msg("saved lprops statistics dump"); dbg_dump_lstats(&d->saved_lst); ubifs_get_lp_stats(c, &lst); + ubifs_msg("current lprops statistics dump"); - dbg_dump_lstats(&d->saved_lst); + dbg_dump_lstats(&lst); + spin_lock(&c->space_lock); dbg_dump_budg(c); spin_unlock(&c->space_lock); diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index c1cd73b..29d9601 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -271,6 +271,7 @@ void ubifs_debugging_exit(struct ubifs_info *c); /* Dump functions */ const char *dbg_ntype(int type); const char *dbg_cstate(int cmt_state); +const char *dbg_jhead(int jhead); const char *dbg_get_key_dump(const struct ubifs_info *c, const union ubifs_key *key); void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode); @@ -321,6 +322,8 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, int dbg_check_lprops(struct ubifs_info *c); int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, int row, int col); +int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, + loff_t size); /* Force the use of in-the-gaps method for testing */ @@ -425,6 +428,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define dbg_ntype(type) "" #define dbg_cstate(cmt_state) "" +#define dbg_jhead(jhead) "" #define dbg_get_key_dump(c, key) ({}) #define dbg_dump_inode(c, inode) ({}) #define dbg_dump_node(c, node) ({}) @@ -460,6 +464,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define dbg_check_heap(c, heap, cat, add_pos) ({}) #define dbg_check_lprops(c) 0 #define dbg_check_lpt_nodes(c, cnode, row, col) 0 +#define dbg_check_inode_size(c, inode, size) 0 #define dbg_force_in_the_gaps_enabled 0 #define dbg_force_in_the_gaps() 0 #define dbg_failure_mode 0 diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 6d34dc7..2e6481a 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -21,34 +21,32 @@ */ /* - * This file implements VFS file and inode operations of regular files, device + * This file implements VFS file and inode operations for regular files, device * nodes and symlinks as well as address space operations. * - * UBIFS uses 2 page flags: PG_private and PG_checked. PG_private is set if the - * page is dirty and is used for budgeting purposes - dirty pages should not be - * budgeted. 
The PG_checked flag is set if full budgeting is required for the - * page e.g., when it corresponds to a file hole or it is just beyond the file - * size. The budgeting is done in 'ubifs_write_begin()', because it is OK to - * fail in this function, and the budget is released in 'ubifs_write_end()'. So - * the PG_private and PG_checked flags carry the information about how the page - * was budgeted, to make it possible to release the budget properly. + * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if + * the page is dirty and is used for optimization purposes - dirty pages are + * not budgeted so the flag shows that 'ubifs_write_end()' should not release + * the budget for this page. The @PG_checked flag is set if full budgeting is + * required for the page e.g., when it corresponds to a file hole or it is + * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because + * it is OK to fail in this function, and the budget is released in + * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry + * information about how the page was budgeted, to make it possible to release + * the budget properly. * - * A thing to keep in mind: inode's 'i_mutex' is locked in most VFS operations - * we implement. However, this is not true for '->writepage()', which might be - * called with 'i_mutex' unlocked. For example, when pdflush is performing - * write-back, it calls 'writepage()' with unlocked 'i_mutex', although the - * inode has 'I_LOCK' flag in this case. At "normal" work-paths 'i_mutex' is - * locked in '->writepage', e.g. in "sys_write -> alloc_pages -> direct reclaim - * path'. So, in '->writepage()' we are only guaranteed that the page is - * locked. + * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we + * implement. However, this is not true for 'ubifs_writepage()', which may be + * called with @i_mutex unlocked. For example, when pdflush is doing background + * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal" + * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the + * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()' + * we are only guaranteed that the page is locked. * - * Similarly, 'i_mutex' does not have to be locked in readpage(), e.g., - * readahead path does not have it locked ("sys_read -> generic_file_aio_read - * -> ondemand_readahead -> readpage"). In case of readahead, 'I_LOCK' flag is - * not set as well. However, UBIFS disables readahead. - * - * This, for example means that there might be 2 concurrent '->writepage()' - * calls for the same inode, but different inode dirty pages. + * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the + * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> + * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not + * set as well. However, UBIFS disables readahead. */ #include "ubifs.h" @@ -449,9 +447,9 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, /* * We change whole page so no need to load it. But we * have to set the @PG_checked flag to make the further - * code the page is new. This might be not true, but it - * is better to budget more that to read the page from - * the media. + * code know that the page is new. This might not be + * true, but it is better to budget more than to read + * the page from the media.
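The budgeting convention described in the rewritten comment can be condensed into a small decision helper. The sketch below is illustrative only - budget_new_page() and budget_existing_page() are hypothetical placeholders, not functions from this patch:

	/*
	 * Sketch: how ->write_begin() decides what to budget for a page,
	 * following the @PG_private/@PG_checked convention above. The
	 * budget_*() callees are hypothetical placeholders.
	 */
	static int budget_page_sketch(struct page *page)
	{
		if (PagePrivate(page))
			return 0;			/* dirty already: budget taken earlier */
		if (PageChecked(page))
			return budget_new_page(page);	/* hole or beyond i_size: full budget */
		return budget_existing_page(page);	/* clean page with existing data */
	}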
*/ SetPageChecked(page); skipped_read = 1; @@ -497,8 +495,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, } /* - * Whee, we aquired budgeting quickly - without involving - * garbage-collection, committing or forceing write-back. We return + * Whee, we acquired budgeting quickly - without involving + * garbage-collection, committing or forcing write-back. We return * with @ui->ui_mutex locked if we are appending pages, and unlocked * otherwise. This is an optimization (slightly hacky though). */ @@ -562,7 +560,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, /* * Return 0 to force VFS to repeat the whole operation, or the - * error code if 'do_readpage()' failes. + * error code if 'do_readpage()' fails. */ copied = do_readpage(page); goto out; @@ -1175,11 +1173,11 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, ui->ui_size = inode->i_size; /* Truncation changes inode [mc]time */ inode->i_mtime = inode->i_ctime = ubifs_current_time(inode); - /* The other attributes may be changed at the same time as well */ + /* Other attributes may be changed at the same time as well */ do_attr_changes(inode, attr); - err = ubifs_jnl_truncate(c, inode, old_size, new_size); mutex_unlock(&ui->ui_mutex); + out_budg: if (budgeted) ubifs_release_budget(c, &req); diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index f0f5f15..618c270 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c @@ -529,7 +529,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) * We scan the entire LEB even though we only really need to scan up to * (c->leb_size - lp->free). */ - sleb = ubifs_scan(c, lnum, 0, c->sbuf); + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0); if (IS_ERR(sleb)) return PTR_ERR(sleb); diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 762a7d6..e589fed 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -297,7 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) { struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); - dbg_io("jhead %d", wbuf->jhead); + dbg_io("jhead %s", dbg_jhead(wbuf->jhead)); wbuf->need_sync = 1; wbuf->c->need_wbuf_sync = 1; ubifs_wake_up_bgt(wbuf->c); @@ -314,7 +314,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) if (wbuf->no_timer) return; - dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead, + dbg_io("set timer for jhead %s, %llu-%llu millisecs", + dbg_jhead(wbuf->jhead), div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC), div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta, USEC_PER_SEC)); @@ -351,8 +352,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) /* Write-buffer is empty or not seeked */ return 0; - dbg_io("LEB %d:%d, %d bytes, jhead %d", - wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead); + dbg_io("LEB %d:%d, %d bytes, jhead %s", + wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead)); ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY)); ubifs_assert(!(wbuf->avail & 7)); ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size); @@ -401,7 +402,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, { const struct ubifs_info *c = wbuf->c; - dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead); + dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead)); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); ubifs_assert(offs >= 0 && offs <= c->leb_size); ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); @@ -508,9 +509,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf 
*wbuf, void *buf, int len) struct ubifs_info *c = wbuf->c; int err, written, n, aligned_len = ALIGN(len, 8), offs; - dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len, - dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead, - wbuf->lnum, wbuf->offs + wbuf->used); + dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, + dbg_ntype(((struct ubifs_ch *)buf)->node_type), + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used); ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); @@ -535,8 +536,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) memcpy(wbuf->buf + wbuf->used, buf, len); if (aligned_len == wbuf->avail) { - dbg_io("flush jhead %d wbuf to LEB %d:%d", - wbuf->jhead, wbuf->lnum, wbuf->offs); + dbg_io("flush jhead %s wbuf to LEB %d:%d", + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, c->min_io_size, wbuf->dtype); @@ -564,8 +565,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) * minimal I/O unit. We have to fill and flush write-buffer and switch * to the next min. I/O unit. */ - dbg_io("flush jhead %d wbuf to LEB %d:%d", - wbuf->jhead, wbuf->lnum, wbuf->offs); + dbg_io("flush jhead %s wbuf to LEB %d:%d", + dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, c->min_io_size, wbuf->dtype); @@ -698,8 +699,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int err, rlen, overlap; struct ubifs_ch *ch = buf; - dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs, - dbg_ntype(type), len, wbuf->jhead); + dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs, + dbg_ntype(type), len, dbg_jhead(wbuf->jhead)); ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 64b5f3a..d321bae 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -158,7 +158,7 @@ again: * some. But the write-buffer mutex has to be unlocked because * GC also takes it. */ - dbg_jnl("no free space jhead %d, run GC", jhead); + dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead)); mutex_unlock(&wbuf->io_mutex); lnum = ubifs_garbage_collect(c, 0); @@ -173,7 +173,8 @@ again: * because we dropped @wbuf->io_mutex, so try once * again. 
*/ - dbg_jnl("GC couldn't make a free LEB for jhead %d", jhead); + dbg_jnl("GC couldn't make a free LEB for jhead %s", + dbg_jhead(jhead)); if (retries++ < 2) { dbg_jnl("retry (%d)", retries); goto again; @@ -184,7 +185,7 @@ again: } mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); - dbg_jnl("got LEB %d for jhead %d", lnum, jhead); + dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead)); avail = c->leb_size - wbuf->offs - wbuf->used; if (wbuf->lnum != -1 && avail >= len) { @@ -255,7 +256,8 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len, *lnum = c->jheads[jhead].wbuf.lnum; *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; - dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len); + dbg_jnl("jhead %s, LEB %d:%d, len %d", + dbg_jhead(jhead), *lnum, *offs, len); ubifs_prepare_node(c, node, len, 0); return ubifs_wbuf_write_nolock(wbuf, node, len); @@ -285,7 +287,8 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len, *lnum = c->jheads[jhead].wbuf.lnum; *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used; - dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len); + dbg_jnl("jhead %s, LEB %d:%d, len %d", + dbg_jhead(jhead), *lnum, *offs, len); err = ubifs_wbuf_write_nolock(wbuf, buf, len); if (err) diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h index 5fa27ea..0f530c6 100644 --- a/fs/ubifs/key.h +++ b/fs/ubifs/key.h @@ -229,23 +229,6 @@ static inline void xent_key_init(const struct ubifs_info *c, } /** - * xent_key_init_hash - initialize extended attribute entry key without - * re-calculating hash function. - * @c: UBIFS file-system description object - * @key: key to initialize - * @inum: host inode number - * @hash: extended attribute entry name hash - */ -static inline void xent_key_init_hash(const struct ubifs_info *c, - union ubifs_key *key, ino_t inum, - uint32_t hash) -{ - ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); - key->u32[0] = inum; - key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS); -} - -/** * xent_key_init_flash - initialize on-flash extended attribute entry key. * @c: UBIFS file-system description object * @k: key to initialize @@ -295,22 +278,15 @@ static inline void data_key_init(const struct ubifs_info *c, } /** - * data_key_init_flash - initialize on-flash data key. + * highest_data_key - get the highest possible data key for an inode. 
* @c: UBIFS file-system description object - * @k: key to initialize + * @key: key to initialize * @inum: inode number - * @block: block number */ -static inline void data_key_init_flash(const struct ubifs_info *c, void *k, - ino_t inum, unsigned int block) +static inline void highest_data_key(const struct ubifs_info *c, + union ubifs_key *key, ino_t inum) { - union ubifs_key *key = k; - - ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK)); - key->j32[0] = cpu_to_le32(inum); - key->j32[1] = cpu_to_le32(block | - (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS)); - memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8); + data_key_init(c, key, inum, UBIFS_S_KEY_BLOCK_MASK); } /** @@ -554,4 +530,5 @@ static inline unsigned long long key_max_inode_size(const struct ubifs_info *c) return 0; } } + #endif /* !__UBIFS_KEY_H__ */ diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index 56e3377..c345e12 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -169,8 +169,8 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud) */ c->bud_bytes += c->leb_size - bud->start; - dbg_log("LEB %d:%d, jhead %d, bud_bytes %lld", bud->lnum, - bud->start, bud->jhead, c->bud_bytes); + dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum, + bud->start, dbg_jhead(bud->jhead), c->bud_bytes); spin_unlock(&c->buds_lock); } @@ -355,16 +355,16 @@ static void remove_buds(struct ubifs_info *c) * heads (non-closed buds). */ c->cmt_bud_bytes += wbuf->offs - bud->start; - dbg_log("preserve %d:%d, jhead %d, bud bytes %d, " + dbg_log("preserve %d:%d, jhead %s, bud bytes %d, " "cmt_bud_bytes %lld", bud->lnum, bud->start, - bud->jhead, wbuf->offs - bud->start, + dbg_jhead(bud->jhead), wbuf->offs - bud->start, c->cmt_bud_bytes); bud->start = wbuf->offs; } else { c->cmt_bud_bytes += c->leb_size - bud->start; - dbg_log("remove %d:%d, jhead %d, bud bytes %d, " + dbg_log("remove %d:%d, jhead %s, bud bytes %d, " "cmt_bud_bytes %lld", bud->lnum, bud->start, - bud->jhead, c->leb_size - bud->start, + dbg_jhead(bud->jhead), c->leb_size - bud->start, c->cmt_bud_bytes); rb_erase(p1, &c->buds); /* @@ -429,7 +429,8 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum) if (lnum == -1 || offs == c->leb_size) continue; - dbg_log("add ref to LEB %d:%d for jhead %d", lnum, offs, i); + dbg_log("add ref to LEB %d:%d for jhead %s", + lnum, offs, dbg_jhead(i)); ref = buf + len; ref->ch.node_type = UBIFS_REF_NODE; ref->lnum = cpu_to_le32(lnum); @@ -695,7 +696,7 @@ int ubifs_consolidate_log(struct ubifs_info *c) lnum = c->ltail_lnum; write_lnum = lnum; while (1) { - sleb = ubifs_scan(c, lnum, 0, c->sbuf); + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); goto out_free; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 4cdd284..4d4ca38 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -281,7 +281,7 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, case LPROPS_FREE: if (add_to_lpt_heap(c, lprops, cat)) break; - /* No more room on heap so make it uncategorized */ + /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; /* Fall through */ case LPROPS_UNCAT: @@ -375,8 +375,8 @@ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops, * @lprops: LEB properties * * A LEB may have fallen off of the bottom of a heap, and ended up as - * uncategorized even though it has enough space for us now. If that is the case - * this function will put the LEB back onto a heap. + * un-categorized even though it has enough space for us now. 
If that is the + case this function will put the LEB back onto a heap. */ void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops) { @@ -436,10 +436,10 @@ int ubifs_categorize_lprops(const struct ubifs_info *c, /** * change_category - change LEB properties category. * @c: UBIFS file-system description object - * @lprops: LEB properties to recategorize + * @lprops: LEB properties to re-categorize * * LEB properties are categorized to enable fast find operations. When the LEB - * properties change they must be recategorized. + * properties change they must be re-categorized. */ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) { @@ -461,21 +461,18 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops) } /** - * calc_dark - calculate LEB dark space size. + * ubifs_calc_dark - calculate LEB dark space size. * @c: the UBIFS file-system description object * @spc: amount of free and dirty space in the LEB * - * This function calculates amount of dark space in an LEB which has @spc bytes - * of free and dirty space. Returns the calculations result. + * This function calculates and returns amount of dark space in an LEB which + * has @spc bytes of free and dirty space. * - * Dark space is the space which is not always usable - it depends on which - * nodes are written in which order. E.g., if an LEB has only 512 free bytes, - * it is dark space, because it cannot fit a large data node. So UBIFS cannot - * count on this LEB and treat these 512 bytes as usable because it is not true - * if, for example, only big chunks of uncompressible data will be written to - * the FS. + * UBIFS is trying to account for the space which might not be usable, and this + * space is called "dark space". For example, if an LEB has only %512 free + * bytes, it is dark space, because it cannot fit a large data node. */ -static int calc_dark(struct ubifs_info *c, int spc) +int ubifs_calc_dark(const struct ubifs_info *c, int spc) { ubifs_assert(!(spc & 7)); @@ -518,7 +515,7 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) * @free: new free space amount * @dirty: new dirty space amount * @flags: new flags - * @idx_gc_cnt: change to the count of idx_gc list + * @idx_gc_cnt: change to the count of @idx_gc list * * This function changes LEB properties (@free, @dirty or @flag). However, the * property which has the %LPROPS_NC value is not changed. Returns a pointer to @@ -535,7 +532,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, { /* * This is the only function that is allowed to change lprops, so we - * discard the const qualifier. + * discard the "const" qualifier.
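The notion of dark space is easiest to see with numbers. Below is a small stand-alone model of the dead/dark split that dbg_dump_lprop() prints above; the watermark values are invented for illustration (the body of ubifs_calc_dark() beyond its assertion is not shown in this patch), so treat it as a sketch of the idea, not the real accounting:

	#include <stdio.h>

	#define DEAD_WM   64	/* assumed: smallest write that fits any node */
	#define DARK_WM 4144	/* assumed: size of the largest data node */

	int main(void)
	{
		int spc;

		for (spc = 0; spc <= 8192; spc += 512) {
			int dead = 0, dark = 0;

			if (spc < DEAD_WM)
				dead = spc;	/* too small for any node: dead */
			else if (spc < DARK_WM)
				dark = spc;	/* a large data node cannot fit: dark */
			else
				dark = DARK_WM;	/* at most one large node's worth is dark */
			printf("spc %-5d dead %-4d dark %-4d\n", spc, dead, dark);
		}
		return 0;
	}

A LEB with only 512 free bytes therefore contributes all 512 bytes as dark space: the bytes exist, but a large data node can never land there.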
*/ struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp; @@ -575,7 +572,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, if (old_spc < c->dead_wm) c->lst.total_dead -= old_spc; else - c->lst.total_dark -= calc_dark(c, old_spc); + c->lst.total_dark -= ubifs_calc_dark(c, old_spc); c->lst.total_used -= c->leb_size - old_spc; } @@ -616,7 +613,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, if (new_spc < c->dead_wm) c->lst.total_dead += new_spc; else - c->lst.total_dark += calc_dark(c, new_spc); + c->lst.total_dark += ubifs_calc_dark(c, new_spc); c->lst.total_used += c->leb_size - new_spc; } @@ -1096,7 +1093,7 @@ static int scan_check_cb(struct ubifs_info *c, } } - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0); if (IS_ERR(sleb)) { /* * After an unclean unmount, empty and freeable LEBs @@ -1107,7 +1104,7 @@ "- continuing checking"); lst->empty_lebs += 1; lst->total_free += c->leb_size; - lst->total_dark += calc_dark(c, c->leb_size); + lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } @@ -1117,7 +1114,7 @@ "- continuing checking"); lst->total_free += lp->free; lst->total_dirty += lp->dirty; - lst->total_dark += calc_dark(c, c->leb_size); + lst->total_dark += ubifs_calc_dark(c, c->leb_size); return LPT_SCAN_CONTINUE; } data->err = PTR_ERR(sleb); @@ -1235,7 +1232,7 @@ if (spc < c->dead_wm) lst->total_dead += spc; else - lst->total_dark += calc_dark(c, spc); + lst->total_dark += ubifs_calc_dark(c, spc); } ubifs_scan_destroy(sleb); diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c index a88f338..28beaee 100644 --- a/fs/ubifs/master.c +++ b/fs/ubifs/master.c @@ -29,7 +29,8 @@ * @c: UBIFS file-system description object * * This function scans the master node LEBs and searches for the latest master - * node. Returns zero in case of success and a negative error code in case of + * node. Returns zero in case of success, %-EUCLEAN if the master area is + * corrupted and requires recovery, and a negative error code in case of * failure.
*/ static int scan_for_master(struct ubifs_info *c) @@ -40,7 +41,7 @@ static int scan_for_master(struct ubifs_info *c) lnum = UBIFS_MST_LNUM; - sleb = ubifs_scan(c, lnum, 0, c->sbuf); + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); if (IS_ERR(sleb)) return PTR_ERR(sleb); nodes_cnt = sleb->nodes_cnt; @@ -48,7 +49,7 @@ static int scan_for_master(struct ubifs_info *c) snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list); if (snod->type != UBIFS_MST_NODE) - goto out; + goto out_dump; memcpy(c->mst_node, snod->node, snod->len); offs = snod->offs; } @@ -56,7 +57,7 @@ static int scan_for_master(struct ubifs_info *c) lnum += 1; - sleb = ubifs_scan(c, lnum, 0, c->sbuf); + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); if (IS_ERR(sleb)) return PTR_ERR(sleb); if (sleb->nodes_cnt != nodes_cnt) @@ -65,7 +66,7 @@ static int scan_for_master(struct ubifs_info *c) goto out; snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list); if (snod->type != UBIFS_MST_NODE) - goto out; + goto out_dump; if (snod->offs != offs) goto out; if (memcmp((void *)c->mst_node + UBIFS_CH_SZ, @@ -78,6 +79,12 @@ static int scan_for_master(struct ubifs_info *c) out: ubifs_scan_destroy(sleb); + return -EUCLEAN; + +out_dump: + ubifs_err("unexpected node type %d master LEB %d:%d", + snod->type, lnum, snod->offs); + ubifs_scan_destroy(sleb); return -EINVAL; } @@ -256,7 +263,8 @@ int ubifs_read_master(struct ubifs_info *c) err = scan_for_master(c); if (err) { - err = ubifs_recover_master_node(c); + if (err == -EUCLEAN) + err = ubifs_recover_master_node(c); if (err) /* * Note, we do not free 'c->mst_node' here because the diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 152a7b3..82009c7 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -670,9 +670,10 @@ static int kill_orphans(struct ubifs_info *c) struct ubifs_scan_leb *sleb; dbg_rcvry("LEB %d", lnum); - sleb = ubifs_scan(c, lnum, 0, c->sbuf); + sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); if (IS_ERR(sleb)) { - sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0); + if (PTR_ERR(sleb) == -EUCLEAN) + sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; @@ -899,7 +900,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci) for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { struct ubifs_scan_leb *sleb; - sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index e5f6cf8..f94ddf7 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -286,7 +286,7 @@ int ubifs_recover_master_node(struct ubifs_info *c) mst = mst2; } - dbg_rcvry("recovered master node from LEB %d", + ubifs_msg("recovered master node from LEB %d", (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1)); memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ); @@ -790,7 +790,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, * We can only recover at the end of the log, so check that the * next log LEB is empty or out of date. 
*/ - sleb = ubifs_scan(c, next_lnum, 0, sbuf); + sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0); if (IS_ERR(sleb)) return sleb; if (sleb->nodes_cnt) { diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 2970500..5c2d6d7 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -506,7 +506,7 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead, if (c->need_recovery) sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, jhead != GCHD); else - sleb = ubifs_scan(c, lnum, offs, c->sbuf); + sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); if (IS_ERR(sleb)) return PTR_ERR(sleb); @@ -836,8 +836,8 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) const struct ubifs_cs_node *node; dbg_mnt("replay log LEB %d:%d", lnum, offs); - sleb = ubifs_scan(c, lnum, offs, sbuf); - if (IS_ERR(sleb) ) { + sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery); + if (IS_ERR(sleb)) { if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery) return PTR_ERR(sleb); sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf); diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 892ebfe..96c5253 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -108,10 +108,9 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, /* Make the node pads to 8-byte boundary */ if ((node_len + pad_len) & 7) { - if (!quiet) { + if (!quiet) dbg_err("bad padding length %d - %d", offs, offs + node_len + pad_len); - } return SCANNED_A_BAD_PAD_NODE; } @@ -253,15 +252,19 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, * @c: UBIFS file-system description object * @lnum: logical eraseblock number * @offs: offset to start at (usually zero) - * @sbuf: scan buffer (must be c->leb_size) + * @sbuf: scan buffer (must be of @c->leb_size bytes in size) + * @quiet: print no messages * * This function scans LEB number @lnum and returns complete information about * its contents. Returns the scanned information in case of success, * %-EUCLEAN if the LEB needs recovery, and other negative error codes in case * of failure. + * + * If @quiet is non-zero, this function does not print large and scary + * error messages and flash dumps in case of errors.
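With the new @quiet argument in place, probing callers can scan silently first and only bring in the recovery scanner when the quiet scan reports %-EUCLEAN. The fragment below just condenses the kill_orphans() change from this patch into one place; it introduces no new API:

	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);	/* quiet probe */
	if (IS_ERR(sleb)) {
		if (PTR_ERR(sleb) == -EUCLEAN)
			/* corruption is expected here - try recovery */
			sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb))
			return PTR_ERR(sleb);	/* genuinely fatal */
	}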
*/ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, - int offs, void *sbuf) + int offs, void *sbuf, int quiet) { void *buf = sbuf + offs; int err, len = c->leb_size - offs; @@ -280,7 +283,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, cond_resched(); - ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); + ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet); if (ret > 0) { /* Padding bytes or a valid padding node */ offs += ret; @@ -320,7 +323,9 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, } if (offs % c->min_io_size) { - ubifs_err("empty space starts at non-aligned offset %d", offs); + if (!quiet) + ubifs_err("empty space starts at non-aligned offset %d", + offs); goto corrupted; } @@ -331,18 +336,25 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, break; for (; len; offs++, buf++, len--) if (*(uint8_t *)buf != 0xff) { - ubifs_err("corrupt empty space at LEB %d:%d", - lnum, offs); + if (!quiet) + ubifs_err("corrupt empty space at LEB %d:%d", + lnum, offs); goto corrupted; } return sleb; corrupted: - ubifs_scanned_corruption(c, lnum, offs, buf); + if (!quiet) { + ubifs_scanned_corruption(c, lnum, offs, buf); + ubifs_err("LEB %d scanning failed", lnum); + } err = -EUCLEAN; + ubifs_scan_destroy(sleb); + return ERR_PTR(err); + error: - ubifs_err("LEB %d scanning failed", lnum); + ubifs_err("LEB %d scanning failed, error %d", lnum, err); ubifs_scan_destroy(sleb); return ERR_PTR(err); } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 51763aa..333e181 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -36,7 +36,6 @@ #include <linux/mount.h> #include <linux/math64.h> #include <linux/writeback.h> -#include <linux/smp_lock.h> #include "ubifs.h" /* @@ -318,6 +317,8 @@ static int ubifs_write_inode(struct inode *inode, int wait) if (err) ubifs_err("can't write inode %lu, error %d", inode->i_ino, err); + else + err = dbg_check_inode_size(c, inode, ui->ui_size); } ui->dirty = 0; @@ -448,17 +449,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) return 0; /* - * VFS calls '->sync_fs()' before synchronizing all dirty inodes and - * pages, so synchronize them first, then commit the journal. Strictly - * speaking, it is not necessary to commit the journal here, - * synchronizing write-buffers would be enough. But committing makes - * UBIFS free space predictions much more accurate, so we want to let - * the user be able to get more accurate results of 'statfs()' after - * they synchronize the file system. - */ - sync_inodes_sb(sb); - - /* * Synchronize write buffers, because 'ubifs_run_commit()' does not * do this if it waits for an already running commit. */ @@ -468,6 +458,13 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) return err; } + /* + * Strictly speaking, it is not necessary to commit the journal here, + * synchronizing write-buffers would be enough. But committing makes + * UBIFS free space predictions much more accurate, so we want to let + * the user be able to get more accurate results of 'statfs()' after + * they synchronize the file system. + */ err = ubifs_run_commit(c); if (err) return err; @@ -1720,8 +1717,6 @@ static void ubifs_put_super(struct super_block *sb) ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num, c->vi.vol_id); - lock_kernel(); - /* * The following asserts are only valid if there has not been a failure * of the media.
For example, there will be dirty inodes if we failed @@ -1786,8 +1781,6 @@ static void ubifs_put_super(struct super_block *sb) ubi_close_volume(c->ubi); mutex_unlock(&c->umount_mutex); kfree(c); - - unlock_kernel(); } static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) @@ -1803,22 +1796,17 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) return err; } - lock_kernel(); if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { if (c->ro_media) { ubifs_msg("cannot re-mount due to prior errors"); - unlock_kernel(); return -EROFS; } err = ubifs_remount_rw(c); - if (err) { - unlock_kernel(); + if (err) return err; - } } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) { if (c->ro_media) { ubifs_msg("cannot re-mount due to prior errors"); - unlock_kernel(); return -EROFS; } ubifs_remount_ro(c); @@ -1833,7 +1821,6 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) } ubifs_assert(c->lst.taken_empty_lebs > 0); - unlock_kernel(); return 0; } @@ -1980,6 +1967,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_bdi; + sb->s_bdi = &c->bdi; sb->s_fs_info = c; sb->s_magic = UBIFS_SUPER_MAGIC; sb->s_blocksize = UBIFS_BLOCK_SIZE; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index f249f7b..e5b1a7d 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -1159,8 +1159,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c, * o exact match, i.e. the found zero-level znode contains key @key, then %1 * is returned and slot number of the matched branch is stored in @n; * o not exact match, which means that zero-level znode does not contain - * @key, then %0 is returned and slot number of the closed branch is stored - * in @n; + * @key, then %0 is returned and slot number of the closest branch is stored + * in @n; * o @key is so small that it is even less than the lowest key of the * leftmost zero-level node, then %0 is returned and %0 is stored in @n. * @@ -1433,7 +1433,7 @@ static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1) * @lnum: LEB number is returned here * @offs: offset is returned here * - * This function look up and reads node with key @key. The caller has to make + * This function looks up and reads node with key @key. The caller has to make * sure the @node buffer is large enough to fit the node. Returns zero in case * of success, %-ENOENT if the node was not found, and a negative error code in * case of failure. The node location can be returned in @lnum and @offs. @@ -3268,3 +3268,73 @@ out_unlock: mutex_unlock(&c->tnc_mutex); return err; } + +#ifdef CONFIG_UBIFS_FS_DEBUG + +/** + * dbg_check_inode_size - check if inode size is correct. + * @c: UBIFS file-system description object + * @inode: inode to check + * @size: inode size + * + * This function makes sure that the inode size (@size) is correct and it does + * not have any pages beyond @size. Returns zero if the inode is OK, %-EINVAL + * if it has a data page beyond @size, and other negative error codes in case of + * other errors.
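The check starts from the first block number that lies wholly beyond @size, rounding the size up to UBIFS block granularity. A tiny stand-alone demonstration of that computation, assuming the standard 4096-byte UBIFS block (UBIFS_BLOCK_SHIFT == 12):

	#include <stdio.h>

	#define UBIFS_BLOCK_SHIFT 12
	#define UBIFS_BLOCK_SIZE  (1 << UBIFS_BLOCK_SHIFT)

	int main(void)
	{
		long long sizes[] = { 0, 1, 4096, 4097, 10000 };
		int i;

		for (i = 0; i < 5; i++) {
			/* first block that must contain no data for this size */
			unsigned int block = (sizes[i] + UBIFS_BLOCK_SIZE - 1)
						>> UBIFS_BLOCK_SHIFT;

			printf("size %-5lld -> first out-of-range block %u\n",
			       sizes[i], block);
		}
		return 0;
	}

So a 10000-byte file may own data blocks 0..2; a data node keyed for block 3 or above makes the check fail with %-EINVAL.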
+ */ +int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, + loff_t size) +{ + int err, n; + union ubifs_key from_key, to_key, *key; + struct ubifs_znode *znode; + unsigned int block; + + if (!S_ISREG(inode->i_mode)) + return 0; + if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + return 0; + + block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; + data_key_init(c, &from_key, inode->i_ino, block); + highest_data_key(c, &to_key, inode->i_ino); + + mutex_lock(&c->tnc_mutex); + err = ubifs_lookup_level0(c, &from_key, &znode, &n); + if (err < 0) + goto out_unlock; + + if (err) { + err = -EINVAL; + key = &from_key; + goto out_dump; + } + + err = tnc_next(c, &znode, &n); + if (err == -ENOENT) { + err = 0; + goto out_unlock; + } + if (err < 0) + goto out_unlock; + + ubifs_assert(err == 0); + key = &znode->zbranch[n].key; + if (!key_in_range(c, key, &from_key, &to_key)) + goto out_unlock; + +out_dump: + block = key_block(c, key); + ubifs_err("inode %lu has size %lld, but there are data at offset %lld " + "(data key %s)", (unsigned long)inode->i_ino, size, + ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key)); + dbg_dump_inode(c, inode); + dbg_dump_stack(); + err = -EINVAL; + +out_unlock: + mutex_unlock(&c->tnc_mutex); + return err; +} + +#endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index fde8d12..53288e5 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -245,7 +245,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p) * it is more comprehensive and less efficient than is needed for this * purpose. */ - sleb = ubifs_scan(c, lnum, 0, c->ileb_buf); + sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0); c->ileb_len = 0; if (IS_ERR(sleb)) return PTR_ERR(sleb); diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index 3eee07e..191ca78 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h @@ -135,6 +135,13 @@ /* The key is always at the same position in all keyed nodes */ #define UBIFS_KEY_OFFSET offsetof(struct ubifs_ino_node, key) +/* Garbage collector journal head number */ +#define UBIFS_GC_HEAD 0 +/* Base journal head number */ +#define UBIFS_BASE_HEAD 1 +/* Data journal head number */ +#define UBIFS_DATA_HEAD 2 + /* * LEB Properties Tree node types. 
* diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a293490..b2d9763 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -105,12 +105,10 @@ /* Number of non-data journal heads */ #define NONDATA_JHEADS_CNT 2 -/* Garbage collector head */ -#define GCHD 0 -/* Base journal head number */ -#define BASEHD 1 -/* First "general purpose" journal head */ -#define DATAHD 2 +/* Shorter names for journal head numbers for internal usage */ +#define GCHD UBIFS_GC_HEAD +#define BASEHD UBIFS_BASE_HEAD +#define DATAHD UBIFS_DATA_HEAD /* 'No change' value for 'ubifs_change_lp()' */ #define LPROPS_NC 0x80000001 @@ -1451,7 +1449,7 @@ int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode); /* scan.c */ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum, - int offs, void *sbuf); + int offs, void *sbuf, int quiet); void ubifs_scan_destroy(struct ubifs_scan_leb *sleb); int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum, int offs, int quiet); @@ -1676,6 +1674,7 @@ const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c); const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c); const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c); const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c); +int ubifs_calc_dark(const struct ubifs_info *c, int spc); /* file.c */ int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync); diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index adafcf5..195830f 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -78,9 +78,9 @@ enum { SECURITY_XATTR, }; -static struct inode_operations none_inode_operations; -static struct address_space_operations none_address_operations; -static struct file_operations none_file_operations; +static const struct inode_operations none_inode_operations; +static const struct address_space_operations none_address_operations; +static const struct file_operations none_file_operations; /** * create_xattr - create an extended attribute. diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 52f3fc6..3818544 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -216,7 +216,6 @@ xfs_setfilesize( if (ip->i_d.di_size < isize) { ip->i_d.di_size = isize; ip->i_update_core = 1; - ip->i_update_size = 1; xfs_mark_inode_dirty_sync(ip); } diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 0542fd5..988d8f8 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -172,12 +172,21 @@ xfs_file_release( */ STATIC int xfs_file_fsync( - struct file *filp, - struct dentry *dentry, - int datasync) + struct file *file, + struct dentry *dentry, + int datasync) { - xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED); - return -xfs_fsync(XFS_I(dentry->d_inode)); + struct inode *inode = dentry->d_inode; + struct xfs_inode *ip = XFS_I(inode); + int error; + + /* capture size updates in I/O completion before writing the inode. 
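The xfs_file_fsync() rework in this hunk hinges on ordering: XFS updates the on-disk file size from its I/O completion handlers, so logging the inode while data is still in flight could persist a stale size. Restated in isolation as a hedged sketch (flush_inode_metadata() is a hypothetical placeholder standing in for the -xfs_fsync(ip) call in the hunk):

	/* the ordering the new fsync path enforces */
	static int fsync_ordering_sketch(struct inode *inode)
	{
		/* 1. wait for in-flight data I/O; completion updates i_size */
		int error = filemap_fdatawait(inode->i_mapping);

		if (error)
			return error;

		/* 2. only now write back the (now current) inode metadata */
		return flush_inode_metadata(inode);	/* hypothetical placeholder */
	}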
*/ + error = filemap_fdatawait(inode->i_mapping); + if (error) + return error; + + xfs_iflags_clear(ip, XFS_ITRUNCATED); + return -xfs_fsync(ip); } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 6c32f1d..da0159d 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -43,7 +43,6 @@ #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_rw.h" -#include "xfs_acl.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index fde63a3..49e4a6a 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -812,19 +812,21 @@ write_retry: /* Handle various SYNC-type writes */ if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) { + loff_t end = pos + ret - 1; int error2; xfs_iunlock(xip, iolock); if (need_i_mutex) mutex_unlock(&inode->i_mutex); - error2 = filemap_write_and_wait_range(mapping, pos, - pos + ret - 1); + + error2 = filemap_write_and_wait_range(mapping, pos, end); if (!error) error = error2; if (need_i_mutex) mutex_lock(&inode->i_mutex); xfs_ilock(xip, iolock); - error2 = xfs_write_sync_logforce(mp, xip); + + error2 = xfs_fsync(xip); if (!error) error = error2; } diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index cb6e2cc..9e41f91 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c @@ -150,7 +150,7 @@ xfs_fs_set_xquota( return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); } -struct quotactl_ops xfs_quotactl_operations = { +const struct quotactl_ops xfs_quotactl_operations = { .quota_sync = xfs_fs_quota_sync, .get_xstate = xfs_fs_get_xstate, .set_xstate = xfs_fs_set_xstate, diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index c3526d4..76fdc58 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c @@ -20,16 +20,9 @@ DEFINE_PER_CPU(struct xfsstats, xfsstats); -STATIC int -xfs_read_xfsstats( - char *buffer, - char **start, - off_t offset, - int count, - int *eof, - void *data) +static int xfs_stat_proc_show(struct seq_file *m, void *v) { - int c, i, j, len, val; + int c, i, j, val; __uint64_t xs_xstrat_bytes = 0; __uint64_t xs_write_bytes = 0; __uint64_t xs_read_bytes = 0; @@ -60,18 +53,18 @@ xfs_read_xfsstats( }; /* Loop over all stats groups */ - for (i=j=len = 0; i < ARRAY_SIZE(xstats); i++) { - len += sprintf(buffer + len, "%s", xstats[i].desc); + for (i=j = 0; i < ARRAY_SIZE(xstats); i++) { + seq_printf(m, "%s", xstats[i].desc); /* inner loop does each group */ while (j < xstats[i].endpoint) { val = 0; /* sum over all cpus */ for_each_possible_cpu(c) val += *(((__u32*)&per_cpu(xfsstats, c) + j)); - len += sprintf(buffer + len, " %u", val); + seq_printf(m, " %u", val); j++; } - buffer[len++] = '\n'; + seq_putc(m, '\n'); } /* extra precision counters */ for_each_possible_cpu(i) { @@ -80,36 +73,38 @@ xfs_read_xfsstats( xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; } - len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n", + seq_printf(m, "xpc %Lu %Lu %Lu\n", xs_xstrat_bytes, xs_write_bytes, xs_read_bytes); - len += sprintf(buffer + len, "debug %u\n", + seq_printf(m, "debug %u\n", #if defined(DEBUG) 1); #else 0); #endif + return 0; +} - if (offset >= len) { - *start = buffer; - *eof = 1; - return 0; - } - *start = buffer + offset; - if ((len -= offset) > count) - return count; - *eof = 1; - - return len; +static int xfs_stat_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, 
xfs_stat_proc_show, NULL); } +static const struct file_operations xfs_stat_proc_fops = { + .owner = THIS_MODULE, + .open = xfs_stat_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + int xfs_init_procfs(void) { if (!proc_mkdir("fs/xfs", NULL)) goto out; - if (!create_proc_read_entry("fs/xfs/stat", 0, NULL, - xfs_read_xfsstats, NULL)) + if (!proc_create("fs/xfs/stat", 0, NULL, + &xfs_stat_proc_fops)) goto out_remove_entry; return 0; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index a220d36..bdd41c8 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -67,7 +67,7 @@ #include <linux/freezer.h> #include <linux/parser.h> -static struct super_operations xfs_super_operations; +static const struct super_operations xfs_super_operations; static kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; @@ -579,15 +579,19 @@ xfs_showargs( else if (mp->m_qflags & XFS_UQUOTA_ACCT) seq_puts(m, "," MNTOPT_UQUOTANOENF); - if (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) - seq_puts(m, "," MNTOPT_PRJQUOTA); - else if (mp->m_qflags & XFS_PQUOTA_ACCT) - seq_puts(m, "," MNTOPT_PQUOTANOENF); - - if (mp->m_qflags & (XFS_GQUOTA_ACCT|XFS_OQUOTA_ENFD)) - seq_puts(m, "," MNTOPT_GRPQUOTA); - else if (mp->m_qflags & XFS_GQUOTA_ACCT) - seq_puts(m, "," MNTOPT_GQUOTANOENF); + /* Either project or group quotas can be active, not both */ + + if (mp->m_qflags & XFS_PQUOTA_ACCT) { + if (mp->m_qflags & XFS_OQUOTA_ENFD) + seq_puts(m, "," MNTOPT_PRJQUOTA); + else + seq_puts(m, "," MNTOPT_PQUOTANOENF); + } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { + if (mp->m_qflags & XFS_OQUOTA_ENFD) + seq_puts(m, "," MNTOPT_GRPQUOTA); + else + seq_puts(m, "," MNTOPT_GQUOTANOENF); + } if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) seq_puts(m, "," MNTOPT_NOQUOTA); @@ -687,7 +691,7 @@ xfs_barrier_test( return error; } -void +STATIC void xfs_mountfs_check_barriers(xfs_mount_t *mp) { int error; @@ -1532,7 +1536,7 @@ xfs_fs_get_sb( mnt); } -static struct super_operations xfs_super_operations = { +static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, .write_inode = xfs_fs_write_inode, diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 5a2ea3a..18175eb 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -93,7 +93,7 @@ extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); extern const struct export_operations xfs_export_operations; extern struct xattr_handler *xfs_xattr_handlers[]; -extern struct quotactl_ops xfs_quotactl_operations; +extern const struct quotactl_ops xfs_quotactl_operations; #define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 98ef624..320be6a 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -749,21 +749,6 @@ __xfs_inode_clear_reclaim_tag( XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); } -void -xfs_inode_clear_reclaim_tag( - xfs_inode_t *ip) -{ - xfs_mount_t *mp = ip->i_mount; - xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); - - read_lock(&pag->pag_ici_lock); - spin_lock(&ip->i_flags_lock); - __xfs_inode_clear_reclaim_tag(mp, pag, ip); - spin_unlock(&ip->i_flags_lock); - read_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); -} - STATIC int xfs_reclaim_inode_now( struct xfs_inode *ip, diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 5912060..27920eb 100644 
--- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -49,7 +49,6 @@ int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); -void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, struct xfs_inode *ip); diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index 916c0ff..c5bc67c 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -26,7 +26,6 @@ STATIC int xfs_stats_clear_proc_handler( ctl_table *ctl, int write, - struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -34,7 +33,7 @@ xfs_stats_clear_proc_handler( int c, ret, *valp = ctl->data; __uint32_t vn_active; - ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp, ppos); + ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); if (!ret && write && *valp) { printk("XFS Clearing xfsstats\n"); diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c index 21b08c0..83e7ea3 100644 --- a/fs/xfs/quota/xfs_qm_stats.c +++ b/fs/xfs/quota/xfs_qm_stats.c @@ -48,50 +48,34 @@ struct xqmstats xqmstats; -STATIC int -xfs_qm_read_xfsquota( - char *buffer, - char **start, - off_t offset, - int count, - int *eof, - void *data) +static int xqm_proc_show(struct seq_file *m, void *v) { - int len; - /* maximum; incore; ratio free to inuse; freelist */ - len = sprintf(buffer, "%d\t%d\t%d\t%u\n", + seq_printf(m, "%d\t%d\t%d\t%u\n", ndquot, xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0); - - if (offset >= len) { - *start = buffer; - *eof = 1; - return 0; - } - *start = buffer + offset; - if ((len -= offset) > count) - return count; - *eof = 1; - - return len; + return 0; } -STATIC int -xfs_qm_read_stats( - char *buffer, - char **start, - off_t offset, - int count, - int *eof, - void *data) +static int xqm_proc_open(struct inode *inode, struct file *file) { - int len; + return single_open(file, xqm_proc_show, NULL); +} + +static const struct file_operations xqm_proc_fops = { + .owner = THIS_MODULE, + .open = xqm_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +static int xqmstat_proc_show(struct seq_file *m, void *v) +{ /* quota performance statistics */ - len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n", + seq_printf(m, "qm %u %u %u %u %u %u %u %u\n", xqmstats.xs_qm_dqreclaims, xqmstats.xs_qm_dqreclaim_misses, xqmstats.xs_qm_dquot_dups, @@ -100,25 +84,27 @@ xfs_qm_read_stats( xqmstats.xs_qm_dqwants, xqmstats.xs_qm_dqshake_reclaims, xqmstats.xs_qm_dqinact_reclaims); + return 0; +} - if (offset >= len) { - *start = buffer; - *eof = 1; - return 0; - } - *start = buffer + offset; - if ((len -= offset) > count) - return count; - *eof = 1; - - return len; +static int xqmstat_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, xqmstat_proc_show, NULL); } +static const struct file_operations xqmstat_proc_fops = { + .owner = THIS_MODULE, + .open = xqmstat_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + void xfs_qm_init_procfs(void) { - create_proc_read_entry("fs/xfs/xqmstat", 0, NULL, xfs_qm_read_stats, NULL); - create_proc_read_entry("fs/xfs/xqm", 0, NULL, xfs_qm_read_xfsquota, NULL); + proc_create("fs/xfs/xqmstat", 0, NULL, &xqmstat_proc_fops); + 
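The xfs_stats.c and xfs_qm_stats.c hunks above convert the old read_proc handlers, with their hand-rolled *start/*eof/offset arithmetic, to the seq_file single_open() pattern, and the neighbouring hunks const-ify the operations tables. A minimal sketch of the same pattern as a hypothetical out-of-tree module follows; the foo_* names and the proc file name are invented, and only proc_create(), single_open(), seq_read(), seq_lseek() and single_release() from a kernel of this vintage are assumed:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* the only piece the driver has to write: a show() callback */
static int foo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example %u\n", 42);
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, NULL);
}

static const struct file_operations foo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_init(void)
{
	if (!proc_create("foo_example", 0, NULL, &foo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_exit(void)
{
	remove_proc_entry("foo_example", NULL);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

seq_read() and seq_lseek() handle partial reads and seeking generically, which is exactly the bookkeeping the deleted tail of xfs_read_xfsstats() tried to do by hand.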
proc_create("fs/xfs/xqm", 0, NULL, &xqm_proc_fops); } void diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index f24b50b..a5d54bf 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -198,6 +198,15 @@ typedef struct xfs_perag xfs_agino_t pagi_count; /* number of allocated inodes */ int pagb_count; /* pagb slots in use */ xfs_perag_busy_t *pagb_list; /* unstable blocks */ + + /* + * Inode allocation search lookup optimisation. + * If the pagino matches, the search for new inodes + * doesn't need to search the near ones again straight away + */ + xfs_agino_t pagl_pagino; + xfs_agino_t pagl_leftrec; + xfs_agino_t pagl_rightrec; #ifdef __KERNEL__ spinlock_t pagb_lock; /* lock for pagb_list */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 8ee5b5a..8971fb0 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -3713,7 +3713,7 @@ done: * entry (null if none). Else, *lastxp will be set to the index * of the found entry; *gotp will contain the entry. */ -xfs_bmbt_rec_host_t * /* pointer to found extent entry */ +STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */ xfs_bmap_search_multi_extents( xfs_ifork_t *ifp, /* inode fork pointer */ xfs_fileoff_t bno, /* block number searched for */ diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 1b8ff92..56f62d2 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -392,17 +392,6 @@ xfs_bmap_count_blocks( int whichfork, int *count); -/* - * Search the extent records for the entry containing block bno. - * If bno lies in a hole, point to the next entry. If bno lies - * past eof, *eofp will be set, and *prevp will contain the last - * entry (null if none). Else, *lastxp will be set to the index - * of the found entry; *gotp will contain the entry. - */ -xfs_bmbt_rec_host_t * -xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, - xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); - #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_H__ */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 5c1ade0..eb7b702 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -202,16 +202,6 @@ xfs_bmbt_get_state( ext_flag); } -/* Endian flipping versions of the bmbt extraction functions */ -void -xfs_bmbt_disk_get_all( - xfs_bmbt_rec_t *r, - xfs_bmbt_irec_t *s) -{ - __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), - get_unaligned_be64(&r->l1), s); -} - /* * Extract the blockcount field from an on disk bmap extent record. 
*/ @@ -816,6 +806,16 @@ xfs_bmbt_trace_key( *l1 = 0; } +/* Endian flipping versions of the bmbt extraction functions */ +STATIC void +xfs_bmbt_disk_get_all( + xfs_bmbt_rec_t *r, + xfs_bmbt_irec_t *s) +{ + __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), + get_unaligned_be64(&r->l1), s); +} + STATIC void xfs_bmbt_trace_record( struct xfs_btree_cur *cur, diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 0e8df00..5549d49 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -220,7 +220,6 @@ extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r); extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r); extern xfs_exntst_t xfs_bmbt_get_state(xfs_bmbt_rec_host_t *r); -extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r); extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 2671738..52b5f14 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -646,46 +646,6 @@ xfs_btree_read_bufl( } /* - * Get a buffer for the block, return it read in. - * Short-form addressing. - */ -int /* error */ -xfs_btree_read_bufs( - xfs_mount_t *mp, /* file system mount point */ - xfs_trans_t *tp, /* transaction pointer */ - xfs_agnumber_t agno, /* allocation group number */ - xfs_agblock_t agbno, /* allocation group block number */ - uint lock, /* lock flags for read_buf */ - xfs_buf_t **bpp, /* buffer for agno/agbno */ - int refval) /* ref count value for buffer */ -{ - xfs_buf_t *bp; /* return value */ - xfs_daddr_t d; /* real disk block address */ - int error; - - ASSERT(agno != NULLAGNUMBER); - ASSERT(agbno != NULLAGBLOCK); - d = XFS_AGB_TO_DADDR(mp, agno, agbno); - if ((error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, - mp->m_bsize, lock, &bp))) { - return error; - } - ASSERT(!bp || !XFS_BUF_GETERROR(bp)); - if (bp != NULL) { - switch (refval) { - case XFS_ALLOC_BTREE_REF: - XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); - break; - case XFS_INO_BTREE_REF: - XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, refval); - break; - } - } - *bpp = bp; - return 0; -} - -/* * Read-ahead the block, don't wait for it, don't return a buffer. * Long-form addressing. */ @@ -2951,7 +2911,7 @@ error0: * inode we have to copy the single block it was pointing to into the * inode. */ -int +STATIC int xfs_btree_kill_iroot( struct xfs_btree_cur *cur) { diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 4f852b7..7fa0706 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -379,20 +379,6 @@ xfs_btree_read_bufl( int refval);/* ref count value for buffer */ /* - * Get a buffer for the block, return it read in. - * Short-form addressing. - */ -int /* error */ -xfs_btree_read_bufs( - struct xfs_mount *mp, /* file system mount point */ - struct xfs_trans *tp, /* transaction pointer */ - xfs_agnumber_t agno, /* allocation group number */ - xfs_agblock_t agbno, /* allocation group block number */ - uint lock, /* lock flags for read_buf */ - struct xfs_buf **bpp, /* buffer for agno/agbno */ - int refval);/* ref count value for buffer */ - -/* * Read-ahead the block, don't wait for it, don't return a buffer. * Long-form addressing. 
*/ @@ -432,7 +418,6 @@ int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); -int xfs_btree_kill_iroot(struct xfs_btree_cur *); int xfs_btree_insert(struct xfs_btree_cur *, int *); int xfs_btree_delete(struct xfs_btree_cur *, int *); int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *); diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index c4ea51b..f52ac27 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -117,7 +117,7 @@ struct getbmapx { #define BMV_IF_VALID \ (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) -/* bmv_oflags values - returned for for each non-header segment */ +/* bmv_oflags values - returned for each non-header segment */ #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ #define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */ #define BMV_OF_LAST 0x4 /* segment is the last in the file */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 3120a3a..ab64f3e 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -57,75 +57,35 @@ xfs_ialloc_cluster_alignment( } /* - * Lookup the record equal to ino in the btree given by cur. - */ -STATIC int /* error */ -xfs_inobt_lookup_eq( - struct xfs_btree_cur *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ - int *stat) /* success/failure */ -{ - cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); -} - -/* - * Lookup the first record greater than or equal to ino - * in the btree given by cur. + * Lookup a record by ino in the btree given by cur. */ int /* error */ -xfs_inobt_lookup_ge( +xfs_inobt_lookup( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ + xfs_lookup_t dir, /* <=, >=, == */ int *stat) /* success/failure */ { cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); + cur->bc_rec.i.ir_freecount = 0; + cur->bc_rec.i.ir_free = 0; + return xfs_btree_lookup(cur, dir, stat); } /* - * Lookup the first record less than or equal to ino - * in the btree given by cur. - */ -int /* error */ -xfs_inobt_lookup_le( - struct xfs_btree_cur *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ - int *stat) /* success/failure */ -{ - cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); -} - -/* - * Update the record referred to by cur to the value given - * by [ino, fcnt, free]. + * Update the record referred to by cur to the value given. * This either works (return 0) or gets an EFSCORRUPTED error. 
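Folding xfs_inobt_lookup_eq/_ge/_le into one xfs_inobt_lookup() that takes an xfs_lookup_t direction removes three near-identical bodies along with the always-zero fcnt/free key arguments. A standalone userspace sketch of the same consolidation over a sorted array; the enum, the names, and the array stand-in for the btree are illustrative, not the kernel API:

#include <stdio.h>

enum lookup_dir { LOOKUP_LE, LOOKUP_EQ, LOOKUP_GE };

/*
 * One binary search with a direction argument instead of three
 * near-identical variants; *idx receives the hit and the return
 * value plays the role of the btree code's *stat (1 = found).
 */
static int lookup(const int *keys, int n, int key, enum lookup_dir dir,
		  int *idx)
{
	int lo = 0, hi = n - 1, best = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (keys[mid] == key) {
			best = mid;	/* exact hit satisfies every dir */
			break;
		}
		if (keys[mid] < key) {
			if (dir == LOOKUP_LE)
				best = mid;	/* best candidate below key */
			lo = mid + 1;
		} else {
			if (dir == LOOKUP_GE)
				best = mid;	/* best candidate above key */
			hi = mid - 1;
		}
	}
	*idx = best;
	return best >= 0;
}

int main(void)
{
	int keys[] = { 0, 64, 128, 192 };
	int i;

	if (lookup(keys, 4, 100, LOOKUP_LE, &i))
		printf("LE 100 -> %d\n", keys[i]);	/* prints 64 */
	if (lookup(keys, 4, 100, LOOKUP_GE, &i))
		printf("GE 100 -> %d\n", keys[i]);	/* prints 128 */
	return 0;
}

Call sites then read like the reworked kernel code, "error = lookup(...); if (error)", with the search direction explicit at each caller.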
*/ STATIC int /* error */ xfs_inobt_update( struct xfs_btree_cur *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free) /* free inode mask */ + xfs_inobt_rec_incore_t *irec) /* btree record */ { union xfs_btree_rec rec; - rec.inobt.ir_startino = cpu_to_be32(ino); - rec.inobt.ir_freecount = cpu_to_be32(fcnt); - rec.inobt.ir_free = cpu_to_be64(free); + rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino); + rec.inobt.ir_freecount = cpu_to_be32(irec->ir_freecount); + rec.inobt.ir_free = cpu_to_be64(irec->ir_free); return xfs_btree_update(cur, &rec); } @@ -135,9 +95,7 @@ xfs_inobt_update( int /* error */ xfs_inobt_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ - xfs_agino_t *ino, /* output: starting inode of chunk */ - __int32_t *fcnt, /* output: number of free inodes */ - xfs_inofree_t *free, /* output: free inode mask */ + xfs_inobt_rec_incore_t *irec, /* btree record */ int *stat) /* output: success/failure */ { union xfs_btree_rec *rec; @@ -145,14 +103,136 @@ xfs_inobt_get_rec( error = xfs_btree_get_rec(cur, &rec, stat); if (!error && *stat == 1) { - *ino = be32_to_cpu(rec->inobt.ir_startino); - *fcnt = be32_to_cpu(rec->inobt.ir_freecount); - *free = be64_to_cpu(rec->inobt.ir_free); + irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino); + irec->ir_freecount = be32_to_cpu(rec->inobt.ir_freecount); + irec->ir_free = be64_to_cpu(rec->inobt.ir_free); } return error; } /* + * Verify that the number of free inodes in the AGI is correct. + */ +#ifdef DEBUG +STATIC int +xfs_check_agi_freecount( + struct xfs_btree_cur *cur, + struct xfs_agi *agi) +{ + if (cur->bc_nlevels == 1) { + xfs_inobt_rec_incore_t rec; + int freecount = 0; + int error; + int i; + + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); + if (error) + return error; + + do { + error = xfs_inobt_get_rec(cur, &rec, &i); + if (error) + return error; + + if (i) { + freecount += rec.ir_freecount; + error = xfs_btree_increment(cur, 0, &i); + if (error) + return error; + } + } while (i == 1); + + if (!XFS_FORCED_SHUTDOWN(cur->bc_mp)) + ASSERT(freecount == be32_to_cpu(agi->agi_freecount)); + } + return 0; +} +#else +#define xfs_check_agi_freecount(cur, agi) 0 +#endif + +/* + * Initialise a new set of inodes. + */ +STATIC void +xfs_ialloc_inode_init( + struct xfs_mount *mp, + struct xfs_trans *tp, + xfs_agnumber_t agno, + xfs_agblock_t agbno, + xfs_agblock_t length, + unsigned int gen) +{ + struct xfs_buf *fbuf; + struct xfs_dinode *free; + int blks_per_cluster, nbufs, ninodes; + int version; + int i, j; + xfs_daddr_t d; + + /* + * Loop over the new block(s), filling in the inodes. + * For small block sizes, manipulate the inodes in buffers + * which are multiples of the blocks size. + */ + if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { + blks_per_cluster = 1; + nbufs = length; + ninodes = mp->m_sb.sb_inopblock; + } else { + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) / + mp->m_sb.sb_blocksize; + nbufs = length / blks_per_cluster; + ninodes = blks_per_cluster * mp->m_sb.sb_inopblock; + } + + /* + * Figure out what version number to use in the inodes we create. + * If the superblock version has caught up to the one that supports + * the new inode format, then use the new inode version. Otherwise + * use the old version so that old kernels will continue to be + * able to use the file system. + */ + if (xfs_sb_version_hasnlink(&mp->m_sb)) + version = 2; + else + version = 1; + + for (j = 0; j < nbufs; j++) { + /* + * Get the block. 
+ */ + d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster)); + fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, + mp->m_bsize * blks_per_cluster, + XFS_BUF_LOCK); + ASSERT(fbuf); + ASSERT(!XFS_BUF_GETERROR(fbuf)); + + /* + * Initialize all inodes in this buffer and then log them. + * + * XXX: It would be much better if we had just one transaction + * to log a whole cluster of inodes instead of all the + * individual transactions causing a lot of log traffic. + */ + xfs_biozero(fbuf, 0, ninodes << mp->m_sb.sb_inodelog); + for (i = 0; i < ninodes; i++) { + int ioffset = i << mp->m_sb.sb_inodelog; + uint isize = sizeof(struct xfs_dinode); + + free = xfs_make_iptr(mp, fbuf, i); + free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); + free->di_version = version; + free->di_gen = cpu_to_be32(gen); + free->di_next_unlinked = cpu_to_be32(NULLAGINO); + xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1); + } + xfs_trans_inode_alloc_buf(tp, fbuf); + } +} + +/* * Allocate new inodes in the allocation group specified by agbp. * Return 0 for success, else error code. */ @@ -164,24 +244,15 @@ xfs_ialloc_ag_alloc( { xfs_agi_t *agi; /* allocation group header */ xfs_alloc_arg_t args; /* allocation argument structure */ - int blks_per_cluster; /* fs blocks per inode cluster */ xfs_btree_cur_t *cur; /* inode btree cursor */ - xfs_daddr_t d; /* disk addr of buffer */ xfs_agnumber_t agno; int error; - xfs_buf_t *fbuf; /* new free inodes' buffer */ - xfs_dinode_t *free; /* new free inode structure */ - int i; /* inode counter */ - int j; /* block counter */ - int nbufs; /* num bufs of new inodes */ + int i; xfs_agino_t newino; /* new first inode's number */ xfs_agino_t newlen; /* new number of inodes */ - int ninodes; /* num inodes per buf */ xfs_agino_t thisino; /* current inode number, for loop */ - int version; /* inode version number to use */ int isaligned = 0; /* inode allocation at stripe unit */ /* boundary */ - unsigned int gen; args.tp = tp; args.mp = tp->t_mountp; @@ -202,12 +273,12 @@ xfs_ialloc_ag_alloc( */ agi = XFS_BUF_TO_AGI(agbp); newino = be32_to_cpu(agi->agi_newino); + agno = be32_to_cpu(agi->agi_seqno); args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) + XFS_IALLOC_BLOCKS(args.mp); if (likely(newino != NULLAGINO && (args.agbno < be32_to_cpu(agi->agi_length)))) { - args.fsbno = XFS_AGB_TO_FSB(args.mp, - be32_to_cpu(agi->agi_seqno), args.agbno); + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); args.type = XFS_ALLOCTYPE_THIS_BNO; args.mod = args.total = args.wasdel = args.isfl = args.userdata = args.minalignslop = 0; @@ -258,8 +329,7 @@ xfs_ialloc_ag_alloc( * For now, just allocate blocks up front. */ args.agbno = be32_to_cpu(agi->agi_root); - args.fsbno = XFS_AGB_TO_FSB(args.mp, - be32_to_cpu(agi->agi_seqno), args.agbno); + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); /* * Allocate a fixed-size extent of inodes. */ @@ -282,8 +352,7 @@ xfs_ialloc_ag_alloc( if (isaligned && args.fsbno == NULLFSBLOCK) { args.type = XFS_ALLOCTYPE_NEAR_BNO; args.agbno = be32_to_cpu(agi->agi_root); - args.fsbno = XFS_AGB_TO_FSB(args.mp, - be32_to_cpu(agi->agi_seqno), args.agbno); + args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno); args.alignment = xfs_ialloc_cluster_alignment(&args); if ((error = xfs_alloc_vextent(&args))) return error; @@ -294,85 +363,30 @@ xfs_ialloc_ag_alloc( return 0; } ASSERT(args.len == args.minlen); - /* - * Convert the results. - */ - newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); - /* - * Loop over the new block(s), filling in the inodes. 
- * For small block sizes, manipulate the inodes in buffers - * which are multiples of the blocks size. - */ - if (args.mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(args.mp)) { - blks_per_cluster = 1; - nbufs = (int)args.len; - ninodes = args.mp->m_sb.sb_inopblock; - } else { - blks_per_cluster = XFS_INODE_CLUSTER_SIZE(args.mp) / - args.mp->m_sb.sb_blocksize; - nbufs = (int)args.len / blks_per_cluster; - ninodes = blks_per_cluster * args.mp->m_sb.sb_inopblock; - } - /* - * Figure out what version number to use in the inodes we create. - * If the superblock version has caught up to the one that supports - * the new inode format, then use the new inode version. Otherwise - * use the old version so that old kernels will continue to be - * able to use the file system. - */ - if (xfs_sb_version_hasnlink(&args.mp->m_sb)) - version = 2; - else - version = 1; /* + * Stamp and write the inode buffers. + * * Seed the new inode cluster with a random generation number. This * prevents short-term reuse of generation numbers if a chunk is * freed and then immediately reallocated. We use random numbers * rather than a linear progression to prevent the next generation * number from being easily guessable. */ - gen = random32(); - for (j = 0; j < nbufs; j++) { - /* - * Get the block. - */ - d = XFS_AGB_TO_DADDR(args.mp, be32_to_cpu(agi->agi_seqno), - args.agbno + (j * blks_per_cluster)); - fbuf = xfs_trans_get_buf(tp, args.mp->m_ddev_targp, d, - args.mp->m_bsize * blks_per_cluster, - XFS_BUF_LOCK); - ASSERT(fbuf); - ASSERT(!XFS_BUF_GETERROR(fbuf)); + xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno, args.len, + random32()); - /* - * Initialize all inodes in this buffer and then log them. - * - * XXX: It would be much better if we had just one transaction to - * log a whole cluster of inodes instead of all the individual - * transactions causing a lot of log traffic. - */ - xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); - for (i = 0; i < ninodes; i++) { - int ioffset = i << args.mp->m_sb.sb_inodelog; - uint isize = sizeof(struct xfs_dinode); - - free = xfs_make_iptr(args.mp, fbuf, i); - free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); - free->di_version = version; - free->di_gen = cpu_to_be32(gen); - free->di_next_unlinked = cpu_to_be32(NULLAGINO); - xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1); - } - xfs_trans_inode_alloc_buf(tp, fbuf); - } + /* + * Convert the results. + */ + newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0); be32_add_cpu(&agi->agi_count, newlen); be32_add_cpu(&agi->agi_freecount, newlen); - agno = be32_to_cpu(agi->agi_seqno); down_read(&args.mp->m_peraglock); args.mp->m_perag[agno].pagi_freecount += newlen; up_read(&args.mp->m_peraglock); agi->agi_newino = cpu_to_be32(newino); + /* * Insert records describing the new inode chunk into the btree. 
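With the initialization loop hoisted out, xfs_ialloc_ag_alloc() shrinks to a single xfs_ialloc_inode_init() call, and that helper derives its buffer geometry from how filesystem blocks relate to the inode cluster size: big blocks get one buffer per block, small blocks get buffers spanning whole clusters. A standalone rendering of just that arithmetic, with invented example geometry (the constants are illustrative, not XFS defaults):

#include <stdio.h>

/*
 * Mirrors the sizing logic at the top of xfs_ialloc_inode_init():
 * blocks at least as large as an inode cluster are their own
 * buffers; smaller blocks are grouped into cluster-sized buffers.
 */
static void size_inode_buffers(unsigned blocksize, unsigned cluster,
			       unsigned inopblock, unsigned length_blocks)
{
	unsigned blks_per_cluster, nbufs, ninodes;

	if (blocksize >= cluster) {
		blks_per_cluster = 1;
		nbufs = length_blocks;
		ninodes = inopblock;
	} else {
		blks_per_cluster = cluster / blocksize;
		nbufs = length_blocks / blks_per_cluster;
		ninodes = blks_per_cluster * inopblock;
	}
	printf("%u blocks/cluster, %u buffers, %u inodes/buffer\n",
	       blks_per_cluster, nbufs, ninodes);
}

int main(void)
{
	/* e.g. 4k blocks, 8k clusters, 16 inodes/block, 64-block extent */
	size_inode_buffers(4096, 8192, 16, 64);
	return 0;
}

For that example geometry the helper reports 2 blocks per cluster, 32 buffers, and 32 inodes per buffer.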
*/ @@ -380,13 +394,17 @@ xfs_ialloc_ag_alloc( for (thisino = newino; thisino < newino + newlen; thisino += XFS_INODES_PER_CHUNK) { - if ((error = xfs_inobt_lookup_eq(cur, thisino, - XFS_INODES_PER_CHUNK, XFS_INOBT_ALL_FREE, &i))) { + cur->bc_rec.i.ir_startino = thisino; + cur->bc_rec.i.ir_freecount = XFS_INODES_PER_CHUNK; + cur->bc_rec.i.ir_free = XFS_INOBT_ALL_FREE; + error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i); + if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } ASSERT(i == 0); - if ((error = xfs_btree_insert(cur, &i))) { + error = xfs_btree_insert(cur, &i); + if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } @@ -539,6 +557,62 @@ nextag: } /* + * Try to retrieve the next record to the left/right from the current one. + */ +STATIC int +xfs_ialloc_next_rec( + struct xfs_btree_cur *cur, + xfs_inobt_rec_incore_t *rec, + int *done, + int left) +{ + int error; + int i; + + if (left) + error = xfs_btree_decrement(cur, 0, &i); + else + error = xfs_btree_increment(cur, 0, &i); + + if (error) + return error; + *done = !i; + if (i) { + error = xfs_inobt_get_rec(cur, rec, &i); + if (error) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + + return 0; +} + +STATIC int +xfs_ialloc_get_rec( + struct xfs_btree_cur *cur, + xfs_agino_t agino, + xfs_inobt_rec_incore_t *rec, + int *done, + int left) +{ + int error; + int i; + + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i); + if (error) + return error; + *done = !i; + if (i) { + error = xfs_inobt_get_rec(cur, rec, &i); + if (error) + return error; + XFS_WANT_CORRUPTED_RETURN(i == 1); + } + + return 0; +} + +/* * Visible inode allocation functions. */ @@ -592,8 +666,8 @@ xfs_dialloc( int j; /* result code */ xfs_mount_t *mp; /* file system mount structure */ int offset; /* index of inode in chunk */ - xfs_agino_t pagino; /* parent's a.g. relative inode # */ - xfs_agnumber_t pagno; /* parent's allocation group number */ + xfs_agino_t pagino; /* parent's AG relative inode # */ + xfs_agnumber_t pagno; /* parent's AG number */ xfs_inobt_rec_incore_t rec; /* inode allocation record */ xfs_agnumber_t tagno; /* testing allocation group number */ xfs_btree_cur_t *tcur; /* temp cursor */ @@ -716,6 +790,8 @@ nextag: */ agno = tagno; *IO_agbp = NULL; + + restart_pagno: cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno)); /* * If pagino is 0 (this is the root inode allocation) use newino. @@ -723,220 +799,199 @@ nextag: */ if (!pagino) pagino = be32_to_cpu(agi->agi_newino); -#ifdef DEBUG - if (cur->bc_nlevels == 1) { - int freecount = 0; - if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - do { - if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - freecount += rec.ir_freecount; - if ((error = xfs_btree_increment(cur, 0, &i))) - goto error0; - } while (i == 1); + error = xfs_check_agi_freecount(cur, agi); + if (error) + goto error0; - ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || - XFS_FORCED_SHUTDOWN(mp)); - } -#endif /* - * If in the same a.g. as the parent, try to get near the parent. + * If in the same AG as the parent, try to get near the parent. 
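The xfs_check_agi_freecount() helper introduced above collapses the repeated #ifdef DEBUG freecount walks in xfs_dialloc() and xfs_difree() into one function that is defined away as a constant 0 in non-debug builds, so the call sites stay unconditional. A tiny standalone illustration of that build-time pattern; the names and counters are invented:

#include <assert.h>
#include <stdio.h>

struct ag_counters {
	int recorded_free;	/* what the on-disk header claims */
	int walked_free;	/* what a full btree walk would find */
};

#ifdef DEBUG
/* expensive cross-check, compiled only into debug builds */
static int check_freecount(const struct ag_counters *c)
{
	assert(c->recorded_free == c->walked_free);
	return 0;
}
#else
/* release builds: call sites still read "error = ...; if (error)" */
#define check_freecount(c)	((void)(c), 0)
#endif

int main(void)
{
	struct ag_counters c = { 5, 5 };
	int error = check_freecount(&c);

	printf("freecount check: %d\n", error);
	return error;
}

Having the debug variant return an int, rather than asserting inline at each site, is what lets the kernel callers funnel failures through their existing goto error0 paths.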
*/ if (pagno == agno) { - if ((error = xfs_inobt_lookup_le(cur, pagino, 0, 0, &i))) + xfs_perag_t *pag = &mp->m_perag[agno]; + int doneleft; /* done, to the left */ + int doneright; /* done, to the right */ + int searchdistance = 10; + + error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + error = xfs_inobt_get_rec(cur, &rec, &j); + if (error) goto error0; - if (i != 0 && - (error = xfs_inobt_get_rec(cur, &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, &j)) == 0 && - j == 1 && - rec.ir_freecount > 0) { + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + if (rec.ir_freecount > 0) { /* * Found a free inode in the same chunk - * as parent, done. + * as the parent, done. */ + goto alloc_inode; } + + + /* + * In the same AG as parent, but parent's chunk is full. + */ + + /* duplicate the cursor, search left & right simultaneously */ + error = xfs_btree_dup_cursor(cur, &tcur); + if (error) + goto error0; + /* - * In the same a.g. as parent, but parent's chunk is full. + * Skip to last blocks looked up if same parent inode. */ - else { - int doneleft; /* done, to the left */ - int doneright; /* done, to the right */ + if (pagino != NULLAGINO && + pag->pagl_pagino == pagino && + pag->pagl_leftrec != NULLAGINO && + pag->pagl_rightrec != NULLAGINO) { + error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec, + &trec, &doneleft, 1); + if (error) + goto error1; + error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec, + &rec, &doneright, 0); if (error) - goto error0; - ASSERT(i == 1); - ASSERT(j == 1); - /* - * Duplicate the cursor, search left & right - * simultaneously. - */ - if ((error = xfs_btree_dup_cursor(cur, &tcur))) - goto error0; - /* - * Search left with tcur, back up 1 record. - */ - if ((error = xfs_btree_decrement(tcur, 0, &i))) goto error1; - doneleft = !i; - if (!doneleft) { - if ((error = xfs_inobt_get_rec(tcur, - &trec.ir_startino, - &trec.ir_freecount, - &trec.ir_free, &i))) - goto error1; - XFS_WANT_CORRUPTED_GOTO(i == 1, error1); - } - /* - * Search right with cur, go forward 1 record. - */ - if ((error = xfs_btree_increment(cur, 0, &i))) + } else { + /* search left with tcur, back up 1 record */ + error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1); + if (error) goto error1; - doneright = !i; - if (!doneright) { - if ((error = xfs_inobt_get_rec(cur, - &rec.ir_startino, - &rec.ir_freecount, - &rec.ir_free, &i))) - goto error1; - XFS_WANT_CORRUPTED_GOTO(i == 1, error1); - } - /* - * Loop until we find the closest inode chunk - * with a free one. - */ - while (!doneleft || !doneright) { - int useleft; /* using left inode - chunk this time */ + /* search right with cur, go forward 1 record. */ + error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0); + if (error) + goto error1; + } + + /* + * Loop until we find an inode chunk with a free inode. + */ + while (!doneleft || !doneright) { + int useleft; /* using left inode chunk this time */ + + if (!--searchdistance) { /* - * Figure out which block is closer, - * if both are valid. - */ - if (!doneleft && !doneright) - useleft = - pagino - - (trec.ir_startino + - XFS_INODES_PER_CHUNK - 1) < - rec.ir_startino - pagino; - else - useleft = !doneleft; - /* - * If checking the left, does it have - * free inodes? - */ - if (useleft && trec.ir_freecount) { - /* - * Yes, set it up as the chunk to use. - */ - rec = trec; - xfs_btree_del_cursor(cur, - XFS_BTREE_NOERROR); - cur = tcur; - break; - } - /* - * If checking the right, does it have - * free inodes? 
- */ - if (!useleft && rec.ir_freecount) { - /* - * Yes, it's already set up. - */ - xfs_btree_del_cursor(tcur, - XFS_BTREE_NOERROR); - break; - } - /* - * If used the left, get another one - * further left. - */ - if (useleft) { - if ((error = xfs_btree_decrement(tcur, 0, - &i))) - goto error1; - doneleft = !i; - if (!doneleft) { - if ((error = xfs_inobt_get_rec( - tcur, - &trec.ir_startino, - &trec.ir_freecount, - &trec.ir_free, &i))) - goto error1; - XFS_WANT_CORRUPTED_GOTO(i == 1, - error1); - } - } - /* - * If used the right, get another one - * further right. + * Not in range - save last search + * location and allocate a new inode */ - else { - if ((error = xfs_btree_increment(cur, 0, - &i))) - goto error1; - doneright = !i; - if (!doneright) { - if ((error = xfs_inobt_get_rec( - cur, - &rec.ir_startino, - &rec.ir_freecount, - &rec.ir_free, &i))) - goto error1; - XFS_WANT_CORRUPTED_GOTO(i == 1, - error1); - } - } + pag->pagl_leftrec = trec.ir_startino; + pag->pagl_rightrec = rec.ir_startino; + pag->pagl_pagino = pagino; + goto newino; + } + + /* figure out the closer block if both are valid. */ + if (!doneleft && !doneright) { + useleft = pagino - + (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) < + rec.ir_startino - pagino; + } else { + useleft = !doneleft; } - ASSERT(!doneleft || !doneright); + + /* free inodes to the left? */ + if (useleft && trec.ir_freecount) { + rec = trec; + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + cur = tcur; + + pag->pagl_leftrec = trec.ir_startino; + pag->pagl_rightrec = rec.ir_startino; + pag->pagl_pagino = pagino; + goto alloc_inode; + } + + /* free inodes to the right? */ + if (!useleft && rec.ir_freecount) { + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + + pag->pagl_leftrec = trec.ir_startino; + pag->pagl_rightrec = rec.ir_startino; + pag->pagl_pagino = pagino; + goto alloc_inode; + } + + /* get next record to check */ + if (useleft) { + error = xfs_ialloc_next_rec(tcur, &trec, + &doneleft, 1); + } else { + error = xfs_ialloc_next_rec(cur, &rec, + &doneright, 0); + } + if (error) + goto error1; } + + /* + * We've reached the end of the btree. Because + * we are only searching a small chunk of the + * btree on each search, there are obviously free + * inodes closer to the parent inode than we + * are now. Restart the search again. + */ + pag->pagl_pagino = NULLAGINO; + pag->pagl_leftrec = NULLAGINO; + pag->pagl_rightrec = NULLAGINO; + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); + goto restart_pagno; } + /* - * In a different a.g. from the parent. + * In a different AG from the parent. * See if the most recently allocated block has any free. */ - else if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { - if ((error = xfs_inobt_lookup_eq(cur, - be32_to_cpu(agi->agi_newino), 0, 0, &i))) +newino: + if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { + error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), + XFS_LOOKUP_EQ, &i); + if (error) goto error0; - if (i == 1 && - (error = xfs_inobt_get_rec(cur, &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, &j)) == 0 && - j == 1 && - rec.ir_freecount > 0) { - /* - * The last chunk allocated in the group still has - * a free inode. - */ - } - /* - * None left in the last group, search the whole a.g. 
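The rewritten near-parent search above walks left and right from the parent's chunk with two cursors, abandons the walk after a fixed searchdistance, and memoizes the window endpoints in pagl_leftrec/pagl_rightrec/pagl_pagino so the next allocation resumes where this one stopped instead of rescanning from the parent. A simplified, runnable model of that strategy over an array of chunks; it alternates strictly and measures distance in array slots, whereas the kernel compares actual inode-number distances through the btree cursors:

#include <stdio.h>

struct chunk { int startino; int freecount; };

/*
 * Walk outward from "start", one step left and one step right per
 * round, giving up after "searchdistance" rounds.  On give-up the
 * window endpoints are saved, mimicking the pagl_* fields, and -1
 * tells the caller to fall back and resume from the saved window.
 */
static int find_free_near(const struct chunk *c, int n, int start,
			  int searchdistance, int *left, int *right)
{
	int l = start, r = start + 1;

	while (l >= 0 || r < n) {
		if (!--searchdistance) {
			*left = l;	/* remember where we stopped */
			*right = r;
			return -1;
		}
		if (l >= 0 && c[l].freecount)
			return l;
		if (r < n && c[r].freecount)
			return r;
		l--;
		r++;
	}
	return -1;
}

int main(void)
{
	struct chunk chunks[] = {
		{ 0, 0 }, { 64, 0 }, { 128, 0 }, { 192, 3 }, { 256, 0 },
	};
	int left, right;
	int hit = find_free_near(chunks, 5, 1, 10, &left, &right);

	if (hit >= 0)
		printf("nearest free chunk starts at ino %d\n",
		       chunks[hit].startino);
	return 0;
}

Capping the walk bounds the worst case on badly fragmented inode btrees, which is the stated point of the pagl_* additions to struct xfs_perag earlier in this series.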
- */ - else { + + if (i == 1) { + error = xfs_inobt_get_rec(cur, &rec, &j); if (error) goto error0; - if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) - goto error0; - ASSERT(i == 1); - for (;;) { - if ((error = xfs_inobt_get_rec(cur, - &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, - &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if (rec.ir_freecount > 0) - break; - if ((error = xfs_btree_increment(cur, 0, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + if (j == 1 && rec.ir_freecount > 0) { + /* + * The last chunk allocated in the group + * still has a free inode. + */ + goto alloc_inode; } } } + + /* + * None left in the last group, search the whole AG + */ + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + for (;;) { + error = xfs_inobt_get_rec(cur, &rec, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + if (rec.ir_freecount > 0) + break; + error = xfs_btree_increment(cur, 0, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + +alloc_inode: offset = xfs_ialloc_find_free(&rec.ir_free); ASSERT(offset >= 0); ASSERT(offset < XFS_INODES_PER_CHUNK); @@ -945,33 +1000,19 @@ nextag: ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset); rec.ir_free &= ~XFS_INOBT_MASK(offset); rec.ir_freecount--; - if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, - rec.ir_free))) + error = xfs_inobt_update(cur, &rec); + if (error) goto error0; be32_add_cpu(&agi->agi_freecount, -1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); down_read(&mp->m_peraglock); mp->m_perag[tagno].pagi_freecount--; up_read(&mp->m_peraglock); -#ifdef DEBUG - if (cur->bc_nlevels == 1) { - int freecount = 0; - if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) - goto error0; - do { - if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - freecount += rec.ir_freecount; - if ((error = xfs_btree_increment(cur, 0, &i))) - goto error0; - } while (i == 1); - ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || - XFS_FORCED_SHUTDOWN(mp)); - } -#endif + error = xfs_check_agi_freecount(cur, agi); + if (error) + goto error0; + xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); *inop = ino; @@ -1062,38 +1103,23 @@ xfs_difree( * Initialize the cursor. */ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); -#ifdef DEBUG - if (cur->bc_nlevels == 1) { - int freecount = 0; - if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) - goto error0; - do { - if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, - &rec.ir_freecount, &rec.ir_free, &i))) - goto error0; - if (i) { - freecount += rec.ir_freecount; - if ((error = xfs_btree_increment(cur, 0, &i))) - goto error0; - } - } while (i == 1); - ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || - XFS_FORCED_SHUTDOWN(mp)); - } -#endif + error = xfs_check_agi_freecount(cur, agi); + if (error) + goto error0; + /* * Look for the entry describing this inode. */ - if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { + if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { cmn_err(CE_WARN, - "xfs_difree: xfs_inobt_lookup_le returned() an error %d on %s. Returning error.", + "xfs_difree: xfs_inobt_lookup() returned an error %d on %s. 
Returning error.", error, mp->m_fsname); goto error0; } XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_inobt_get_rec(cur, &rec.ir_startino, &rec.ir_freecount, - &rec.ir_free, &i))) { + error = xfs_inobt_get_rec(cur, &rec, &i); + if (error) { cmn_err(CE_WARN, "xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. Returning error.", error, mp->m_fsname); @@ -1148,12 +1174,14 @@ xfs_difree( } else { *delete = 0; - if ((error = xfs_inobt_update(cur, rec.ir_startino, rec.ir_freecount, rec.ir_free))) { + error = xfs_inobt_update(cur, &rec); + if (error) { cmn_err(CE_WARN, - "xfs_difree: xfs_inobt_update() returned an error %d on %s. Returning error.", + "xfs_difree: xfs_inobt_update returned an error %d on %s.", error, mp->m_fsname); goto error0; } + /* * Change the inode free counts and log the ag/sb changes. */ @@ -1165,28 +1193,10 @@ xfs_difree( xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); } -#ifdef DEBUG - if (cur->bc_nlevels == 1) { - int freecount = 0; + error = xfs_check_agi_freecount(cur, agi); + if (error) + goto error0; - if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i))) - goto error0; - do { - if ((error = xfs_inobt_get_rec(cur, - &rec.ir_startino, - &rec.ir_freecount, - &rec.ir_free, &i))) - goto error0; - if (i) { - freecount += rec.ir_freecount; - if ((error = xfs_btree_increment(cur, 0, &i))) - goto error0; - } - } while (i == 1); - ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || - XFS_FORCED_SHUTDOWN(mp)); - } -#endif xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); return 0; @@ -1297,9 +1307,7 @@ xfs_imap( chunk_agbno = agbno - offset_agbno; } else { xfs_btree_cur_t *cur; /* inode btree cursor */ - xfs_agino_t chunk_agino; /* first agino in inode chunk */ - __int32_t chunk_cnt; /* count of free inodes in chunk */ - xfs_inofree_t chunk_free; /* mask of free inodes in chunk */ + xfs_inobt_rec_incore_t chunk_rec; xfs_buf_t *agbp; /* agi buffer */ int i; /* temp state */ @@ -1315,15 +1323,14 @@ xfs_imap( } cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); - error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i); + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); if (error) { xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " - "xfs_inobt_lookup_le() failed"); + "xfs_inobt_lookup() failed"); goto error0; } - error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt, - &chunk_free, &i); + error = xfs_inobt_get_rec(cur, &chunk_rec, &i); if (error) { xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_inobt_get_rec() failed"); @@ -1341,7 +1348,7 @@ xfs_imap( xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); if (error) return error; - chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino); + chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_rec.ir_startino); offset_agbno = agbno - chunk_agbno; } diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index aeee827..bb53854 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h @@ -150,23 +150,15 @@ xfs_ialloc_pagi_init( xfs_agnumber_t agno); /* allocation group number */ /* - * Lookup the first record greater than or equal to ino - * in the btree given by cur. + * Lookup a record by ino in the btree given by cur. */ -int xfs_inobt_lookup_ge(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free, int *stat); - -/* - * Lookup the first record less than or equal to ino - * in the btree given by cur. 
- */ -int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free, int *stat); +int xfs_inobt_lookup(struct xfs_btree_cur *cur, xfs_agino_t ino, + xfs_lookup_t dir, int *stat); /* * Get the data from the pointed-to record. */ -extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, - __int32_t *fcnt, xfs_inofree_t *free, int *stat); +extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, + xfs_inobt_rec_incore_t *rec, int *stat); #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index ecbf8b4..80e5264 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -82,7 +82,6 @@ xfs_inode_alloc( memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); ip->i_flags = 0; ip->i_update_core = 0; - ip->i_update_size = 0; ip->i_delayed_blks = 0; memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); ip->i_size = 0; @@ -456,32 +455,6 @@ out_error_or_again: return error; } - -/* - * Look for the inode corresponding to the given ino in the hash table. - * If it is there and its i_transp pointer matches tp, return it. - * Otherwise, return NULL. - */ -xfs_inode_t * -xfs_inode_incore(xfs_mount_t *mp, - xfs_ino_t ino, - xfs_trans_t *tp) -{ - xfs_inode_t *ip; - xfs_perag_t *pag; - - pag = xfs_get_perag(mp, ino); - read_lock(&pag->pag_ici_lock); - ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino)); - read_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); - - /* the returned inode must match the transaction */ - if (ip && (ip->i_transp != tp)) - return NULL; - return ip; -} - /* * Decrement reference count of an inode structure and unlock it. * diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index da428b3..c1dc7ef 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -651,7 +651,7 @@ xfs_iformat_btree( return 0; } -void +STATIC void xfs_dinode_from_disk( xfs_icdinode_t *to, xfs_dinode_t *from) @@ -1247,7 +1247,7 @@ xfs_isize_check( * In that case the pages will still be in memory, but the inode size * will never have been updated. */ -xfs_fsize_t +STATIC xfs_fsize_t xfs_file_last_byte( xfs_inode_t *ip) { @@ -3837,7 +3837,7 @@ xfs_iext_inline_to_direct( /* * Resize an extent indirection array to new_size bytes. */ -void +STATIC void xfs_iext_realloc_indirect( xfs_ifork_t *ifp, /* inode fork pointer */ int new_size) /* new indirection array size */ @@ -3862,7 +3862,7 @@ xfs_iext_realloc_indirect( /* * Switch from indirection array to linear (direct) extent allocations. */ -void +STATIC void xfs_iext_indirect_to_direct( xfs_ifork_t *ifp) /* inode fork pointer */ { diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 65f24a3..0b38b9a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -261,7 +261,6 @@ typedef struct xfs_inode { /* Miscellaneous state. */ unsigned short i_flags; /* see defined flags below */ unsigned char i_update_core; /* timestamps/size is dirty */ - unsigned char i_update_size; /* di_size field is dirty */ unsigned int i_delayed_blks; /* count of delay alloc blks */ xfs_icdinode_t i_d; /* most of ondisk inode */ @@ -468,8 +467,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) /* * xfs_iget.c prototypes. 
*/ -xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, - struct xfs_trans *); int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, uint, uint, xfs_inode_t **, xfs_daddr_t); void xfs_iput(xfs_inode_t *, uint); @@ -504,7 +501,6 @@ void xfs_ipin(xfs_inode_t *); void xfs_iunpin(xfs_inode_t *); int xfs_iflush(xfs_inode_t *, uint); void xfs_ichgtime(xfs_inode_t *, int); -xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); void xfs_lock_inodes(xfs_inode_t **, int, uint); void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); @@ -572,8 +568,6 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, struct xfs_buf **, uint); int xfs_iread(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, xfs_daddr_t, uint); -void xfs_dinode_from_disk(struct xfs_icdinode *, - struct xfs_dinode *); void xfs_dinode_to_disk(struct xfs_dinode *, struct xfs_icdinode *); void xfs_idestroy_fork(struct xfs_inode *, int); @@ -592,8 +586,6 @@ void xfs_iext_remove_inline(xfs_ifork_t *, xfs_extnum_t, int); void xfs_iext_remove_direct(xfs_ifork_t *, xfs_extnum_t, int); void xfs_iext_remove_indirect(xfs_ifork_t *, xfs_extnum_t, int); void xfs_iext_realloc_direct(xfs_ifork_t *, int); -void xfs_iext_realloc_indirect(xfs_ifork_t *, int); -void xfs_iext_indirect_to_direct(xfs_ifork_t *); void xfs_iext_direct_to_inline(xfs_ifork_t *, xfs_extnum_t); void xfs_iext_inline_to_direct(xfs_ifork_t *, int); void xfs_iext_destroy(xfs_ifork_t *); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 977c4ae..47d5b66 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -263,14 +263,6 @@ xfs_inode_item_format( } /* - * We don't have to worry about re-ordering here because - * the update_size field is protected by the inode lock - * and we have that held in exclusive mode. - */ - if (ip->i_update_size) - ip->i_update_size = 0; - - /* * Make sure to get the latest atime from the Linux inode. */ xfs_synchronize_atime(ip); @@ -712,8 +704,6 @@ xfs_inode_item_unlock( * Clear out the fields of the inode log item particular * to the current transaction. 
*/ - iip->ili_ilock_recur = 0; - iip->ili_iolock_recur = 0; iip->ili_flags = 0; /* diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index a52ac12..65bae4c 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -137,8 +137,6 @@ typedef struct xfs_inode_log_item { struct xfs_inode *ili_inode; /* inode ptr */ xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ - unsigned short ili_ilock_recur; /* lock recursion count */ - unsigned short ili_iolock_recur; /* lock recursion count */ unsigned short ili_flags; /* misc flags */ unsigned short ili_logged; /* flushed logged data */ unsigned int ili_last_fields; /* fields when flushed */ diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/xfs_inum.h index 7a28191..b8e4ee4 100644 --- a/fs/xfs/xfs_inum.h +++ b/fs/xfs/xfs_inum.h @@ -72,7 +72,6 @@ struct xfs_mount; #if XFS_BIG_INUMS #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) -#define XFS_INO64_OFFSET ((xfs_ino_t)(1ULL << 32)) #else #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 32) - 1ULL)) #endif diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index aeb2d22..b68f910 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -39,7 +39,7 @@ #include "xfs_error.h" #include "xfs_btree.h" -int +STATIC int xfs_internal_inum( xfs_mount_t *mp, xfs_ino_t ino) @@ -353,9 +353,6 @@ xfs_bulkstat( int end_of_ag; /* set if we've seen the ag end */ int error; /* error code */ int fmterror;/* bulkstat formatter result */ - __int32_t gcnt; /* current btree rec's count */ - xfs_inofree_t gfree; /* current btree rec's free mask */ - xfs_agino_t gino; /* current btree rec's start inode */ int i; /* loop index */ int icount; /* count of inodes good in irbuf */ size_t irbsize; /* size of irec buffer in bytes */ @@ -442,40 +439,43 @@ xfs_bulkstat( * we need to get the remainder of the chunk we're in. */ if (agino > 0) { + xfs_inobt_rec_incore_t r; + /* * Lookup the inode chunk that this inode lives in. */ - error = xfs_inobt_lookup_le(cur, agino, 0, 0, &tmp); + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, + &tmp); if (!error && /* no I/O error */ tmp && /* lookup succeeded */ /* got the record, should always work */ - !(error = xfs_inobt_get_rec(cur, &gino, &gcnt, - &gfree, &i)) && + !(error = xfs_inobt_get_rec(cur, &r, &i)) && i == 1 && /* this is the right chunk */ - agino < gino + XFS_INODES_PER_CHUNK && + agino < r.ir_startino + XFS_INODES_PER_CHUNK && /* lastino was not last in chunk */ - (chunkidx = agino - gino + 1) < + (chunkidx = agino - r.ir_startino + 1) < XFS_INODES_PER_CHUNK && /* there are some left allocated */ xfs_inobt_maskn(chunkidx, - XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) { + XFS_INODES_PER_CHUNK - chunkidx) & + ~r.ir_free) { /* * Grab the chunk record. Mark all the * uninteresting inodes (because they're * before our start point) free. 
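Throughout xfs_bulkstat() and xfs_inumbers(), the three parallel locals that used to shadow a btree record (gino, gcnt, gfree) are replaced by one xfs_inobt_rec_incore_t, matching the new xfs_inobt_get_rec() signature. A small sketch of why the record-struct shape beats parallel out-parameters; the types and values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* one record type instead of three loose out-parameters */
struct inobt_rec {
	uint32_t ir_startino;	/* first inode in the chunk */
	int32_t  ir_freecount;	/* free inodes in the chunk */
	uint64_t ir_free;	/* bitmask of free slots */
};

/* callers can no longer mis-order or forget one of the fields */
static int get_rec(struct inobt_rec *out, int *stat)
{
	out->ir_startino = 128;
	out->ir_freecount = 2;
	out->ir_free = 0x3;
	*stat = 1;		/* 1 = a record was returned */
	return 0;
}

int main(void)
{
	struct inobt_rec r;
	int stat;

	if (get_rec(&r, &stat) == 0 && stat == 1)
		printf("chunk @%u: %d free, mask 0x%llx\n",
		       r.ir_startino, r.ir_freecount,
		       (unsigned long long)r.ir_free);
	return 0;
}

It also lets later code such as xfs_imap() pass the whole record around (chunk_rec) instead of re-plumbing three scalars through every call.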
*/ for (i = 0; i < chunkidx; i++) { - if (XFS_INOBT_MASK(i) & ~gfree) - gcnt++; + if (XFS_INOBT_MASK(i) & ~r.ir_free) + r.ir_freecount++; } - gfree |= xfs_inobt_maskn(0, chunkidx); - irbp->ir_startino = gino; - irbp->ir_freecount = gcnt; - irbp->ir_free = gfree; + r.ir_free |= xfs_inobt_maskn(0, chunkidx); + irbp->ir_startino = r.ir_startino; + irbp->ir_freecount = r.ir_freecount; + irbp->ir_free = r.ir_free; irbp++; - agino = gino + XFS_INODES_PER_CHUNK; - icount = XFS_INODES_PER_CHUNK - gcnt; + agino = r.ir_startino + XFS_INODES_PER_CHUNK; + icount = XFS_INODES_PER_CHUNK - r.ir_freecount; } else { /* * If any of those tests failed, bump the @@ -493,7 +493,7 @@ xfs_bulkstat( /* * Start of ag. Lookup the first inode chunk. */ - error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &tmp); + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); icount = 0; } /* @@ -501,6 +501,8 @@ xfs_bulkstat( * until we run out of inodes or space in the buffer. */ while (irbp < irbufend && icount < ubcount) { + xfs_inobt_rec_incore_t r; + /* * Loop as long as we're unable to read the * inode btree. @@ -510,51 +512,55 @@ xfs_bulkstat( if (XFS_AGINO_TO_AGBNO(mp, agino) >= be32_to_cpu(agi->agi_length)) break; - error = xfs_inobt_lookup_ge(cur, agino, 0, 0, - &tmp); + error = xfs_inobt_lookup(cur, agino, + XFS_LOOKUP_GE, &tmp); cond_resched(); } /* * If ran off the end of the ag either with an error, * or the normal way, set end and stop collecting. */ - if (error || - (error = xfs_inobt_get_rec(cur, &gino, &gcnt, - &gfree, &i)) || - i == 0) { + if (error) { end_of_ag = 1; break; } + + error = xfs_inobt_get_rec(cur, &r, &i); + if (error || i == 0) { + end_of_ag = 1; + break; + } + /* * If this chunk has any allocated inodes, save it. * Also start read-ahead now for this chunk. */ - if (gcnt < XFS_INODES_PER_CHUNK) { + if (r.ir_freecount < XFS_INODES_PER_CHUNK) { /* * Loop over all clusters in the next chunk. * Do a readahead if there are any allocated * inodes in that cluster. */ - for (agbno = XFS_AGINO_TO_AGBNO(mp, gino), - chunkidx = 0; + agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); + for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK; chunkidx += nicluster, agbno += nbcluster) { - if (xfs_inobt_maskn(chunkidx, - nicluster) & ~gfree) + if (xfs_inobt_maskn(chunkidx, nicluster) + & ~r.ir_free) xfs_btree_reada_bufs(mp, agno, agbno, nbcluster); } - irbp->ir_startino = gino; - irbp->ir_freecount = gcnt; - irbp->ir_free = gfree; + irbp->ir_startino = r.ir_startino; + irbp->ir_freecount = r.ir_freecount; + irbp->ir_free = r.ir_free; irbp++; - icount += XFS_INODES_PER_CHUNK - gcnt; + icount += XFS_INODES_PER_CHUNK - r.ir_freecount; } /* * Set agino to after this chunk and bump the cursor. 
*/ - agino = gino + XFS_INODES_PER_CHUNK; + agino = r.ir_startino + XFS_INODES_PER_CHUNK; error = xfs_btree_increment(cur, 0, &tmp); cond_resched(); } @@ -820,9 +826,7 @@ xfs_inumbers( int bufidx; xfs_btree_cur_t *cur; int error; - __int32_t gcnt; - xfs_inofree_t gfree; - xfs_agino_t gino; + xfs_inobt_rec_incore_t r; int i; xfs_ino_t ino; int left; @@ -855,7 +859,8 @@ xfs_inumbers( continue; } cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno); - error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp); + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, + &tmp); if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); cur = NULL; @@ -870,9 +875,8 @@ xfs_inumbers( continue; } } - if ((error = xfs_inobt_get_rec(cur, &gino, &gcnt, &gfree, - &i)) || - i == 0) { + error = xfs_inobt_get_rec(cur, &r, &i); + if (error || i == 0) { xfs_buf_relse(agbp); agbp = NULL; xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); @@ -881,10 +885,12 @@ xfs_inumbers( agino = 0; continue; } - agino = gino + XFS_INODES_PER_CHUNK - 1; - buffer[bufidx].xi_startino = XFS_AGINO_TO_INO(mp, agno, gino); - buffer[bufidx].xi_alloccount = XFS_INODES_PER_CHUNK - gcnt; - buffer[bufidx].xi_allocmask = ~gfree; + agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1; + buffer[bufidx].xi_startino = + XFS_AGINO_TO_INO(mp, agno, r.ir_startino); + buffer[bufidx].xi_alloccount = + XFS_INODES_PER_CHUNK - r.ir_freecount; + buffer[bufidx].xi_allocmask = ~r.ir_free; bufidx++; left--; if (bufidx == bcount) { diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index 1fb04e7..20792bf 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -99,11 +99,6 @@ xfs_bulkstat_one( void *dibuff, int *stat); -int -xfs_internal_inum( - xfs_mount_t *mp, - xfs_ino_t ino); - typedef int (*inumbers_fmt_pf)( void __user *ubuffer, /* buffer to write to */ const xfs_inogrp_t *buffer, /* buffer to read from */ diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index bcad5f4..679c7c4 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -451,8 +451,6 @@ extern int xlog_find_tail(xlog_t *log, extern int xlog_recover(xlog_t *log); extern int xlog_recover_finish(xlog_t *log); extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); -extern void xlog_recover_process_iunlinks(xlog_t *log); - extern struct xfs_buf *xlog_get_bp(xlog_t *, int); extern void xlog_put_bp(struct xfs_buf *); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 47da2fb..1099395 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3263,7 +3263,7 @@ xlog_recover_process_one_iunlink( * freeing of the inode and its removal from the list must be * atomic. */ -void +STATIC void xlog_recover_process_iunlinks( xlog_t *log) { diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 5c6f092..8b6c9e8 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1568,7 +1568,7 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields) * * The m_sb_lock must be held when this routine is called. 
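Making xfs_mod_incore_sb_unlocked() STATIC enforces the usual locked/_unlocked pairing: the only remaining way in is through the wrapper that takes m_sb_lock. A generic userspace sketch of that convention using POSIX threads; the names are illustrative, and it builds with -lpthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static long icount;

/*
 * _unlocked worker: the caller must hold sb_lock.  Keeping it
 * static means no outside caller can bypass the locking wrapper.
 */
static int mod_icount_unlocked(long delta)
{
	if (icount + delta < 0)
		return -1;	/* reject underflow */
	icount += delta;
	return 0;
}

/* public entry point: acquire the lock, then call the worker */
int mod_icount(long delta)
{
	int error;

	pthread_mutex_lock(&sb_lock);
	error = mod_icount_unlocked(delta);
	pthread_mutex_unlock(&sb_lock);
	return error;
}

int main(void)
{
	mod_icount(10);
	mod_icount(-3);
	printf("icount = %ld\n", icount);
	return 0;
}

Narrowing the _unlocked variant's linkage is cheap insurance: any new caller that wants to skip the lock now has to be added next to the lock's owner, where the requirement is visible.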
*/ -int +STATIC int xfs_mod_incore_sb_unlocked( xfs_mount_t *mp, xfs_sb_field_t field, diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index a512238..a6c023b 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -414,13 +414,10 @@ typedef struct xfs_mod_sb { extern int xfs_log_sbcount(xfs_mount_t *, uint); extern int xfs_mountfs(xfs_mount_t *mp); -extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); extern void xfs_unmountfs(xfs_mount_t *); extern int xfs_unmountfs_writesb(xfs_mount_t *); extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); -extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, - int64_t, int); extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, uint, int); extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t); diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index afee7eb..4b0613d 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c @@ -564,35 +564,6 @@ xfs_mru_cache_lookup( } /* - * To look up an element using its key, but leave its location in the internal - * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this - * function returns NULL. - * - * See the comments above the declaration of the xfs_mru_cache_lookup() function - * for important locking information pertaining to this call. - */ -void * -xfs_mru_cache_peek( - xfs_mru_cache_t *mru, - unsigned long key) -{ - xfs_mru_cache_elem_t *elem; - - ASSERT(mru && mru->lists); - if (!mru || !mru->lists) - return NULL; - - spin_lock(&mru->lock); - elem = radix_tree_lookup(&mru->store, key); - if (!elem) - spin_unlock(&mru->lock); - else - __release(mru_lock); /* help sparse not be stupid */ - - return elem ? elem->value : NULL; -} - -/* * To release the internal data structure spinlock after having performed an * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done() * with the data store pointer. diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h index dd58ea1..5d439f3 100644 --- a/fs/xfs/xfs_mru_cache.h +++ b/fs/xfs/xfs_mru_cache.h @@ -49,7 +49,6 @@ int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key, void * xfs_mru_cache_remove(struct xfs_mru_cache *mru, unsigned long key); void xfs_mru_cache_delete(struct xfs_mru_cache *mru, unsigned long key); void *xfs_mru_cache_lookup(struct xfs_mru_cache *mru, unsigned long key); -void *xfs_mru_cache_peek(struct xfs_mru_cache *mru, unsigned long key); void xfs_mru_cache_done(struct xfs_mru_cache *mru); #endif /* __XFS_MRU_CACHE_H__ */ diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index fea6861..3f816ad 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -88,90 +88,6 @@ xfs_write_clear_setuid( } /* - * Handle logging requirements of various synchronous types of write. - */ -int -xfs_write_sync_logforce( - xfs_mount_t *mp, - xfs_inode_t *ip) -{ - int error = 0; - - /* - * If we're treating this as O_DSYNC and we have not updated the - * size, force the log. - */ - if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) && - !(ip->i_update_size)) { - xfs_inode_log_item_t *iip = ip->i_itemp; - - /* - * If an allocation transaction occurred - * without extending the size, then we have to force - * the log up the proper point to ensure that the - * allocation is permanent. We can't count on - * the fact that buffered writes lock out direct I/O - * writes - the direct I/O write could have extended - * the size nontransactionally, then finished before - * we started. 
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index fea6861..3f816ad 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -88,90 +88,6 @@ xfs_write_clear_setuid(
 }
 
 /*
- * Handle logging requirements of various synchronous types of write.
- */
-int
-xfs_write_sync_logforce(
-	xfs_mount_t	*mp,
-	xfs_inode_t	*ip)
-{
-	int		error = 0;
-
-	/*
-	 * If we're treating this as O_DSYNC and we have not updated the
-	 * size, force the log.
-	 */
-	if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
-	    !(ip->i_update_size)) {
-		xfs_inode_log_item_t	*iip = ip->i_itemp;
-
-		/*
-		 * If an allocation transaction occurred
-		 * without extending the size, then we have to force
-		 * the log up the proper point to ensure that the
-		 * allocation is permanent.  We can't count on
-		 * the fact that buffered writes lock out direct I/O
-		 * writes - the direct I/O write could have extended
-		 * the size nontransactionally, then finished before
-		 * we started.  xfs_write_file will think that the file
-		 * didn't grow but the update isn't safe unless the
-		 * size change is logged.
-		 *
-		 * Force the log if we've committed a transaction
-		 * against the inode or if someone else has and
-		 * the commit record hasn't gone to disk (e.g.
-		 * the inode is pinned).  This guarantees that
-		 * all changes affecting the inode are permanent
-		 * when we return.
-		 */
-		if (iip && iip->ili_last_lsn) {
-			error = _xfs_log_force(mp, iip->ili_last_lsn,
-					XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
-		} else if (xfs_ipincount(ip) > 0) {
-			error = _xfs_log_force(mp, (xfs_lsn_t)0,
-					XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
-		}
-
-	} else {
-		xfs_trans_t	*tp;
-
-		/*
-		 * O_SYNC or O_DSYNC _with_ a size update are handled
-		 * the same way.
-		 *
-		 * If the write was synchronous then we need to make
-		 * sure that the inode modification time is permanent.
-		 * We'll have updated the timestamp above, so here
-		 * we use a synchronous transaction to log the inode.
-		 * It's not fast, but it's necessary.
-		 *
-		 * If this a dsync write and the size got changed
-		 * non-transactionally, then we need to ensure that
-		 * the size change gets logged in a synchronous
-		 * transaction.
-		 */
-		tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
-		if ((error = xfs_trans_reserve(tp, 0,
-					      XFS_SWRITE_LOG_RES(mp),
-					      0, 0, 0))) {
-			/* Transaction reserve failed */
-			xfs_trans_cancel(tp, 0);
-		} else {
-			/* Transaction reserve successful */
-			xfs_ilock(ip, XFS_ILOCK_EXCL);
-			xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-			xfs_trans_ihold(tp, ip);
-			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-			xfs_trans_set_sync(tp);
-			error = xfs_trans_commit(tp, 0);
-			xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		}
-	}
-
-	return error;
-}
-
-/*
  * Force a shutdown of the filesystem instantly while keeping
  * the filesystem consistent. We don't do an unmount here; just shutdown
  * the shop, make sure that absolutely nothing persistent happens to
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f76c003..f5e4874 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -68,7 +68,6 @@ xfs_get_extsz_hint(
  * Prototypes for functions in xfs_rw.c.
  */
 extern int xfs_write_clear_setuid(struct xfs_inode *ip);
-extern int xfs_write_sync_logforce(struct xfs_mount *mp, struct xfs_inode *ip);
 extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
 extern int xfs_bioerror(struct xfs_buf *bp);
 extern int xfs_bioerror_relse(struct xfs_buf *bp);
@@ -78,10 +77,4 @@ extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
 extern void xfs_ioerror_alert(char *func, struct xfs_mount *mp,
 				xfs_buf_t *bp, xfs_daddr_t blkno);
 
-/*
- * Prototypes for functions in xfs_vnodeops.c.
- */
-extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
-			int flags);
-
 #endif	/* __XFS_RW_H__ */
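xfs_write_sync_logforce() bundled two synchronous-write idioms that its
callers now open-code or avoid. For reference, the log-force half distilled
into an isolated sketch; the helper name is invented, but the calls and flags
are exactly those in the deleted body above:

/*
 * Sketch only: force the log far enough to make an inode's committed
 * changes durable.  If the inode itself logged something, force up to
 * its last commit LSN; if it is merely pinned by someone else's
 * in-flight commit record, force the whole log.
 */
STATIC int
example_force_inode_log(xfs_mount_t *mp, xfs_inode_t *ip)
{
	xfs_inode_log_item_t	*iip = ip->i_itemp;

	if (iip && iip->ili_last_lsn)
		return _xfs_log_force(mp, iip->ili_last_lsn,
				XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
	if (xfs_ipincount(ip) > 0)
		return _xfs_log_force(mp, (xfs_lsn_t)0,
				XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
	return 0;
}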
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 775249a..ed47fc7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -68,7 +68,7 @@ typedef struct xfs_trans_header {
 #define XFS_TRANS_GROWFS		14
 #define XFS_TRANS_STRAT_WRITE		15
 #define XFS_TRANS_DIOSTRAT		16
-#define XFS_TRANS_WRITE_SYNC		17
+/* 17 was XFS_TRANS_WRITE_SYNC */
 #define XFS_TRANS_WRITEID		18
 #define XFS_TRANS_ADDAFORK		19
 #define XFS_TRANS_ATTRINVAL		20
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 8ee2f8c..218829e 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -307,7 +307,7 @@ xfs_trans_read_buf(
 			return (flags & XFS_BUF_TRYLOCK) ?
 					EAGAIN : XFS_ERROR(ENOMEM);
 
-		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
+		if (XFS_BUF_GETERROR(bp) != 0) {
 			xfs_ioerror_alert("xfs_trans_read_buf", mp, bp, blkno);
 			error = XFS_BUF_GETERROR(bp);
@@ -315,7 +315,7 @@ xfs_trans_read_buf(
 			return error;
 		}
 #ifdef DEBUG
-		if (xfs_do_error && (bp != NULL)) {
+		if (xfs_do_error) {
 			if (xfs_error_target == target) {
 				if (((xfs_req_num++) % xfs_error_mod) == 0) {
 					xfs_buf_relse(bp);
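Two related cleanups above: XFS_TRANS_WRITE_SYNC's slot is retired in place
because transaction type codes are recorded in the on-disk log, so the
surviving codes cannot be renumbered without confusing recovery of older
logs; and xfs_trans_read_buf() drops bp != NULL tests that became dead once
the early return for a NULL buffer executed. The second point as a
standalone userspace reduction (names hypothetical):

#include <stddef.h>

/* Once the early return has handled bp == NULL, any later NULL test
 * on the same pointer is dead code, which is what the hunks delete. */
static int buf_error(int *bp, int try_lock)
{
	if (bp == NULL)
		return try_lock ? -1 /* would-be EAGAIN */ : -2 /* ENOMEM */;

	/* before: if (bp != NULL && *bp != 0) -- the NULL arm never fires */
	if (*bp != 0)
		return *bp;
	return 0;
}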
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 23d276a..785ff10 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -49,30 +49,7 @@ xfs_trans_inode_broot_debug(
 
 /*
- * Get and lock the inode for the caller if it is not already
- * locked within the given transaction.  If it is already locked
- * within the transaction, just increment its lock recursion count
- * and return a pointer to it.
- *
- * For an inode to be locked in a transaction, the inode lock, as
- * opposed to the io lock, must be taken exclusively.  This ensures
- * that the inode can be involved in only 1 transaction at a time.
- * Lock recursion is handled on the io lock, but only for lock modes
- * of equal or lesser strength.  That is, you can recur on the io lock
- * held EXCL with a SHARED request but not vice versa.  Also, if
- * the inode is already a part of the transaction then you cannot
- * go from not holding the io lock to having it EXCL or SHARED.
- *
- * Use the inode cache routine xfs_inode_incore() to find the inode
- * if it is already owned by this transaction.
- *
- * If we don't already own the inode, use xfs_iget() to get it.
- * Since the inode log item structure is embedded in the incore
- * inode structure and is initialized when the inode is brought
- * into memory, there is nothing to do with it here.
- *
- * If the given transaction pointer is NULL, just call xfs_iget().
- * This simplifies code which must handle both cases.
+ * Get an inode and join it to the transaction.
  */
 int
 xfs_trans_iget(
@@ -84,62 +61,11 @@ xfs_trans_iget(
 	xfs_inode_t	**ipp)
 {
 	int		error;
-	xfs_inode_t	*ip;
-
-	/*
-	 * If the transaction pointer is NULL, just call the normal
-	 * xfs_iget().
-	 */
-	if (tp == NULL)
-		return xfs_iget(mp, NULL, ino, flags, lock_flags, ipp, 0);
-
-	/*
-	 * If we find the inode in core with this transaction
-	 * pointer in its i_transp field, then we know we already
-	 * have it locked.  In this case we just increment the lock
-	 * recursion count and return the inode to the caller.
-	 * Assert that the inode is already locked in the mode requested
-	 * by the caller.  We cannot do lock promotions yet, so
-	 * die if someone gets this wrong.
-	 */
-	if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
-		/*
-		 * Make sure that the inode lock is held EXCL and
-		 * that the io lock is never upgraded when the inode
-		 * is already a part of the transaction.
-		 */
-		ASSERT(ip->i_itemp != NULL);
-		ASSERT(lock_flags & XFS_ILOCK_EXCL);
-		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
-		       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
-		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
-		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
-		       xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
-		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
-		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));
-
-		if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
-			ip->i_itemp->ili_iolock_recur++;
-		}
-		if (lock_flags & XFS_ILOCK_EXCL) {
-			ip->i_itemp->ili_ilock_recur++;
-		}
-		*ipp = ip;
-		return 0;
-	}
-
-	ASSERT(lock_flags & XFS_ILOCK_EXCL);
-	error = xfs_iget(tp->t_mountp, tp, ino, flags, lock_flags, &ip, 0);
-	if (error) {
-		return error;
-	}
-	ASSERT(ip != NULL);
-
-	xfs_trans_ijoin(tp, ip, lock_flags);
-	*ipp = ip;
-	return 0;
+	error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp, 0);
+	if (!error && tp)
+		xfs_trans_ijoin(tp, *ipp, lock_flags);
+	return error;
 }
 
 /*
@@ -163,8 +89,6 @@ xfs_trans_ijoin(
 		xfs_inode_item_init(ip, ip->i_mount);
 	iip = ip->i_itemp;
 	ASSERT(iip->ili_flags == 0);
-	ASSERT(iip->ili_ilock_recur == 0);
-	ASSERT(iip->ili_iolock_recur == 0);
 
 	/*
 	 * Get a log_item_desc to point at the new item.
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 492d75b..a434f28 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -611,7 +611,7 @@ xfs_fsync(
 	xfs_inode_t	*ip)
 {
 	xfs_trans_t	*tp;
-	int		error;
+	int		error = 0;
 	int		log_flushed = 0, changed = 1;
 
 	xfs_itrace_entry(ip);
@@ -619,14 +619,9 @@ xfs_fsync(
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return XFS_ERROR(EIO);
 
-	/* capture size updates in I/O completion before writing the inode. */
-	error = xfs_wait_on_pages(ip, 0, -1);
-	if (error)
-		return XFS_ERROR(error);
-
 	/*
 	 * We always need to make sure that the required inode state is safe on
-	 * disk.  The vnode might be clean but we still might need to force the
+	 * disk.  The inode might be clean but we still might need to force the
 	 * log because of committed transactions that haven't hit the disk yet.
 	 * Likewise, there could be unflushed non-transactional changes to the
 	 * inode core that have to go to disk and this requires us to issue
@@ -638,7 +633,7 @@ xfs_fsync(
 	 */
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
 
-	if (!(ip->i_update_size || ip->i_update_core)) {
+	if (!ip->i_update_core) {
 		/*
 		 * Timestamps/size haven't changed since last inode flush or
 		 * inode transaction commit.  That means either nothing got
@@ -718,7 +713,7 @@ xfs_fsync(
  * when the link count isn't zero and by xfs_dm_punch_hole() when
  * punching a hole to EOF.
  */
-int
+STATIC int
 xfs_free_eofblocks(
 	xfs_mount_t	*mp,
 	xfs_inode_t	*ip,
@@ -1476,8 +1471,8 @@ xfs_create(
 	if (error == ENOSPC) {
 		/* flush outstanding delalloc blocks and retry */
 		xfs_flush_inodes(dp);
-		error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
-			XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
+		error = xfs_trans_reserve(tp, resblks, log_res, 0,
+			XFS_TRANS_PERM_LOG_RES, log_count);
 	}
 	if (error == ENOSPC) {
 		/* No space at all so try a "no-allocation" reservation */
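The xfs_trans_iget() rewrite above drops the lock-recursion bookkeeping
entirely; the recursion path evidently no longer has users, so the routine
reduces to xfs_iget() plus xfs_trans_ijoin(). A hedged sketch of the
resulting caller contract (the function name is a placeholder, and the
commit-time unlock assumes the caller does not add xfs_trans_ihold()):

/*
 * Placeholder caller: with a non-NULL tp the inode comes back locked
 * as requested and joined to the transaction; absent xfs_trans_ihold(),
 * commit or cancel releases the lock along with the inode log item.
 */
STATIC int
example_iget(xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino,
	     xfs_inode_t **ipp)
{
	int	error;

	error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, ipp);
	if (error)
		return error;

	/* *ipp is now held XFS_ILOCK_EXCL and owned by tp */
	return 0;
}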