From 92c60ccaf3c15a06d859682b980de1066641b4d0 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 00:17:48 +0900 Subject: nilfs2: add blocksize member to nilfs object This stores blocksize in nilfs objects for the successive refactoring of recovery logic. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/the_nilfs.c | 1 + fs/nilfs2/the_nilfs.h | 2 ++ 2 files changed, 3 insertions(+) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 8c10973..870a127 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -604,6 +604,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) when reloading fails. */ } nilfs->ns_blocksize_bits = sb->s_blocksize_bits; + nilfs->ns_blocksize = blocksize; err = nilfs_store_disk_layout(nilfs, sbp); if (err) diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 1ab9745..85df47f 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -82,6 +82,7 @@ enum { * @ns_gc_inodes: dummy inodes to keep live blocks * @ns_gc_inodes_h: hash list to keep dummy inode holding live blocks * @ns_blocksize_bits: bit length of block size + * @ns_blocksize: block size * @ns_nsegments: number of segments in filesystem * @ns_blocks_per_segment: number of blocks per segment * @ns_r_segments_percentage: reserved segments percentage @@ -168,6 +169,7 @@ struct the_nilfs { /* Disk layout information (static) */ unsigned int ns_blocksize_bits; + unsigned int ns_blocksize; unsigned long ns_nsegments; unsigned long ns_blocks_per_segment; unsigned long ns_r_segments_percentage; -- cgit v1.1 From 8b94025c00f9171b41ba9c1696943f5c935b62ef Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 01:39:02 +0900 Subject: nilfs2: refactor recovery logic routines Most functions in recovery code take an argument of a super block instance or a nilfs_sb_info struct for convenience sake. This replaces them aggressively with a nilfs object by applying __bread and __breadahead against routines using sb_bread and sb_breadahead. 
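The substitution works because sb_bread() derives the block size from the super block it is given, while __bread() addresses the block device directly and must be told the size explicitly -- which is why the preceding patch cached it in nilfs->ns_blocksize. For illustration only, the same pattern can be modeled in plain userspace C; the pread()-based reader and all names below are illustrative stand-ins, not kernel APIs:

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_super {
	int fd;			/* stands in for the block device */
	uint32_t blocksize;	/* what nilfs now caches in ns_blocksize */
};

/* device-level read: the caller must supply the block size (__bread style) */
static void *dev_read_block(int fd, uint64_t blocknr, uint32_t blocksize)
{
	void *buf = malloc(blocksize);

	if (!buf)
		return NULL;
	if (pread(fd, buf, blocksize, (off_t)(blocknr * blocksize)) !=
	    (ssize_t)blocksize) {
		free(buf);
		return NULL;
	}
	return buf;
}

/* super-block-level read: the size comes from the context (sb_bread style) */
static void *sb_read_block(const struct fake_super *sb, uint64_t blocknr)
{
	return dev_read_block(sb->fd, blocknr, sb->blocksize);
}

int main(int argc, char **argv)
{
	struct fake_super sb = { .fd = -1, .blocksize = 4096 };
	void *blk;

	if (argc < 2)
		return 1;
	sb.fd = open(argv[1], O_RDONLY);
	if (sb.fd < 0)
		return 1;
	blk = sb_read_block(&sb, 0);			/* convenient form */
	free(blk);
	blk = dev_read_block(sb.fd, 0, sb.blocksize);	/* explicit form */
	free(blk);
	close(sb.fd);
	return 0;
}

Once the reads only need the device and the cached block size, the recovery helpers no longer have to carry a super block or nilfs_sb_info argument around, which is what the diff that follows does.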
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/recovery.c | 160 +++++++++++++++++++++++++++++--------------------- fs/nilfs2/segment.h | 4 +- fs/nilfs2/the_nilfs.c | 9 ++- 3 files changed, 100 insertions(+), 73 deletions(-) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index bae2a51..1c883b1 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -110,8 +110,8 @@ static void store_segsum_info(struct nilfs_segsum_info *ssi, } /** - * calc_crc_cont - check CRC of blocks continuously - * @sbi: nilfs_sb_info + * nilfs_compute_checksum - compute checksum of blocks continuously + * @nilfs: nilfs object * @bhs: buffer head of start block * @sum: place to store result * @offset: offset bytes in the first block @@ -119,23 +119,25 @@ static void store_segsum_info(struct nilfs_segsum_info *ssi, * @start: DBN of start block * @nblock: number of blocks to be checked */ -static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs, - u32 *sum, unsigned long offset, u64 check_bytes, - sector_t start, unsigned long nblock) +static int nilfs_compute_checksum(struct the_nilfs *nilfs, + struct buffer_head *bhs, u32 *sum, + unsigned long offset, u64 check_bytes, + sector_t start, unsigned long nblock) { - unsigned long blocksize = sbi->s_super->s_blocksize; + unsigned int blocksize = nilfs->ns_blocksize; unsigned long size; u32 crc; BUG_ON(offset >= blocksize); check_bytes -= offset; size = min_t(u64, check_bytes, blocksize - offset); - crc = crc32_le(sbi->s_nilfs->ns_crc_seed, + crc = crc32_le(nilfs->ns_crc_seed, (unsigned char *)bhs->b_data + offset, size); if (--nblock > 0) { do { - struct buffer_head *bh - = sb_bread(sbi->s_super, ++start); + struct buffer_head *bh; + + bh = __bread(nilfs->ns_bdev, ++start, blocksize); if (!bh) return -EIO; check_bytes -= size; @@ -150,12 +152,12 @@ static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs, /** * nilfs_read_super_root_block - read super root block - * @sb: super_block + * @nilfs: nilfs object * @sr_block: disk block number of the super root block * @pbh: address of a buffer_head pointer to return super root buffer * @check: CRC check flag */ -int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block, +int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, struct buffer_head **pbh, int check) { struct buffer_head *bh_sr; @@ -164,7 +166,7 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block, int ret; *pbh = NULL; - bh_sr = sb_bread(sb, sr_block); + bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize); if (unlikely(!bh_sr)) { ret = NILFS_SEG_FAIL_IO; goto failed; @@ -174,12 +176,13 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block, if (check) { unsigned bytes = le16_to_cpu(sr->sr_bytes); - if (bytes == 0 || bytes > sb->s_blocksize) { + if (bytes == 0 || bytes > nilfs->ns_blocksize) { ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT; goto failed_bh; } - if (calc_crc_cont(NILFS_SB(sb), bh_sr, &crc, - sizeof(sr->sr_sum), bytes, sr_block, 1)) { + if (nilfs_compute_checksum( + nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes, + sr_block, 1)) { ret = NILFS_SEG_FAIL_IO; goto failed_bh; } @@ -200,13 +203,13 @@ int nilfs_read_super_root_block(struct super_block *sb, sector_t sr_block, /** * load_segment_summary - read segment summary of the specified partial segment - * @sbi: nilfs_sb_info + * @nilfs: nilfs object * @pseg_start: start disk block number of partial segment * @seg_seq: sequence number requested * @ssi: pointer to 
nilfs_segsum_info struct to store information */ static int -load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, +load_segment_summary(struct the_nilfs *nilfs, sector_t pseg_start, u64 seg_seq, struct nilfs_segsum_info *ssi) { struct buffer_head *bh_sum; @@ -215,7 +218,7 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, u32 crc; int ret = NILFS_SEG_FAIL_IO; - bh_sum = sb_bread(sbi->s_super, pseg_start); + bh_sum = __bread(nilfs->ns_bdev, pseg_start, nilfs->ns_blocksize); if (!bh_sum) goto out; @@ -226,22 +229,21 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, ret = NILFS_SEG_FAIL_MAGIC; goto failed; } - store_segsum_info(ssi, sum, sbi->s_super->s_blocksize); + store_segsum_info(ssi, sum, nilfs->ns_blocksize); if (seg_seq != ssi->seg_seq) { ret = NILFS_SEG_FAIL_SEQ; goto failed; } nblock = ssi->nblocks; - if (unlikely(nblock == 0 || - nblock > sbi->s_nilfs->ns_blocks_per_segment)) { + if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment)) { /* This limits the number of blocks read in the CRC check */ ret = NILFS_SEG_FAIL_CONSISTENCY; goto failed; } - if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum), - ((u64)nblock << sbi->s_super->s_blocksize_bits), - pseg_start, nblock)) { + if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum), + ((u64)nblock << nilfs->ns_blocksize_bits), + pseg_start, nblock)) { ret = NILFS_SEG_FAIL_IO; goto failed; } @@ -255,8 +257,16 @@ load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, return ret; } -static void *segsum_get(struct super_block *sb, struct buffer_head **pbh, - unsigned int *offset, unsigned int bytes) +/** + * nilfs_read_summary_info - read an item on summary blocks of a log + * @nilfs: nilfs object + * @pbh: the current buffer head on summary blocks [in, out] + * @offset: the current byte offset on summary blocks [in, out] + * @bytes: byte size of the item to be read + */ +static void *nilfs_read_summary_info(struct the_nilfs *nilfs, + struct buffer_head **pbh, + unsigned int *offset, unsigned int bytes) { void *ptr; sector_t blocknr; @@ -265,7 +275,8 @@ static void *segsum_get(struct super_block *sb, struct buffer_head **pbh, if (bytes > (*pbh)->b_size - *offset) { blocknr = (*pbh)->b_blocknr; brelse(*pbh); - *pbh = sb_bread(sb, blocknr + 1); + *pbh = __bread(nilfs->ns_bdev, blocknr + 1, + nilfs->ns_blocksize); if (unlikely(!*pbh)) return NULL; *offset = 0; @@ -275,9 +286,18 @@ static void *segsum_get(struct super_block *sb, struct buffer_head **pbh, return ptr; } -static void segsum_skip(struct super_block *sb, struct buffer_head **pbh, - unsigned int *offset, unsigned int bytes, - unsigned long count) +/** + * nilfs_skip_summary_info - skip items on summary blocks of a log + * @nilfs: nilfs object + * @pbh: the current buffer head on summary blocks [in, out] + * @offset: the current byte offset on summary blocks [in, out] + * @bytes: byte size of the item to be skipped + * @count: number of items to be skipped + */ +static void nilfs_skip_summary_info(struct the_nilfs *nilfs, + struct buffer_head **pbh, + unsigned int *offset, unsigned int bytes, + unsigned long count) { unsigned int rest_item_in_current_block = ((*pbh)->b_size - *offset) / bytes; @@ -294,26 +314,33 @@ static void segsum_skip(struct super_block *sb, struct buffer_head **pbh, *offset = bytes * (count - (bcnt - 1) * nitem_per_block); brelse(*pbh); - *pbh = sb_bread(sb, blocknr + bcnt); + *pbh = __bread(nilfs->ns_bdev, blocknr + bcnt, + nilfs->ns_blocksize); } } 
-static int -collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr, - struct nilfs_segsum_info *ssi, - struct list_head *head) +/** + * nilfs_scan_dsync_log - get block information of a log written for data sync + * @nilfs: nilfs object + * @start_blocknr: start block number of the log + * @ssi: log summary information + * @head: list head to add nilfs_recovery_block struct + */ +static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, + struct nilfs_segsum_info *ssi, + struct list_head *head) { struct buffer_head *bh; unsigned int offset; unsigned long nfinfo = ssi->nfinfo; - sector_t blocknr = sum_blocknr + ssi->nsumblk; + sector_t blocknr = start_blocknr + ssi->nsumblk; ino_t ino; int err = -EIO; if (!nfinfo) return 0; - bh = sb_bread(sbi->s_super, sum_blocknr); + bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize); if (unlikely(!bh)) goto out; @@ -323,7 +350,8 @@ collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr, unsigned long nblocks, ndatablk, nnodeblk; struct nilfs_finfo *finfo; - finfo = segsum_get(sbi->s_super, &bh, &offset, sizeof(*finfo)); + finfo = nilfs_read_summary_info(nilfs, &bh, &offset, + sizeof(*finfo)); if (unlikely(!finfo)) goto out; @@ -336,8 +364,8 @@ collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr, struct nilfs_recovery_block *rb; struct nilfs_binfo_v *binfo; - binfo = segsum_get(sbi->s_super, &bh, &offset, - sizeof(*binfo)); + binfo = nilfs_read_summary_info(nilfs, &bh, &offset, + sizeof(*binfo)); if (unlikely(!binfo)) goto out; @@ -355,9 +383,9 @@ collect_blocks_from_segsum(struct nilfs_sb_info *sbi, sector_t sum_blocknr, } if (--nfinfo == 0) break; - blocknr += nnodeblk; /* always 0 for the data sync segments */ - segsum_skip(sbi->s_super, &bh, &offset, sizeof(__le64), - nnodeblk); + blocknr += nnodeblk; /* always 0 for data sync logs */ + nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64), + nnodeblk); if (unlikely(!bh)) goto out; } @@ -467,14 +495,14 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, return err; } -static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi, +static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, struct nilfs_recovery_block *rb, struct page *page) { struct buffer_head *bh_org; void *kaddr; - bh_org = sb_bread(sbi->s_super, rb->blocknr); + bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize); if (unlikely(!bh_org)) return -EIO; @@ -485,13 +513,14 @@ static int nilfs_recovery_copy_block(struct nilfs_sb_info *sbi, return 0; } -static int recover_dsync_blocks(struct nilfs_sb_info *sbi, - struct list_head *head, - unsigned long *nr_salvaged_blocks) +static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, + struct nilfs_sb_info *sbi, + struct list_head *head, + unsigned long *nr_salvaged_blocks) { struct inode *inode; struct nilfs_recovery_block *rb, *n; - unsigned blocksize = sbi->s_super->s_blocksize; + unsigned blocksize = nilfs->ns_blocksize; struct page *page; loff_t pos; int err = 0, err2 = 0; @@ -511,7 +540,7 @@ static int recover_dsync_blocks(struct nilfs_sb_info *sbi, if (unlikely(err)) goto failed_inode; - err = nilfs_recovery_copy_block(sbi, rb, page); + err = nilfs_recovery_copy_block(nilfs, rb, page); if (unlikely(err)) goto failed_page; @@ -551,8 +580,8 @@ static int recover_dsync_blocks(struct nilfs_sb_info *sbi, /** * nilfs_do_roll_forward - salvage logical segments newer than the latest * checkpoint + * @nilfs: nilfs object * @sbi: nilfs_sb_info - 
* @nilfs: the_nilfs * @ri: pointer to a nilfs_recovery_info */ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, @@ -582,7 +611,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); + ret = load_segment_summary(nilfs, pseg_start, seg_seq, &ssi); if (ret) { if (ret == NILFS_SEG_FAIL_IO) { err = -EIO; @@ -610,13 +639,14 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, if (!NILFS_SEG_DSYNC(&ssi)) goto confused; - err = collect_blocks_from_segsum( - sbi, pseg_start, &ssi, &dsync_blocks); + err = nilfs_scan_dsync_log(nilfs, pseg_start, &ssi, + &dsync_blocks); if (unlikely(err)) goto failed; if (NILFS_SEG_LOGEND(&ssi)) { - err = recover_dsync_blocks( - sbi, &dsync_blocks, &nsalvaged_blocks); + err = nilfs_recover_dsync_blocks( + nilfs, sbi, &dsync_blocks, + &nsalvaged_blocks); if (unlikely(err)) goto failed; state = RF_INIT_ST; @@ -653,7 +683,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, } out: dispose_recovery_list(&dsync_blocks); - nilfs_detach_writer(sbi->s_nilfs, sbi); + nilfs_detach_writer(nilfs, sbi); return err; confused: @@ -667,7 +697,6 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, } static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, - struct nilfs_sb_info *sbi, struct nilfs_recovery_info *ri) { struct buffer_head *bh; @@ -677,7 +706,7 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, nilfs_get_segnum_of_block(nilfs, ri->ri_super_root)) return; - bh = sb_getblk(sbi->s_super, ri->ri_lsegs_start); + bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize); BUG_ON(!bh); memset(bh->b_data, 0, bh->b_size); set_buffer_dirty(bh); @@ -751,7 +780,7 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs, goto failed; } - nilfs_finish_roll_forward(nilfs, sbi, ri); + nilfs_finish_roll_forward(nilfs, ri); } failed: @@ -762,7 +791,6 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs, /** * nilfs_search_super_root - search the latest valid super root * @nilfs: the_nilfs - * @sbi: nilfs_sb_info * @ri: pointer to a nilfs_recovery_info struct to store search results. 
* * nilfs_search_super_root() looks for the latest super-root from a partial @@ -776,7 +804,7 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs, * * %-EIO - I/O error */ -int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, +int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { struct nilfs_segsum_info ssi; @@ -801,11 +829,10 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, /* Read ahead segment */ b = seg_start; while (b <= seg_end) - sb_breadahead(sbi->s_super, b++); + __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize); for (;;) { - /* Load segment summary */ - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); + ret = load_segment_summary(nilfs, pseg_start, seg_seq, &ssi); if (ret) { if (ret == NILFS_SEG_FAIL_IO) goto failed; @@ -836,7 +863,8 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, if (pseg_start == seg_start) { nilfs_get_segment_range(nilfs, nextnum, &b, &end); while (b <= end) - sb_breadahead(sbi->s_super, b++); + __breadahead(nilfs->ns_bdev, b++, + nilfs->ns_blocksize); } if (!NILFS_SEG_HAS_SR(&ssi)) { if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) { diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 01e20db..d6bb67e 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -234,9 +234,9 @@ extern int nilfs_attach_segment_constructor(struct nilfs_sb_info *); extern void nilfs_detach_segment_constructor(struct nilfs_sb_info *); /* recovery.c */ -extern int nilfs_read_super_root_block(struct super_block *, sector_t, +extern int nilfs_read_super_root_block(struct the_nilfs *, sector_t, struct buffer_head **, int); -extern int nilfs_search_super_root(struct the_nilfs *, struct nilfs_sb_info *, +extern int nilfs_search_super_root(struct the_nilfs *, struct nilfs_recovery_info *); extern int nilfs_recover_logical_segments(struct the_nilfs *, struct nilfs_sb_info *, diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 870a127..a94908e 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -159,8 +159,7 @@ void put_nilfs(struct the_nilfs *nilfs) kfree(nilfs); } -static int nilfs_load_super_root(struct the_nilfs *nilfs, - struct nilfs_sb_info *sbi, sector_t sr_block) +static int nilfs_load_super_root(struct the_nilfs *nilfs, sector_t sr_block) { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; @@ -169,7 +168,7 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, unsigned inode_size; int err; - err = nilfs_read_super_root_block(sbi->s_super, sr_block, &bh_sr, 1); + err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1); if (unlikely(err)) return err; @@ -285,13 +284,13 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) nilfs_init_recovery_info(&ri); - err = nilfs_search_super_root(nilfs, sbi, &ri); + err = nilfs_search_super_root(nilfs, &ri); if (unlikely(err)) { printk(KERN_ERR "NILFS: error searching super root.\n"); goto failed; } - err = nilfs_load_super_root(nilfs, sbi, ri.ri_super_root); + err = nilfs_load_super_root(nilfs, ri.ri_super_root); if (unlikely(err)) { printk(KERN_ERR "NILFS: error loading super root.\n"); goto failed; -- cgit v1.1 From aee5ce2f578b98eba16e59cb84d39a95682a836b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 12:21:57 +0900 Subject: nilfs2: rename nilfs_recover_logical_segments function The function name of nilfs_recover_logical_segments makes no sense. 
This changes the name to nilfs_salvage_orphan_logs to clarify the role of the function. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/recovery.c | 11 +++++------ fs/nilfs2/segment.h | 6 +++--- fs/nilfs2/the_nilfs.c | 2 +- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 1c883b1..fd7fb41 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -719,9 +719,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, } /** - * nilfs_recover_logical_segments - salvage logical segments written after - * the latest super root - * @nilfs: the_nilfs + * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint + * @nilfs: nilfs object * @sbi: nilfs_sb_info * @ri: pointer to a nilfs_recovery_info struct to store search results. * * @@ -738,9 +737,9 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, * * %-ENOMEM - Insufficient memory available. */ -int nilfs_recover_logical_segments(struct the_nilfs *nilfs, - struct nilfs_sb_info *sbi, - struct nilfs_recovery_info *ri) +int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, + struct nilfs_sb_info *sbi, + struct nilfs_recovery_info *ri) { int err; diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index d6bb67e..17c487b 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -238,9 +238,9 @@ extern int nilfs_read_super_root_block(struct the_nilfs *, sector_t, struct buffer_head **, int); extern int nilfs_search_super_root(struct the_nilfs *, struct nilfs_recovery_info *); -extern int nilfs_recover_logical_segments(struct the_nilfs *, - struct nilfs_sb_info *, - struct nilfs_recovery_info *); +extern int nilfs_salvage_orphan_logs(struct the_nilfs *, + struct nilfs_sb_info *, + struct nilfs_recovery_info *); extern void nilfs_dispose_segment_list(struct list_head *); #endif /* _NILFS_SEGMENT_H */ diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index a94908e..9f2cb01 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -319,7 +319,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) goto failed_unload; } - err = nilfs_recover_logical_segments(nilfs, sbi, &ri); + err = nilfs_salvage_orphan_logs(nilfs, sbi, &ri); if (err) goto failed_unload; -- cgit v1.1 From 354fa8be280ce81c88b6b236d62d23ebcade2d3f Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 19:21:49 +0900 Subject: nilfs2: divide load_segment_summary function The load_segment_summary function has two distinct roles: getting the summary header of a log and verifying the consistency of the log. This divides it into two corresponding functions, nilfs_read_log_header and nilfs_validate_log, to clarify the meaning.
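The split follows a common shape: one helper that only maps the on-disk header, and one that runs the magic, sequence, size-bound, and checksum checks. A compact userspace model of the validation half is sketched below; the struct layout, the magic constant, and the assumption that the checksum convention matches a seeded, non-inverted reflected CRC-32 (as crc32_le provides) are all simplifications, not the exact on-disk format:

#include <stddef.h>
#include <stdint.h>

#define SEGSUM_MAGIC	0x53554d31u	/* placeholder magic, not the on-disk value */

struct log_header {		/* simplified stand-in for nilfs_segment_summary */
	uint32_t magic;
	uint32_t nblocks;	/* length of the log in blocks */
	uint64_t seq;		/* sequence number of the segment */
	uint32_t datasum;	/* checksum over the log payload */
};

/* bitwise CRC-32 (reflected poly 0xEDB88320), seeded, no final inversion */
static uint32_t crc32_seeded(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

/*
 * Returns 0 when valid, otherwise the first failed check, in the same
 * spirit as NILFS_SEG_FAIL_MAGIC / _SEQ / _CONSISTENCY / _CHECKSUM_FULL.
 */
static int validate_log(const struct log_header *h, uint64_t expected_seq,
			uint32_t max_blocks, uint32_t seed,
			const uint8_t *payload, size_t payload_len)
{
	if (h->magic != SEGSUM_MAGIC)
		return 1;
	if (h->seq != expected_seq)
		return 2;
	if (h->nblocks == 0 || h->nblocks > max_blocks)
		return 3;
	if (crc32_seeded(seed, payload, payload_len) != h->datasum)
		return 4;
	return 0;
}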
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/recovery.c | 110 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 69 insertions(+), 41 deletions(-) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index fd7fb41..35506b1 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -202,58 +202,63 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, } /** - * load_segment_summary - read segment summary of the specified partial segment + * nilfs_read_log_header - read summary header of the specified log * @nilfs: nilfs object - * @pseg_start: start disk block number of partial segment - * @seg_seq: sequence number requested - * @ssi: pointer to nilfs_segsum_info struct to store information + * @start_blocknr: start block number of the log + * @sum: pointer to return segment summary structure */ -static int -load_segment_summary(struct the_nilfs *nilfs, sector_t pseg_start, - u64 seg_seq, struct nilfs_segsum_info *ssi) +static struct buffer_head * +nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, + struct nilfs_segment_summary **sum) { struct buffer_head *bh_sum; - struct nilfs_segment_summary *sum; + + bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize); + if (bh_sum) + *sum = (struct nilfs_segment_summary *)bh_sum->b_data; + return bh_sum; +} + +/** + * nilfs_validate_log - verify consistency of log + * @nilfs: nilfs object + * @seg_seq: sequence number of segment + * @bh_sum: buffer head of summary block + * @sum: segment summary struct + */ +static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq, + struct buffer_head *bh_sum, + struct nilfs_segment_summary *sum) +{ unsigned long nblock; u32 crc; - int ret = NILFS_SEG_FAIL_IO; + int ret; - bh_sum = __bread(nilfs->ns_bdev, pseg_start, nilfs->ns_blocksize); - if (!bh_sum) + ret = NILFS_SEG_FAIL_MAGIC; + if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) goto out; - sum = (struct nilfs_segment_summary *)bh_sum->b_data; - - /* Check consistency of segment summary */ - if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC) { - ret = NILFS_SEG_FAIL_MAGIC; - goto failed; - } - store_segsum_info(ssi, sum, nilfs->ns_blocksize); - if (seg_seq != ssi->seg_seq) { - ret = NILFS_SEG_FAIL_SEQ; - goto failed; - } + ret = NILFS_SEG_FAIL_SEQ; + if (le64_to_cpu(sum->ss_seq) != seg_seq) + goto out; - nblock = ssi->nblocks; - if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment)) { + nblock = le32_to_cpu(sum->ss_nblocks); + ret = NILFS_SEG_FAIL_CONSISTENCY; + if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment)) /* This limits the number of blocks read in the CRC check */ - ret = NILFS_SEG_FAIL_CONSISTENCY; - goto failed; - } + goto out; + + ret = NILFS_SEG_FAIL_IO; if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum), ((u64)nblock << nilfs->ns_blocksize_bits), - pseg_start, nblock)) { - ret = NILFS_SEG_FAIL_IO; - goto failed; - } - if (crc == le32_to_cpu(sum->ss_datasum)) - ret = 0; - else - ret = NILFS_SEG_FAIL_CHECKSUM_FULL; - failed: - brelse(bh_sum); - out: + bh_sum->b_blocknr, nblock)) + goto out; + + ret = NILFS_SEG_FAIL_CHECKSUM_FULL; + if (crc != le32_to_cpu(sum->ss_datasum)) + goto out; + ret = 0; +out: return ret; } @@ -589,6 +594,8 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { struct nilfs_segsum_info ssi; + struct buffer_head *bh_sum = NULL; + struct nilfs_segment_summary *sum; sector_t pseg_start; sector_t seg_start, seg_end; /* Starting/ending DBN 
of full segment */ unsigned long nsalvaged_blocks = 0; @@ -610,8 +617,14 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end); while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { + brelse(bh_sum); + bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum); + if (!bh_sum) { + err = -EIO; + goto failed; + } - ret = load_segment_summary(nilfs, pseg_start, seg_seq, &ssi); + ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum); if (ret) { if (ret == NILFS_SEG_FAIL_IO) { err = -EIO; @@ -619,6 +632,8 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, } goto strayed; } + + store_segsum_info(&ssi, sum, nilfs->ns_blocksize); if (unlikely(NILFS_SEG_HAS_SR(&ssi))) goto confused; @@ -682,6 +697,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE; } out: + brelse(bh_sum); dispose_recovery_list(&dsync_blocks); nilfs_detach_writer(nilfs, sbi); return err; @@ -807,6 +823,8 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { struct nilfs_segsum_info ssi; + struct buffer_head *bh_sum = NULL; + struct nilfs_segment_summary *sum; sector_t pseg_start, pseg_end, sr_pseg_start = 0; sector_t seg_start, seg_end; /* range of full segment (block number) */ sector_t b, end; @@ -831,12 +849,20 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize); for (;;) { - ret = load_segment_summary(nilfs, pseg_start, seg_seq, &ssi); + brelse(bh_sum); + ret = NILFS_SEG_FAIL_IO; + bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum); + if (!bh_sum) + goto failed; + + ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum); if (ret) { if (ret == NILFS_SEG_FAIL_IO) goto failed; goto strayed; } + + store_segsum_info(&ssi, sum, nilfs->ns_blocksize); pseg_end = pseg_start + ssi.nblocks - 1; if (unlikely(pseg_end > seg_end)) { ret = NILFS_SEG_FAIL_CONSISTENCY; @@ -936,6 +962,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, super_root_found: /* Updating pointers relating to the latest checkpoint */ + brelse(bh_sum); list_splice_tail(&segments, &ri->ri_used_segments); nilfs->ns_last_pseg = sr_pseg_start; nilfs->ns_last_seq = nilfs->ns_seg_seq; @@ -943,6 +970,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, return 0; failed: + brelse(bh_sum); nilfs_dispose_segment_list(&segments); return (ret < 0) ? ret : nilfs_warn_segment_error(ret); } -- cgit v1.1 From 85655484f896d078d310221475b90ea27f76e5f2 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 19:46:44 +0900 Subject: nilfs2: do not use nilfs_segsum_info structure in recovery code This will get rid of nilfs_segsum_info use from recovery functions for simplicity. 
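Dropping the decoded nilfs_segsum_info means each caller converts only the on-disk fields it actually needs, at the point of use; the kernel does this with le16_to_cpu()/le32_to_cpu()/le64_to_cpu(). A portable userspace equivalent that pulls little-endian fields straight out of a raw summary block is sketched below; the field offsets and the scan_state struct are illustrative, not the real on-disk layout:

#include <stdint.h>

/* byte-wise readers work regardless of host endianness and alignment */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint64_t get_le64(const uint8_t *p)
{
	return (uint64_t)get_le32(p) | ((uint64_t)get_le32(p + 4) << 32);
}

/* illustrative field offsets within a raw summary block */
enum { OFF_FLAGS = 4, OFF_NBLOCKS = 8, OFF_NFINFO = 12, OFF_SEQ = 16 };

struct scan_state {
	unsigned int flags;
	uint32_t nblocks;
	uint32_t nfinfo;
	uint64_t seq;
};

/* convert only what the caller needs, when it needs it */
static void read_summary_fields(const uint8_t *block, struct scan_state *st)
{
	st->flags = get_le16(block + OFF_FLAGS);
	st->nblocks = get_le32(block + OFF_NBLOCKS);
	st->nfinfo = get_le32(block + OFF_NFINFO);
	st->seq = get_le64(block + OFF_SEQ);
}

int main(void)
{
	uint8_t block[24] = { 0 };
	struct scan_state st;

	block[OFF_FLAGS] = 0x03;	/* two flag bits set in this mock layout */
	block[OFF_NBLOCKS] = 8;
	read_summary_fields(block, &st);
	return (st.flags == 0x03 && st.nblocks == 8) ? 0 : 1;
}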
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/recovery.c | 91 +++++++++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 54 deletions(-) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 35506b1..f5d9c3f 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -91,24 +91,6 @@ static int nilfs_warn_segment_error(int err) return -EINVAL; } -static void store_segsum_info(struct nilfs_segsum_info *ssi, - struct nilfs_segment_summary *sum, - unsigned int blocksize) -{ - ssi->flags = le16_to_cpu(sum->ss_flags); - ssi->seg_seq = le64_to_cpu(sum->ss_seq); - ssi->ctime = le64_to_cpu(sum->ss_create); - ssi->next = le64_to_cpu(sum->ss_next); - ssi->nblocks = le32_to_cpu(sum->ss_nblocks); - ssi->nfinfo = le32_to_cpu(sum->ss_nfinfo); - ssi->sumbytes = le32_to_cpu(sum->ss_sumbytes); - - ssi->nsumblk = DIV_ROUND_UP(ssi->sumbytes, blocksize); - ssi->nfileblk = ssi->nblocks - ssi->nsumblk - !!NILFS_SEG_HAS_SR(ssi); - - /* need to verify ->ss_bytes field if read ->ss_cno */ -} - /** * nilfs_compute_checksum - compute checksum of blocks continuously * @nilfs: nilfs object @@ -328,29 +310,31 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs, * nilfs_scan_dsync_log - get block information of a log written for data sync * @nilfs: nilfs object * @start_blocknr: start block number of the log - * @ssi: log summary information + * @sum: log summary information * @head: list head to add nilfs_recovery_block struct */ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, - struct nilfs_segsum_info *ssi, + struct nilfs_segment_summary *sum, struct list_head *head) { struct buffer_head *bh; unsigned int offset; - unsigned long nfinfo = ssi->nfinfo; - sector_t blocknr = start_blocknr + ssi->nsumblk; + u32 nfinfo, sumbytes; + sector_t blocknr; ino_t ino; int err = -EIO; + nfinfo = le32_to_cpu(sum->ss_nfinfo); if (!nfinfo) return 0; + sumbytes = le32_to_cpu(sum->ss_sumbytes); + blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize); bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize); if (unlikely(!bh)) goto out; - offset = le16_to_cpu( - ((struct nilfs_segment_summary *)bh->b_data)->ss_bytes); + offset = le16_to_cpu(sum->ss_bytes); for (;;) { unsigned long nblocks, ndatablk, nnodeblk; struct nilfs_finfo *finfo; @@ -593,12 +577,12 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, struct nilfs_recovery_info *ri) { - struct nilfs_segsum_info ssi; struct buffer_head *bh_sum = NULL; struct nilfs_segment_summary *sum; sector_t pseg_start; sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */ unsigned long nsalvaged_blocks = 0; + unsigned int flags; u64 seg_seq; __u64 segnum, nextnum = 0; int empty_seg = 0; @@ -633,32 +617,34 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, goto strayed; } - store_segsum_info(&ssi, sum, nilfs->ns_blocksize); - if (unlikely(NILFS_SEG_HAS_SR(&ssi))) + flags = le16_to_cpu(sum->ss_flags); + if (flags & NILFS_SS_SR) goto confused; /* Found a valid partial segment; do recovery actions */ - nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next); + nextnum = nilfs_get_segnum_of_block(nilfs, + le64_to_cpu(sum->ss_next)); empty_seg = 0; - nilfs->ns_ctime = ssi.ctime; - if (!(ssi.flags & NILFS_SS_GC)) - nilfs->ns_nongc_ctime = ssi.ctime; + nilfs->ns_ctime = le64_to_cpu(sum->ss_create); + if (!(flags & NILFS_SS_GC)) + nilfs->ns_nongc_ctime = nilfs->ns_ctime; switch (state) { case RF_INIT_ST: - if (!NILFS_SEG_LOGBGN(&ssi) || 
!NILFS_SEG_DSYNC(&ssi)) + if (!(flags & NILFS_SS_LOGBGN) || + !(flags & NILFS_SS_SYNDT)) goto try_next_pseg; state = RF_DSYNC_ST; /* Fall through */ case RF_DSYNC_ST: - if (!NILFS_SEG_DSYNC(&ssi)) + if (!(flags & NILFS_SS_SYNDT)) goto confused; - err = nilfs_scan_dsync_log(nilfs, pseg_start, &ssi, + err = nilfs_scan_dsync_log(nilfs, pseg_start, sum, &dsync_blocks); if (unlikely(err)) goto failed; - if (NILFS_SEG_LOGEND(&ssi)) { + if (flags & NILFS_SS_LOGEND) { err = nilfs_recover_dsync_blocks( nilfs, sbi, &dsync_blocks, &nsalvaged_blocks); @@ -672,7 +658,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, try_next_pseg: if (pseg_start == ri->ri_lsegs_end) break; - pseg_start += ssi.nblocks; + pseg_start += le32_to_cpu(sum->ss_nblocks); if (pseg_start < seg_end) continue; goto feed_segment; @@ -822,12 +808,13 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) { - struct nilfs_segsum_info ssi; struct buffer_head *bh_sum = NULL; struct nilfs_segment_summary *sum; sector_t pseg_start, pseg_end, sr_pseg_start = 0; sector_t seg_start, seg_end; /* range of full segment (block number) */ sector_t b, end; + unsigned long nblocks; + unsigned int flags; u64 seg_seq; __u64 segnum, nextnum = 0; __u64 cno; @@ -862,8 +849,8 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, goto strayed; } - store_segsum_info(&ssi, sum, nilfs->ns_blocksize); - pseg_end = pseg_start + ssi.nblocks - 1; + nblocks = le32_to_cpu(sum->ss_nblocks); + pseg_end = pseg_start + nblocks - 1; if (unlikely(pseg_end > seg_end)) { ret = NILFS_SEG_FAIL_CONSISTENCY; goto strayed; @@ -873,11 +860,13 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, ri->ri_pseg_start = pseg_start; ri->ri_seq = seg_seq; ri->ri_segnum = segnum; - nextnum = nilfs_get_segnum_of_block(nilfs, ssi.next); + nextnum = nilfs_get_segnum_of_block(nilfs, + le64_to_cpu(sum->ss_next)); ri->ri_nextnum = nextnum; empty_seg = 0; - if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) { + flags = le16_to_cpu(sum->ss_flags); + if (!(flags & NILFS_SS_SR) && !scan_newer) { /* This will never happen because a superblock (last_segment) always points to a pseg having a super root. 
*/ @@ -891,12 +880,12 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize); } - if (!NILFS_SEG_HAS_SR(&ssi)) { - if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) { + if (!(flags & NILFS_SS_SR)) { + if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) { ri->ri_lsegs_start = pseg_start; ri->ri_lsegs_start_seq = seg_seq; } - if (NILFS_SEG_LOGEND(&ssi)) + if (flags & NILFS_SS_LOGEND) ri->ri_lsegs_end = pseg_start; goto try_next_pseg; } @@ -907,12 +896,12 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, ri->ri_lsegs_start = ri->ri_lsegs_end = 0; nilfs_dispose_segment_list(&segments); - nilfs->ns_pseg_offset = (sr_pseg_start = pseg_start) - + ssi.nblocks - seg_start; + sr_pseg_start = pseg_start; + nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start; nilfs->ns_seg_seq = seg_seq; nilfs->ns_segnum = segnum; nilfs->ns_cno = cno; /* nilfs->ns_cno = ri->ri_cno + 1 */ - nilfs->ns_ctime = ssi.ctime; + nilfs->ns_ctime = le64_to_cpu(sum->ss_create); nilfs->ns_nextnum = nextnum; if (scan_newer) @@ -923,15 +912,9 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, scan_newer = 1; } - /* reset region for roll-forward */ - pseg_start += ssi.nblocks; - if (pseg_start < seg_end) - continue; - goto feed_segment; - try_next_pseg: /* Standing on a course, or met an inconsistent state */ - pseg_start += ssi.nblocks; + pseg_start += nblocks; if (pseg_start < seg_end) continue; goto feed_segment; -- cgit v1.1 From 4762077c7b93d35e0417f66702deae3ce3a9855e Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 23 May 2010 21:48:36 +0900 Subject: nilfs2: get rid of macros for segment summary information This removes macros to test segment summary flags and redefines a few relevant macros with inline functions. 
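Replacing the flag-test macros with static inline helpers keeps the call sites the same while gaining argument type checking and single evaluation. A self-contained sketch of the before/after shape; the struct and flag values are placeholders, not the nilfs definitions:

#include <stdio.h>

#define SS_LOGBGN	0x0001	/* placeholder flag values */
#define SS_LOGEND	0x0002

struct segsum { unsigned int flags; unsigned long nblocks, nsumblk; };

/* macro style: no type checking, the argument may be evaluated repeatedly */
#define SEG_SIMPLEX(sum) \
	(((sum)->flags & (SS_LOGBGN | SS_LOGEND)) == (SS_LOGBGN | SS_LOGEND))

/* inline style: the compiler checks the argument type */
static inline int seg_simplex(const struct segsum *sum)
{
	return (sum->flags & (SS_LOGBGN | SS_LOGEND)) ==
	       (SS_LOGBGN | SS_LOGEND);
}

static inline int seg_empty(const struct segsum *sum)
{
	return sum->nblocks == sum->nsumblk;
}

int main(void)
{
	struct segsum s = { .flags = SS_LOGBGN | SS_LOGEND,
			    .nblocks = 1, .nsumblk = 1 };

	printf("simplex=%d empty=%d\n", seg_simplex(&s), seg_empty(&s));
	printf("macro agrees: %d\n", SEG_SIMPLEX(&s));
	return 0;
}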
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/segbuf.h | 24 +++++++++++++----------- fs/nilfs2/segment.c | 8 ++++---- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index 85fbb66..b04f08c 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h @@ -54,17 +54,6 @@ struct nilfs_segsum_info { sector_t next; }; -/* macro for the flags */ -#define NILFS_SEG_HAS_SR(sum) ((sum)->flags & NILFS_SS_SR) -#define NILFS_SEG_LOGBGN(sum) ((sum)->flags & NILFS_SS_LOGBGN) -#define NILFS_SEG_LOGEND(sum) ((sum)->flags & NILFS_SS_LOGEND) -#define NILFS_SEG_DSYNC(sum) ((sum)->flags & NILFS_SS_SYNDT) -#define NILFS_SEG_SIMPLEX(sum) \ - (((sum)->flags & (NILFS_SS_LOGBGN | NILFS_SS_LOGEND)) == \ - (NILFS_SS_LOGBGN | NILFS_SS_LOGEND)) - -#define NILFS_SEG_EMPTY(sum) ((sum)->nblocks == (sum)->nsumblk) - /** * struct nilfs_segment_buffer - Segment buffer * @sb_super: back pointer to a superblock struct @@ -141,6 +130,19 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *, struct buffer_head **); void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *); +static inline int nilfs_segbuf_simplex(struct nilfs_segment_buffer *segbuf) +{ + unsigned int flags = segbuf->sb_sum.flags; + + return (flags & (NILFS_SS_LOGBGN | NILFS_SS_LOGEND)) == + (NILFS_SS_LOGBGN | NILFS_SS_LOGEND); +} + +static inline int nilfs_segbuf_empty(struct nilfs_segment_buffer *segbuf) +{ + return segbuf->sb_sum.nblocks == segbuf->sb_sum.nsumblk; +} + static inline void nilfs_segbuf_add_segsum_buffer(struct nilfs_segment_buffer *segbuf, struct buffer_head *bh) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index c920164..1f7881c 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1914,12 +1914,12 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) } } - if (!NILFS_SEG_SIMPLEX(&segbuf->sb_sum)) { - if (NILFS_SEG_LOGBGN(&segbuf->sb_sum)) { + if (!nilfs_segbuf_simplex(segbuf)) { + if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) { set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); sci->sc_lseg_stime = jiffies; } - if (NILFS_SEG_LOGEND(&segbuf->sb_sum)) + if (segbuf->sb_sum.flags & NILFS_SS_LOGEND) clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags); } } @@ -2082,7 +2082,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) /* Avoid empty segment */ if (sci->sc_stage.scnt == NILFS_ST_DONE && - NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) { + nilfs_segbuf_empty(sci->sc_curseg)) { nilfs_segctor_abort_construction(sci, nilfs, 1); goto out; } -- cgit v1.1 From 57a4bfc486727b68e4422031aeba427fb7262668 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 20 Jun 2010 03:10:21 +0900 Subject: nilfs2: get rid of ns_free_segments_count This counter is unused. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/the_nilfs.c | 3 --- fs/nilfs2/the_nilfs.h | 2 -- 2 files changed, 5 deletions(-) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 9f2cb01..4a9e8a0 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -630,9 +630,6 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) err = -EINVAL; goto failed_sbh; } - /* Dummy values */ - nilfs->ns_free_segments_count = - nilfs->ns_nsegments - (nilfs->ns_segnum + 1); /* Initialize gcinode cache */ err = nilfs_init_gccache(nilfs); diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 85df47f..191560e 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -73,7 +73,6 @@ enum { * @ns_last_seq: sequence value of the latest segment * @ns_last_cno: checkpoint number of the latest segment * @ns_prot_seq: least sequence number of segments which must not be reclaimed - * @ns_free_segments_count: counter of free segments * @ns_segctor_sem: segment constructor semaphore * @ns_dat: DAT file inode * @ns_cpfile: checkpoint file inode @@ -150,7 +149,6 @@ struct the_nilfs { u64 ns_last_seq; __u64 ns_last_cno; u64 ns_prot_seq; - unsigned long ns_free_segments_count; struct rw_semaphore ns_segctor_sem; -- cgit v1.1 From bde4e696e4a527c3cc579ed77e4844d11ca17e12 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 27 Jun 2010 21:38:05 +0900 Subject: nilfs2: do not update mount time on rw->ro remount The mount time field in the super block is wrongly updated when nilfs remounts the partition from read-write to read-only. This fixes the issue. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/super.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 414ef68..39b28cf 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -884,7 +884,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) if (!(sbp->s_state & le16_to_cpu(NILFS_VALID_FS)) && (nilfs->ns_mount_state & NILFS_VALID_FS)) sbp->s_state = cpu_to_le16(nilfs->ns_mount_state); - sbp->s_mtime = cpu_to_le64(get_seconds()); nilfs_commit_super(sbi, 1); up_write(&nilfs->ns_sem); } else { -- cgit v1.1 From 7ecaa46cfea453238a369b3019739d50ff5d7c37 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 17:49:29 +0900 Subject: nilfs2: add nilfs_cleanup_super This function writes out the filesystem state to the super blocks so that the same cleanup work can be shared. This is a preparation for writing back the super blocks alternately.
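The helper is worth having because the "clean" flag follows a small protocol: the on-disk state is cleared while the filesystem is mounted writable and only restored on a clean unmount or an rw->ro transition, so an unexpected crash leaves it clear and tells the next mount that roll-forward may be needed. A self-contained model of that protocol; the flag values and names are placeholders:

#include <assert.h>
#include <stdint.h>

#define FS_VALID	0x0001	/* placeholder for NILFS_VALID_FS */
#define FS_ERROR	0x0002	/* placeholder for NILFS_ERROR_FS */

struct ondisk_sb { uint16_t state; };
struct mounted_fs { uint16_t mount_state; struct ondisk_sb sb; };

static void setup_super(struct mounted_fs *fs)
{
	/* remember the state seen at mount time, then mark on-disk "in use" */
	fs->mount_state = fs->sb.state;
	fs->sb.state &= ~FS_VALID;
}

static void cleanup_super(struct mounted_fs *fs)
{
	/* restore the remembered state; stays unclean if it was unclean */
	fs->sb.state = fs->mount_state;
}

int main(void)
{
	struct mounted_fs fs = { .sb = { .state = FS_VALID } };

	setup_super(&fs);
	assert(!(fs.sb.state & FS_VALID));	/* a crash here => recovery needed */
	cleanup_super(&fs);
	assert(fs.sb.state & FS_VALID);		/* clean unmount */
	return 0;
}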
Cc: Jiro SEKIBA Signed-off-by: Ryusuke Konishi --- fs/nilfs2/nilfs.h | 1 + fs/nilfs2/super.c | 28 ++++++++++++++++++++-------- fs/nilfs2/the_nilfs.c | 5 ++--- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 47d6d79..4695417 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -271,6 +271,7 @@ nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); extern int nilfs_store_magic_and_option(struct super_block *, struct nilfs_super_block *, char *); extern int nilfs_commit_super(struct nilfs_sb_info *, int); +extern int nilfs_cleanup_super(struct nilfs_sb_info *); extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64); extern void nilfs_detach_checkpoint(struct nilfs_sb_info *); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 39b28cf..f23a31b 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -278,6 +278,24 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) return nilfs_sync_super(sbi, dupsb); } +/** + * nilfs_cleanup_super() - write filesystem state for cleanup + * @sbi: nilfs_sb_info to be unmounted or degraded to read-only + * + * This function restores state flags in the on-disk super block. + * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the + * filesystem was not clean previously. + */ +int nilfs_cleanup_super(struct nilfs_sb_info *sbi) +{ + struct nilfs_super_block **sbp = sbi->s_nilfs->ns_sbp; + int ret; + + sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state); + ret = nilfs_commit_super(sbi, 1); + return ret; +} + static void nilfs_put_super(struct super_block *sb) { struct nilfs_sb_info *sbi = NILFS_SB(sb); @@ -289,8 +307,7 @@ static void nilfs_put_super(struct super_block *sb) if (!(sb->s_flags & MS_RDONLY)) { down_write(&nilfs->ns_sem); - nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); - nilfs_commit_super(sbi, 1); + nilfs_cleanup_super(sbi); up_write(&nilfs->ns_sem); } down_write(&nilfs->ns_super_sem); @@ -819,7 +836,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent, static int nilfs_remount(struct super_block *sb, int *flags, char *data) { struct nilfs_sb_info *sbi = NILFS_SB(sb); - struct nilfs_super_block *sbp; struct the_nilfs *nilfs = sbi->s_nilfs; unsigned long old_sb_flags; struct nilfs_mount_options old_opts; @@ -880,11 +896,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) * the RDONLY flag and then mark the partition as valid again. 
*/ down_write(&nilfs->ns_sem); - sbp = nilfs->ns_sbp[0]; - if (!(sbp->s_state & le16_to_cpu(NILFS_VALID_FS)) && - (nilfs->ns_mount_state & NILFS_VALID_FS)) - sbp->s_state = cpu_to_le16(nilfs->ns_mount_state); - nilfs_commit_super(sbi, 1); + nilfs_cleanup_super(sbi); up_write(&nilfs->ns_sem); } else { /* diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 4a9e8a0..ed58053 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -324,9 +324,8 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) goto failed_unload; down_write(&nilfs->ns_sem); - nilfs->ns_mount_state |= NILFS_VALID_FS; - nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state); - err = nilfs_commit_super(sbi, 1); + nilfs->ns_mount_state |= NILFS_VALID_FS; /* set "clean" flag */ + err = nilfs_cleanup_super(sbi); up_write(&nilfs->ns_sem); if (err) { -- cgit v1.1 From c8a11c8a1455c380286cfd3d3442e2b60edee49a Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 17:49:30 +0900 Subject: nilfs2: add nilfs_set_error This function marks error state and write it on super blocks. This is a preparation for making super block writeback alternately. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/super.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index f23a31b..4a85dfb 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -74,6 +74,19 @@ struct kmem_cache *nilfs_btree_path_cache; static int nilfs_remount(struct super_block *sb, int *flags, char *data); +static void nilfs_set_error(struct nilfs_sb_info *sbi) +{ + struct the_nilfs *nilfs = sbi->s_nilfs; + + down_write(&nilfs->ns_sem); + if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { + nilfs->ns_mount_state |= NILFS_ERROR_FS; + nilfs->ns_sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); + nilfs_commit_super(sbi, 1); + } + up_write(&nilfs->ns_sem); +} + /** * nilfs_error() - report failure condition on a filesystem * @@ -99,16 +112,7 @@ void nilfs_error(struct super_block *sb, const char *function, va_end(args); if (!(sb->s_flags & MS_RDONLY)) { - struct the_nilfs *nilfs = sbi->s_nilfs; - - down_write(&nilfs->ns_sem); - if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { - nilfs->ns_mount_state |= NILFS_ERROR_FS; - nilfs->ns_sbp[0]->s_state |= - cpu_to_le16(NILFS_ERROR_FS); - nilfs_commit_super(sbi, 1); - } - up_write(&nilfs->ns_sem); + nilfs_set_error(sbi); if (nilfs_test_opt(sbi, ERRORS_RO)) { printk(KERN_CRIT "Remounting filesystem read-only\n"); -- cgit v1.1 From 60f46b7efc1d6b980511c2644cb89903062f6e98 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 17:49:31 +0900 Subject: nilfs2: separate function that updates log position This moves out section that updates information of the recent log position stored in super blocks from nilfs_commit_super to a new routine named nilfs_set_log_cursor. 
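nilfs_set_log_cursor concentrates the "where does the latest log end" bookkeeping in one place: it snapshots the last written partial segment, its sequence number, the last checkpoint number, and the free block count into the super block image. A reduced userspace sketch of that copy; the field names mimic the on-disk super block, but both structs are simplified stand-ins:

#include <stdint.h>
#include <stdio.h>

struct runtime_state {		/* stand-in for struct the_nilfs */
	uint64_t last_pseg;	/* block address of the last written log */
	uint64_t last_seq;	/* its sequence number */
	uint64_t last_cno;	/* last checkpoint number */
	uint64_t free_blocks;
};

struct super_image {		/* stand-in for nilfs_super_block fields */
	uint64_t s_last_pseg;
	uint64_t s_last_seq;
	uint64_t s_last_cno;
	uint64_t s_free_blocks_count;
};

/*
 * In the kernel this runs under ns_last_segment_lock so the cursor fields
 * are copied as one consistent snapshot; the userspace model just copies.
 */
static void set_log_cursor(struct super_image *sbp,
			   const struct runtime_state *st)
{
	sbp->s_free_blocks_count = st->free_blocks;
	sbp->s_last_pseg = st->last_pseg;
	sbp->s_last_seq = st->last_seq;
	sbp->s_last_cno = st->last_cno;
}

int main(void)
{
	struct runtime_state st = { 1024, 7, 42, 99999 };
	struct super_image sb;

	set_log_cursor(&sb, &st);
	printf("pseg=%llu seq=%llu cno=%llu\n",
	       (unsigned long long)sb.s_last_pseg,
	       (unsigned long long)sb.s_last_seq,
	       (unsigned long long)sb.s_last_cno);
	return 0;
}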
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/nilfs.h | 2 ++ fs/nilfs2/super.c | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 4695417..6718616 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -270,6 +270,8 @@ extern struct nilfs_super_block * nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); extern int nilfs_store_magic_and_option(struct super_block *, struct nilfs_super_block *, char *); +extern void nilfs_set_log_cursor(struct nilfs_super_block *, + struct the_nilfs *); extern int nilfs_commit_super(struct nilfs_sb_info *, int); extern int nilfs_cleanup_super(struct nilfs_sb_info *); extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 4a85dfb..c5328c8 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -237,13 +237,27 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb) return err; } +void nilfs_set_log_cursor(struct nilfs_super_block *sbp, + struct the_nilfs *nilfs) +{ + sector_t nfreeblocks; + + /* nilfs->ns_sem must be locked by the caller. */ + nilfs_count_free_blocks(nilfs, &nfreeblocks); + sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks); + + spin_lock(&nilfs->ns_last_segment_lock); + sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq); + sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg); + sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno); + spin_unlock(&nilfs->ns_last_segment_lock); +} + int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) { struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_super_block **sbp = nilfs->ns_sbp; - sector_t nfreeblocks; time_t t; - int err; /* nilfs->sem must be locked by the caller. */ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { @@ -255,20 +269,10 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) return -EIO; } } - err = nilfs_count_free_blocks(nilfs, &nfreeblocks); - if (unlikely(err)) { - printk(KERN_ERR "NILFS: failed to count free blocks\n"); - return err; - } - spin_lock(&nilfs->ns_last_segment_lock); - sbp[0]->s_last_seq = cpu_to_le64(nilfs->ns_last_seq); - sbp[0]->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg); - sbp[0]->s_last_cno = cpu_to_le64(nilfs->ns_last_cno); - spin_unlock(&nilfs->ns_last_segment_lock); + nilfs_set_log_cursor(sbp[0], nilfs); t = get_seconds(); nilfs->ns_sbwtime[0] = t; - sbp[0]->s_free_blocks_count = cpu_to_le64(nfreeblocks); sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, -- cgit v1.1 From d26493b6f017c0b0063a15bf893411ddae85eee4 Mon Sep 17 00:00:00 2001 From: Jiro SEKIBA Date: Mon, 28 Jun 2010 17:49:32 +0900 Subject: nilfs2: introduce nilfs_prepare_super This function checks validity of super block pointers. If first super block is invalid, it will swap the super blocks. The function should be called before any super block information updates. Caller must obtain nilfs->ns_sem. 
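The preparation step boils down to trusting the primary super block only if its magic is intact and falling back to the secondary copy before any fields are updated. The sketch below models that selection in userspace; the magic value and structures are placeholders, and it follows the later memcpy-based form of the function (including refreshing a stale backup), whereas the version introduced here swaps the two buffers instead:

#include <stddef.h>
#include <stdint.h>

#define SUPER_MAGIC	0x53555052u	/* placeholder, not the NILFS magic */

struct super_image { uint16_t s_magic; /* ... other fields ... */ };

/*
 * Returns the array of usable super block images, or NULL if neither copy
 * carries a valid magic number.  If the primary is broken but the secondary
 * is good, the secondary's contents take over slot 0.
 */
static struct super_image **prepare_super(struct super_image *sbp[2])
{
	if (sbp[0]->s_magic != SUPER_MAGIC) {
		if (sbp[1] && sbp[1]->s_magic == SUPER_MAGIC)
			*sbp[0] = *sbp[1];	/* recover from the backup */
		else
			return NULL;		/* both copies are broken */
	} else if (sbp[1] && sbp[1]->s_magic != SUPER_MAGIC) {
		*sbp[1] = *sbp[0];		/* refresh a stale backup */
	}
	return sbp;
}

int main(void)
{
	struct super_image a = { .s_magic = 0 };	/* corrupted primary */
	struct super_image b = { .s_magic = SUPER_MAGIC };
	struct super_image *sbp[2] = { &a, &b };

	return prepare_super(sbp) ? 0 : 1;	/* recovers via the backup */
}

Centralizing this check lets every writer (error marking, sync, cleanup, setup) update sbp[0] without re-validating the copies itself.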
Signed-off-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi --- fs/nilfs2/nilfs.h | 1 + fs/nilfs2/segment.c | 8 ++++-- fs/nilfs2/super.c | 73 +++++++++++++++++++++++++++++++++++++---------------- 3 files changed, 58 insertions(+), 24 deletions(-) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 6718616..4626510 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -272,6 +272,7 @@ extern int nilfs_store_magic_and_option(struct super_block *, struct nilfs_super_block *, char *); extern void nilfs_set_log_cursor(struct nilfs_super_block *, struct the_nilfs *); +extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *); extern int nilfs_commit_super(struct nilfs_sb_info *, int); extern int nilfs_cleanup_super(struct nilfs_sb_info *); extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 1f7881c..9e680a9 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2408,6 +2408,7 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { struct nilfs_sb_info *sbi = sci->sc_sbi; struct the_nilfs *nilfs = sbi->s_nilfs; + struct nilfs_super_block **sbp; int err = 0; nilfs_segctor_accept(sci); @@ -2423,8 +2424,11 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && nilfs_discontinued(nilfs)) { down_write(&nilfs->ns_sem); - err = nilfs_commit_super( - sbi, nilfs_altsb_need_update(nilfs)); + err = -EIO; + sbp = nilfs_prepare_super(sbi); + if (likely(sbp)) + err = nilfs_commit_super( + sbi, nilfs_altsb_need_update(nilfs)); up_write(&nilfs->ns_sem); } } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index c5328c8..eb7de40 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -77,12 +77,16 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data); static void nilfs_set_error(struct nilfs_sb_info *sbi) { struct the_nilfs *nilfs = sbi->s_nilfs; + struct nilfs_super_block **sbp; down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { nilfs->ns_mount_state |= NILFS_ERROR_FS; - nilfs->ns_sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); - nilfs_commit_super(sbi, 1); + sbp = nilfs_prepare_super(sbi); + if (likely(sbp)) { + sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); + nilfs_commit_super(sbi, 1); + } } up_write(&nilfs->ns_sem); } @@ -253,22 +257,32 @@ void nilfs_set_log_cursor(struct nilfs_super_block *sbp, spin_unlock(&nilfs->ns_last_segment_lock); } -int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) +struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi) { struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_super_block **sbp = nilfs->ns_sbp; - time_t t; - /* nilfs->sem must be locked by the caller. */ + /* nilfs->ns_sem must be locked by the caller. */ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { - if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) + if (sbp[1] && + sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { nilfs_swap_super_block(nilfs); - else { + } else { printk(KERN_CRIT "NILFS: superblock broke on dev %s\n", sbi->s_super->s_id); - return -EIO; + return NULL; } } + return sbp; +} + +int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) +{ + struct the_nilfs *nilfs = sbi->s_nilfs; + struct nilfs_super_block **sbp = nilfs->ns_sbp; + time_t t; + + /* nilfs->ns_sem must be locked by the caller. 
*/ nilfs_set_log_cursor(sbp[0], nilfs); t = get_seconds(); @@ -296,11 +310,14 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) */ int nilfs_cleanup_super(struct nilfs_sb_info *sbi) { - struct nilfs_super_block **sbp = sbi->s_nilfs->ns_sbp; - int ret; + struct nilfs_super_block **sbp; + int ret = -EIO; - sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state); - ret = nilfs_commit_super(sbi, 1); + sbp = nilfs_prepare_super(sbi); + if (sbp) { + sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state); + ret = nilfs_commit_super(sbi, 1); + } return ret; } @@ -336,6 +353,7 @@ static int nilfs_sync_fs(struct super_block *sb, int wait) { struct nilfs_sb_info *sbi = NILFS_SB(sb); struct the_nilfs *nilfs = sbi->s_nilfs; + struct nilfs_super_block **sbp; int err = 0; /* This function is called when super block should be written back */ @@ -343,8 +361,11 @@ static int nilfs_sync_fs(struct super_block *sb, int wait) err = nilfs_construct_segment(sb); down_write(&nilfs->ns_sem); - if (nilfs_sb_dirty(nilfs)) - nilfs_commit_super(sbi, 1); + if (nilfs_sb_dirty(nilfs)) { + sbp = nilfs_prepare_super(sbi); + if (likely(sbp)) + nilfs_commit_super(sbi, 1); + } up_write(&nilfs->ns_sem); return err; @@ -638,11 +659,18 @@ nilfs_set_default_options(struct nilfs_sb_info *sbi, static int nilfs_setup_super(struct nilfs_sb_info *sbi) { struct the_nilfs *nilfs = sbi->s_nilfs; - struct nilfs_super_block *sbp = nilfs->ns_sbp[0]; - int max_mnt_count = le16_to_cpu(sbp->s_max_mnt_count); - int mnt_count = le16_to_cpu(sbp->s_mnt_count); + struct nilfs_super_block **sbp; + int max_mnt_count; + int mnt_count; + + /* nilfs->ns_sem must be locked by the caller. */ + sbp = nilfs_prepare_super(sbi); + if (!sbp) + return -EIO; + + max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count); + mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); - /* nilfs->sem must be locked by the caller. */ if (nilfs->ns_mount_state & NILFS_ERROR_FS) { printk(KERN_WARNING "NILFS warning: mounting fs with errors\n"); @@ -653,11 +681,12 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi) #endif } if (!max_mnt_count) - sbp->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT); + sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT); - sbp->s_mnt_count = cpu_to_le16(mnt_count + 1); - sbp->s_state = cpu_to_le16(le16_to_cpu(sbp->s_state) & ~NILFS_VALID_FS); - sbp->s_mtime = cpu_to_le64(get_seconds()); + sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1); + sbp[0]->s_state = + cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); + sbp[0]->s_mtime = cpu_to_le64(get_seconds()); return nilfs_commit_super(sbi, 1); } -- cgit v1.1 From b2ac86e1a8e3a3b0ab4449d062c582f07a078e7b Mon Sep 17 00:00:00 2001 From: Jiro SEKIBA Date: Mon, 28 Jun 2010 17:49:33 +0900 Subject: nilfs2: sync super blocks in turns This will sync super blocks in turns instead of syncing duplicate super blocks at the time. This will help searching valid super root when super block is written into disk before log is written, which is happen when barrier-less block devices are unmounted uncleanly. In the situation, old super block likely points to valid log. This patch introduces ns_sbwcount member to the nilfs object and adds nilfs_sb_will_flip() function; ns_sbwcount counts how many times super blocks write back to the disk. And, nilfs_sb_will_flip() decides whether flipping required or not based on the count of ns_sbwcount to sync super blocks asymmetrically. 
The following functions are also changed: - nilfs_prepare_super(): flips super blocks according to the argument. The argument is calculated by nilfs_sb_will_flip() function. - nilfs_cleanup_super(): sets "clean" flag to both super blocks if they point to the same checkpoint. To update both of super block information, caller of nilfs_commit_super must set the information on both super blocks. Signed-off-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi --- fs/nilfs2/nilfs.h | 11 +++++- fs/nilfs2/segment.c | 10 +++--- fs/nilfs2/super.c | 95 +++++++++++++++++++++++++++++++++++---------------- fs/nilfs2/the_nilfs.c | 4 +-- fs/nilfs2/the_nilfs.h | 17 +++++---- 5 files changed, 91 insertions(+), 46 deletions(-) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 4626510..36998ea 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -107,6 +107,14 @@ enum { }; /* + * commit flags for nilfs_commit_super and nilfs_sync_super + */ +enum { + NILFS_SB_COMMIT = 0, /* Commit a super block alternately */ + NILFS_SB_COMMIT_ALL /* Commit both super blocks */ +}; + +/* * Macros to check inode numbers */ #define NILFS_MDT_INO_BITS \ @@ -272,7 +280,8 @@ extern int nilfs_store_magic_and_option(struct super_block *, struct nilfs_super_block *, char *); extern void nilfs_set_log_cursor(struct nilfs_super_block *, struct the_nilfs *); -extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *); +extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *, + int flip); extern int nilfs_commit_super(struct nilfs_sb_info *, int); extern int nilfs_cleanup_super(struct nilfs_sb_info *); extern int nilfs_attach_checkpoint(struct nilfs_sb_info *, __u64); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 9e680a9..04e0485 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2425,10 +2425,12 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) nilfs_discontinued(nilfs)) { down_write(&nilfs->ns_sem); err = -EIO; - sbp = nilfs_prepare_super(sbi); - if (likely(sbp)) - err = nilfs_commit_super( - sbi, nilfs_altsb_need_update(nilfs)); + sbp = nilfs_prepare_super(sbi, + nilfs_sb_will_flip(nilfs)); + if (likely(sbp)) { + nilfs_set_log_cursor(sbp[0], nilfs); + err = nilfs_commit_super(sbi, NILFS_SB_COMMIT); + } up_write(&nilfs->ns_sem); } } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index eb7de40..f2cfbba 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -82,10 +82,12 @@ static void nilfs_set_error(struct nilfs_sb_info *sbi) down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { nilfs->ns_mount_state |= NILFS_ERROR_FS; - sbp = nilfs_prepare_super(sbi); + sbp = nilfs_prepare_super(sbi, 0); if (likely(sbp)) { sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); - nilfs_commit_super(sbi, 1); + if (sbp[1]) + sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS); + nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL); } } up_write(&nilfs->ns_sem); @@ -184,7 +186,7 @@ static void nilfs_clear_inode(struct inode *inode) nilfs_btnode_cache_clear(&ii->i_btnode_cache); } -static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb) +static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag) { struct the_nilfs *nilfs = sbi->s_nilfs; int err; @@ -210,12 +212,20 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb) printk(KERN_ERR "NILFS: unable to write superblock (err=%d)\n", err); if (err == -EIO && nilfs->ns_sbh[1]) { + /* + * sbp[0] points to newer log than sbp[1], + * so copy sbp[0] to sbp[1] to take over 
sbp[0]. + */ + memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0], + nilfs->ns_sbsize); nilfs_fall_back_super_block(nilfs); goto retry; } } else { struct nilfs_super_block *sbp = nilfs->ns_sbp[0]; + nilfs->ns_sbwcount++; + /* * The latest segment becomes trailable from the position * written in superblock. @@ -224,20 +234,21 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int dupsb) /* update GC protection for recent segments */ if (nilfs->ns_sbh[1]) { - sbp = NULL; - if (dupsb) { + if (flag == NILFS_SB_COMMIT_ALL) { set_buffer_dirty(nilfs->ns_sbh[1]); - if (!sync_dirty_buffer(nilfs->ns_sbh[1])) - sbp = nilfs->ns_sbp[1]; + if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0) + goto out; } + if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) < + le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno)) + sbp = nilfs->ns_sbp[1]; } - if (sbp) { - spin_lock(&nilfs->ns_last_segment_lock); - nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq); - spin_unlock(&nilfs->ns_last_segment_lock); - } - } + spin_lock(&nilfs->ns_last_segment_lock); + nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq); + spin_unlock(&nilfs->ns_last_segment_lock); + } + out: return err; } @@ -257,7 +268,8 @@ void nilfs_set_log_cursor(struct nilfs_super_block *sbp, spin_unlock(&nilfs->ns_last_segment_lock); } -struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi) +struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi, + int flip) { struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_super_block **sbp = nilfs->ns_sbp; @@ -266,38 +278,46 @@ struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *sbi) if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { if (sbp[1] && sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { - nilfs_swap_super_block(nilfs); + memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); } else { printk(KERN_CRIT "NILFS: superblock broke on dev %s\n", sbi->s_super->s_id); return NULL; } + } else if (sbp[1] && + sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { + memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); } + + if (flip && sbp[1]) + nilfs_swap_super_block(nilfs); + return sbp; } -int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) +int nilfs_commit_super(struct nilfs_sb_info *sbi, int flag) { struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_super_block **sbp = nilfs->ns_sbp; time_t t; /* nilfs->ns_sem must be locked by the caller. 
*/ - nilfs_set_log_cursor(sbp[0], nilfs); - t = get_seconds(); - nilfs->ns_sbwtime[0] = t; + nilfs->ns_sbwtime = t; sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[0], nilfs->ns_sbsize)); - if (dupsb && sbp[1]) { - memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); - nilfs->ns_sbwtime[1] = t; + if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { + sbp[1]->s_wtime = sbp[0]->s_wtime; + sbp[1]->s_sum = 0; + sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, + (unsigned char *)sbp[1], + nilfs->ns_sbsize)); } clear_nilfs_sb_dirty(nilfs); - return nilfs_sync_super(sbi, dupsb); + return nilfs_sync_super(sbi, flag); } /** @@ -311,12 +331,23 @@ int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb) int nilfs_cleanup_super(struct nilfs_sb_info *sbi) { struct nilfs_super_block **sbp; + int flag = NILFS_SB_COMMIT; int ret = -EIO; - sbp = nilfs_prepare_super(sbi); + sbp = nilfs_prepare_super(sbi, 0); if (sbp) { sbp[0]->s_state = cpu_to_le16(sbi->s_nilfs->ns_mount_state); - ret = nilfs_commit_super(sbi, 1); + nilfs_set_log_cursor(sbp[0], sbi->s_nilfs); + if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) { + /* + * make the "clean" flag also to the opposite + * super block if both super blocks point to + * the same checkpoint. + */ + sbp[1]->s_state = sbp[0]->s_state; + flag = NILFS_SB_COMMIT_ALL; + } + ret = nilfs_commit_super(sbi, flag); } return ret; } @@ -362,9 +393,11 @@ static int nilfs_sync_fs(struct super_block *sb, int wait) down_write(&nilfs->ns_sem); if (nilfs_sb_dirty(nilfs)) { - sbp = nilfs_prepare_super(sbi); - if (likely(sbp)) - nilfs_commit_super(sbi, 1); + sbp = nilfs_prepare_super(sbi, nilfs_sb_will_flip(nilfs)); + if (likely(sbp)) { + nilfs_set_log_cursor(sbp[0], nilfs); + nilfs_commit_super(sbi, NILFS_SB_COMMIT); + } } up_write(&nilfs->ns_sem); @@ -664,7 +697,7 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi) int mnt_count; /* nilfs->ns_sem must be locked by the caller. */ - sbp = nilfs_prepare_super(sbi); + sbp = nilfs_prepare_super(sbi, 0); if (!sbp) return -EIO; @@ -687,7 +720,9 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi) sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); sbp[0]->s_mtime = cpu_to_le64(get_seconds()); - return nilfs_commit_super(sbi, 1); + /* synchronize sbp[1] with sbp[0] */ + memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); + return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL); } struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb, diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index ed58053..530d277 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -513,8 +513,8 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, nilfs_swap_super_block(nilfs); } - nilfs->ns_sbwtime[0] = le64_to_cpu(sbp[0]->s_wtime); - nilfs->ns_sbwtime[1] = valid[!swp] ? 
le64_to_cpu(sbp[1]->s_wtime) : 0; + nilfs->ns_sbwcount = 0; + nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq); *sbpp = sbp[0]; return 0; diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 191560e..32b4983 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -57,7 +57,8 @@ enum { * @ns_current: back pointer to current mount * @ns_sbh: buffer heads of on-disk super blocks * @ns_sbp: pointers to super block data - * @ns_sbwtime: previous write time of super blocks + * @ns_sbwtime: previous write time of super block + * @ns_sbwcount: write count of super block * @ns_sbsize: size of valid data in super block * @ns_supers: list of nilfs super block structs * @ns_seg_seq: segment sequence counter @@ -119,7 +120,8 @@ struct the_nilfs { */ struct buffer_head *ns_sbh[2]; struct nilfs_super_block *ns_sbp[2]; - time_t ns_sbwtime[2]; + time_t ns_sbwtime; + unsigned ns_sbwcount; unsigned ns_sbsize; unsigned ns_mount_state; @@ -203,20 +205,17 @@ THE_NILFS_FNS(SB_DIRTY, sb_dirty) /* Minimum interval of periodical update of superblocks (in seconds) */ #define NILFS_SB_FREQ 10 -#define NILFS_ALTSB_FREQ 60 /* spare superblock */ static inline int nilfs_sb_need_update(struct the_nilfs *nilfs) { u64 t = get_seconds(); - return t < nilfs->ns_sbwtime[0] || - t > nilfs->ns_sbwtime[0] + NILFS_SB_FREQ; + return t < nilfs->ns_sbwtime || t > nilfs->ns_sbwtime + NILFS_SB_FREQ; } -static inline int nilfs_altsb_need_update(struct the_nilfs *nilfs) +static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs) { - u64 t = get_seconds(); - struct nilfs_super_block **sbp = nilfs->ns_sbp; - return sbp[1] && t > nilfs->ns_sbwtime[1] + NILFS_ALTSB_FREQ; + int flip_bits = nilfs->ns_sbwcount & 0x0FL; + return (flip_bits != 0x08 && flip_bits != 0x0F); } void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64); -- cgit v1.1 From 843d63baa5babf3d8786f6a4377a2448525da7aa Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 19:15:24 +0900 Subject: nilfs2: separate setup of log cursor from init_nilfs This separates a setup routine of log cursor from init_nilfs(). The routine, nilfs_store_log_cursor, reads the last position of the log containing a super root, and initializes relevant state on the nilfs object. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/the_nilfs.c | 45 ++++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 530d277..0d2a46c 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -247,6 +247,36 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) } /** + * nilfs_store_log_cursor - load log cursor from a super block + * @nilfs: nilfs object + * @sbp: buffer storing super block to be read + * + * nilfs_store_log_cursor() reads the last position of the log + * containing a super root from a given super block, and initializes + * relevant information on the nilfs object preparatory for log + * scanning and recovery. 
+ */ +static int nilfs_store_log_cursor(struct the_nilfs *nilfs, + struct nilfs_super_block *sbp) +{ + int ret = 0; + + nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg); + nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); + nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); + + nilfs->ns_seg_seq = nilfs->ns_last_seq; + nilfs->ns_segnum = + nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); + nilfs->ns_cno = nilfs->ns_last_cno + 1; + if (nilfs->ns_segnum >= nilfs->ns_nsegments) { + printk(KERN_ERR "NILFS invalid last segment number.\n"); + ret = -EINVAL; + } + return ret; +} + +/** * load_nilfs - load and recover the nilfs * @nilfs: the_nilfs structure to be released * @sbi: nilfs_sb_info used to recover past segment @@ -615,20 +645,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info; nilfs->ns_bdi = bdi ? : &default_backing_dev_info; - /* Finding last segment */ - nilfs->ns_last_pseg = le64_to_cpu(sbp->s_last_pseg); - nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); - nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); - - nilfs->ns_seg_seq = nilfs->ns_last_seq; - nilfs->ns_segnum = - nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); - nilfs->ns_cno = nilfs->ns_last_cno + 1; - if (nilfs->ns_segnum >= nilfs->ns_nsegments) { - printk(KERN_ERR "NILFS invalid last segment number.\n"); - err = -EINVAL; + err = nilfs_store_log_cursor(nilfs, sbp); + if (err) goto failed_sbh; - } /* Initialize gcinode cache */ err = nilfs_init_gccache(nilfs); -- cgit v1.1 From 2d72b99ecdf8cbb5d9422c54b401d9d590b2faf5 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 19:15:25 +0900 Subject: nilfs2: add missing error code in comment of nilfs_search_super_root nilfs_search_super_root can return -ENOMEM, but this error code is not described in its kernel-doc comment. This fixes the discrepancy. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/recovery.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index f5d9c3f..83e3d8c 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -804,6 +804,8 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, * %-EINVAL - No valid segment found * * %-EIO - I/O error + * + * %-ENOMEM - Insufficient memory available. */ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) -- cgit v1.1 From 6c12516083cf51b6e576691ac6e20c4a32f4edb9 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Jun 2010 19:15:26 +0900 Subject: nilfs2: implement fallback for super root search Although nilfs redundantly uses two super blocks and each may point to different position on log, the current version of nilfs does not try fallback to the spare super block when it doesn't find any valid log at the position that the primary super block points to. This has been a cause of mount failures due to write order reversals on barrier less block devices. This inserts fallback code in error path of nilfs_search_super_root routine to resolve the mount failure problem. 
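For illustration only, here is a minimal standalone sketch of that retry policy; the struct and the stubbed search function are placeholders, not the kernel API, and only the control flow mirrors the change: fall back solely on a scan failure (-EINVAL), and only when the spare super block is usable.

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct sb_copy { int valid; long long last_pseg; };

/* Stub: pretend no valid log is found from this position. */
static int search_super_root(long long pseg) { (void)pseg; return -EINVAL; }

static int load_with_fallback(struct sb_copy *primary, const struct sb_copy *spare)
{
	int err = search_super_root(primary->last_pseg);

	if (err != -EINVAL)
		return err;            /* hard error: no retry */
	if (!spare->valid)
		return err;            /* no usable spare super block */

	/* Roll back to the older position kept in the spare copy and retry. */
	memcpy(primary, spare, sizeof(*primary));
	return search_super_root(primary->last_pseg);
}

int main(void)
{
	struct sb_copy p = { 1, 200 };
	const struct sb_copy s = { 1, 100 };

	printf("result: %d\n", load_with_fallback(&p, &s));
	return 0;
}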
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/the_nilfs.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 0d2a46c..88c8976 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -38,6 +38,8 @@ static LIST_HEAD(nilfs_objects); static DEFINE_SPINLOCK(nilfs_lock); +static int nilfs_valid_sb(struct nilfs_super_block *sbp); + void nilfs_set_last_segment(struct the_nilfs *nilfs, sector_t start_blocknr, u64 seq, __u64 cno) { @@ -316,8 +318,50 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) err = nilfs_search_super_root(nilfs, &ri); if (unlikely(err)) { - printk(KERN_ERR "NILFS: error searching super root.\n"); - goto failed; + struct nilfs_super_block **sbp = nilfs->ns_sbp; + int blocksize; + + if (err != -EINVAL) + goto scan_error; + + if (!nilfs_valid_sb(sbp[1])) { + printk(KERN_WARNING + "NILFS warning: unable to fall back to spare" + "super block\n"); + goto scan_error; + } + printk(KERN_INFO + "NILFS: try rollback from an earlier position\n"); + + /* + * restore super block with its spare and reconfigure + * relevant states of the nilfs object. + */ + memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); + nilfs->ns_crc_seed = le32_to_cpu(sbp[0]->s_crc_seed); + nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime); + + /* verify consistency between two super blocks */ + blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); + if (blocksize != nilfs->ns_blocksize) { + printk(KERN_WARNING + "NILFS warning: blocksize differs between " + "two super blocks (%d != %d)\n", + blocksize, nilfs->ns_blocksize); + goto scan_error; + } + + err = nilfs_store_log_cursor(nilfs, sbp[0]); + if (err) + goto scan_error; + + /* drop clean flag to allow roll-forward and recovery */ + nilfs->ns_mount_state &= ~NILFS_VALID_FS; + valid_fs = 0; + + err = nilfs_search_super_root(nilfs, &ri); + if (err) + goto scan_error; } err = nilfs_load_super_root(nilfs, ri.ri_super_root); @@ -371,6 +415,10 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) sbi->s_super->s_flags = s_flags; return 0; + scan_error: + printk(KERN_ERR "NILFS: error searching super root.\n"); + goto failed; + failed_unload: nilfs_mdt_destroy(nilfs->ns_cpfile); nilfs_mdt_destroy(nilfs->ns_sufile); -- cgit v1.1 From 325020477a51ffa849418b3e38189fd266f2ae20 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 29 Jun 2010 14:42:13 +0900 Subject: nilfs2: do not update log cursor for small change Super blocks of nilfs are periodically overwritten in order to record the recent log position. This shortens recovery time after unclean unmount, but the current implementation performs the update even for a few blocks of change. If the filesystem gets small changes slowly and continually, super blocks may be updated excessively. This moderates the issue by skipping update of log cursor if it does not cross a segment boundary. 
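Reduced to its essentials, the skip condition compares the new segment sequence with the sequence recorded at the previous super block update; a small userspace sketch of that bookkeeping follows (the struct is illustrative, only the field names follow the patch).

#include <stdbool.h>
#include <stdio.h>

struct cursor {
	unsigned long long last_seq;   /* sequence of the latest segment */
	unsigned long long prev_seq;   /* sequence at the last super block update */
	bool sb_dirty;
};

/* Mark the super block dirty only when the log crossed a segment boundary. */
static void set_last_segment(struct cursor *c, unsigned long long seq)
{
	c->last_seq = seq;
	if (!c->sb_dirty) {
		if (c->prev_seq == c->last_seq)
			return;        /* small change: keep the old cursor */
		c->sb_dirty = true;
	}
	c->prev_seq = c->last_seq;
}

int main(void)
{
	struct cursor c = { .prev_seq = 7 };

	set_last_segment(&c, 7);       /* same segment: stays clean */
	set_last_segment(&c, 8);       /* new segment: becomes dirty */
	printf("dirty=%d\n", c.sb_dirty);
	return 0;
}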
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/segment.c | 1 - fs/nilfs2/the_nilfs.c | 11 +++++++++++ fs/nilfs2/the_nilfs.h | 2 ++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 04e0485..9fd051a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1951,7 +1951,6 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) if (update_sr) { nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, segbuf->sb_sum.seg_seq, nilfs->ns_cno++); - set_nilfs_sb_dirty(nilfs); clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 88c8976..f2efc8c 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -47,6 +47,16 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs, nilfs->ns_last_pseg = start_blocknr; nilfs->ns_last_seq = seq; nilfs->ns_last_cno = cno; + + if (!nilfs_sb_dirty(nilfs)) { + if (nilfs->ns_prev_seq == nilfs->ns_last_seq) + goto stay_cursor; + + set_nilfs_sb_dirty(nilfs); + } + nilfs->ns_prev_seq = nilfs->ns_last_seq; + + stay_cursor: spin_unlock(&nilfs->ns_last_segment_lock); } @@ -267,6 +277,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, nilfs->ns_last_cno = le64_to_cpu(sbp->s_last_cno); nilfs->ns_last_seq = le64_to_cpu(sbp->s_last_seq); + nilfs->ns_prev_seq = nilfs->ns_last_seq; nilfs->ns_seg_seq = nilfs->ns_last_seq; nilfs->ns_segnum = nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 32b4983..f785a7b 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -74,6 +74,7 @@ enum { * @ns_last_seq: sequence value of the latest segment * @ns_last_cno: checkpoint number of the latest segment * @ns_prot_seq: least sequence number of segments which must not be reclaimed + * @ns_prev_seq: base sequence number used to decide if advance log cursor * @ns_segctor_sem: segment constructor semaphore * @ns_dat: DAT file inode * @ns_cpfile: checkpoint file inode @@ -151,6 +152,7 @@ struct the_nilfs { u64 ns_last_seq; __u64 ns_last_cno; u64 ns_prot_seq; + u64 ns_prev_seq; struct rw_semaphore ns_segctor_sem; -- cgit v1.1 From 773bc4f3b6898634a80a41c72a1f34cb89992dcd Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 5 Jul 2010 13:00:08 +0900 Subject: nilfs2: add barrier mount option Nilfs enables write barriers by default and has "nobarrier" mount option to disable this feature. But it lacks the complementary option and has no way to re-enable the feature on remount. This adds "barrier" option to resolve this imbalance. Signed-off-by: Ryusuke Konishi --- Documentation/filesystems/nilfs2.txt | 5 ++++- fs/nilfs2/super.c | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index d3e7673..54f61c0 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt @@ -49,7 +49,10 @@ Mount options NILFS2 supports the following mount options: (*) == default -nobarrier Disables barriers. +barrier(*) This enables/disables the use of write barriers. This +nobarrier requires an IO stack which can support barriers, and + if nilfs gets an error on a barrier write, it will + disable again with a warning. errors=continue Keep going on a filesystem error. errors=remount-ro(*) Remount the filesystem read-only on an error. errors=panic Panic and halt the machine if an error occurs. 
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index f2cfbba..13b0e95 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -603,7 +603,7 @@ static const struct export_operations nilfs_export_ops = { enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, - Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, + Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, Opt_discard, Opt_err, }; @@ -611,6 +611,7 @@ static match_table_t tokens = { {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, + {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_snapshot, "cp=%u"}, {Opt_order, "order=%s"}, @@ -636,6 +637,9 @@ static int parse_options(char *options, struct super_block *sb) token = match_token(p, tokens, args); switch (token) { + case Opt_barrier: + nilfs_set_opt(sbi, BARRIER); + break; case Opt_nobarrier: nilfs_clear_opt(sbi, BARRIER); break; -- cgit v1.1 From 802d31775404ee335ca1e97a82e1e706a4c843be Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 5 Jul 2010 14:27:04 +0900 Subject: nilfs2: add nodiscard mount option Nilfs has "discard" mount option which issues discard/TRIM commands to underlying block device, but it lacks a complementary option and has no way to disable the feature through remount. This adds "nodiscard" option to resolve this imbalance. Signed-off-by: Ryusuke Konishi --- Documentation/filesystems/nilfs2.txt | 7 ++++--- fs/nilfs2/super.c | 6 +++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index 54f61c0..d5c0cef 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt @@ -77,9 +77,10 @@ norecovery Disable recovery of the filesystem on mount. This disables every write access on the device for read-only mounts or snapshots. This option will fail for r/w mounts on an unclean volume. -discard Issue discard/TRIM commands to the underlying block - device when blocks are freed. This is useful for SSD - devices and sparse/thinly-provisioned LUNs. +discard This enables/disables the use of discard/TRIM commands. +nodiscard(*) The discard/TRIM commands are sent to the underlying + block device when blocks are freed. This is useful + for SSD devices and sparse/thinly-provisioned LUNs. 
NILFS2 usage ============ diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 13b0e95..9da1221 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -604,7 +604,7 @@ static const struct export_operations nilfs_export_ops = { enum { Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, - Opt_discard, Opt_err, + Opt_discard, Opt_nodiscard, Opt_err, }; static match_table_t tokens = { @@ -617,6 +617,7 @@ static match_table_t tokens = { {Opt_order, "order=%s"}, {Opt_norecovery, "norecovery"}, {Opt_discard, "discard"}, + {Opt_nodiscard, "nodiscard"}, {Opt_err, NULL} }; @@ -676,6 +677,9 @@ static int parse_options(char *options, struct super_block *sb) case Opt_discard: nilfs_set_opt(sbi, DISCARD); break; + case Opt_nodiscard: + nilfs_clear_opt(sbi, DISCARD); + break; default: printk(KERN_ERR "NILFS: Unrecognized mount option \"%s\"\n", p); -- cgit v1.1 From c6b4d57ddf12f3fd4d41d7b3b9181de46748418d Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 5 Jul 2010 14:40:27 +0900 Subject: nilfs2: use seq_puts to print mount options without argument This replaces seq_printf() with seq_puts() in nilfs_show_options for mount options which have no argument. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/super.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 9da1221..6a11243 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -521,20 +521,20 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs) struct nilfs_sb_info *sbi = NILFS_SB(sb); if (!nilfs_test_opt(sbi, BARRIER)) - seq_printf(seq, ",nobarrier"); + seq_puts(seq, ",nobarrier"); if (nilfs_test_opt(sbi, SNAPSHOT)) seq_printf(seq, ",cp=%llu", (unsigned long long int)sbi->s_snapshot_cno); if (nilfs_test_opt(sbi, ERRORS_PANIC)) - seq_printf(seq, ",errors=panic"); + seq_puts(seq, ",errors=panic"); if (nilfs_test_opt(sbi, ERRORS_CONT)) - seq_printf(seq, ",errors=continue"); + seq_puts(seq, ",errors=continue"); if (nilfs_test_opt(sbi, STRICT_ORDER)) - seq_printf(seq, ",order=strict"); + seq_puts(seq, ",order=strict"); if (nilfs_test_opt(sbi, NORECOVERY)) - seq_printf(seq, ",norecovery"); + seq_puts(seq, ",norecovery"); if (nilfs_test_opt(sbi, DISCARD)) - seq_printf(seq, ",discard"); + seq_puts(seq, ",discard"); return 0; } -- cgit v1.1 From 7c01745781177795e39f78b2c2c42c470a13833a Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 5 Jul 2010 20:08:33 +0900 Subject: nilfs2: pass remount flag to parse_options This adds is_remount argument to the parse_options() function that obtains mount options from strings. Previously, parse_options did not distinguish context whether it's called for a new mount or remount, so the caller needed additional verifications outside the function. This allows parse_options to verify options and print messages depending on the context. 
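A toy parser sketch of the same idea, with a made-up option name and messages, shows how an is_remount flag lets the parser itself reject context-dependent options instead of leaving that to the caller.

#include <stdio.h>
#include <string.h>

/* Accept "snapshot" only when it does not turn a regular remount into one. */
static int parse_one(const char *opt, int is_remount, int was_snapshot)
{
	if (strcmp(opt, "snapshot") != 0)
		return 1;                  /* other options ignored in this sketch */
	if (is_remount && !was_snapshot) {
		fprintf(stderr, "cannot change a regular mount to a snapshot\n");
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d\n", parse_one("snapshot", 0, 0));   /* fresh mount: accepted */
	printf("%d\n", parse_one("snapshot", 1, 0));   /* remount: rejected */
	return 0;
}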
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/super.c | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 6a11243..952f4cc 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -621,7 +621,7 @@ static match_table_t tokens = { {Opt_err, NULL} }; -static int parse_options(char *options, struct super_block *sb) +static int parse_options(char *options, struct super_block *sb, int is_remount) { struct nilfs_sb_info *sbi = NILFS_SB(sb); char *p; @@ -666,8 +666,26 @@ static int parse_options(char *options, struct super_block *sb) case Opt_snapshot: if (match_int(&args[0], &option) || option <= 0) return 0; - if (!(sb->s_flags & MS_RDONLY)) + if (is_remount) { + if (!nilfs_test_opt(sbi, SNAPSHOT)) { + printk(KERN_ERR + "NILFS: cannot change regular " + "mount to snapshot.\n"); + return 0; + } else if (option != sbi->s_snapshot_cno) { + printk(KERN_ERR + "NILFS: cannot remount to a " + "different snapshot.\n"); + return 0; + } + break; + } + if (!(sb->s_flags & MS_RDONLY)) { + printk(KERN_ERR "NILFS: cannot mount snapshot " + "read/write. A read-only option is " + "required.\n"); return 0; + } sbi->s_snapshot_cno = option; nilfs_set_opt(sbi, SNAPSHOT); break; @@ -767,7 +785,7 @@ int nilfs_store_magic_and_option(struct super_block *sb, sbi->s_interval = le32_to_cpu(sbp->s_c_interval); sbi->s_watermark = le32_to_cpu(sbp->s_c_block_max); - return !parse_options(data, sb) ? -EINVAL : 0 ; + return !parse_options(data, sb, 0) ? -EINVAL : 0 ; } /** @@ -929,32 +947,17 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) old_opts.snapshot_cno = sbi->s_snapshot_cno; was_snapshot = nilfs_test_opt(sbi, SNAPSHOT); - if (!parse_options(data, sb)) { + if (!parse_options(data, sb, 1)) { err = -EINVAL; goto restore_opts; } sb->s_flags = (sb->s_flags & ~MS_POSIXACL); err = -EINVAL; - if (was_snapshot) { - if (!(*flags & MS_RDONLY)) { - printk(KERN_ERR "NILFS (device %s): cannot remount " - "snapshot read/write.\n", - sb->s_id); - goto restore_opts; - } else if (sbi->s_snapshot_cno != old_opts.snapshot_cno) { - printk(KERN_ERR "NILFS (device %s): cannot " - "remount to a different snapshot.\n", - sb->s_id); - goto restore_opts; - } - } else { - if (nilfs_test_opt(sbi, SNAPSHOT)) { - printk(KERN_ERR "NILFS (device %s): cannot change " - "a regular mount to a snapshot.\n", - sb->s_id); - goto restore_opts; - } + if (was_snapshot && !(*flags & MS_RDONLY)) { + printk(KERN_ERR "NILFS (device %s): cannot remount snapshot " + "read/write.\n", sb->s_id); + goto restore_opts; } if (!nilfs_valid_fs(nilfs)) { -- cgit v1.1 From cfa913a5077f7619869b2b4d1bf23ccb4f8b3d7b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Wed, 7 Jul 2010 17:19:54 +0900 Subject: nilfs2: add sanity check in nilfs_btree_add_dirty_buffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the report titled "problem with nilfs_cleanerd" from Łukasz Wójcicki, nilfs_btree_lookup_dirty_buffers or nilfs_btree_add_dirty_buffer got memory violation during garbage collection. This could happen if a level field of given btree node buffer is incorrect, which is a crucial internal bug. This inserts a sanity check to figure out the problem. 
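The essence of the check is bounding an on-disk level value before it is used as an array index; a standalone sketch with illustrative constants follows (the kernel uses NILFS_BTREE_LEVEL_NODE_MIN and NILFS_BTREE_LEVEL_MAX).

#include <stdio.h>

/* Illustrative bounds, standing in for the NILFS_BTREE_LEVEL_* constants. */
#define LEVEL_NODE_MIN 1
#define LEVEL_MAX      14

static int level_is_sane(int level)
{
	return level >= LEVEL_NODE_MIN && level < LEVEL_MAX;
}

int main(void)
{
	int lists[LEVEL_MAX] = { 0 };
	int level = 99;                    /* corrupted on-disk value */

	if (!level_is_sane(level)) {
		fprintf(stderr, "invalid btree level: %d\n", level);
		return 1;                  /* refuse instead of indexing lists[] */
	}
	lists[level]++;
	return 0;
}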
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index b27a342..3863567 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1920,6 +1920,18 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree, node = (struct nilfs_btree_node *)bh->b_data; key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); + if (level < NILFS_BTREE_LEVEL_NODE_MIN || + level >= NILFS_BTREE_LEVEL_MAX) { + dump_stack(); + printk(KERN_WARNING + "%s: invalid btree level: %d (key=%llu, ino=%lu, " + "blocknr=%llu)\n", + __func__, level, (unsigned long long)key, + NILFS_BMAP_I(&btree->bt_bmap)->vfs_inode.i_ino, + (unsigned long long)bh->b_blocknr); + return; + } + list_for_each(head, &lists[level]) { cbh = list_entry(head, struct buffer_head, b_assoc_buffers); cnode = (struct nilfs_btree_node *)cbh->b_data; -- cgit v1.1 From 1d5385b9f30ae43209459db424416a3e1d8f2bde Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 16 Jul 2010 23:52:40 +0900 Subject: nilfs2: verify btree node after reading This inserts sanity checks soon after read btree node from disk. This allows early detection of broken btree nodes, and helps to narrow down problems due to file system corruption. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++--- fs/nilfs2/btree.h | 2 ++ fs/nilfs2/gcinode.c | 9 +++++++-- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 3863567..6c9ec56 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -71,17 +71,24 @@ static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr, { struct address_space *btnc = &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache; + struct buffer_head *bh; int err; err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp); if (err) return err == -EEXIST ? 0 : err; - wait_on_buffer(*bhp); - if (!buffer_uptodate(*bhp)) { - brelse(*bhp); + bh = *bhp; + wait_on_buffer(bh); + if (!buffer_uptodate(bh)) { + brelse(bh); return -EIO; } + if (nilfs_btree_broken_node_block(bh)) { + clear_buffer_uptodate(bh); + brelse(bh); + return -EINVAL; + } return 0; } @@ -382,6 +389,43 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, return s == 0; } +/** + * nilfs_btree_node_broken - verify consistency of btree node + * @node: btree node block to be examined + * @size: node size (in bytes) + * @blocknr: block number + * + * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. 
+ */ +static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, + size_t size, sector_t blocknr) +{ + int level, flags, nchildren; + int ret = 0; + + level = nilfs_btree_node_get_level(node); + flags = nilfs_btree_node_get_flags(node); + nchildren = nilfs_btree_node_get_nchildren(node); + + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || + level >= NILFS_BTREE_LEVEL_MAX || + (flags & NILFS_BTREE_NODE_ROOT) || + nchildren < 0 || + nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { + printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): " + "level = %d, flags = 0x%x, nchildren = %d\n", + (unsigned long long)blocknr, level, flags, nchildren); + ret = 1; + } + return ret; +} + +int nilfs_btree_broken_node_block(struct buffer_head *bh) +{ + return nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, + bh->b_size, bh->b_blocknr); +} + static inline struct nilfs_btree_node * nilfs_btree_get_root(const struct nilfs_btree *btree) { diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 43c8c5b..980e1e8 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -80,4 +80,6 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *, __u64, __u64, const __u64 *, const __u64 *, int); void nilfs_btree_init_gc(struct nilfs_bmap *); +int nilfs_btree_broken_node_block(struct buffer_head *bh); + #endif /* _NILFS_BTREE_H */ diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 145f03c..edb53fc 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -164,10 +164,15 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) if (buffer_dirty(bh)) return -EEXIST; - if (buffer_nilfs_node(bh)) + if (buffer_nilfs_node(bh)) { + if (nilfs_btree_broken_node_block(bh)) { + clear_buffer_uptodate(bh); + return -EIO; + } nilfs_btnode_mark_dirty(bh); - else + } else { nilfs_mdt_mark_buffer_dirty(bh); + } return 0; } -- cgit v1.1 From 25b8d7ded0e4579bf152882249abfd351e65a17d Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 16:50:41 +0900 Subject: nilfs2: get rid of private conversion macros on bmap key and pointer Will remove nilfs_bmap_key_to_dkey(), nilfs_bmap_dkey_to_key(), nilfs_bmap_ptr_to_dptr(), and nilfs_bmap_dptr_to_ptr() for simplicity. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.h | 5 ----- fs/nilfs2/btree.c | 34 ++++++++++++++++------------------ fs/nilfs2/direct.c | 12 ++++++------ 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index 9980d7d..de88ddf 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -32,11 +32,6 @@ #define NILFS_BMAP_INVALID_PTR 0 -#define nilfs_bmap_dkey_to_key(dkey) le64_to_cpu(dkey) -#define nilfs_bmap_key_to_dkey(key) cpu_to_le64(key) -#define nilfs_bmap_dptr_to_ptr(dptr) le64_to_cpu(dptr) -#define nilfs_bmap_ptr_to_dptr(ptr) cpu_to_le64(ptr) - #define nilfs_bmap_keydiff_abs(diff) ((diff) < 0 ? 
-(diff) : (diff)) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 6c9ec56..b2347f7 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -191,29 +191,27 @@ nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, static inline __u64 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index) { - return nilfs_bmap_dkey_to_key(*(nilfs_btree_node_dkeys(node) + index)); + return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index)); } static inline void nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) { - *(nilfs_btree_node_dkeys(node) + index) = nilfs_bmap_key_to_dkey(key); + *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key); } static inline __u64 nilfs_btree_node_get_ptr(const struct nilfs_btree *btree, const struct nilfs_btree_node *node, int index) { - return nilfs_bmap_dptr_to_ptr(*(nilfs_btree_node_dptrs(node, btree) + - index)); + return le64_to_cpu(*(nilfs_btree_node_dptrs(node, btree) + index)); } static inline void nilfs_btree_node_set_ptr(struct nilfs_btree *btree, struct nilfs_btree_node *node, int index, __u64 ptr) { - *(nilfs_btree_node_dptrs(node, btree) + index) = - nilfs_bmap_ptr_to_dptr(ptr); + *(nilfs_btree_node_dptrs(node, btree) + index) = cpu_to_le64(ptr); } static void nilfs_btree_node_init(struct nilfs_btree *btree, @@ -232,8 +230,8 @@ static void nilfs_btree_node_init(struct nilfs_btree *btree, dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, btree); for (i = 0; i < nchildren; i++) { - dkeys[i] = nilfs_bmap_key_to_dkey(keys[i]); - dptrs[i] = nilfs_bmap_ptr_to_dptr(ptrs[i]); + dkeys[i] = cpu_to_le64(keys[i]); + dptrs[i] = cpu_to_le64(ptrs[i]); } } @@ -313,8 +311,8 @@ static void nilfs_btree_node_insert(struct nilfs_btree *btree, memmove(dptrs + index + 1, dptrs + index, (nchildren - index) * sizeof(*dptrs)); } - dkeys[index] = nilfs_bmap_key_to_dkey(key); - dptrs[index] = nilfs_bmap_ptr_to_dptr(ptr); + dkeys[index] = cpu_to_le64(key); + dptrs[index] = cpu_to_le64(ptr); nchildren++; nilfs_btree_node_set_nchildren(node, nchildren); } @@ -332,8 +330,8 @@ static void nilfs_btree_node_delete(struct nilfs_btree *btree, dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, btree); - key = nilfs_bmap_dkey_to_key(dkeys[index]); - ptr = nilfs_bmap_dptr_to_ptr(dptrs[index]); + key = le64_to_cpu(dkeys[index]); + ptr = le64_to_cpu(dptrs[index]); nchildren = nilfs_btree_node_get_nchildren(node); if (keyp != NULL) *keyp = key; @@ -1569,8 +1567,8 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, dkeys = nilfs_btree_node_dkeys(node); dptrs = nilfs_btree_node_dptrs(node, btree); for (i = 0; i < nitems; i++) { - keys[i] = nilfs_bmap_dkey_to_key(dkeys[i]); - ptrs[i] = nilfs_bmap_dptr_to_ptr(dptrs[i]); + keys[i] = le64_to_cpu(dkeys[i]); + ptrs[i] = le64_to_cpu(dptrs[i]); } if (bh != NULL) @@ -2059,7 +2057,7 @@ static int nilfs_btree_assign_p(struct nilfs_btree *btree, key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ - binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key); + binfo->bi_dat.bi_blkoff = cpu_to_le64(key); binfo->bi_dat.bi_level = level; return 0; @@ -2090,8 +2088,8 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree, key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ - binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); - binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); + binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr); + binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 
0; } @@ -2159,7 +2157,7 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap, /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr); - binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); + binfo->bi_v.bi_blkoff = cpu_to_le64(key); return 0; } diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 236753d..32f1746 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -36,13 +36,13 @@ static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) static inline __u64 nilfs_direct_get_ptr(const struct nilfs_direct *direct, __u64 key) { - return nilfs_bmap_dptr_to_ptr(*(nilfs_direct_dptrs(direct) + key)); + return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key)); } static inline void nilfs_direct_set_ptr(struct nilfs_direct *direct, __u64 key, __u64 ptr) { - *(nilfs_direct_dptrs(direct) + key) = nilfs_bmap_ptr_to_dptr(ptr); + *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr); } static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, @@ -258,7 +258,7 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) { if ((j < n) && (i == keys[j])) { dptrs[i] = (i != key) ? - nilfs_bmap_ptr_to_dptr(ptrs[j]) : + cpu_to_le64(ptrs[j]) : NILFS_BMAP_INVALID_PTR; j++; } else @@ -315,8 +315,8 @@ static int nilfs_direct_assign_v(struct nilfs_direct *direct, ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (!ret) { nilfs_dat_commit_start(dat, &req.bpr_req, blocknr); - binfo->bi_v.bi_vblocknr = nilfs_bmap_ptr_to_dptr(ptr); - binfo->bi_v.bi_blkoff = nilfs_bmap_key_to_dkey(key); + binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr); + binfo->bi_v.bi_blkoff = cpu_to_le64(key); } return ret; } @@ -329,7 +329,7 @@ static int nilfs_direct_assign_p(struct nilfs_direct *direct, { nilfs_direct_set_ptr(direct, key, blocknr); - binfo->bi_dat.bi_blkoff = nilfs_bmap_key_to_dkey(key); + binfo->bi_dat.bi_blkoff = cpu_to_le64(key); binfo->bi_dat.bi_level = 0; return 0; -- cgit v1.1 From 583ada4761e18bb105ce5181b0b13cf55ead6201 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 21:37:47 +0900 Subject: nilfs2: remove constant qualifier from argument of bmap propagate The first argument of the bops->bop_propagate operation is const qualified, which causes a compilation error once the cast to a pointer of nilfs_btree structure type is removed. This fixes the issue to prepare for the successive removal of the nilfs_btree struct.
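A reduced example of the const mismatch in question, with placeholder types rather than the nilfs structures:

/* Reduced illustration; "bmap" here is a placeholder, not the nilfs struct. */
struct bmap { int dirty; };

static void mark_dirty(struct bmap *b)   /* helper needs a mutable pointer */
{
	b->dirty = 1;
}

/* With a const-qualified first argument, the call below only compiled
 * through a cast that discarded the qualifier; dropping that cast forces
 * the prototype change made by this patch. */
static int propagate(struct bmap *b)
{
	mark_dirty(b);
	return 0;
}

int main(void)
{
	struct bmap b = { 0 };
	return propagate(&b);
}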
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.h | 2 +- fs/nilfs2/btree.c | 4 ++-- fs/nilfs2/direct.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index de88ddf..379fda4 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -66,7 +66,7 @@ struct nilfs_bmap_operations { int (*bop_delete)(struct nilfs_bmap *, __u64); void (*bop_clear)(struct nilfs_bmap *); - int (*bop_propagate)(const struct nilfs_bmap *, struct buffer_head *); + int (*bop_propagate)(struct nilfs_bmap *, struct buffer_head *); void (*bop_lookup_dirty_buffers)(struct nilfs_bmap *, struct list_head *); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index b2347f7..a2dc36c 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1899,7 +1899,7 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree, return ret; } -static int nilfs_btree_propagate(const struct nilfs_bmap *bmap, +static int nilfs_btree_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { struct nilfs_btree *btree; @@ -1942,7 +1942,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap, return ret; } -static int nilfs_btree_propagate_gc(const struct nilfs_bmap *bmap, +static int nilfs_btree_propagate_gc(struct nilfs_bmap *bmap, struct buffer_head *bh) { return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr); diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 32f1746..fd006ee 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -269,7 +269,7 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, return 0; } -static int nilfs_direct_propagate(const struct nilfs_bmap *bmap, +static int nilfs_direct_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { struct nilfs_direct *direct = (struct nilfs_direct *)bmap; -- cgit v1.1 From 10ff885ba6f56bf7480ce3b5daf38c07600ecea3 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 18:07:04 +0900 Subject: nilfs2: get rid of nilfs_direct uses This replaces all uses of nilfs_direct struct in implementation of direct mapping with nilfs_bmap struct. 
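The refactoring pattern, stripped to its core with placeholder types: helpers that used to cast the bmap to a wrapper type and reach through an embedded member now take the shared base object directly.

#include <stdio.h>

struct bmap { unsigned long long u_data[8]; };   /* shared base object */

/* Before: struct direct { struct bmap d_bmap; }; helpers cast
 *         (struct direct *)bmap and used direct->d_bmap.
 * After:  helpers take the struct bmap itself. */
static unsigned long long get_ptr(const struct bmap *direct, unsigned key)
{
	return direct->u_data[key];
}

static void set_ptr(struct bmap *direct, unsigned key, unsigned long long ptr)
{
	direct->u_data[key] = ptr;
}

int main(void)
{
	struct bmap b = { { 0 } };

	set_ptr(&b, 3, 42);
	printf("%llu\n", get_ptr(&b, 3));
	return 0;
}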
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/direct.c | 78 ++++++++++++++++++++++-------------------------------- 1 file changed, 32 insertions(+), 46 deletions(-) diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index fd006ee..cfc7218 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -27,31 +27,29 @@ #include "alloc.h" #include "dat.h" -static inline __le64 *nilfs_direct_dptrs(const struct nilfs_direct *direct) +static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct) { return (__le64 *) - ((struct nilfs_direct_node *)direct->d_bmap.b_u.u_data + 1); + ((struct nilfs_direct_node *)direct->b_u.u_data + 1); } static inline __u64 -nilfs_direct_get_ptr(const struct nilfs_direct *direct, __u64 key) +nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key) { return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key)); } -static inline void nilfs_direct_set_ptr(struct nilfs_direct *direct, +static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct, __u64 key, __u64 ptr) { *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr); } -static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, +static int nilfs_direct_lookup(const struct nilfs_bmap *direct, __u64 key, int level, __u64 *ptrp) { - struct nilfs_direct *direct; __u64 ptr; - direct = (struct nilfs_direct *)bmap; /* XXX: use macro for level 1 */ if (key > NILFS_DIRECT_KEY_MAX || level != 1) return -ENOENT; ptr = nilfs_direct_get_ptr(direct, key); @@ -63,11 +61,10 @@ static int nilfs_direct_lookup(const struct nilfs_bmap *bmap, return 0; } -static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, +static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, __u64 key, __u64 *ptrp, unsigned maxblocks) { - struct nilfs_direct *direct = (struct nilfs_direct *)bmap; struct inode *dat = NULL; __u64 ptr, ptr2; sector_t blocknr; @@ -79,8 +76,8 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, if (ptr == NILFS_BMAP_INVALID_PTR) return -ENOENT; - if (NILFS_BMAP_USE_VBN(bmap)) { - dat = nilfs_bmap_get_dat(bmap); + if (NILFS_BMAP_USE_VBN(direct)) { + dat = nilfs_bmap_get_dat(direct); ret = nilfs_dat_translate(dat, ptr, &blocknr); if (ret < 0) return ret; @@ -106,29 +103,28 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap, } static __u64 -nilfs_direct_find_target_v(const struct nilfs_direct *direct, __u64 key) +nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key) { __u64 ptr; - ptr = nilfs_bmap_find_target_seq(&direct->d_bmap, key); + ptr = nilfs_bmap_find_target_seq(direct, key); if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; else /* block group */ - return nilfs_bmap_find_target_in_group(&direct->d_bmap); + return nilfs_bmap_find_target_in_group(direct); } -static void nilfs_direct_set_target_v(struct nilfs_direct *direct, +static void nilfs_direct_set_target_v(struct nilfs_bmap *direct, __u64 key, __u64 ptr) { - direct->d_bmap.b_last_allocated_key = key; - direct->d_bmap.b_last_allocated_ptr = ptr; + direct->b_last_allocated_key = key; + direct->b_last_allocated_ptr = ptr; } static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { - struct nilfs_direct *direct = (struct nilfs_direct *)bmap; union nilfs_bmap_ptr_req req; struct inode *dat = NULL; struct buffer_head *bh; @@ -136,11 +132,11 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) if (key > NILFS_DIRECT_KEY_MAX) return -ENOENT; - if (nilfs_direct_get_ptr(direct, key) != 
NILFS_BMAP_INVALID_PTR) + if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR) return -EEXIST; if (NILFS_BMAP_USE_VBN(bmap)) { - req.bpr_ptr = nilfs_direct_find_target_v(direct, key); + req.bpr_ptr = nilfs_direct_find_target_v(bmap, key); dat = nilfs_bmap_get_dat(bmap); } ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat); @@ -150,13 +146,13 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) set_buffer_nilfs_volatile(bh); nilfs_bmap_commit_alloc_ptr(bmap, &req, dat); - nilfs_direct_set_ptr(direct, key, req.bpr_ptr); + nilfs_direct_set_ptr(bmap, key, req.bpr_ptr); if (!nilfs_bmap_dirty(bmap)) nilfs_bmap_set_dirty(bmap); if (NILFS_BMAP_USE_VBN(bmap)) - nilfs_direct_set_target_v(direct, key, req.bpr_ptr); + nilfs_direct_set_target_v(bmap, key, req.bpr_ptr); nilfs_bmap_add_blocks(bmap, 1); } @@ -165,33 +161,30 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key) { - struct nilfs_direct *direct = (struct nilfs_direct *)bmap; union nilfs_bmap_ptr_req req; struct inode *dat; int ret; if (key > NILFS_DIRECT_KEY_MAX || - nilfs_direct_get_ptr(direct, key) == NILFS_BMAP_INVALID_PTR) + nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR) return -ENOENT; dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL; - req.bpr_ptr = nilfs_direct_get_ptr(direct, key); + req.bpr_ptr = nilfs_direct_get_ptr(bmap, key); ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat); if (!ret) { nilfs_bmap_commit_end_ptr(bmap, &req, dat); - nilfs_direct_set_ptr(direct, key, NILFS_BMAP_INVALID_PTR); + nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR); nilfs_bmap_sub_blocks(bmap, 1); } return ret; } -static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp) +static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp) { - struct nilfs_direct *direct; __u64 key, lastkey; - direct = (struct nilfs_direct *)bmap; lastkey = NILFS_DIRECT_KEY_MAX + 1; for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++) if (nilfs_direct_get_ptr(direct, key) != @@ -211,15 +204,13 @@ static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key) return key > NILFS_DIRECT_KEY_MAX; } -static int nilfs_direct_gather_data(struct nilfs_bmap *bmap, +static int nilfs_direct_gather_data(struct nilfs_bmap *direct, __u64 *keys, __u64 *ptrs, int nitems) { - struct nilfs_direct *direct; __u64 key; __u64 ptr; int n; - direct = (struct nilfs_direct *)bmap; if (nitems > NILFS_DIRECT_NBLOCKS) nitems = NILFS_DIRECT_NBLOCKS; n = 0; @@ -237,7 +228,6 @@ static int nilfs_direct_gather_data(struct nilfs_bmap *bmap, int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, __u64 key, __u64 *keys, __u64 *ptrs, int n) { - struct nilfs_direct *direct; __le64 *dptrs; int ret, i, j; @@ -253,8 +243,7 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, bmap->b_ops->bop_clear(bmap); /* convert */ - direct = (struct nilfs_direct *)bmap; - dptrs = nilfs_direct_dptrs(direct); + dptrs = nilfs_direct_dptrs(bmap); for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) { if ((j < n) && (i == keys[j])) { dptrs[i] = (i != key) ? 
@@ -272,7 +261,6 @@ int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap, static int nilfs_direct_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { - struct nilfs_direct *direct = (struct nilfs_direct *)bmap; struct nilfs_palloc_req oldreq, newreq; struct inode *dat; __u64 key; @@ -284,7 +272,7 @@ static int nilfs_direct_propagate(struct nilfs_bmap *bmap, dat = nilfs_bmap_get_dat(bmap); key = nilfs_bmap_data_get_key(bmap, bh); - ptr = nilfs_direct_get_ptr(direct, key); + ptr = nilfs_direct_get_ptr(bmap, key); if (!buffer_nilfs_volatile(bh)) { oldreq.pr_entry_nr = ptr; newreq.pr_entry_nr = ptr; @@ -294,20 +282,20 @@ static int nilfs_direct_propagate(struct nilfs_bmap *bmap, nilfs_dat_commit_update(dat, &oldreq, &newreq, bmap->b_ptr_type == NILFS_BMAP_PTR_VS); set_buffer_nilfs_volatile(bh); - nilfs_direct_set_ptr(direct, key, newreq.pr_entry_nr); + nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr); } else ret = nilfs_dat_mark_dirty(dat, ptr); return ret; } -static int nilfs_direct_assign_v(struct nilfs_direct *direct, +static int nilfs_direct_assign_v(struct nilfs_bmap *direct, __u64 key, __u64 ptr, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { - struct inode *dat = nilfs_bmap_get_dat(&direct->d_bmap); + struct inode *dat = nilfs_bmap_get_dat(direct); union nilfs_bmap_ptr_req req; int ret; @@ -321,7 +309,7 @@ static int nilfs_direct_assign_v(struct nilfs_direct *direct, return ret; } -static int nilfs_direct_assign_p(struct nilfs_direct *direct, +static int nilfs_direct_assign_p(struct nilfs_bmap *direct, __u64 key, __u64 ptr, struct buffer_head **bh, sector_t blocknr, @@ -340,18 +328,16 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap, sector_t blocknr, union nilfs_binfo *binfo) { - struct nilfs_direct *direct; __u64 key; __u64 ptr; - direct = (struct nilfs_direct *)bmap; key = nilfs_bmap_data_get_key(bmap, *bh); if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { printk(KERN_CRIT "%s: invalid key: %llu\n", __func__, (unsigned long long)key); return -EINVAL; } - ptr = nilfs_direct_get_ptr(direct, key); + ptr = nilfs_direct_get_ptr(bmap, key); if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__, (unsigned long long)ptr); @@ -359,8 +345,8 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap, return NILFS_BMAP_USE_VBN(bmap) ? - nilfs_direct_assign_v(direct, key, ptr, bh, blocknr, binfo) : - nilfs_direct_assign_p(direct, key, ptr, bh, blocknr, binfo); + nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) : + nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo); } static const struct nilfs_bmap_operations nilfs_direct_ops = { -- cgit v1.1 From e7c274f8083793f8f861def63c02a0839b34d26d Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 19:09:49 +0900 Subject: nilfs2: get rid of nilfs_btree uses This replaces all uses of the nilfs_btree struct in the implementation of the btree mapping with the nilfs_bmap struct. The name of the local variable "btree" is kept to avoid bloating the amount of change, and some local variables named "bmap" are renamed to "btree" to unify the naming rule.
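One point worth noting while reading the converted node accessors below: the pointer array of a node is located by skipping the maximum number of keys, so the conversion only changes which object supplies that capacity. A tiny model of the arithmetic, assuming for simplicity a one-word header followed directly by the key array (capacities are illustrative; the real code derives them from the block size carried by the bmap object):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int nchildren_max = 4;             /* illustrative capacity */
	uint64_t block[1 + 2 * 4];         /* header word + keys + pointers */

	uint64_t *dkeys = block + 1;                 /* keys assumed right after the header */
	uint64_t *dptrs = dkeys + nchildren_max;     /* as in nilfs_btree_node_dptrs() */

	dkeys[0] = 10;
	dptrs[0] = 1000;
	printf("key[0]=%llu ptr[0]=%llu\n",
	       (unsigned long long)dkeys[0], (unsigned long long)dptrs[0]);
	return 0;
}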
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 316 ++++++++++++++++++++++++------------------------------ fs/nilfs2/btree.h | 2 +- 2 files changed, 141 insertions(+), 177 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index a2dc36c..81e8716 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -66,11 +66,10 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path) /* * B-tree node operations */ -static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr, +static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { - struct address_space *btnc = - &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache; + struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; struct buffer_head *bh; int err; @@ -92,11 +91,10 @@ static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr, return 0; } -static int nilfs_btree_get_new_block(const struct nilfs_btree *btree, +static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { - struct address_space *btnc = - &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache; + struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; struct buffer_head *bh; bh = nilfs_btnode_create_block(btnc, ptr); @@ -149,14 +147,14 @@ nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren) node->bn_nchildren = cpu_to_le16(nchildren); } -static inline int nilfs_btree_node_size(const struct nilfs_btree *btree) +static inline int nilfs_btree_node_size(const struct nilfs_bmap *btree) { - return 1 << btree->bt_bmap.b_inode->i_blkbits; + return 1 << btree->b_inode->i_blkbits; } static inline int nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node, - const struct nilfs_btree *btree) + const struct nilfs_bmap *btree) { return nilfs_btree_node_root(node) ? NILFS_BTREE_ROOT_NCHILDREN_MIN : @@ -165,7 +163,7 @@ nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node, static inline int nilfs_btree_node_nchildren_max(const struct nilfs_btree_node *node, - const struct nilfs_btree *btree) + const struct nilfs_bmap *btree) { return nilfs_btree_node_root(node) ? 
NILFS_BTREE_ROOT_NCHILDREN_MAX : @@ -182,7 +180,7 @@ nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) static inline __le64 * nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, - const struct nilfs_btree *btree) + const struct nilfs_bmap *btree) { return (__le64 *)(nilfs_btree_node_dkeys(node) + nilfs_btree_node_nchildren_max(node, btree)); @@ -201,20 +199,20 @@ nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) } static inline __u64 -nilfs_btree_node_get_ptr(const struct nilfs_btree *btree, +nilfs_btree_node_get_ptr(const struct nilfs_bmap *btree, const struct nilfs_btree_node *node, int index) { return le64_to_cpu(*(nilfs_btree_node_dptrs(node, btree) + index)); } static inline void -nilfs_btree_node_set_ptr(struct nilfs_btree *btree, +nilfs_btree_node_set_ptr(struct nilfs_bmap *btree, struct nilfs_btree_node *node, int index, __u64 ptr) { *(nilfs_btree_node_dptrs(node, btree) + index) = cpu_to_le64(ptr); } -static void nilfs_btree_node_init(struct nilfs_btree *btree, +static void nilfs_btree_node_init(struct nilfs_bmap *btree, struct nilfs_btree_node *node, int flags, int level, int nchildren, const __u64 *keys, const __u64 *ptrs) @@ -236,7 +234,7 @@ static void nilfs_btree_node_init(struct nilfs_btree *btree, } /* Assume the buffer heads corresponding to left and right are locked. */ -static void nilfs_btree_node_move_left(struct nilfs_btree *btree, +static void nilfs_btree_node_move_left(struct nilfs_bmap *btree, struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n) @@ -265,7 +263,7 @@ static void nilfs_btree_node_move_left(struct nilfs_btree *btree, } /* Assume that the buffer heads corresponding to left and right are locked. */ -static void nilfs_btree_node_move_right(struct nilfs_btree *btree, +static void nilfs_btree_node_move_right(struct nilfs_bmap *btree, struct nilfs_btree_node *left, struct nilfs_btree_node *right, int n) @@ -294,7 +292,7 @@ static void nilfs_btree_node_move_right(struct nilfs_btree *btree, } /* Assume that the buffer head corresponding to node is locked. */ -static void nilfs_btree_node_insert(struct nilfs_btree *btree, +static void nilfs_btree_node_insert(struct nilfs_bmap *btree, struct nilfs_btree_node *node, __u64 key, __u64 ptr, int index) { @@ -318,7 +316,7 @@ static void nilfs_btree_node_insert(struct nilfs_btree *btree, } /* Assume that the buffer head corresponding to node is locked. 
*/ -static void nilfs_btree_node_delete(struct nilfs_btree *btree, +static void nilfs_btree_node_delete(struct nilfs_bmap *btree, struct nilfs_btree_node *node, __u64 *keyp, __u64 *ptrp, int index) { @@ -425,9 +423,9 @@ int nilfs_btree_broken_node_block(struct buffer_head *bh) } static inline struct nilfs_btree_node * -nilfs_btree_get_root(const struct nilfs_btree *btree) +nilfs_btree_get_root(const struct nilfs_bmap *btree) { - return (struct nilfs_btree_node *)btree->bt_bmap.b_u.u_data; + return (struct nilfs_btree_node *)btree->b_u.u_data; } static inline struct nilfs_btree_node * @@ -442,13 +440,13 @@ nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level) return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data; } -static inline int nilfs_btree_height(const struct nilfs_btree *btree) +static inline int nilfs_btree_height(const struct nilfs_bmap *btree) { return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; } static inline struct nilfs_btree_node * -nilfs_btree_get_node(const struct nilfs_btree *btree, +nilfs_btree_get_node(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, int level) { @@ -469,7 +467,7 @@ nilfs_btree_bad_node(struct nilfs_btree_node *node, int level) return 0; } -static int nilfs_btree_do_lookup(const struct nilfs_btree *btree, +static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 key, __u64 *ptrp, int minlevel) { @@ -516,7 +514,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree, return 0; } -static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree, +static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, __u64 *keyp, __u64 *ptrp) { @@ -553,15 +551,13 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree, return 0; } -static int nilfs_btree_lookup(const struct nilfs_bmap *bmap, +static int nilfs_btree_lookup(const struct nilfs_bmap *btree, __u64 key, int level, __u64 *ptrp) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; __u64 ptr; int ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -576,10 +572,9 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *bmap, return ret; } -static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap, +static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, __u64 key, __u64 *ptrp, unsigned maxblocks) { - struct nilfs_btree *btree = (struct nilfs_btree *)bmap; struct nilfs_btree_path *path; struct nilfs_btree_node *node; struct inode *dat = NULL; @@ -596,8 +591,8 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap, if (ret < 0) goto out; - if (NILFS_BMAP_USE_VBN(bmap)) { - dat = nilfs_bmap_get_dat(bmap); + if (NILFS_BMAP_USE_VBN(btree)) { + dat = nilfs_bmap_get_dat(btree); ret = nilfs_dat_translate(dat, ptr, &blocknr); if (ret < 0) goto out; @@ -656,7 +651,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *bmap, return ret; } -static void nilfs_btree_promote_key(struct nilfs_btree *btree, +static void nilfs_btree_promote_key(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 key) { @@ -678,7 +673,7 @@ static void nilfs_btree_promote_key(struct nilfs_btree *btree, } } -static void nilfs_btree_do_insert(struct nilfs_btree *btree, +static void nilfs_btree_do_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -702,7 +697,7 @@ static 
void nilfs_btree_do_insert(struct nilfs_btree *btree, } } -static void nilfs_btree_carry_left(struct nilfs_btree *btree, +static void nilfs_btree_carry_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -747,7 +742,7 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree, nilfs_btree_do_insert(btree, path, level, keyp, ptrp); } -static void nilfs_btree_carry_right(struct nilfs_btree *btree, +static void nilfs_btree_carry_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -793,7 +788,7 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree, nilfs_btree_do_insert(btree, path, level, keyp, ptrp); } -static void nilfs_btree_split(struct nilfs_btree *btree, +static void nilfs_btree_split(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -847,7 +842,7 @@ static void nilfs_btree_split(struct nilfs_btree *btree, path[level + 1].bp_index++; } -static void nilfs_btree_grow(struct nilfs_btree *btree, +static void nilfs_btree_grow(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -874,7 +869,7 @@ static void nilfs_btree_grow(struct nilfs_btree *btree, *ptrp = path[level].bp_newreq.bpr_ptr; } -static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree, +static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path) { struct nilfs_btree_node *node; @@ -902,13 +897,13 @@ static __u64 nilfs_btree_find_near(const struct nilfs_btree *btree, return NILFS_BMAP_INVALID_PTR; } -static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree, +static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, __u64 key) { __u64 ptr; - ptr = nilfs_bmap_find_target_seq(&btree->bt_bmap, key); + ptr = nilfs_bmap_find_target_seq(btree, key); if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; @@ -919,17 +914,17 @@ static __u64 nilfs_btree_find_target_v(const struct nilfs_btree *btree, return ptr; } /* block group */ - return nilfs_bmap_find_target_in_group(&btree->bt_bmap); + return nilfs_bmap_find_target_in_group(btree); } -static void nilfs_btree_set_target_v(struct nilfs_btree *btree, __u64 key, +static void nilfs_btree_set_target_v(struct nilfs_bmap *btree, __u64 key, __u64 ptr) { - btree->bt_bmap.b_last_allocated_key = key; - btree->bt_bmap.b_last_allocated_ptr = ptr; + btree->b_last_allocated_key = key; + btree->b_last_allocated_ptr = ptr; } -static int nilfs_btree_prepare_insert(struct nilfs_btree *btree, +static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, __u64 key, __u64 ptr, struct nilfs_bmap_stats *stats) @@ -944,14 +939,13 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree, level = NILFS_BTREE_LEVEL_DATA; /* allocate a new ptr for data block */ - if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) { + if (NILFS_BMAP_USE_VBN(btree)) { path[level].bp_newreq.bpr_ptr = nilfs_btree_find_target_v(btree, path, key); - dat = nilfs_bmap_get_dat(&btree->bt_bmap); + dat = nilfs_bmap_get_dat(btree); } - ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap, - &path[level].bp_newreq, dat); + ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_data; @@ -1009,7 +1003,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree, /* split */ path[level].bp_newreq.bpr_ptr = 
path[level - 1].bp_newreq.bpr_ptr + 1; - ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap, + ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_child_node; @@ -1039,8 +1033,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree, /* grow */ path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1; - ret = nilfs_bmap_prepare_alloc_ptr(&btree->bt_bmap, - &path[level].bp_newreq, dat); + ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat); if (ret < 0) goto err_out_child_node; ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr, @@ -1066,25 +1059,22 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree, /* error */ err_out_curr_node: - nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq, - dat); + nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); err_out_child_node: for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) { nilfs_btnode_delete(path[level].bp_sib_bh); - nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, - &path[level].bp_newreq, dat); + nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); } - nilfs_bmap_abort_alloc_ptr(&btree->bt_bmap, &path[level].bp_newreq, - dat); + nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat); err_out_data: *levelp = level; stats->bs_nblocks = 0; return ret; } -static void nilfs_btree_commit_insert(struct nilfs_btree *btree, +static void nilfs_btree_commit_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, __u64 key, __u64 ptr) { @@ -1093,29 +1083,27 @@ static void nilfs_btree_commit_insert(struct nilfs_btree *btree, set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr; - if (NILFS_BMAP_USE_VBN(&btree->bt_bmap)) { + if (NILFS_BMAP_USE_VBN(btree)) { nilfs_btree_set_target_v(btree, key, ptr); - dat = nilfs_bmap_get_dat(&btree->bt_bmap); + dat = nilfs_bmap_get_dat(btree); } for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { - nilfs_bmap_commit_alloc_ptr(&btree->bt_bmap, + nilfs_bmap_commit_alloc_ptr(btree, &path[level - 1].bp_newreq, dat); path[level].bp_op(btree, path, level, &key, &ptr); } - if (!nilfs_bmap_dirty(&btree->bt_bmap)) - nilfs_bmap_set_dirty(&btree->bt_bmap); + if (!nilfs_bmap_dirty(btree)) + nilfs_bmap_set_dirty(btree); } -static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) +static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; struct nilfs_bmap_stats stats; int level, ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -1132,14 +1120,14 @@ static int nilfs_btree_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) if (ret < 0) goto out; nilfs_btree_commit_insert(btree, path, level, key, ptr); - nilfs_bmap_add_blocks(bmap, stats.bs_nblocks); + nilfs_bmap_add_blocks(btree, stats.bs_nblocks); out: nilfs_btree_free_path(path); return ret; } -static void nilfs_btree_do_delete(struct nilfs_btree *btree, +static void nilfs_btree_do_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -1161,7 +1149,7 @@ static void nilfs_btree_do_delete(struct nilfs_btree *btree, } } -static void nilfs_btree_borrow_left(struct nilfs_btree *btree, +static void nilfs_btree_borrow_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, 
__u64 *ptrp) { @@ -1192,7 +1180,7 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree, path[level].bp_index += n; } -static void nilfs_btree_borrow_right(struct nilfs_btree *btree, +static void nilfs_btree_borrow_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -1224,7 +1212,7 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree, path[level].bp_sib_bh = NULL; } -static void nilfs_btree_concat_left(struct nilfs_btree *btree, +static void nilfs_btree_concat_left(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -1249,7 +1237,7 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree, path[level].bp_index += nilfs_btree_node_get_nchildren(left); } -static void nilfs_btree_concat_right(struct nilfs_btree *btree, +static void nilfs_btree_concat_right(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -1273,7 +1261,7 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree, path[level + 1].bp_index++; } -static void nilfs_btree_shrink(struct nilfs_btree *btree, +static void nilfs_btree_shrink(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, __u64 *keyp, __u64 *ptrp) { @@ -1295,7 +1283,7 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree, } -static int nilfs_btree_prepare_delete(struct nilfs_btree *btree, +static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, struct nilfs_bmap_stats *stats, @@ -1315,7 +1303,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree, path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(btree, node, path[level].bp_index); - ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap, + ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; @@ -1393,8 +1381,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree, path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(btree, node, path[level].bp_index); - ret = nilfs_bmap_prepare_end_ptr(&btree->bt_bmap, - &path[level].bp_oldreq, dat); + ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) goto err_out_child_node; @@ -1409,44 +1396,40 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree, /* error */ err_out_curr_node: - nilfs_bmap_abort_end_ptr(&btree->bt_bmap, &path[level].bp_oldreq, dat); + nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); err_out_child_node: for (level--; level >= NILFS_BTREE_LEVEL_NODE_MIN; level--) { brelse(path[level].bp_sib_bh); - nilfs_bmap_abort_end_ptr(&btree->bt_bmap, - &path[level].bp_oldreq, dat); + nilfs_bmap_abort_end_ptr(btree, &path[level].bp_oldreq, dat); } *levelp = level; stats->bs_nblocks = 0; return ret; } -static void nilfs_btree_commit_delete(struct nilfs_btree *btree, +static void nilfs_btree_commit_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int maxlevel, struct inode *dat) { int level; for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) { - nilfs_bmap_commit_end_ptr(&btree->bt_bmap, - &path[level].bp_oldreq, dat); + nilfs_bmap_commit_end_ptr(btree, &path[level].bp_oldreq, dat); path[level].bp_op(btree, path, level, NULL, NULL); } - if (!nilfs_bmap_dirty(&btree->bt_bmap)) - nilfs_bmap_set_dirty(&btree->bt_bmap); + if (!nilfs_bmap_dirty(btree)) + nilfs_bmap_set_dirty(btree); } -static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key) 
+static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; struct nilfs_bmap_stats stats; struct inode *dat; int level, ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -1457,27 +1440,24 @@ static int nilfs_btree_delete(struct nilfs_bmap *bmap, __u64 key) goto out; - dat = NILFS_BMAP_USE_VBN(&btree->bt_bmap) ? - nilfs_bmap_get_dat(&btree->bt_bmap) : NULL; + dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; ret = nilfs_btree_prepare_delete(btree, path, &level, &stats, dat); if (ret < 0) goto out; nilfs_btree_commit_delete(btree, path, level, dat); - nilfs_bmap_sub_blocks(bmap, stats.bs_nblocks); + nilfs_bmap_sub_blocks(btree, stats.bs_nblocks); out: nilfs_btree_free_path(path); return ret; } -static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp) +static int nilfs_btree_last_key(const struct nilfs_bmap *btree, __u64 *keyp) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; int ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -1489,16 +1469,14 @@ static int nilfs_btree_last_key(const struct nilfs_bmap *bmap, __u64 *keyp) return ret; } -static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key) +static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) { struct buffer_head *bh; - struct nilfs_btree *btree; struct nilfs_btree_node *root, *node; __u64 maxkey, nextmaxkey; __u64 ptr; int nchildren, ret; - btree = (struct nilfs_btree *)bmap; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: @@ -1529,18 +1507,16 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key) return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW); } -static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, +static int nilfs_btree_gather_data(struct nilfs_bmap *btree, __u64 *keys, __u64 *ptrs, int nitems) { struct buffer_head *bh; - struct nilfs_btree *btree; struct nilfs_btree_node *node, *root; __le64 *dkeys; __le64 *dptrs; __u64 ptr; int nchildren, i, ret; - btree = (struct nilfs_btree *)bmap; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: @@ -1578,14 +1554,13 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap, } static int -nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, +nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats) { struct buffer_head *bh; - struct nilfs_btree *btree = (struct nilfs_btree *)bmap; struct inode *dat = NULL; int ret; @@ -1593,12 +1568,12 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, /* for data */ /* cannot find near ptr */ - if (NILFS_BMAP_USE_VBN(bmap)) { + if (NILFS_BMAP_USE_VBN(btree)) { dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key); - dat = nilfs_bmap_get_dat(bmap); + dat = nilfs_bmap_get_dat(btree); } - ret = nilfs_bmap_prepare_alloc_ptr(bmap, dreq, dat); + ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); if (ret < 0) return ret; @@ -1606,7 +1581,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, stats->bs_nblocks++; if (nreq != NULL) { nreq->bpr_ptr = dreq->bpr_ptr + 1; - ret = nilfs_bmap_prepare_alloc_ptr(bmap, nreq, dat); + ret = nilfs_bmap_prepare_alloc_ptr(btree, 
nreq, dat); if (ret < 0) goto err_out_dreq; @@ -1623,16 +1598,16 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key, /* error */ err_out_nreq: - nilfs_bmap_abort_alloc_ptr(bmap, nreq, dat); + nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); err_out_dreq: - nilfs_bmap_abort_alloc_ptr(bmap, dreq, dat); + nilfs_bmap_abort_alloc_ptr(btree, dreq, dat); stats->bs_nblocks = 0; return ret; } static void -nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, +nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, @@ -1640,34 +1615,32 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh) { - struct nilfs_btree *btree = (struct nilfs_btree *)bmap; struct nilfs_btree_node *node; struct inode *dat; __u64 tmpptr; /* free resources */ - if (bmap->b_ops->bop_clear != NULL) - bmap->b_ops->bop_clear(bmap); + if (btree->b_ops->bop_clear != NULL) + btree->b_ops->bop_clear(btree); /* ptr must be a pointer to a buffer head. */ set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); /* convert and insert */ - dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL; - nilfs_btree_init(bmap); + dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; + nilfs_btree_init(btree); if (nreq != NULL) { - nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat); - nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat); + nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); + nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); /* create child node at level 1 */ node = (struct nilfs_btree_node *)bh->b_data; nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs); - nilfs_btree_node_insert(btree, node, - key, dreq->bpr_ptr, n); + nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n); if (!buffer_dirty(bh)) nilfs_btnode_mark_dirty(bh); - if (!nilfs_bmap_dirty(bmap)) - nilfs_bmap_set_dirty(bmap); + if (!nilfs_bmap_dirty(btree)) + nilfs_bmap_set_dirty(btree); brelse(bh); @@ -1677,19 +1650,18 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, 2, 1, &keys[0], &tmpptr); } else { - nilfs_bmap_commit_alloc_ptr(bmap, dreq, dat); + nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); /* create root node at level 1 */ node = nilfs_btree_get_root(btree); nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, 1, n, keys, ptrs); - nilfs_btree_node_insert(btree, node, - key, dreq->bpr_ptr, n); - if (!nilfs_bmap_dirty(bmap)) - nilfs_bmap_set_dirty(bmap); + nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n); + if (!nilfs_bmap_dirty(btree)) + nilfs_bmap_set_dirty(btree); } - if (NILFS_BMAP_USE_VBN(bmap)) + if (NILFS_BMAP_USE_VBN(btree)) nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr); } @@ -1702,7 +1674,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap, * @ptrs: * @n: */ -int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, +int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n) { @@ -1715,7 +1687,7 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, di = &dreq; ni = NULL; } else if ((n + 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX( - 1 << bmap->b_inode->i_blkbits)) { + 1 << btree->b_inode->i_blkbits)) { di = &dreq; ni = &nreq; } else { @@ -1724,17 +1696,17 @@ int nilfs_btree_convert_and_insert(struct nilfs_bmap *bmap, BUG(); } - ret = nilfs_btree_prepare_convert_and_insert(bmap, 
key, di, ni, &bh, + ret = nilfs_btree_prepare_convert_and_insert(btree, key, di, ni, &bh, &stats); if (ret < 0) return ret; - nilfs_btree_commit_convert_and_insert(bmap, key, ptr, keys, ptrs, n, + nilfs_btree_commit_convert_and_insert(btree, key, ptr, keys, ptrs, n, di, ni, bh); - nilfs_bmap_add_blocks(bmap, stats.bs_nblocks); + nilfs_bmap_add_blocks(btree, stats.bs_nblocks); return 0; } -static int nilfs_btree_propagate_p(struct nilfs_btree *btree, +static int nilfs_btree_propagate_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head *bh) @@ -1746,7 +1718,7 @@ static int nilfs_btree_propagate_p(struct nilfs_btree *btree, return 0; } -static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree, +static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { @@ -1768,7 +1740,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree, path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr; path[level].bp_ctxt.bh = path[level].bp_bh; ret = nilfs_btnode_prepare_change_key( - &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, + &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); if (ret < 0) { nilfs_dat_abort_update(dat, @@ -1781,7 +1753,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_btree *btree, return 0; } -static void nilfs_btree_commit_update_v(struct nilfs_btree *btree, +static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { @@ -1789,11 +1761,11 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree, nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req, - btree->bt_bmap.b_ptr_type == NILFS_BMAP_PTR_VS); + btree->b_ptr_type == NILFS_BMAP_PTR_VS); if (buffer_nilfs_node(path[level].bp_bh)) { nilfs_btnode_commit_change_key( - &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, + &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); path[level].bp_bh = path[level].bp_ctxt.bh; } @@ -1804,7 +1776,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_btree *btree, path[level].bp_newreq.bpr_ptr); } -static void nilfs_btree_abort_update_v(struct nilfs_btree *btree, +static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct inode *dat) { @@ -1812,11 +1784,11 @@ static void nilfs_btree_abort_update_v(struct nilfs_btree *btree, &path[level].bp_newreq.bpr_req); if (buffer_nilfs_node(path[level].bp_bh)) nilfs_btnode_abort_change_key( - &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, + &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); } -static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree, +static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int *maxlevelp, struct inode *dat) @@ -1851,7 +1823,7 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree, return ret; } -static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree, +static void nilfs_btree_commit_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int minlevel, int maxlevel, struct buffer_head *bh, @@ -1866,13 +1838,13 @@ static void nilfs_btree_commit_propagate_v(struct nilfs_btree *btree, nilfs_btree_commit_update_v(btree, path, level, dat); } -static int nilfs_btree_propagate_v(struct nilfs_btree *btree, +static int nilfs_btree_propagate_v(struct nilfs_bmap *btree, struct 
nilfs_btree_path *path, int level, struct buffer_head *bh) { int maxlevel = 0, ret; struct nilfs_btree_node *parent; - struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); + struct inode *dat = nilfs_bmap_get_dat(btree); __u64 ptr; get_bh(bh); @@ -1899,10 +1871,9 @@ static int nilfs_btree_propagate_v(struct nilfs_btree *btree, return ret; } -static int nilfs_btree_propagate(struct nilfs_bmap *bmap, +static int nilfs_btree_propagate(struct nilfs_bmap *btree, struct buffer_head *bh) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; @@ -1910,7 +1881,6 @@ static int nilfs_btree_propagate(struct nilfs_bmap *bmap, WARN_ON(!buffer_dirty(bh)); - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -1920,7 +1890,7 @@ static int nilfs_btree_propagate(struct nilfs_bmap *bmap, key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { - key = nilfs_bmap_data_get_key(bmap, bh); + key = nilfs_bmap_data_get_key(btree, bh); level = NILFS_BTREE_LEVEL_DATA; } @@ -1932,7 +1902,7 @@ static int nilfs_btree_propagate(struct nilfs_bmap *bmap, goto out; } - ret = NILFS_BMAP_USE_VBN(bmap) ? + ret = NILFS_BMAP_USE_VBN(btree) ? nilfs_btree_propagate_v(btree, path, level, bh) : nilfs_btree_propagate_p(btree, path, level, bh); @@ -1942,13 +1912,13 @@ static int nilfs_btree_propagate(struct nilfs_bmap *bmap, return ret; } -static int nilfs_btree_propagate_gc(struct nilfs_bmap *bmap, +static int nilfs_btree_propagate_gc(struct nilfs_bmap *btree, struct buffer_head *bh) { - return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(bmap), bh->b_blocknr); + return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree), bh->b_blocknr); } -static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree, +static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, struct list_head *lists, struct buffer_head *bh) { @@ -1969,7 +1939,7 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree, "%s: invalid btree level: %d (key=%llu, ino=%lu, " "blocknr=%llu)\n", __func__, level, (unsigned long long)key, - NILFS_BMAP_I(&btree->bt_bmap)->vfs_inode.i_ino, + NILFS_BMAP_I(btree)->vfs_inode.i_ino, (unsigned long long)bh->b_blocknr); return; } @@ -1984,11 +1954,10 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_btree *btree, list_add_tail(&bh->b_assoc_buffers, head); } -static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap, +static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree, struct list_head *listp) { - struct nilfs_btree *btree = (struct nilfs_btree *)bmap; - struct address_space *btcache = &NILFS_BMAP_I(bmap)->i_btnode_cache; + struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache; struct list_head lists[NILFS_BTREE_LEVEL_MAX]; struct pagevec pvec; struct buffer_head *bh, *head; @@ -2022,7 +1991,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap, list_splice_tail(&lists[level], listp); } -static int nilfs_btree_assign_p(struct nilfs_btree *btree, +static int nilfs_btree_assign_p(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, @@ -2042,12 +2011,12 @@ static int nilfs_btree_assign_p(struct nilfs_btree *btree, path[level].bp_ctxt.newkey = blocknr; path[level].bp_ctxt.bh = *bh; ret = nilfs_btnode_prepare_change_key( - &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, + &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); if (ret < 0) return ret; 
nilfs_btnode_commit_change_key( - &NILFS_BMAP_I(&btree->bt_bmap)->i_btnode_cache, + &NILFS_BMAP_I(btree)->i_btnode_cache, &path[level].bp_ctxt); *bh = path[level].bp_ctxt.bh; } @@ -2063,7 +2032,7 @@ static int nilfs_btree_assign_p(struct nilfs_btree *btree, return 0; } -static int nilfs_btree_assign_v(struct nilfs_btree *btree, +static int nilfs_btree_assign_v(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int level, struct buffer_head **bh, @@ -2071,15 +2040,14 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree, union nilfs_binfo *binfo) { struct nilfs_btree_node *parent; - struct inode *dat = nilfs_bmap_get_dat(&btree->bt_bmap); + struct inode *dat = nilfs_bmap_get_dat(btree); __u64 key; __u64 ptr; union nilfs_bmap_ptr_req req; int ret; parent = nilfs_btree_get_node(btree, path, level + 1); - ptr = nilfs_btree_node_get_ptr(btree, parent, - path[level + 1].bp_index); + ptr = nilfs_btree_node_get_ptr(btree, parent, path[level + 1].bp_index); req.bpr_ptr = ptr; ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (ret < 0) @@ -2094,18 +2062,16 @@ static int nilfs_btree_assign_v(struct nilfs_btree *btree, return 0; } -static int nilfs_btree_assign(struct nilfs_bmap *bmap, +static int nilfs_btree_assign(struct nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) { - struct nilfs_btree *btree; struct nilfs_btree_path *path; struct nilfs_btree_node *node; __u64 key; int level, ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -2115,7 +2081,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap, key = nilfs_btree_node_get_key(node, 0); level = nilfs_btree_node_get_level(node); } else { - key = nilfs_bmap_data_get_key(bmap, *bh); + key = nilfs_bmap_data_get_key(btree, *bh); level = NILFS_BTREE_LEVEL_DATA; } @@ -2125,7 +2091,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap, goto out; } - ret = NILFS_BMAP_USE_VBN(bmap) ? + ret = NILFS_BMAP_USE_VBN(btree) ? 
nilfs_btree_assign_v(btree, path, level, bh, blocknr, binfo) : nilfs_btree_assign_p(btree, path, level, bh, blocknr, binfo); @@ -2135,7 +2101,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap, return ret; } -static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap, +static int nilfs_btree_assign_gc(struct nilfs_bmap *btree, struct buffer_head **bh, sector_t blocknr, union nilfs_binfo *binfo) @@ -2144,7 +2110,7 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap, __u64 key; int ret; - ret = nilfs_dat_move(nilfs_bmap_get_dat(bmap), (*bh)->b_blocknr, + ret = nilfs_dat_move(nilfs_bmap_get_dat(btree), (*bh)->b_blocknr, blocknr); if (ret < 0) return ret; @@ -2153,7 +2119,7 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap, node = (struct nilfs_btree_node *)(*bh)->b_data; key = nilfs_btree_node_get_key(node, 0); } else - key = nilfs_bmap_data_get_key(bmap, *bh); + key = nilfs_bmap_data_get_key(btree, *bh); /* on-disk format */ binfo->bi_v.bi_vblocknr = cpu_to_le64((*bh)->b_blocknr); @@ -2162,15 +2128,13 @@ static int nilfs_btree_assign_gc(struct nilfs_bmap *bmap, return 0; } -static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level) +static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level) { struct buffer_head *bh; - struct nilfs_btree *btree; struct nilfs_btree_path *path; __u64 ptr; int ret; - btree = (struct nilfs_btree *)bmap; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; @@ -2189,8 +2153,8 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level) if (!buffer_dirty(bh)) nilfs_btnode_mark_dirty(bh); brelse(bh); - if (!nilfs_bmap_dirty(&btree->bt_bmap)) - nilfs_bmap_set_dirty(&btree->bt_bmap); + if (!nilfs_bmap_dirty(btree)) + nilfs_bmap_set_dirty(btree); out: nilfs_btree_free_path(path); diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 980e1e8..cffbfba 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -54,7 +54,7 @@ struct nilfs_btree_path { union nilfs_bmap_ptr_req bp_oldreq; union nilfs_bmap_ptr_req bp_newreq; struct nilfs_btnode_chkey_ctxt bp_ctxt; - void (*bp_op)(struct nilfs_btree *, struct nilfs_btree_path *, + void (*bp_op)(struct nilfs_bmap *, struct nilfs_btree_path *, int, __u64 *, __u64 *); }; -- cgit v1.1 From dc935be2a094087bc561d80f8cf9e66bbc1f7b18 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 22:21:54 +0900 Subject: nilfs2: unify bmap set_target_v operations This unifies two similar functions nilfs_btree_set_target_v and nilfs_direct_set_target_v into one, nilfs_bmap_set_target_v. 
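The two set_target_v variants can collapse into one helper because they only touch b_last_allocated_key and b_last_allocated_ptr, fields that already live in the common struct nilfs_bmap shared by the btree and direct mappings. A minimal standalone sketch of the pattern follows; the types and names here are simplified stand-ins, not the real nilfs2 structures:

/* set_target_sketch.c - simplified stand-ins, not the nilfs2 code itself */
#include <stdio.h>
#include <stdint.h>

struct bmap {				/* stands in for struct nilfs_bmap */
	uint64_t b_last_allocated_key;
	uint64_t b_last_allocated_ptr;
};

/* one shared inline setter replaces two identical per-type variants */
static inline void bmap_set_target(struct bmap *bmap, uint64_t key, uint64_t ptr)
{
	bmap->b_last_allocated_key = key;
	bmap->b_last_allocated_ptr = ptr;
}

int main(void)
{
	struct bmap btree_map = {0}, direct_map = {0};

	bmap_set_target(&btree_map, 10, 1000);	/* would be the btree insert path */
	bmap_set_target(&direct_map, 3, 42);	/* would be the direct-map insert path */
	printf("%llu %llu\n",
	       (unsigned long long)btree_map.b_last_allocated_ptr,
	       (unsigned long long)direct_map.b_last_allocated_ptr);
	return 0;
}

Because the helper depends only on the base structure, it can live in bmap.h as a static inline and be called from both btree.c and direct.c, which is the shape of the hunks below.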
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.h | 7 +++++++ fs/nilfs2/btree.c | 11 ++--------- fs/nilfs2/direct.c | 9 +-------- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index 379fda4..fae83cf 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -219,6 +219,13 @@ static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap, nilfs_dat_abort_end(dat, &req->bpr_req); } +static inline void nilfs_bmap_set_target_v(struct nilfs_bmap *bmap, __u64 key, + __u64 ptr) +{ + bmap->b_last_allocated_key = key; + bmap->b_last_allocated_ptr = ptr; +} + __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *, const struct buffer_head *); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 81e8716..0543bf9 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -917,13 +917,6 @@ static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree, return nilfs_bmap_find_target_in_group(btree); } -static void nilfs_btree_set_target_v(struct nilfs_bmap *btree, __u64 key, - __u64 ptr) -{ - btree->b_last_allocated_key = key; - btree->b_last_allocated_ptr = ptr; -} - static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct nilfs_btree_path *path, int *levelp, __u64 key, __u64 ptr, @@ -1084,7 +1077,7 @@ static void nilfs_btree_commit_insert(struct nilfs_bmap *btree, set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr)); ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr; if (NILFS_BMAP_USE_VBN(btree)) { - nilfs_btree_set_target_v(btree, key, ptr); + nilfs_bmap_set_target_v(btree, key, ptr); dat = nilfs_bmap_get_dat(btree); } @@ -1662,7 +1655,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, } if (NILFS_BMAP_USE_VBN(btree)) - nilfs_btree_set_target_v(btree, key, dreq->bpr_ptr); + nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr); } /** diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index cfc7218..3186130 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -116,13 +116,6 @@ nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key) return nilfs_bmap_find_target_in_group(direct); } -static void nilfs_direct_set_target_v(struct nilfs_bmap *direct, - __u64 key, __u64 ptr) -{ - direct->b_last_allocated_key = key; - direct->b_last_allocated_ptr = ptr; -} - static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) { union nilfs_bmap_ptr_req req; @@ -152,7 +145,7 @@ static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) nilfs_bmap_set_dirty(bmap); if (NILFS_BMAP_USE_VBN(bmap)) - nilfs_direct_set_target_v(bmap, key, req.bpr_ptr); + nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr); nilfs_bmap_add_blocks(bmap, 1); } -- cgit v1.1 From 05d0e94b66dbdf9d90371b39dc7a6b390ba74d41 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 10 Jul 2010 20:52:09 +0900 Subject: nilfs2: get rid of nilfs_bmap_union This removes nilfs_bmap_union and finally unifies three structures and the union in bmap/btree code into one. 
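Dropping the union is possible because struct nilfs_btree and struct nilfs_direct were plain wrappers around a single struct nilfs_bmap member, so the inode info can embed the bmap directly and recover the enclosing object with container_of(). The following self-contained sketch shows that pattern with simplified stand-in types (the real code uses struct nilfs_inode_info and the i_bmap_data member):

/* container_of_sketch.c - stand-in types, not the nilfs2 code itself */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bmap {			/* stands in for struct nilfs_bmap */
	int b_state;
};

struct inode_info {		/* stands in for struct nilfs_inode_info */
	unsigned long i_flags;
	struct bmap i_bmap_data;	/* embedded directly; no union needed */
};

static struct inode_info *BMAP_I(struct bmap *bmap)
{
	return container_of(bmap, struct inode_info, i_bmap_data);
}

int main(void)
{
	struct inode_info ii = { .i_flags = 7 };
	struct bmap *b = &ii.i_bmap_data;

	/* recover the enclosing inode_info from the embedded bmap member */
	printf("%lu\n", BMAP_I(b)->i_flags);
	return 0;
}

The NILFS_BMAP_I() hunk in nilfs.h below performs the same container_of() on the embedded i_bmap_data with the real types.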
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.c | 6 ++++-- fs/nilfs2/bmap_union.h | 42 ------------------------------------------ fs/nilfs2/btree.h | 8 -------- fs/nilfs2/direct.h | 11 ----------- fs/nilfs2/gcinode.c | 2 ++ fs/nilfs2/mdt.c | 1 + fs/nilfs2/nilfs.h | 7 ++----- fs/nilfs2/super.c | 4 +++- 8 files changed, 12 insertions(+), 69 deletions(-) delete mode 100644 fs/nilfs2/bmap_union.h diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index effdbdb..3dbdc1d 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -26,6 +26,8 @@ #include "nilfs.h" #include "bmap.h" #include "sb.h" +#include "btree.h" +#include "direct.h" #include "btnode.h" #include "mdt.h" #include "dat.h" @@ -533,7 +535,7 @@ void nilfs_bmap_init_gc(struct nilfs_bmap *bmap) void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) { - memcpy(gcbmap, bmap, sizeof(union nilfs_bmap_union)); + memcpy(gcbmap, bmap, sizeof(*bmap)); init_rwsem(&gcbmap->b_sem); lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); gcbmap->b_inode = &NILFS_BMAP_I(gcbmap)->vfs_inode; @@ -541,7 +543,7 @@ void nilfs_bmap_init_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) void nilfs_bmap_commit_gcdat(struct nilfs_bmap *gcbmap, struct nilfs_bmap *bmap) { - memcpy(bmap, gcbmap, sizeof(union nilfs_bmap_union)); + memcpy(bmap, gcbmap, sizeof(*bmap)); init_rwsem(&bmap->b_sem); lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key); bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode; diff --git a/fs/nilfs2/bmap_union.h b/fs/nilfs2/bmap_union.h deleted file mode 100644 index d41509b..0000000 --- a/fs/nilfs2/bmap_union.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * bmap_union.h - NILFS block mapping. - * - * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato . 
- */ - -#ifndef _NILFS_BMAP_UNION_H -#define _NILFS_BMAP_UNION_H - -#include "bmap.h" -#include "direct.h" -#include "btree.h" - -/** - * nilfs_bmap_union - - * @bi_bmap: bmap structure - * @bi_btree: direct map structure - * @bi_direct: B-tree structure - */ -union nilfs_bmap_union { - struct nilfs_bmap bi_bmap; - struct nilfs_direct bi_direct; - struct nilfs_btree bi_btree; -}; - -#endif /* _NILFS_BMAP_UNION_H */ diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index cffbfba..22c02e3 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -31,14 +31,6 @@ #include "bmap.h" /** - * struct nilfs_btree - B-tree structure - * @bt_bmap: bmap base structure - */ -struct nilfs_btree { - struct nilfs_bmap bt_bmap; -}; - -/** * struct nilfs_btree_path - A path on which B-tree operations are executed * @bp_bh: buffer head of node block * @bp_sib_bh: buffer head of sibling node block diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index a5ffd66..dc643de 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h @@ -28,8 +28,6 @@ #include "bmap.h" -struct nilfs_direct; - /** * struct nilfs_direct_node - direct node * @dn_flags: flags @@ -40,15 +38,6 @@ struct nilfs_direct_node { __u8 pad[7]; }; -/** - * struct nilfs_direct - direct mapping - * @d_bmap: bmap structure - */ -struct nilfs_direct { - struct nilfs_bmap d_bmap; -}; - - #define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1) #define NILFS_DIRECT_KEY_MIN 0 #define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1) diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index edb53fc..b634382 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -48,6 +48,8 @@ #include #include #include "nilfs.h" +#include "btree.h" +#include "btnode.h" #include "page.h" #include "mdt.h" #include "dat.h" diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 024be8c..d01aff4 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -28,6 +28,7 @@ #include #include #include "nilfs.h" +#include "btnode.h" #include "segment.h" #include "page.h" #include "mdt.h" diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 36998ea..cfedc48 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -32,7 +32,6 @@ #include "the_nilfs.h" #include "sb.h" #include "bmap.h" -#include "bmap_union.h" /* * nilfs inode data in memory @@ -41,7 +40,7 @@ struct nilfs_inode_info { __u32 i_flags; unsigned long i_state; /* Dynamic state flags */ struct nilfs_bmap *i_bmap; - union nilfs_bmap_union i_bmap_union; + struct nilfs_bmap i_bmap_data; __u64 i_xattr; /* sector_t ??? 
*/ __u32 i_dir_start_lookup; __u64 i_cno; /* check point number for GC inode */ @@ -71,9 +70,7 @@ static inline struct nilfs_inode_info *NILFS_I(const struct inode *inode) static inline struct nilfs_inode_info * NILFS_BMAP_I(const struct nilfs_bmap *bmap) { - return container_of((union nilfs_bmap_union *)bmap, - struct nilfs_inode_info, - i_bmap_union); + return container_of(bmap, struct nilfs_inode_info, i_bmap_data); } static inline struct inode *NILFS_BTNC_I(struct address_space *btnc) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 952f4cc..1644573 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -55,6 +55,8 @@ #include "nilfs.h" #include "mdt.h" #include "alloc.h" +#include "btree.h" +#include "btnode.h" #include "page.h" #include "cpfile.h" #include "ifile.h" @@ -1213,7 +1215,7 @@ static void nilfs_inode_init_once(void *obj) init_rwsem(&ii->xattr_sem); #endif nilfs_btnode_cache_init_once(&ii->i_btnode_cache); - ii->i_bmap = (struct nilfs_bmap *)&ii->i_bmap_union; + ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } -- cgit v1.1 From 364ec2d700223b965620ff4d5031a3665d195873 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 13 Jul 2010 23:33:51 +0900 Subject: nilfs2: remove redundant pointer checks in bmap lookup functions nilfs_bmap_lookup and its variants are supposed to take a valid pointer argument to return a block address, thus the pointer checks in nilfs_btree_lookup and nilfs_direct_lookup are needless. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 6 +----- fs/nilfs2/direct.c | 3 +-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 0543bf9..18bb965 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -555,17 +555,13 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *btree, __u64 key, int level, __u64 *ptrp) { struct nilfs_btree_path *path; - __u64 ptr; int ret; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; - ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level); - - if (ptrp != NULL) - *ptrp = ptr; + ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level); nilfs_btree_free_path(path); diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 3186130..324d80c 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -56,8 +56,7 @@ static int nilfs_direct_lookup(const struct nilfs_bmap *direct, if (ptr == NILFS_BMAP_INVALID_PTR) return -ENOENT; - if (ptrp != NULL) - *ptrp = ptr; + *ptrp = ptr; return 0; } -- cgit v1.1 From ea64ab87cdba9e1172392d247e6526359e301f12 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 13 Jul 2010 23:33:52 +0900 Subject: nilfs2: optimize calculation of min/max number of btree node children nilfs_btree_node_nchildren_max() and nilfs_btree_node_nchildren_min() functions switch their return value depending on whether the target node is the root or a node block. In most uses of these functions, however, the node type is fixed, and moreover the same calculation is repeatedly performed in a loop. This unfolds these functions depending on context and moves them outside loops wherever possible.
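The gain comes from the fact that, for a given block size, every non-root node has the same capacity; only the root differs. The maximum can therefore be computed once before a descent or rebalancing loop instead of once per visited node, which is what the hunks below do by caching it in an ncmax local. A rough standalone sketch of the transformation, using illustrative constants rather than the real on-disk node layout:

/* hoist_sketch.c - illustrative constants, not the real nilfs2 layout */
#include <stdio.h>

#define NODE_HEADER_SIZE 16	/* assumed per-node header size */
#define ENTRY_SIZE	 16	/* assumed size of one key plus one pointer */

/* capacity of a non-root node depends only on the block size */
static int nchildren_per_block(int blocksize)
{
	return (blocksize - NODE_HEADER_SIZE) / ENTRY_SIZE;
}

int main(void)
{
	int blocksize = 4096;
	/* computed once, outside the loop, instead of per visited node */
	int ncmax = nchildren_per_block(blocksize);
	int index;

	for (index = 0; index < 3; index++) {
		if (index < ncmax)	/* the loop body only compares against ncmax */
			printf("index %d fits, ncmax=%d\n", index, ncmax);
	}
	return 0;
}

The root node keeps its own smaller constant (NILFS_BTREE_ROOT_NCHILDREN_MAX in the hunks below), so only the non-root case needs the block-size computation.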
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 18bb965..c0266f7 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -473,7 +473,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, { struct nilfs_btree_node *node; __u64 ptr; - int level, index, found, ret; + int level, index, found, ncmax, ret; node = nilfs_btree_get_root(btree); level = nilfs_btree_node_get_level(node); @@ -485,6 +485,8 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, path[level].bp_bh = NULL; path[level].bp_index = index; + ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + for (level--; level >= minlevel; level--) { ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); if (ret < 0) @@ -496,9 +498,9 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, found = nilfs_btree_node_lookup(node, key, &index); else index = 0; - if (index < nilfs_btree_node_nchildren_max(node, btree)) + if (index < ncmax) { ptr = nilfs_btree_node_get_ptr(btree, node, index); - else { + } else { WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN); /* insert */ ptr = NILFS_BMAP_INVALID_PTR; @@ -921,7 +923,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; - int pindex, level, ret; + int pindex, level, ncmax, ret; struct inode *dat = NULL; stats->bs_nblocks = 0; @@ -938,12 +940,13 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, if (ret < 0) goto err_out_data; + ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); - if (nilfs_btree_node_get_nchildren(node) < - nilfs_btree_node_nchildren_max(node, btree)) { + if (nilfs_btree_node_get_nchildren(node) < ncmax) { path[level].bp_op = nilfs_btree_do_insert; stats->bs_nblocks++; goto out; @@ -960,8 +963,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) < - nilfs_btree_node_nchildren_max(sib, btree)) { + if (nilfs_btree_node_get_nchildren(sib) < ncmax) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_left; stats->bs_nblocks++; @@ -979,8 +981,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) < - nilfs_btree_node_nchildren_max(sib, btree)) { + if (nilfs_btree_node_get_nchildren(sib) < ncmax) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_right; stats->bs_nblocks++; @@ -1014,7 +1015,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, /* root */ node = nilfs_btree_get_root(btree); if (nilfs_btree_node_get_nchildren(node) < - nilfs_btree_node_nchildren_max(node, btree)) { + NILFS_BTREE_ROOT_NCHILDREN_MAX) { path[level].bp_op = nilfs_btree_do_insert; stats->bs_nblocks++; goto out; @@ -1281,10 +1282,12 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; - int pindex, level, ret; + int pindex, level, ncmin, ret; ret = 0; stats->bs_nblocks = 0; + ncmin = 
NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); + for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < nilfs_btree_height(btree) - 1; level++) { @@ -1297,8 +1300,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, if (ret < 0) goto err_out_child_node; - if (nilfs_btree_node_get_nchildren(node) > - nilfs_btree_node_nchildren_min(node, btree)) { + if (nilfs_btree_node_get_nchildren(node) > ncmin) { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; goto out; @@ -1315,8 +1317,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, if (ret < 0) goto err_out_curr_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) > - nilfs_btree_node_nchildren_min(sib, btree)) { + if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_left; stats->bs_nblocks++; @@ -1336,8 +1337,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, if (ret < 0) goto err_out_curr_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) > - nilfs_btree_node_nchildren_min(sib, btree)) { + if (nilfs_btree_node_get_nchildren(sib) > ncmin) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_borrow_right; stats->bs_nblocks++; -- cgit v1.1 From 9b7b265c9ab67fcd1245d6b64fa5ca2eda43ac88 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 13 Jul 2010 23:33:53 +0900 Subject: nilfs2: reduce repetitive calculation of max number of child nodes The current btree implementation repeats the same calculation on the maximum number of child nodes. This is because a few low level routines use the calculation for index addressing in a btree node block. This reduces the calculation by explicitly passing the maximum number of child nodes (ncmax) through their argument. This changes parameter passing of the following functions: - nilfs_btree_node_dptrs - nilfs_btree_node_get_ptr - nilfs_btree_node_set_ptr - nilfs_btree_node_init - nilfs_btree_node_move_left - nilfs_btree_node_move_right - nilfs_btree_node_insert - nilfs_btree_node_delete, and - nilfs_btree_get_node The following functions are removed: - nilfs_btree_node_nchildren_min - nilfs_btree_node_nchildren_max Most middle level btree operations are rewritten to pass a proper ncmax value depending on whether each occurrence of node is "root" or not. A constant NILFS_BTREE_ROOT_NCHILDREN_MAX is used for the root node, whereas nilfs_btree_nchildren_per_block() function is used for non-root nodes. If a node could be either root or a non-root node, an output argument of nilfs_btree_get_node() is used to set up ncmax. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 339 +++++++++++++++++++++++++++++------------------------- 1 file changed, 182 insertions(+), 157 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index c0266f7..829e145 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -152,22 +152,9 @@ static inline int nilfs_btree_node_size(const struct nilfs_bmap *btree) return 1 << btree->b_inode->i_blkbits; } -static inline int -nilfs_btree_node_nchildren_min(const struct nilfs_btree_node *node, - const struct nilfs_bmap *btree) -{ - return nilfs_btree_node_root(node) ? 
- NILFS_BTREE_ROOT_NCHILDREN_MIN : - NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); -} - -static inline int -nilfs_btree_node_nchildren_max(const struct nilfs_btree_node *node, - const struct nilfs_bmap *btree) +static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) { - return nilfs_btree_node_root(node) ? - NILFS_BTREE_ROOT_NCHILDREN_MAX : - NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + return NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); } static inline __le64 * @@ -179,11 +166,9 @@ nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) } static inline __le64 * -nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, - const struct nilfs_bmap *btree) +nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax) { - return (__le64 *)(nilfs_btree_node_dkeys(node) + - nilfs_btree_node_nchildren_max(node, btree)); + return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax); } static inline __u64 @@ -199,22 +184,21 @@ nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) } static inline __u64 -nilfs_btree_node_get_ptr(const struct nilfs_bmap *btree, - const struct nilfs_btree_node *node, int index) +nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index, + int ncmax) { - return le64_to_cpu(*(nilfs_btree_node_dptrs(node, btree) + index)); + return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index)); } static inline void -nilfs_btree_node_set_ptr(struct nilfs_bmap *btree, - struct nilfs_btree_node *node, int index, __u64 ptr) +nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr, + int ncmax) { - *(nilfs_btree_node_dptrs(node, btree) + index) = cpu_to_le64(ptr); + *(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr); } -static void nilfs_btree_node_init(struct nilfs_bmap *btree, - struct nilfs_btree_node *node, - int flags, int level, int nchildren, +static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags, + int level, int nchildren, int ncmax, const __u64 *keys, const __u64 *ptrs) { __le64 *dkeys; @@ -226,7 +210,7 @@ static void nilfs_btree_node_init(struct nilfs_bmap *btree, nilfs_btree_node_set_nchildren(node, nchildren); dkeys = nilfs_btree_node_dkeys(node); - dptrs = nilfs_btree_node_dptrs(node, btree); + dptrs = nilfs_btree_node_dptrs(node, ncmax); for (i = 0; i < nchildren; i++) { dkeys[i] = cpu_to_le64(keys[i]); dptrs[i] = cpu_to_le64(ptrs[i]); @@ -234,21 +218,20 @@ static void nilfs_btree_node_init(struct nilfs_bmap *btree, } /* Assume the buffer heads corresponding to left and right are locked. 
*/ -static void nilfs_btree_node_move_left(struct nilfs_bmap *btree, - struct nilfs_btree_node *left, +static void nilfs_btree_node_move_left(struct nilfs_btree_node *left, struct nilfs_btree_node *right, - int n) + int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); - ldptrs = nilfs_btree_node_dptrs(left, btree); + ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); - rdptrs = nilfs_btree_node_dptrs(right, btree); + rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys)); @@ -263,21 +246,20 @@ static void nilfs_btree_node_move_left(struct nilfs_bmap *btree, } /* Assume that the buffer heads corresponding to left and right are locked. */ -static void nilfs_btree_node_move_right(struct nilfs_bmap *btree, - struct nilfs_btree_node *left, +static void nilfs_btree_node_move_right(struct nilfs_btree_node *left, struct nilfs_btree_node *right, - int n) + int n, int lncmax, int rncmax) { __le64 *ldkeys, *rdkeys; __le64 *ldptrs, *rdptrs; int lnchildren, rnchildren; ldkeys = nilfs_btree_node_dkeys(left); - ldptrs = nilfs_btree_node_dptrs(left, btree); + ldptrs = nilfs_btree_node_dptrs(left, lncmax); lnchildren = nilfs_btree_node_get_nchildren(left); rdkeys = nilfs_btree_node_dkeys(right); - rdptrs = nilfs_btree_node_dptrs(right, btree); + rdptrs = nilfs_btree_node_dptrs(right, rncmax); rnchildren = nilfs_btree_node_get_nchildren(right); memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys)); @@ -292,16 +274,15 @@ static void nilfs_btree_node_move_right(struct nilfs_bmap *btree, } /* Assume that the buffer head corresponding to node is locked. */ -static void nilfs_btree_node_insert(struct nilfs_bmap *btree, - struct nilfs_btree_node *node, - __u64 key, __u64 ptr, int index) +static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index, + __u64 key, __u64 ptr, int ncmax) { __le64 *dkeys; __le64 *dptrs; int nchildren; dkeys = nilfs_btree_node_dkeys(node); - dptrs = nilfs_btree_node_dptrs(node, btree); + dptrs = nilfs_btree_node_dptrs(node, ncmax); nchildren = nilfs_btree_node_get_nchildren(node); if (index < nchildren) { memmove(dkeys + index + 1, dkeys + index, @@ -316,9 +297,8 @@ static void nilfs_btree_node_insert(struct nilfs_bmap *btree, } /* Assume that the buffer head corresponding to node is locked. 
*/ -static void nilfs_btree_node_delete(struct nilfs_bmap *btree, - struct nilfs_btree_node *node, - __u64 *keyp, __u64 *ptrp, int index) +static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index, + __u64 *keyp, __u64 *ptrp, int ncmax) { __u64 key; __u64 ptr; @@ -327,7 +307,7 @@ static void nilfs_btree_node_delete(struct nilfs_bmap *btree, int nchildren; dkeys = nilfs_btree_node_dkeys(node); - dptrs = nilfs_btree_node_dptrs(node, btree); + dptrs = nilfs_btree_node_dptrs(node, ncmax); key = le64_to_cpu(dkeys[index]); ptr = le64_to_cpu(dptrs[index]); nchildren = nilfs_btree_node_get_nchildren(node); @@ -445,14 +425,21 @@ static inline int nilfs_btree_height(const struct nilfs_bmap *btree) return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; } -static inline struct nilfs_btree_node * +static struct nilfs_btree_node * nilfs_btree_get_node(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, - int level) + int level, int *ncmaxp) { - return (level == nilfs_btree_height(btree) - 1) ? - nilfs_btree_get_root(btree) : - nilfs_btree_get_nonroot_node(path, level); + struct nilfs_btree_node *node; + + if (level == nilfs_btree_height(btree) - 1) { + node = nilfs_btree_get_root(btree); + *ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX; + } else { + node = nilfs_btree_get_nonroot_node(path, level); + *ncmaxp = nilfs_btree_nchildren_per_block(btree); + } + return node; } static inline int @@ -481,11 +468,12 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, return -ENOENT; found = nilfs_btree_node_lookup(node, key, &index); - ptr = nilfs_btree_node_get_ptr(btree, node, index); + ptr = nilfs_btree_node_get_ptr(node, index, + NILFS_BTREE_ROOT_NCHILDREN_MAX); path[level].bp_bh = NULL; path[level].bp_index = index; - ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + ncmax = nilfs_btree_nchildren_per_block(btree); for (level--; level >= minlevel; level--) { ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); @@ -499,7 +487,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, else index = 0; if (index < ncmax) { - ptr = nilfs_btree_node_get_ptr(btree, node, index); + ptr = nilfs_btree_node_get_ptr(node, index, ncmax); } else { WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN); /* insert */ @@ -522,16 +510,18 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, { struct nilfs_btree_node *node; __u64 ptr; - int index, level, ret; + int index, level, ncmax, ret; node = nilfs_btree_get_root(btree); index = nilfs_btree_node_get_nchildren(node) - 1; if (index < 0) return -ENOENT; level = nilfs_btree_node_get_level(node); - ptr = nilfs_btree_node_get_ptr(btree, node, index); + ptr = nilfs_btree_node_get_ptr(node, index, + NILFS_BTREE_ROOT_NCHILDREN_MAX); path[level].bp_bh = NULL; path[level].bp_index = index; + ncmax = nilfs_btree_nchildren_per_block(btree); for (level--; level > 0; level--) { ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); @@ -541,7 +531,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, if (nilfs_btree_bad_node(node, level)) return -EINVAL; index = nilfs_btree_node_get_nchildren(node) - 1; - ptr = nilfs_btree_node_get_ptr(btree, node, index); + ptr = nilfs_btree_node_get_ptr(node, index, ncmax); path[level].bp_index = index; } @@ -579,7 +569,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, __u64 ptr, ptr2; sector_t blocknr; int level = NILFS_BTREE_LEVEL_NODE_MIN; - int ret, cnt, index, maxlevel; + int ret, 
cnt, index, maxlevel, ncmax; path = nilfs_btree_alloc_path(); if (path == NULL) @@ -601,14 +591,14 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, goto end; maxlevel = nilfs_btree_height(btree) - 1; - node = nilfs_btree_get_node(btree, path, level); + node = nilfs_btree_get_node(btree, path, level, &ncmax); index = path[level].bp_index + 1; for (;;) { while (index < nilfs_btree_node_get_nchildren(node)) { if (nilfs_btree_node_get_key(node, index) != key + cnt) goto end; - ptr2 = nilfs_btree_node_get_ptr(btree, node, index); + ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax); if (dat) { ret = nilfs_dat_translate(dat, ptr2, &blocknr); if (ret < 0) @@ -624,12 +614,12 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, break; /* look-up right sibling node */ - node = nilfs_btree_get_node(btree, path, level + 1); + node = nilfs_btree_get_node(btree, path, level + 1, &ncmax); index = path[level + 1].bp_index + 1; if (index >= nilfs_btree_node_get_nchildren(node) || nilfs_btree_node_get_key(node, index) != key + cnt) break; - ptr2 = nilfs_btree_node_get_ptr(btree, node, index); + ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax); path[level + 1].bp_index = index; brelse(path[level].bp_bh); @@ -638,6 +628,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, if (ret < 0) goto out; node = nilfs_btree_get_nonroot_node(path, level); + ncmax = nilfs_btree_nchildren_per_block(btree); index = 0; path[level].bp_index = index; } @@ -676,11 +667,13 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node; + int ncblk; if (level < nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_nonroot_node(path, level); - nilfs_btree_node_insert(btree, node, *keyp, *ptrp, - path[level].bp_index); + ncblk = nilfs_btree_nchildren_per_block(btree); + nilfs_btree_node_insert(node, path[level].bp_index, + *keyp, *ptrp, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -690,8 +683,9 @@ static void nilfs_btree_do_insert(struct nilfs_bmap *btree, 0)); } else { node = nilfs_btree_get_root(btree); - nilfs_btree_node_insert(btree, node, *keyp, *ptrp, - path[level].bp_index); + nilfs_btree_node_insert(node, path[level].bp_index, + *keyp, *ptrp, + NILFS_BTREE_ROOT_NCHILDREN_MAX); } } @@ -700,12 +694,13 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; - int nchildren, lnchildren, n, move; + int nchildren, lnchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); left = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); lnchildren = nilfs_btree_node_get_nchildren(left); + ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + lnchildren + 1) / 2 - lnchildren; @@ -715,7 +710,7 @@ static void nilfs_btree_carry_left(struct nilfs_bmap *btree, move = 1; } - nilfs_btree_node_move_left(btree, left, node, n); + nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -745,12 +740,13 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; - int nchildren, rnchildren, n, move; + int nchildren, rnchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); 
nchildren = nilfs_btree_node_get_nchildren(node); rnchildren = nilfs_btree_node_get_nchildren(right); + ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + rnchildren + 1) / 2 - rnchildren; @@ -760,7 +756,7 @@ static void nilfs_btree_carry_right(struct nilfs_bmap *btree, move = 1; } - nilfs_btree_node_move_right(btree, node, right, n); + nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -793,11 +789,12 @@ static void nilfs_btree_split(struct nilfs_bmap *btree, struct nilfs_btree_node *node, *right; __u64 newkey; __u64 newptr; - int nchildren, n, move; + int nchildren, n, move, ncblk; node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); + ncblk = nilfs_btree_nchildren_per_block(btree); move = 0; n = (nchildren + 1) / 2; @@ -806,7 +803,7 @@ static void nilfs_btree_split(struct nilfs_bmap *btree, move = 1; } - nilfs_btree_node_move_right(btree, node, right, n); + nilfs_btree_node_move_right(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -818,8 +815,8 @@ static void nilfs_btree_split(struct nilfs_bmap *btree, if (move) { path[level].bp_index -= nilfs_btree_node_get_nchildren(node); - nilfs_btree_node_insert(btree, right, *keyp, *ptrp, - path[level].bp_index); + nilfs_btree_node_insert(right, path[level].bp_index, + *keyp, *ptrp, ncblk); *keyp = nilfs_btree_node_get_key(right, 0); *ptrp = path[level].bp_newreq.bpr_ptr; @@ -845,14 +842,16 @@ static void nilfs_btree_grow(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *root, *child; - int n; + int n, ncblk; root = nilfs_btree_get_root(btree); child = nilfs_btree_get_sib_node(path, level); + ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(root); - nilfs_btree_node_move_right(btree, root, child, n); + nilfs_btree_node_move_right(root, child, n, + NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk); nilfs_btree_node_set_level(root, level + 1); if (!buffer_dirty(path[level].bp_sib_bh)) @@ -871,7 +870,7 @@ static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path) { struct nilfs_btree_node *node; - int level; + int level, ncmax; if (path == NULL) return NILFS_BMAP_INVALID_PTR; @@ -879,17 +878,18 @@ static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree, /* left sibling */ level = NILFS_BTREE_LEVEL_NODE_MIN; if (path[level].bp_index > 0) { - node = nilfs_btree_get_node(btree, path, level); - return nilfs_btree_node_get_ptr(btree, node, - path[level].bp_index - 1); + node = nilfs_btree_get_node(btree, path, level, &ncmax); + return nilfs_btree_node_get_ptr(node, + path[level].bp_index - 1, + ncmax); } /* parent */ level = NILFS_BTREE_LEVEL_NODE_MIN + 1; if (level <= nilfs_btree_height(btree) - 1) { - node = nilfs_btree_get_node(btree, path, level); - return nilfs_btree_node_get_ptr(btree, node, - path[level].bp_index); + node = nilfs_btree_get_node(btree, path, level, &ncmax); + return nilfs_btree_node_get_ptr(node, path[level].bp_index, + ncmax); } return NILFS_BMAP_INVALID_PTR; @@ -923,7 +923,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; - int pindex, level, ncmax, ret; + int pindex, level, ncmax, ncblk, ret; struct inode *dat = NULL; stats->bs_nblocks = 
0; @@ -940,54 +940,55 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, if (ret < 0) goto err_out_data; - ncmax = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + ncblk = nilfs_btree_nchildren_per_block(btree); for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); - if (nilfs_btree_node_get_nchildren(node) < ncmax) { + if (nilfs_btree_node_get_nchildren(node) < ncblk) { path[level].bp_op = nilfs_btree_do_insert; stats->bs_nblocks++; goto out; } - parent = nilfs_btree_get_node(btree, path, level + 1); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; /* left sibling */ if (pindex > 0) { - sibptr = nilfs_btree_node_get_ptr(btree, parent, - pindex - 1); + sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1, + ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) < ncmax) { + if (nilfs_btree_node_get_nchildren(sib) < ncblk) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_left; stats->bs_nblocks++; goto out; - } else + } else { brelse(bh); + } } /* right sibling */ - if (pindex < - nilfs_btree_node_get_nchildren(parent) - 1) { - sibptr = nilfs_btree_node_get_ptr(btree, parent, - pindex + 1); + if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) { + sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1, + ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_child_node; sib = (struct nilfs_btree_node *)bh->b_data; - if (nilfs_btree_node_get_nchildren(sib) < ncmax) { + if (nilfs_btree_node_get_nchildren(sib) < ncblk) { path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_carry_right; stats->bs_nblocks++; goto out; - } else + } else { brelse(bh); + } } /* split */ @@ -1005,9 +1006,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, stats->bs_nblocks++; - nilfs_btree_node_init(btree, - (struct nilfs_btree_node *)bh->b_data, - 0, level, 0, NULL, NULL); + sib = (struct nilfs_btree_node *)bh->b_data; + nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL); path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_split; } @@ -1031,8 +1031,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree, if (ret < 0) goto err_out_curr_node; - nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data, - 0, level, 0, NULL, NULL); + nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data, + 0, level, 0, ncblk, NULL, NULL); path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_grow; @@ -1122,11 +1122,13 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node; + int ncblk; if (level < nilfs_btree_height(btree) - 1) { node = nilfs_btree_get_nonroot_node(path, level); - nilfs_btree_node_delete(btree, node, keyp, ptrp, - path[level].bp_index); + ncblk = nilfs_btree_nchildren_per_block(btree); + nilfs_btree_node_delete(node, path[level].bp_index, + keyp, ptrp, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); if (path[level].bp_index == 0) @@ -1134,8 +1136,9 @@ static void nilfs_btree_do_delete(struct nilfs_bmap *btree, nilfs_btree_node_get_key(node, 0)); } else { node = nilfs_btree_get_root(btree); - nilfs_btree_node_delete(btree, node, keyp, ptrp, - path[level].bp_index); + 
nilfs_btree_node_delete(node, path[level].bp_index, + keyp, ptrp, + NILFS_BTREE_ROOT_NCHILDREN_MAX); } } @@ -1144,7 +1147,7 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; - int nchildren, lnchildren, n; + int nchildren, lnchildren, n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); @@ -1152,10 +1155,11 @@ static void nilfs_btree_borrow_left(struct nilfs_bmap *btree, left = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); lnchildren = nilfs_btree_node_get_nchildren(left); + ncblk = nilfs_btree_nchildren_per_block(btree); n = (nchildren + lnchildren) / 2 - nchildren; - nilfs_btree_node_move_right(btree, left, node, n); + nilfs_btree_node_move_right(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -1175,7 +1179,7 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; - int nchildren, rnchildren, n; + int nchildren, rnchildren, n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); @@ -1183,10 +1187,11 @@ static void nilfs_btree_borrow_right(struct nilfs_bmap *btree, right = nilfs_btree_get_sib_node(path, level); nchildren = nilfs_btree_node_get_nchildren(node); rnchildren = nilfs_btree_node_get_nchildren(right); + ncblk = nilfs_btree_nchildren_per_block(btree); n = (nchildren + rnchildren) / 2 - nchildren; - nilfs_btree_node_move_left(btree, node, right, n); + nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -1207,16 +1212,17 @@ static void nilfs_btree_concat_left(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *left; - int n; + int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); left = nilfs_btree_get_sib_node(path, level); + ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(node); - nilfs_btree_node_move_left(btree, left, node, n); + nilfs_btree_node_move_left(left, node, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_sib_bh)) nilfs_btnode_mark_dirty(path[level].bp_sib_bh); @@ -1232,16 +1238,17 @@ static void nilfs_btree_concat_right(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *node, *right; - int n; + int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); node = nilfs_btree_get_nonroot_node(path, level); right = nilfs_btree_get_sib_node(path, level); + ncblk = nilfs_btree_nchildren_per_block(btree); n = nilfs_btree_node_get_nchildren(right); - nilfs_btree_node_move_left(btree, node, right, n); + nilfs_btree_node_move_left(node, right, n, ncblk, ncblk); if (!buffer_dirty(path[level].bp_bh)) nilfs_btnode_mark_dirty(path[level].bp_bh); @@ -1256,17 +1263,20 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree, int level, __u64 *keyp, __u64 *ptrp) { struct nilfs_btree_node *root, *child; - int n; + int n, ncblk; nilfs_btree_do_delete(btree, path, level, keyp, ptrp); root = nilfs_btree_get_root(btree); child = nilfs_btree_get_nonroot_node(path, level); + ncblk = nilfs_btree_nchildren_per_block(btree); - nilfs_btree_node_delete(btree, root, NULL, NULL, 0); + nilfs_btree_node_delete(root, 0, NULL, NULL, + NILFS_BTREE_ROOT_NCHILDREN_MAX); nilfs_btree_node_set_level(root, level); n = 
nilfs_btree_node_get_nchildren(child); - nilfs_btree_node_move_left(btree, root, child, n); + nilfs_btree_node_move_left(root, child, n, + NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk); nilfs_btnode_delete(path[level].bp_bh); path[level].bp_bh = NULL; @@ -1282,19 +1292,20 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; - int pindex, level, ncmin, ret; + int pindex, level, ncmin, ncmax, ncblk, ret; ret = 0; stats->bs_nblocks = 0; ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); + ncblk = nilfs_btree_nchildren_per_block(btree); for (level = NILFS_BTREE_LEVEL_NODE_MIN; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); path[level].bp_oldreq.bpr_ptr = - nilfs_btree_node_get_ptr(btree, node, - path[level].bp_index); + nilfs_btree_node_get_ptr(node, path[level].bp_index, + ncblk); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) @@ -1306,13 +1317,13 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, goto out; } - parent = nilfs_btree_get_node(btree, path, level + 1); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; if (pindex > 0) { /* left sibling */ - sibptr = nilfs_btree_node_get_ptr(btree, parent, - pindex - 1); + sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1, + ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; @@ -1331,8 +1342,8 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, } else if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) { /* right sibling */ - sibptr = nilfs_btree_node_get_ptr(btree, parent, - pindex + 1); + sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1, + ncmax); ret = nilfs_btree_get_block(btree, sibptr, &bh); if (ret < 0) goto err_out_curr_node; @@ -1368,7 +1379,8 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, node = nilfs_btree_get_root(btree); path[level].bp_oldreq.bpr_ptr = - nilfs_btree_node_get_ptr(btree, node, path[level].bp_index); + nilfs_btree_node_get_ptr(node, path[level].bp_index, + NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) @@ -1476,7 +1488,8 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key) nchildren = nilfs_btree_node_get_nchildren(root); if (nchildren > 1) return 0; - ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); + ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, + NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; @@ -1504,22 +1517,25 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *btree, __le64 *dkeys; __le64 *dptrs; __u64 ptr; - int nchildren, i, ret; + int nchildren, ncmax, i, ret; root = nilfs_btree_get_root(btree); switch (nilfs_btree_height(btree)) { case 2: bh = NULL; node = root; + ncmax = NILFS_BTREE_ROOT_NCHILDREN_MAX; break; case 3: nchildren = nilfs_btree_node_get_nchildren(root); WARN_ON(nchildren > 1); - ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1); + ptr = nilfs_btree_node_get_ptr(root, nchildren - 1, + NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_btree_get_block(btree, ptr, &bh); if (ret < 0) return ret; node = (struct nilfs_btree_node *)bh->b_data; + ncmax = nilfs_btree_nchildren_per_block(btree); break; default: node = NULL; @@ -1530,7 +1546,7 @@ static int nilfs_btree_gather_data(struct 
nilfs_bmap *btree, if (nchildren < nitems) nitems = nchildren; dkeys = nilfs_btree_node_dkeys(node); - dptrs = nilfs_btree_node_dptrs(node, btree); + dptrs = nilfs_btree_node_dptrs(node, ncmax); for (i = 0; i < nitems; i++) { keys[i] = le64_to_cpu(dkeys[i]); ptrs[i] = le64_to_cpu(dptrs[i]); @@ -1607,6 +1623,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, struct nilfs_btree_node *node; struct inode *dat; __u64 tmpptr; + int ncblk; /* free resources */ if (btree->b_ops->bop_clear != NULL) @@ -1624,8 +1641,9 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, /* create child node at level 1 */ node = (struct nilfs_btree_node *)bh->b_data; - nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs); - nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n); + ncblk = nilfs_btree_nchildren_per_block(btree); + nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs); + nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); if (!buffer_dirty(bh)) nilfs_btnode_mark_dirty(bh); if (!nilfs_bmap_dirty(btree)) @@ -1636,16 +1654,19 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, /* create root node at level 2 */ node = nilfs_btree_get_root(btree); tmpptr = nreq->bpr_ptr; - nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, - 2, 1, &keys[0], &tmpptr); + nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 2, 1, + NILFS_BTREE_ROOT_NCHILDREN_MAX, + &keys[0], &tmpptr); } else { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); /* create root node at level 1 */ node = nilfs_btree_get_root(btree); - nilfs_btree_node_init(btree, node, NILFS_BTREE_NODE_ROOT, - 1, n, keys, ptrs); - nilfs_btree_node_insert(btree, node, key, dreq->bpr_ptr, n); + nilfs_btree_node_init(node, NILFS_BTREE_NODE_ROOT, 1, n, + NILFS_BTREE_ROOT_NCHILDREN_MAX, + keys, ptrs); + nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, + NILFS_BTREE_ROOT_NCHILDREN_MAX); if (!nilfs_bmap_dirty(btree)) nilfs_bmap_set_dirty(btree); } @@ -1712,12 +1733,12 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree, int level, struct inode *dat) { struct nilfs_btree_node *parent; - int ret; + int ncmax, ret; - parent = nilfs_btree_get_node(btree, path, level + 1); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); path[level].bp_oldreq.bpr_ptr = - nilfs_btree_node_get_ptr(btree, parent, - path[level + 1].bp_index); + nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, + ncmax); path[level].bp_newreq.bpr_ptr = path[level].bp_oldreq.bpr_ptr + 1; ret = nilfs_dat_prepare_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req); @@ -1747,6 +1768,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, int level, struct inode *dat) { struct nilfs_btree_node *parent; + int ncmax; nilfs_dat_commit_update(dat, &path[level].bp_oldreq.bpr_req, &path[level].bp_newreq.bpr_req, @@ -1760,9 +1782,9 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree, } set_buffer_nilfs_volatile(path[level].bp_bh); - parent = nilfs_btree_get_node(btree, path, level + 1); - nilfs_btree_node_set_ptr(btree, parent, path[level + 1].bp_index, - path[level].bp_newreq.bpr_ptr); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); + nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, + path[level].bp_newreq.bpr_ptr, ncmax); } static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree, @@ -1835,6 +1857,7 @@ static int nilfs_btree_propagate_v(struct nilfs_bmap *btree, struct nilfs_btree_node *parent; struct inode *dat = 
nilfs_bmap_get_dat(btree); __u64 ptr; + int ncmax; get_bh(bh); path[level].bp_bh = bh; @@ -1844,9 +1867,10 @@ static int nilfs_btree_propagate_v(struct nilfs_bmap *btree, goto out; if (buffer_nilfs_volatile(path[level].bp_bh)) { - parent = nilfs_btree_get_node(btree, path, level + 1); - ptr = nilfs_btree_node_get_ptr(btree, parent, - path[level + 1].bp_index); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); + ptr = nilfs_btree_node_get_ptr(parent, + path[level + 1].bp_index, + ncmax); ret = nilfs_dat_mark_dirty(dat, ptr); if (ret < 0) goto out; @@ -1990,11 +2014,11 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree, struct nilfs_btree_node *parent; __u64 key; __u64 ptr; - int ret; + int ncmax, ret; - parent = nilfs_btree_get_node(btree, path, level + 1); - ptr = nilfs_btree_node_get_ptr(btree, parent, - path[level + 1].bp_index); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); + ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, + ncmax); if (buffer_nilfs_node(*bh)) { path[level].bp_ctxt.oldkey = ptr; path[level].bp_ctxt.newkey = blocknr; @@ -2010,8 +2034,8 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree, *bh = path[level].bp_ctxt.bh; } - nilfs_btree_node_set_ptr(btree, parent, - path[level + 1].bp_index, blocknr); + nilfs_btree_node_set_ptr(parent, path[level + 1].bp_index, blocknr, + ncmax); key = nilfs_btree_node_get_key(parent, path[level + 1].bp_index); /* on-disk format */ @@ -2033,10 +2057,11 @@ static int nilfs_btree_assign_v(struct nilfs_bmap *btree, __u64 key; __u64 ptr; union nilfs_bmap_ptr_req req; - int ret; + int ncmax, ret; - parent = nilfs_btree_get_node(btree, path, level + 1); - ptr = nilfs_btree_node_get_ptr(btree, parent, path[level + 1].bp_index); + parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); + ptr = nilfs_btree_node_get_ptr(parent, path[level + 1].bp_index, + ncmax); req.bpr_ptr = ptr; ret = nilfs_dat_prepare_start(dat, &req.bpr_req); if (ret < 0) -- cgit v1.1 From 5ad2686e9266f24a0bb76b01d5c3ae29b4e149fe Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 13 Jul 2010 23:33:54 +0900 Subject: nilfs2: get maximum number of child nodes from bmap object The patch "reduce repetitive calculation of max number of child nodes" gathered up the calculation of maximum number of child nodes into nilfs_btree_nchildren_per_block() function. This makes the function get resultant value from a private variable in bmap object instead of calculating it for each call. 
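As a rough, self-contained illustration of the caching described above (a simplified userspace model; the 16-byte header assumed in toy_max_children() is an assumption for illustration, the real constants come from the nilfs2 node layout), the maximum child count depends only on the node block size, so it is computed once when the object is set up and merely read back afterwards:

#include <stdint.h>

/* Toy stand-in for the bmap object; the cached field mirrors the
 * b_nchildren_per_block member added by this patch. */
struct toy_bmap {
	unsigned int blocksize;			/* btree node block size in bytes */
	uint16_t b_nchildren_per_block;		/* cached maximum child count */
};

/* Assumed layout: a small fixed header followed by ncmax 64-bit keys
 * and ncmax 64-bit pointers. */
static int toy_max_children(unsigned int blocksize)
{
	const unsigned int header = 16;		/* assumption, not the real constant */
	const unsigned int per_child = sizeof(uint64_t) + sizeof(uint64_t);

	return (int)((blocksize - header) / per_child);
}

static void toy_bmap_init(struct toy_bmap *b, unsigned int blocksize)
{
	b->blocksize = blocksize;
	b->b_nchildren_per_block = (uint16_t)toy_max_children(blocksize);
}

static int toy_nchildren_per_block(const struct toy_bmap *b)
{
	return b->b_nchildren_per_block;	/* no per-call recomputation */
}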
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.h | 2 ++ fs/nilfs2/btree.c | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index fae83cf..a20569b 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -105,6 +105,7 @@ static inline int nilfs_bmap_is_new_ptr(unsigned long ptr) * @b_last_allocated_ptr: last allocated ptr for data block * @b_ptr_type: pointer type * @b_state: state + * @b_nchildren_per_block: maximum number of child nodes for non-root nodes */ struct nilfs_bmap { union { @@ -118,6 +119,7 @@ struct nilfs_bmap { __u64 b_last_allocated_ptr; int b_ptr_type; int b_state; + __u16 b_nchildren_per_block; }; /* pointer type */ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 829e145..7089d90 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -154,7 +154,7 @@ static inline int nilfs_btree_node_size(const struct nilfs_bmap *btree) static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) { - return NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(btree)); + return btree->b_nchildren_per_block; } static inline __le64 * @@ -2218,10 +2218,14 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { int nilfs_btree_init(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops; + bmap->b_nchildren_per_block = + NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); return 0; } void nilfs_btree_init_gc(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops_gc; + bmap->b_nchildren_per_block = + NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); } -- cgit v1.1 From 7c397a81fe90c0445df2873700d14e82cca5fbc8 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 13 Jul 2010 23:33:55 +0900 Subject: nilfs2: eliminate inline keywords in btree implementation This removes all inline uses from btree.c. Gcc now agressively apply inline expansion even for the functions declared without the keyword; the inline use in btree.c looks excessive. 
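For illustration only (a general compiler-behaviour note with hypothetical names, not code from this patch): with optimization enabled, GCC normally inlines a small static accessor like the one below on its own, so dropping the explicit keyword does not usually change the generated code.

/* Hypothetical example; built with -O2, calls to this accessor are
 * typically inlined even though no "inline" keyword is given. */
struct node_header {
	int flags;
	int level;
	int nchildren;
};

static int node_get_level(const struct node_header *n)
{
	return n->level;
}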
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 7089d90..6462c70 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -106,48 +106,45 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, return 0; } -static inline int -nilfs_btree_node_get_flags(const struct nilfs_btree_node *node) +static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node) { return node->bn_flags; } -static inline void +static void nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags) { node->bn_flags = flags; } -static inline int nilfs_btree_node_root(const struct nilfs_btree_node *node) +static int nilfs_btree_node_root(const struct nilfs_btree_node *node) { return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT; } -static inline int -nilfs_btree_node_get_level(const struct nilfs_btree_node *node) +static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node) { return node->bn_level; } -static inline void +static void nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level) { node->bn_level = level; } -static inline int -nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node) +static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node) { return le16_to_cpu(node->bn_nchildren); } -static inline void +static void nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren) { node->bn_nchildren = cpu_to_le16(nchildren); } -static inline int nilfs_btree_node_size(const struct nilfs_bmap *btree) +static int nilfs_btree_node_size(const struct nilfs_bmap *btree) { return 1 << btree->b_inode->i_blkbits; } @@ -157,7 +154,7 @@ static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree) return btree->b_nchildren_per_block; } -static inline __le64 * +static __le64 * nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) { return (__le64 *)((char *)(node + 1) + @@ -165,32 +162,32 @@ nilfs_btree_node_dkeys(const struct nilfs_btree_node *node) 0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE)); } -static inline __le64 * +static __le64 * nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax) { return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax); } -static inline __u64 +static __u64 nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index) { return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index)); } -static inline void +static void nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key) { *(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key); } -static inline __u64 +static __u64 nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index, int ncmax) { return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index)); } -static inline void +static void nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr, int ncmax) { @@ -402,25 +399,25 @@ int nilfs_btree_broken_node_block(struct buffer_head *bh) bh->b_size, bh->b_blocknr); } -static inline struct nilfs_btree_node * +static struct nilfs_btree_node * nilfs_btree_get_root(const struct nilfs_bmap *btree) { return (struct nilfs_btree_node *)btree->b_u.u_data; } -static inline struct nilfs_btree_node * +static struct nilfs_btree_node * nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level) { return (struct nilfs_btree_node *)path[level].bp_bh->b_data; } -static inline 
struct nilfs_btree_node * +static struct nilfs_btree_node * nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level) { return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data; } -static inline int nilfs_btree_height(const struct nilfs_bmap *btree) +static int nilfs_btree_height(const struct nilfs_bmap *btree) { return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1; } @@ -442,7 +439,7 @@ nilfs_btree_get_node(const struct nilfs_bmap *btree, return node; } -static inline int +static int nilfs_btree_bad_node(struct nilfs_btree_node *node, int level) { if (unlikely(nilfs_btree_node_get_level(node) != level)) { -- cgit v1.1 From f8e6cc013b896d75d6ce4ec9e168014af1257fd8 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 15 Jul 2010 11:39:10 +0900 Subject: nilfs2: fix buffer head leak in nilfs_btnode_submit_block nilfs_btnode_submit_block() refers to buffer head just before returning from the function, but it releases the buffer head earlier than that if nilfs_dat_translate() gets an error. This has potential for oops in the erroneous case. This fixes the issue. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btnode.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 447ce47..0a6834b 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -100,6 +100,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, { struct buffer_head *bh; struct inode *inode = NILFS_BTNC_I(btnc); + struct page *page; int err; bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node); @@ -107,6 +108,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, return -ENOMEM; err = -EEXIST; /* internal code */ + page = bh->b_page; if (buffer_uptodate(bh) || buffer_dirty(bh)) goto found; @@ -143,8 +145,8 @@ found: *pbh = bh; out_locked: - unlock_page(bh->b_page); - page_cache_release(bh->b_page); + unlock_page(page); + page_cache_release(page); return err; } -- cgit v1.1 From 26dfdd8e29f28c08aa67861b3c83d0f3f7d30cee Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 18 Jul 2010 10:42:23 +0900 Subject: nilfs2: add read ahead mode to nilfs_btnode_submit_block This adds mode argument to nilfs_btnode_submit_block() function and allows it to issue a read-ahead request. An optional submit_ptr argument is also added to store the actual block address for which bio is sent. submit_ptr is used for a series of read-ahead requests, and helps to decide if each requested block is continous to the previous one on disk. 
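A minimal sketch of the submit_ptr idea, with hypothetical names and the actual block I/O left abstract: the caller threads a cursor through successive calls, and a read-ahead request is only issued when the requested physical block immediately follows the one submitted last.

#include <stdint.h>

typedef uint64_t block_addr_t;	/* stand-in for the kernel's sector_t */

/* submit_fn is a placeholder for the real asynchronous block read. */
static int maybe_submit_readahead(block_addr_t pblocknr, block_addr_t *submit_ptr,
				  void (*submit_fn)(block_addr_t pblocknr))
{
	if (pblocknr != *submit_ptr + 1)
		return -1;		/* not contiguous on disk: skip this one */

	submit_fn(pblocknr);		/* issue the read-ahead */
	*submit_ptr = pblocknr;		/* remember it for the next candidate */
	return 0;
}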
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btnode.c | 17 ++++++++++++++--- fs/nilfs2/btnode.h | 4 ++-- fs/nilfs2/btree.c | 3 ++- fs/nilfs2/gcinode.c | 6 ++++-- 4 files changed, 22 insertions(+), 8 deletions(-) diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 0a6834b..f78ab10 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -96,7 +96,8 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) } int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, - sector_t pblocknr, struct buffer_head **pbh) + sector_t pblocknr, int mode, + struct buffer_head **pbh, sector_t *submit_ptr) { struct buffer_head *bh; struct inode *inode = NILFS_BTNC_I(btnc); @@ -127,7 +128,16 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, } } } - lock_buffer(bh); + + if (mode == READA) { + if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) { + err = -EBUSY; /* internal code */ + brelse(bh); + goto out_locked; + } + } else { /* mode == READ */ + lock_buffer(bh); + } if (buffer_uptodate(bh)) { unlock_buffer(bh); err = -EEXIST; /* internal code */ @@ -138,8 +148,9 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, bh->b_blocknr = pblocknr; /* set block address for read */ bh->b_end_io = end_buffer_read_sync; get_bh(bh); - submit_bh(READ, bh); + submit_bh(mode, bh); bh->b_blocknr = blocknr; /* set back to the given block address */ + *submit_ptr = pblocknr; err = 0; found: *pbh = bh; diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 07da83f..7903749 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -42,8 +42,8 @@ void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr); -int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, - struct buffer_head **); +int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int, + struct buffer_head **, sector_t *); void nilfs_btnode_delete(struct buffer_head *); int nilfs_btnode_prepare_change_key(struct address_space *, struct nilfs_btnode_chkey_ctxt *); diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 6462c70..4669389 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -71,9 +71,10 @@ static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, { struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; struct buffer_head *bh; + sector_t pbn = 0; int err; - err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp); + err = nilfs_btnode_submit_block(btnc, ptr, pbn, READ, bhp, &pbn); if (err) return err == -EEXIST ? 0 : err; diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index b634382..bed3a78 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -151,8 +151,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) { - int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, - vbn ? : pbn, pbn, out_bh); + int ret; + + ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache, + vbn ? 
: pbn, pbn, READ, out_bh, &pbn); if (ret == -EEXIST) /* internal code (cache hit) */ ret = 0; return ret; -- cgit v1.1 From 464ece88630d0fb715ca942eabb1da825046a534 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 18 Jul 2010 10:42:24 +0900 Subject: nilfs2: add btree get block function with readahead option This adds __nilfs_btree_get_block() function that can issue a series of read-ahead requests for sibling btree nodes. This read-ahead needs parent node block, so nilfs_btree_readahead_info structure is added to pass the information that __nilfs_btree_get_block() needs. This also replaces the previous nilfs_btree_get_block() implementation with a wrapper function of __nilfs_btree_get_block(). Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 94 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 68 insertions(+), 26 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 4669389..1b5321c 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -66,32 +66,6 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path) /* * B-tree node operations */ -static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, - struct buffer_head **bhp) -{ - struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; - struct buffer_head *bh; - sector_t pbn = 0; - int err; - - err = nilfs_btnode_submit_block(btnc, ptr, pbn, READ, bhp, &pbn); - if (err) - return err == -EEXIST ? 0 : err; - - bh = *bhp; - wait_on_buffer(bh); - if (!buffer_uptodate(bh)) { - brelse(bh); - return -EIO; - } - if (nilfs_btree_broken_node_block(bh)) { - clear_buffer_uptodate(bh); - brelse(bh); - return -EINVAL; - } - return 0; -} - static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree, __u64 ptr, struct buffer_head **bhp) { @@ -452,6 +426,74 @@ nilfs_btree_bad_node(struct nilfs_btree_node *node, int level) return 0; } +struct nilfs_btree_readahead_info { + struct nilfs_btree_node *node; /* parent node */ + int max_ra_blocks; /* max nof blocks to read ahead */ + int index; /* current index on the parent node */ + int ncmax; /* nof children in the parent node */ +}; + +static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, + struct buffer_head **bhp, + const struct nilfs_btree_readahead_info *ra) +{ + struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache; + struct buffer_head *bh, *ra_bh; + sector_t submit_ptr = 0; + int ret; + + ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr); + if (ret) { + if (ret != -EEXIST) + return ret; + goto out_check; + } + + if (ra) { + int i, n; + __u64 ptr2; + + /* read ahead sibling nodes */ + for (n = ra->max_ra_blocks, i = ra->index + 1; + n > 0 && i < ra->ncmax; n--, i++) { + ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax); + + ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA, + &ra_bh, &submit_ptr); + if (likely(!ret || ret == -EEXIST)) + brelse(ra_bh); + else if (ret != -EBUSY) + break; + if (!buffer_locked(bh)) + goto out_no_wait; + } + } + + wait_on_buffer(bh); + + out_no_wait: + if (!buffer_uptodate(bh)) { + brelse(bh); + return -EIO; + } + + out_check: + if (nilfs_btree_broken_node_block(bh)) { + clear_buffer_uptodate(bh); + brelse(bh); + return -EINVAL; + } + + *bhp = bh; + return 0; +} + +static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, + struct buffer_head **bhp) +{ + return __nilfs_btree_get_block(btree, ptr, bhp, NULL); +} + static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct 
nilfs_btree_path *path, __u64 key, __u64 *ptrp, int minlevel) -- cgit v1.1 From 4e13e66bee2d792c1aae21797f16c181024834eb Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 18 Jul 2010 10:42:25 +0900 Subject: nilfs2: introduce check flag to btree node buffer nilfs_btree_get_block() now may return untested buffer due to read-ahead. This adds a new flag for buffer heads so that the btree code can check whether the buffer is already verified or not. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 10 +++++++++- fs/nilfs2/page.c | 5 ++++- fs/nilfs2/page.h | 2 ++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 1b5321c..d3faa0b 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -370,8 +370,16 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, int nilfs_btree_broken_node_block(struct buffer_head *bh) { - return nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, + int ret; + + if (buffer_nilfs_checked(bh)) + return 0; + + ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data, bh->b_size, bh->b_blocknr); + if (likely(!ret)) + set_buffer_nilfs_checked(bh); + return ret; } static struct nilfs_btree_node * diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 8de3e1e..aab11db 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -37,7 +37,8 @@ #define NILFS_BUFFER_INHERENT_BITS \ ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \ - (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated)) + (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated) | \ + (1UL << BH_NILFS_Checked)) static struct buffer_head * __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, @@ -129,6 +130,7 @@ void nilfs_forget_buffer(struct buffer_head *bh) lock_buffer(bh); clear_buffer_nilfs_volatile(bh); + clear_buffer_nilfs_checked(bh); clear_buffer_dirty(bh); if (nilfs_page_buffers_clean(page)) __nilfs_clear_page_dirty(page); @@ -480,6 +482,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping) lock_buffer(bh); clear_buffer_dirty(bh); clear_buffer_nilfs_volatile(bh); + clear_buffer_nilfs_checked(bh); clear_buffer_uptodate(bh); clear_buffer_mapped(bh); unlock_buffer(bh); diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 8abca4d..f53d8da 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -34,11 +34,13 @@ enum { BH_NILFS_Allocated = BH_PrivateStart, BH_NILFS_Node, BH_NILFS_Volatile, + BH_NILFS_Checked, }; BUFFER_FNS(NILFS_Allocated, nilfs_allocated) /* nilfs private buffers */ BUFFER_FNS(NILFS_Node, nilfs_node) /* nilfs node buffers */ BUFFER_FNS(NILFS_Volatile, nilfs_volatile) +BUFFER_FNS(NILFS_Checked, nilfs_checked) /* buffer is verified */ void nilfs_mark_buffer_dirty(struct buffer_head *bh); -- cgit v1.1 From 03bdb5ac58a2144dfe8cfd73347fdb9f57e2e062 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 18 Jul 2010 10:42:26 +0900 Subject: nilfs2: apply read-ahead for nilfs_btree_lookup_contig This applies read-ahead to nilfs_btree_do_lookup and nilfs_btree_lookup_contig functions and extends them to read ahead siblings of level 1 btree nodes that hold data blocks. At present, the read-ahead is not applied to most btree operations; only get_block() callback function, which is used during read of regular files or directories, receives the benefit. 
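As a hedged sketch of the sibling read-ahead window wired into the lookup here (a flat-array model with illustrative names; the real code walks the parent btree node, goes through the btnode cache, and additionally skips blocks that are not contiguous on disk, as in the previous patches): starting from the child right after the current index, at most max_ra_blocks siblings are submitted, never running past the end of the parent. This series uses a window of 7 blocks.

#include <stdint.h>

static void readahead_siblings(const uint64_t *child_ptrs, int nchildren,
			       int cur_index, int max_ra_blocks,
			       void (*submit_ra)(uint64_t ptr))
{
	int i, n;

	for (n = max_ra_blocks, i = cur_index + 1;
	     n > 0 && i < nchildren; n--, i++)
		submit_ra(child_ptrs[i]);	/* asynchronous hint only */
}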
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 50 +++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index d3faa0b..300c2bc 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -504,9 +504,11 @@ static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, struct nilfs_btree_path *path, - __u64 key, __u64 *ptrp, int minlevel) + __u64 key, __u64 *ptrp, int minlevel, + int readahead) { struct nilfs_btree_node *node; + struct nilfs_btree_readahead_info p, *ra; __u64 ptr; int level, index, found, ncmax, ret; @@ -523,10 +525,20 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree, ncmax = nilfs_btree_nchildren_per_block(btree); - for (level--; level >= minlevel; level--) { - ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh); + while (--level >= minlevel) { + ra = NULL; + if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) { + p.node = nilfs_btree_get_node(btree, path, level + 1, + &p.ncmax); + p.index = index; + p.max_ra_blocks = 7; + ra = &p; + } + ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh, + ra); if (ret < 0) return ret; + node = nilfs_btree_get_nonroot_node(path, level); if (nilfs_btree_bad_node(node, level)) return -EINVAL; @@ -601,7 +613,7 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *btree, if (path == NULL) return -ENOMEM; - ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level); + ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0); nilfs_btree_free_path(path); @@ -618,12 +630,13 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, sector_t blocknr; int level = NILFS_BTREE_LEVEL_NODE_MIN; int ret, cnt, index, maxlevel, ncmax; + struct nilfs_btree_readahead_info p; path = nilfs_btree_alloc_path(); if (path == NULL) return -ENOMEM; - ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level); + ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1); if (ret < 0) goto out; @@ -662,17 +675,20 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, break; /* look-up right sibling node */ - node = nilfs_btree_get_node(btree, path, level + 1, &ncmax); - index = path[level + 1].bp_index + 1; - if (index >= nilfs_btree_node_get_nchildren(node) || - nilfs_btree_node_get_key(node, index) != key + cnt) + p.node = nilfs_btree_get_node(btree, path, level + 1, &p.ncmax); + p.index = path[level + 1].bp_index + 1; + p.max_ra_blocks = 7; + if (p.index >= nilfs_btree_node_get_nchildren(p.node) || + nilfs_btree_node_get_key(p.node, p.index) != key + cnt) break; - ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax); - path[level + 1].bp_index = index; + ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax); + path[level + 1].bp_index = p.index; brelse(path[level].bp_bh); path[level].bp_bh = NULL; - ret = nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh); + + ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh, + &p); if (ret < 0) goto out; node = nilfs_btree_get_nonroot_node(path, level); @@ -1147,7 +1163,7 @@ static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr) return -ENOMEM; ret = nilfs_btree_do_lookup(btree, path, key, NULL, - NILFS_BTREE_LEVEL_NODE_MIN); + NILFS_BTREE_LEVEL_NODE_MIN, 0); if (ret != -ENOENT) { if (ret == 0) ret = -EEXIST; @@ -1484,7 +1500,7 @@ static int nilfs_btree_delete(struct nilfs_bmap *btree, __u64 key) return -ENOMEM; ret = 
nilfs_btree_do_lookup(btree, path, key, NULL, - NILFS_BTREE_LEVEL_NODE_MIN); + NILFS_BTREE_LEVEL_NODE_MIN, 0); if (ret < 0) goto out; @@ -1955,7 +1971,7 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree, level = NILFS_BTREE_LEVEL_DATA; } - ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1); + ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { if (unlikely(ret == -ENOENT)) printk(KERN_CRIT "%s: key = %llu, level == %d\n", @@ -2147,7 +2163,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *btree, level = NILFS_BTREE_LEVEL_DATA; } - ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1); + ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; @@ -2201,7 +2217,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *btree, __u64 key, int level) if (path == NULL) return -ENOMEM; - ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1); + ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1, 0); if (ret < 0) { WARN_ON(ret == -ENOENT); goto out; -- cgit v1.1 From 2f1b7cd29fa4917f19d2624afc773d941684c5df Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 22 Jul 2010 03:22:18 +0900 Subject: nilfs2: clarify byte offset in super block format This inserts comments indicating hexadecimal offset in declaration of nilfs_super_block structure so that people can know offset of its fields without counting from the head. Signed-off-by: Ryusuke Konishi --- include/linux/nilfs2_fs.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 8c2c611..cc3465e 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -160,7 +160,7 @@ struct nilfs_super_root { * struct nilfs_super_block - structure of super block on disk */ struct nilfs_super_block { - __le32 s_rev_level; /* Revision level */ +/*00*/ __le32 s_rev_level; /* Revision level */ __le16 s_minor_rev_level; /* minor revision level */ __le16 s_magic; /* Magic signature */ @@ -169,47 +169,47 @@ struct nilfs_super_block { is excluded. */ __le16 s_flags; /* flags */ __le32 s_crc_seed; /* Seed value of CRC calculation */ - __le32 s_sum; /* Check sum of super block */ +/*10*/ __le32 s_sum; /* Check sum of super block */ __le32 s_log_block_size; /* Block size represented as follows blocksize = 1 << (s_log_block_size + 10) */ __le64 s_nsegments; /* Number of segments in filesystem */ - __le64 s_dev_size; /* block device size in bytes */ +/*20*/ __le64 s_dev_size; /* block device size in bytes */ __le64 s_first_data_block; /* 1st seg disk block number */ - __le32 s_blocks_per_segment; /* number of blocks per full segment */ +/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */ __le32 s_r_segments_percentage; /* Reserved segments percentage */ __le64 s_last_cno; /* Last checkpoint number */ - __le64 s_last_pseg; /* disk block addr pseg written last */ +/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */ __le64 s_last_seq; /* seq. 
number of seg written last */ - __le64 s_free_blocks_count; /* Free blocks count */ +/*50*/ __le64 s_free_blocks_count; /* Free blocks count */ __le64 s_ctime; /* Creation time (execution time of newfs) */ - __le64 s_mtime; /* Mount time */ +/*60*/ __le64 s_mtime; /* Mount time */ __le64 s_wtime; /* Write time */ - __le16 s_mnt_count; /* Mount count */ +/*70*/ __le16 s_mnt_count; /* Mount count */ __le16 s_max_mnt_count; /* Maximal mount count */ __le16 s_state; /* File system state */ __le16 s_errors; /* Behaviour when detecting errors */ __le64 s_lastcheck; /* time of last check */ - __le32 s_checkinterval; /* max. time between checks */ +/*80*/ __le32 s_checkinterval; /* max. time between checks */ __le32 s_creator_os; /* OS */ __le16 s_def_resuid; /* Default uid for reserved blocks */ __le16 s_def_resgid; /* Default gid for reserved blocks */ __le32 s_first_ino; /* First non-reserved inode */ - __le16 s_inode_size; /* Size of an inode */ +/*90*/ __le16 s_inode_size; /* Size of an inode */ __le16 s_dat_entry_size; /* Size of a dat entry */ __le16 s_checkpoint_size; /* Size of a checkpoint */ __le16 s_segment_usage_size; /* Size of a segment usage */ - __u8 s_uuid[16]; /* 128-bit uuid for volume */ - char s_volume_name[80]; /* volume name */ +/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ +/*A8*/ char s_volume_name[80]; /* volume name */ - __le32 s_c_interval; /* Commit interval of segment */ +/*F8*/ __le32 s_c_interval; /* Commit interval of segment */ __le32 s_c_block_max; /* Threshold of data amount for the segment construction */ __u32 s_reserved[192]; /* padding to the end of the block */ -- cgit v1.1 From 1a80a1763fb760b3a84a28df87515f7cdc07a4f4 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 22 Jul 2010 03:22:19 +0900 Subject: nilfs2: add feature set fields to super block This adds three new fields to nilfs_super_block structure, compatible feature set, readonly-compatible feature set, and incompatible feature set in order to prepare for future disk format modifications. The role of these fields conforms to those of ext3 or other filesystems. Most important flags are the incompatible feature set; it is used to refuse to mount the filesystem which sets an incompatible feature the kernel doesn't know about. Signed-off-by: Ryusuke Konishi --- include/linux/nilfs2_fs.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index cc3465e..7dd4cd4 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -212,7 +212,10 @@ struct nilfs_super_block { /*F8*/ __le32 s_c_interval; /* Commit interval of segment */ __le32 s_c_block_max; /* Threshold of data amount for the segment construction */ - __u32 s_reserved[192]; /* padding to the end of the block */ +/*100*/ __le64 s_feature_compat; /* Compatible feature set */ + __le64 s_feature_compat_ro; /* Read-only compatible feature set */ + __le64 s_feature_incompat; /* Incompatible feature set */ + __u32 s_reserved[186]; /* padding to the end of the block */ }; /* @@ -228,6 +231,16 @@ struct nilfs_super_block { #define NILFS_MINOR_REV 0 /* minor revision */ /* + * Feature set definitions + * + * If there is a bit set in the incompatible feature set that the kernel + * doesn't know about, it should refuse to mount the filesystem. 
+ */ +#define NILFS_FEATURE_COMPAT_SUPP 0ULL +#define NILFS_FEATURE_COMPAT_RO_SUPP 0ULL +#define NILFS_FEATURE_INCOMPAT_SUPP 0ULL + +/* * Bytes count of super_block for CRC-calculation */ #define NILFS_SB_BYTES \ -- cgit v1.1 From c5ca48aabe8b11674bf1102abe52d17ecc053f9c Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Thu, 22 Jul 2010 03:22:20 +0900 Subject: nilfs2: reject incompatible filesystem This forces nilfs to check compatibility of feature flags so as to reject a filesystem with unknown features when it mounts or remounts the filesystem. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/nilfs.h | 2 ++ fs/nilfs2/super.c | 39 +++++++++++++++++++++++++++++++++++++++ fs/nilfs2/the_nilfs.c | 20 ++++++++++++++++++++ 3 files changed, 61 insertions(+) diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index cfedc48..0842d77 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -275,6 +275,8 @@ extern struct nilfs_super_block * nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); extern int nilfs_store_magic_and_option(struct super_block *, struct nilfs_super_block *, char *); +extern int nilfs_check_feature_compatibility(struct super_block *, + struct nilfs_super_block *); extern void nilfs_set_log_cursor(struct nilfs_super_block *, struct the_nilfs *); extern struct nilfs_super_block **nilfs_prepare_super(struct nilfs_sb_info *, diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 1644573..26078b3 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -790,6 +790,30 @@ int nilfs_store_magic_and_option(struct super_block *sb, return !parse_options(data, sb, 0) ? -EINVAL : 0 ; } +int nilfs_check_feature_compatibility(struct super_block *sb, + struct nilfs_super_block *sbp) +{ + __u64 features; + + features = le64_to_cpu(sbp->s_feature_incompat) & + ~NILFS_FEATURE_INCOMPAT_SUPP; + if (features) { + printk(KERN_ERR "NILFS: couldn't mount because of unsupported " + "optional features (%llx)\n", + (unsigned long long)features); + return -EINVAL; + } + features = le64_to_cpu(sbp->s_feature_compat_ro) & + ~NILFS_FEATURE_COMPAT_RO_SUPP; + if (!(sb->s_flags & MS_RDONLY) && features) { + printk(KERN_ERR "NILFS: couldn't mount RDWR because of " + "unsupported optional features (%llx)\n", + (unsigned long long)features); + return -EINVAL; + } + return 0; +} + /** * nilfs_fill_super() - initialize a super block instance * @sb: super_block @@ -984,11 +1008,26 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) nilfs_cleanup_super(sbi); up_write(&nilfs->ns_sem); } else { + __u64 features; + /* * Mounting a RDONLY partition read-write, so reread and * store the current valid flag. (It may have been changed * by fsck since we originally mounted the partition.) 
*/ + down_read(&nilfs->ns_sem); + features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & + ~NILFS_FEATURE_COMPAT_RO_SUPP; + up_read(&nilfs->ns_sem); + if (features) { + printk(KERN_WARNING "NILFS (device %s): couldn't " + "remount RDWR because of unsupported optional " + "features (%llx)\n", + sb->s_id, (unsigned long long)features); + err = -EROFS; + goto restore_opts; + } + sb->s_flags &= ~MS_RDONLY; err = nilfs_attach_segment_constructor(sbi); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index f2efc8c..da67b56 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -385,11 +385,23 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) goto skip_recovery; if (s_flags & MS_RDONLY) { + __u64 features; + if (nilfs_test_opt(sbi, NORECOVERY)) { printk(KERN_INFO "NILFS: norecovery option specified. " "skipping roll-forward recovery\n"); goto skip_recovery; } + features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & + ~NILFS_FEATURE_COMPAT_RO_SUPP; + if (features) { + printk(KERN_ERR "NILFS: couldn't proceed with " + "recovery because of unsupported optional " + "features (%llx)\n", + (unsigned long long)features); + err = -EROFS; + goto failed_unload; + } if (really_read_only) { printk(KERN_ERR "NILFS: write access " "unavailable, cannot proceed.\n"); @@ -644,6 +656,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) if (err) goto out; + err = nilfs_check_feature_compatibility(sb, sbp); + if (err) + goto out; + blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); if (sb->s_blocksize != blocksize && !sb_set_blocksize(sb, blocksize)) { @@ -669,6 +685,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) if (err) goto failed_sbh; + err = nilfs_check_feature_compatibility(sb, sbp); + if (err) + goto failed_sbh; + blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); if (sb->s_blocksize != blocksize) { int hw_blocksize = bdev_logical_block_size(sb->s_bdev); -- cgit v1.1 From c28e69d9332aab739920082a0a5677d861390824 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 24 Jul 2010 17:09:10 +0900 Subject: nilfs2: simplify nilfs_get_page function Implementation of nilfs_get_page() is a bit old as below: - A common read_mapping_page inline function is now available instead of its read_cache_page use. - wait_on_page_locked() use in the function is eliminable since read_cache_page function does the same thing through wait_on_page_read(). - PageUptodate() check is eliminable for the same reason. This renews nilfs_get_page() based on these points. 
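In kernel context the simplified pattern described above reduces to roughly the following shape (essentially the patched helper in the diff below): read_mapping_page() waits for the read and returns an ERR_PTR on failure, so only the error check and the kmap remain with the caller.

static struct page *get_dir_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (!IS_ERR(page))
		kmap(page);	/* directory-page validation would follow here */
	return page;
}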
Signed-off-by: Ryusuke Konishi 
---
 fs/nilfs2/dir.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 85c89df..d8d183e 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -199,13 +199,10 @@ fail:
 static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
 {
 	struct address_space *mapping = dir->i_mapping;
-	struct page *page = read_cache_page(mapping, n,
-				(filler_t *)mapping->a_ops->readpage, NULL);
+	struct page *page = read_mapping_page(mapping, n, NULL);
+
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (!PageChecked(page))
 			nilfs_check_page(page);
 		if (PageError(page))
-- 
cgit v1.1


From 6cda9fa2575ec0869fe77b0bdf295c0e51868cab Mon Sep 17 00:00:00 2001
From: Ryusuke Konishi 
Date: Sun, 25 Jul 2010 20:39:03 +0900
Subject: nilfs2: avoid rec_len overflow with 64KB block size

With a 64KB block size, a directory entry can be 64KB long, which does not
fit into the 16 bits available for the entry length.  This patch therefore
stores 0xffff on disk in that case and converts the value when it is read
from or written to disk.  Nilfs derives its directory implementation from
the ext2 filesystem, and this change follows the corresponding fix in ext2.

Signed-off-by: Ryusuke Konishi 
---
 fs/nilfs2/dir.c           | 26 ++++++++++++++------------
 include/linux/nilfs2_fs.h | 18 ++++++++++++++++++
 2 files changed, 32 insertions(+), 12 deletions(-)

diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index d8d183e..b60277b 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -141,7 +141,7 @@ static void nilfs_check_page(struct page *page)
 	}
 	for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
 		p = (struct nilfs_dir_entry *)(kaddr + offs);
-		rec_len = le16_to_cpu(p->rec_len);
+		rec_len = nilfs_rec_len_from_disk(p->rec_len);
 
 		if (rec_len < NILFS_DIR_REC_LEN(1))
 			goto Eshort;
@@ -235,7 +235,8 @@ nilfs_match(int len, const unsigned char *name, struct nilfs_dir_entry *de)
  */
 static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
 {
-	return (struct nilfs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len));
+	return (struct nilfs_dir_entry *)((char *)p +
+					  nilfs_rec_len_from_disk(p->rec_len));
 }
 
 static unsigned char
@@ -326,7 +327,7 @@ static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 				goto success;
 			}
 		}
-		filp->f_pos += le16_to_cpu(de->rec_len);
+		filp->f_pos += nilfs_rec_len_from_disk(de->rec_len);
 	}
 	nilfs_put_page(page);
 }
@@ -441,7 +442,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
 		    struct page *page, struct inode *inode)
 {
 	unsigned from = (char *) de - (char *) page_address(page);
-	unsigned to = from + le16_to_cpu(de->rec_len);
+	unsigned to = from + nilfs_rec_len_from_disk(de->rec_len);
 	struct address_space *mapping = page->mapping;
 	int err;
 
@@ -497,7 +498,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
 				/* We hit i_size */
 				name_len = 0;
 				rec_len = chunk_size;
-				de->rec_len = cpu_to_le16(chunk_size);
+				de->rec_len = nilfs_rec_len_to_disk(chunk_size);
 				de->inode = 0;
 				goto got_it;
 			}
@@ -511,7 +512,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
 			if (nilfs_match(namelen, name, de))
 				goto out_unlock;
 			name_len = NILFS_DIR_REC_LEN(de->name_len);
-			rec_len = le16_to_cpu(de->rec_len);
+			rec_len = nilfs_rec_len_from_disk(de->rec_len);
 			if (!de->inode && rec_len >= reclen)
 				goto got_it;
 			if (rec_len >= name_len + reclen)
@@ -534,8 +535,8 @@ got_it:
 		struct nilfs_dir_entry *de1;
 
 		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
-		de1->rec_len = cpu_to_le16(rec_len - name_len);
-		de->rec_len = cpu_to_le16(name_len);
+		de1->rec_len = nilfs_rec_len_to_disk(rec_len - name_len);
+		de->rec_len = nilfs_rec_len_to_disk(name_len);
 		de = de1;
 	}
 	de->name_len = namelen;
@@ -566,7 +567,8 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
 	struct inode *inode = mapping->host;
 	char *kaddr = page_address(page);
 	unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
-	unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len);
+	unsigned to = ((char *)dir - kaddr) +
+		      nilfs_rec_len_from_disk(dir->rec_len);
 	struct nilfs_dir_entry *pde = NULL;
 	struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
 	int err;
@@ -587,7 +589,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
 	err = nilfs_prepare_chunk(page, mapping, from, to);
 	BUG_ON(err);
 	if (pde)
-		pde->rec_len = cpu_to_le16(to - from);
+		pde->rec_len = nilfs_rec_len_to_disk(to - from);
 	dir->inode = 0;
 	nilfs_commit_chunk(page, mapping, from, to);
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -621,14 +623,14 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	memset(kaddr, 0, chunk_size);
 	de = (struct nilfs_dir_entry *)kaddr;
 	de->name_len = 1;
-	de->rec_len = cpu_to_le16(NILFS_DIR_REC_LEN(1));
+	de->rec_len = nilfs_rec_len_to_disk(NILFS_DIR_REC_LEN(1));
 	memcpy(de->name, ".\0\0", 4);
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
 
 	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
 	de->name_len = 2;
-	de->rec_len = cpu_to_le16(chunk_size - NILFS_DIR_REC_LEN(1));
+	de->rec_len = nilfs_rec_len_to_disk(chunk_size - NILFS_DIR_REC_LEN(1));
 	de->inode = cpu_to_le64(parent->i_ino);
 	memcpy(de->name, "..\0", 4);
 	nilfs_set_de_type(de, inode);
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 7dd4cd4..970828a 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -326,7 +326,25 @@ enum {
 #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
 #define NILFS_DIR_REC_LEN(name_len)	(((name_len) + 12 + NILFS_DIR_ROUND) & \
 					~NILFS_DIR_ROUND)
+#define NILFS_MAX_REC_LEN	((1<<16)-1)
+static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
+{
+	unsigned len = le16_to_cpu(dlen);
+
+	if (len == NILFS_MAX_REC_LEN)
+		return 1 << 16;
+	return len;
+}
+
+static inline __le16 nilfs_rec_len_to_disk(unsigned len)
+{
+	if (len == (1 << 16))
+		return cpu_to_le16(NILFS_MAX_REC_LEN);
+	else if (len > (1 << 16))
+		BUG();
+	return cpu_to_le16(len);
+}
 
 /**
  * struct nilfs_finfo - file information
-- 
cgit v1.1


From 89c0fd014d34d409a7b196667c2b9a4813b6c968 Mon Sep 17 00:00:00 2001
From: Ryusuke Konishi 
Date: Sun, 25 Jul 2010 22:44:53 +0900
Subject: nilfs2: reject filesystem with unsupported block size

This adds a sanity check that refuses to mount a filesystem with an
unsupported block size.  Previously, the kernel honored only the
limitations of the underlying device, even though mkfs.nilfs2 restricts
the range of block sizes; there was no check preventing rec_len overflow
with larger block sizes.  With this change, block sizes larger than 64KB
or smaller than 1KB are rejected explicitly by the kernel.
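To illustrate the rec_len limit that motivates the 64KB upper bound, here is
a minimal sketch built on the helpers introduced by the previous patch (the
wrapper function is hypothetical and not part of either patch, and the
NILFS_MAX_BLOCK_SIZE constant is the one added by the patch below): a
directory entry spanning a whole 64KB block is representable only through
the 0xffff encoding, and anything larger cannot be stored in the on-disk
__le16 field at all.

#include <linux/nilfs2_fs.h>

/*
 * Sketch: check that a whole-block directory entry of @blocksize bytes
 * survives the on-disk __le16 round trip.  65536 maps to 0xffff and back;
 * values above NILFS_MAX_BLOCK_SIZE cannot be encoded.
 */
static inline int example_rec_len_fits(unsigned int blocksize)
{
	if (blocksize > NILFS_MAX_BLOCK_SIZE)
		return 0;	/* would overflow even the 0xffff encoding */
	return nilfs_rec_len_from_disk(nilfs_rec_len_to_disk(blocksize)) ==
		blocksize;
}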
Signed-off-by: Ryusuke Konishi 
---
 fs/nilfs2/the_nilfs.c     | 9 ++++++++-
 include/linux/nilfs2_fs.h | 6 ++++++
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index da67b56..37de1f0 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -671,7 +671,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 		goto out;
 	}
 
-	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
+	blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
 	if (!blocksize) {
 		printk(KERN_ERR "NILFS: unable to set blocksize\n");
 		err = -EINVAL;
@@ -690,6 +690,13 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
 		goto failed_sbh;
 
 	blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
+	if (blocksize < NILFS_MIN_BLOCK_SIZE ||
+	    blocksize > NILFS_MAX_BLOCK_SIZE) {
+		printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
+		       "filesystem blocksize %d\n", blocksize);
+		err = -EINVAL;
+		goto failed_sbh;
+	}
 	if (sb->s_blocksize != blocksize) {
 		int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
 
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 970828a..f5487b6 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -287,6 +287,12 @@ struct nilfs_super_block {
 #define NILFS_NAME_LEN 255
 
 /*
+ * Block size limitations
+ */
+#define NILFS_MIN_BLOCK_SIZE		1024
+#define NILFS_MAX_BLOCK_SIZE		65536
+
+/*
  * The new version of the directory entry.  Since V0 structures are
  * stored in intel byte order, and the name_len field could never be
  * bigger than 255 chars, it's safe to reclaim the extra byte for the
-- 
cgit v1.1
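As a footnote to the last patch, a minimal standalone sketch of the block
size arithmetic it validates (the helper name is hypothetical and the code
is not part of the patch series): the on-disk s_log_block_size field encodes
the block size as BLOCK_SIZE << s_log_block_size, so the accepted range of
1KB to 64KB corresponds to shift values 0 through 6.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>		/* BLOCK_SIZE, which is 1024 */

/* Sketch: derive and validate the block size from s_log_block_size. */
static int example_blocksize_from_log(u32 log_block_size)
{
	/* 1024 << 0 == 1KB and 1024 << 6 == 64KB bound the supported range. */
	if (log_block_size > 6)
		return -EINVAL;
	return BLOCK_SIZE << log_block_size;
}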