From 0587c7c4a88efdcc63a0f952088d38be6a891df6 Mon Sep 17 00:00:00 2001 From: gehujun <1319579758@qq.com> Date: Sat, 21 May 2022 12:19:19 +0800 Subject: [PATCH] f2fs: rebuild compressed extent cache update code for readonly-fs ------------------------ For a readonly format image, we first rebuild the extent update code, and then support caching extents for non-compressed files. The idea is almost the same as the original design. Finally, we remove the limit so that non-compressed files can also use the extent cache. The motivation for the rebuild comes from: a. If the compress config is not enabled, some unnecessary code is built into the target in f2fs_get_dnode_of_data. b. f2fs_cluster_blocks_are_contiguous is called only when updating the compressed extent cache, and is called just once in extent_cache.c. Signed-off-by: Zhang Qilong Signed-off-by: Gewus <1319579758@qq.com> --- fs/ext4/inline.c | 12 ++++ fs/f2fs/extent_cache.c | 127 +++++++++++++++++++++++++++++++++++++++++ fs/f2fs/f2fs.h | 8 ++- fs/f2fs/node.c | 7 +++ 4 files changed, 153 insertions(+), 1 deletion(-) diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index c8f19901a44b..bf70efd24519 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -1981,6 +1981,18 @@ int ext4_convert_inline_data(struct inode *inode) if (!ext4_has_inline_data(inode)) { ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); return 0; + } else if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { + /* + * Inode has inline data but EXT4_STATE_MAY_INLINE_DATA is + * cleared. This means we are in the middle of moving of + * inline data to delay allocated block. Just force writeout + * here to finish conversion. 
+ */ + error = filemap_flush(inode->i_mapping); + if (error) + return error; + if (!ext4_has_inline_data(inode)) + return 0; } needed_blocks = ext4_writepage_trans_blocks(inode); diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 3ebf976a682d..aafb58b3d8bd 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -878,3 +878,130 @@ void f2fs_destroy_extent_cache(void) kmem_cache_destroy(extent_node_slab); kmem_cache_destroy(extent_tree_slab); } + +/* + * check whether cluster blocks are contiguous, and add extent cache entry + * only if cluster blocks are logically and physically contiguous. + */ +static unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) +{ + bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR; + int i = compressed ? 1 : 0; + block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) { + block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr)) + break; + if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr) + return 0; + } + + return compressed ? i - 1 : i; +} +/* + * check whether normal file blocks are contiguous, and add extent cache + * entry only if remaining blocks are logically and physically contiguous. 
+ */ +static unsigned int f2fs_normal_blocks_are_contiguous(struct dnode_of_data *dn) +{ + int i = 0; + struct inode *inode = dn->inode; + block_t first_blkaddr = data_blkaddr(inode, dn->node_page, + dn->ofs_in_node); + unsigned int max_blocks = ADDRS_PER_PAGE(dn->node_page, inode) + - dn->ofs_in_node; + + for (i = 1; i < max_blocks; i++) { + block_t blkaddr = data_blkaddr(inode, dn->node_page, + dn->ofs_in_node + i); + + if (!__is_valid_data_blkaddr(blkaddr) || + first_blkaddr + i != blkaddr) + return i; + } + + return i; +} + +#ifdef CONFIG_F2FS_FS_COMPRESSION +static void f2fs_update_extent_tree_range_compressed(struct inode *inode, + pgoff_t fofs, block_t blkaddr, unsigned int llen, + unsigned int c_len) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct extent_tree *et = F2FS_I(inode)->extent_tree; + struct extent_node *en = NULL; + struct extent_node *prev_en = NULL, *next_en = NULL; + struct extent_info ei; + struct rb_node **insert_p = NULL, *insert_parent = NULL; + bool leftmost = false; + + trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, llen); + + /* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */ + if (is_inode_flag_set(inode, FI_NO_EXTENT)) + return; + + write_lock(&et->lock); + + en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root, + (struct rb_entry *)et->cached_en, fofs, + (struct rb_entry **)&prev_en, + (struct rb_entry **)&next_en, + &insert_p, &insert_parent, false, + &leftmost); + if (en) + goto unlock_out; + + set_extent_info(&ei, fofs, blkaddr, llen); + ei.c_len = c_len; + + if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en)) + { + if(!c_len && llen < F2FS_MIN_EXTENT_LEN) + goto unlock_out; + __insert_extent_tree(sbi, et, &ei, + insert_p, insert_parent, leftmost); + } +unlock_out: + write_unlock(&et->lock); +} +#endif + +void f2fs_readonly_update_extent_cache(struct dnode_of_data *dn, + pgoff_t index) +{ + // unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn); + unsigned int c_len 
= 0; + unsigned int llen = 0; + + block_t blkaddr; + + // if (!c_len) + // return; + + blkaddr = f2fs_data_blkaddr(dn); + // if (blkaddr == COMPRESS_ADDR) + // blkaddr = data_blkaddr(dn->inode, dn->node_page, + // dn->ofs_in_node + 1); + if (f2fs_compressed_file(dn->inode)) { + c_len = f2fs_cluster_blocks_are_contiguous(dn); + if (!c_len) + return; + llen = F2FS_I(dn->inode)->i_cluster_size; + if (blkaddr == COMPRESS_ADDR) + blkaddr = data_blkaddr(dn->inode, dn->node_page, + dn->ofs_in_node + 1); + } else { + llen = f2fs_normal_blocks_are_contiguous(dn); + } + + f2fs_update_extent_tree_range_compressed(dn->inode, + index, blkaddr, + llen, + c_len); +} \ No newline at end of file diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 6c4bf22a3e83..87d6b1a2205b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -166,6 +166,7 @@ struct f2fs_mount_info { #define F2FS_FEATURE_SB_CHKSUM 0x0800 #define F2FS_FEATURE_CASEFOLD 0x1000 #define F2FS_FEATURE_COMPRESSION 0x2000 +#define F2FS_FEATURE_RO 0x4000 #define __F2FS_HAS_FEATURE(raw_super, mask) \ ((raw_super->feature & cpu_to_le32(mask)) != 0) @@ -547,6 +548,9 @@ struct extent_info { unsigned int fofs; /* start offset in a file */ unsigned int len; /* length of the extent */ u32 blk; /* start block address of the extent */ +#ifdef CONFIG_F2FS_FS_COMPRESSION + unsigned int c_len; /* physical extent length of compressed blocks */ +#endif }; struct extent_node { @@ -3799,7 +3803,8 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn, void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); int __init f2fs_create_extent_cache(void); void f2fs_destroy_extent_cache(void); - +void f2fs_readonly_update_extent_cache(struct dnode_of_data *dn, + pgoff_t index); /* * sysfs.c */ @@ -3948,6 +3953,7 @@ F2FS_FEATURE_FUNCS(verity, VERITY); F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); F2FS_FEATURE_FUNCS(casefold, CASEFOLD); F2FS_FEATURE_FUNCS(compression, COMPRESSION); +F2FS_FEATURE_FUNCS(readonly, RO); #ifdef CONFIG_BLK_DEV_ZONED 
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 7e625806bd4a..a485a60a9956 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -810,6 +810,13 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) dn->ofs_in_node = offset[level]; dn->node_page = npage[level]; dn->data_blkaddr = f2fs_data_blkaddr(dn); + + if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) && + f2fs_sb_has_readonly(sbi)) { + f2fs_readonly_update_extent_cache(dn, index); + } + +out: return 0; release_pages: -- Gitee