Commit 5dd4056d authored by Christoph Hellwig, committed by Jan Kara

dquot: cleanup space allocation / freeing routines

Get rid of the alloc_space, free_space, reserve_space, claim_space and
release_rsv dquot operations - they are always called from the filesystem
and if a filesystem really needs its own (which none currently does)
it can just call into its own routine directly.

Move shared logic into the common __dquot_alloc_space,
dquot_claim_space_nodirty and __dquot_free_space low-level methods,
and rationalize the wrappers around it to move as much as possible
code into the common block for CONFIG_QUOTA vs not.  Also rename
all these helpers to be named dquot_* instead of vfs_dq_*.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
parent 49792c80
......@@ -462,9 +462,7 @@ in sys_read() and friends.
prototypes:
int (*initialize) (struct inode *, int);
int (*drop) (struct inode *);
int (*alloc_space) (struct inode *, qsize_t, int);
int (*alloc_inode) (const struct inode *, unsigned long);
int (*free_space) (struct inode *, qsize_t);
int (*free_inode) (const struct inode *, unsigned long);
int (*transfer) (struct inode *, struct iattr *);
int (*write_dquot) (struct dquot *);
......@@ -481,9 +479,7 @@ What filesystem should expect from the generic quota functions:
FS recursion Held locks when called
initialize: yes maybe dqonoff_sem
drop: yes -
alloc_space: ->mark_dirty() -
alloc_inode: ->mark_dirty() -
free_space: ->mark_dirty() -
free_inode: ->mark_dirty() -
transfer: yes -
write_dquot: yes dqonoff_sem or dqptr_sem
......@@ -495,7 +491,7 @@ write_info: yes dqonoff_sem
FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.
->alloc_space(), ->alloc_inode(), ->free_space(), ->free_inode() are called
->alloc_inode(), ->free_inode() are called
only directly by the filesystem and do not call any fs functions only
the ->mark_dirty() operation.
......
......@@ -570,7 +570,7 @@ do_more:
error_return:
brelse(bitmap_bh);
release_blocks(sb, freed);
vfs_dq_free_block(inode, freed);
dquot_free_block(inode, freed);
}
/**
......@@ -1236,6 +1236,7 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
unsigned short windowsz = 0;
unsigned long ngroups;
unsigned long num = *count;
int ret;
*errp = -ENOSPC;
sb = inode->i_sb;
......@@ -1247,8 +1248,9 @@ ext2_fsblk_t ext2_new_blocks(struct inode *inode, ext2_fsblk_t goal,
/*
* Check quota for allocation of this block.
*/
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
ret = dquot_alloc_block(inode, num);
if (ret) {
*errp = ret;
return 0;
}
......@@ -1409,7 +1411,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
vfs_dq_free_block(inode, *count-num);
dquot_free_block(inode, *count-num);
*count = num;
return ret_block;
......@@ -1420,7 +1422,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
vfs_dq_free_block(inode, *count);
dquot_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
......
......@@ -644,8 +644,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
the inode. */
ea_bdebug(new_bh, "reusing block");
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1)) {
error = dquot_alloc_block(inode, 1);
if (error) {
unlock_buffer(new_bh);
goto cleanup;
}
......@@ -702,7 +702,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* as if nothing happened and cleanup the unused block */
if (error && error != -ENOSPC) {
if (new_bh && new_bh != old_bh)
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
}
} else
......@@ -734,7 +734,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
if (ce)
mb_cache_entry_release(ce);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
le32_to_cpu(HDR(old_bh)->h_refcount));
......@@ -797,7 +797,7 @@ ext2_xattr_delete_inode(struct inode *inode)
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
......
......@@ -676,7 +676,7 @@ void ext3_free_blocks(handle_t *handle, struct inode *inode,
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
if (dquot_freed_blocks)
vfs_dq_free_block(inode, dquot_freed_blocks);
dquot_free_block(inode, dquot_freed_blocks);
return;
}
......@@ -1502,8 +1502,9 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
/*
* Check quota for allocation of this block.
*/
if (vfs_dq_alloc_block(inode, num)) {
*errp = -EDQUOT;
err = dquot_alloc_block(inode, num);
if (err) {
*errp = err;
return 0;
}
......@@ -1713,7 +1714,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
vfs_dq_free_block(inode, *count-num);
dquot_free_block(inode, *count-num);
*count = num;
return ret_block;
......@@ -1728,7 +1729,7 @@ out:
* Undo the block allocation
*/
if (!performed_allocation)
vfs_dq_free_block(inode, *count);
dquot_free_block(inode, *count);
brelse(bitmap_bh);
return 0;
}
......
......@@ -3336,7 +3336,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, vfs_dq_alloc_space() will always dirty the inode when blocks
* Also, dquot_alloc_space() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
......
......@@ -752,9 +752,7 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
static const struct dquot_operations ext3_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = ext3_write_dquot,
......
......@@ -500,7 +500,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
......@@ -775,8 +775,8 @@ inserted:
else {
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1))
error = dquot_alloc_block(inode, 1);
if (error)
goto cleanup;
error = ext3_journal_get_write_access(handle,
new_bh);
......@@ -850,7 +850,7 @@ cleanup:
return error;
cleanup_dquot:
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
bad_block:
......
......@@ -1093,9 +1093,9 @@ void ext4_da_update_reserve_space(struct inode *inode,
/* Update quota subsystem */
if (quota_claim) {
vfs_dq_claim_block(inode, used);
dquot_claim_block(inode, used);
if (mdb_free)
vfs_dq_release_reservation_block(inode, mdb_free);
dquot_release_reservation_block(inode, mdb_free);
} else {
/*
* We did fallocate with an offset that is already delayed
......@@ -1106,8 +1106,8 @@ void ext4_da_update_reserve_space(struct inode *inode,
* that
*/
if (allocated_meta_blocks)
vfs_dq_claim_block(inode, allocated_meta_blocks);
vfs_dq_release_reservation_block(inode, mdb_free + used);
dquot_claim_block(inode, allocated_meta_blocks);
dquot_release_reservation_block(inode, mdb_free + used);
}
/*
......@@ -1836,6 +1836,7 @@ static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned long md_needed, md_reserved;
int ret;
/*
* recalculate the amount of metadata blocks to reserve
......@@ -1853,11 +1854,12 @@ repeat:
* later. Real quota accounting is done at pages writeout
* time.
*/
if (vfs_dq_reserve_block(inode, md_needed + 1))
return -EDQUOT;
ret = dquot_reserve_block(inode, md_needed + 1);
if (ret)
return ret;
if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
vfs_dq_release_reservation_block(inode, md_needed + 1);
dquot_release_reservation_block(inode, md_needed + 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
......@@ -1914,7 +1916,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
vfs_dq_release_reservation_block(inode, to_free);
dquot_release_reservation_block(inode, to_free);
}
static void ext4_da_page_release_reservation(struct page *page,
......@@ -5641,7 +5643,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
* i_size has been changed by generic_commit_write() and we thus need
* to include the updated inode in the current transaction.
*
* Also, vfs_dq_alloc_block() will always dirty the inode when blocks
* Also, dquot_alloc_block() will always dirty the inode when blocks
* are allocated to the file.
*
* If the inode is marked synchronous, we don't honour that here - doing
......
......@@ -4254,7 +4254,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
ar->len--;
}
......@@ -4331,7 +4331,7 @@ out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
if (inquota && ar->len < inquota)
vfs_dq_free_block(ar->inode, inquota - ar->len);
dquot_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
......@@ -4646,7 +4646,7 @@ do_more:
sb->s_dirt = 1;
error_return:
if (freed)
vfs_dq_free_block(inode, freed);
dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
if (ac)
......
......@@ -1014,15 +1014,10 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static const struct dquot_operations ext4_quota_operations = {
.initialize = dquot_initialize,
.drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.reserve_space = dquot_reserve_space,
.claim_space = dquot_claim_space,
.release_rsv = dquot_release_reserved_space,
#ifdef CONFIG_QUOTA
.get_reserved_space = ext4_get_reserved_space,
#endif
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
.transfer = dquot_transfer,
.write_dquot = ext4_write_dquot,
......
......@@ -494,7 +494,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
error = ext4_handle_dirty_metadata(handle, inode, bh);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
if (ce)
......@@ -787,8 +787,8 @@ inserted:
else {
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (vfs_dq_alloc_block(inode, 1))
error = dquot_alloc_block(inode, 1);
if (error)
goto cleanup;
error = ext4_journal_get_write_access(handle,
new_bh);
......@@ -876,7 +876,7 @@ cleanup:
return error;
cleanup_dquot:
vfs_dq_free_block(inode, 1);
dquot_free_block(inode, 1);
goto cleanup;
bad_block:
......
......@@ -381,10 +381,10 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
if (vfs_dq_alloc_block(ip, sbi->nbperpage))
if (dquot_alloc_block(ip, sbi->nbperpage))
goto clean_up;
if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
vfs_dq_free_block(ip, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
......@@ -408,7 +408,7 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
dbFree(ip, xaddr, sbi->nbperpage);
vfs_dq_free_block(ip, sbi->nbperpage);
dquot_free_block(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
......@@ -1027,10 +1027,9 @@ static int dtSplitUp(tid_t tid,
n = xlen;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, n)) {
rc = -EDQUOT;
rc = dquot_alloc_block(ip, n);
if (rc)
goto extendOut;
}
quota_allocation += n;
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
......@@ -1308,7 +1307,7 @@ static int dtSplitUp(tid_t tid,
/* Rollback quota allocation */
if (rc && quota_allocation)
vfs_dq_free_block(ip, quota_allocation);
dquot_free_block(ip, quota_allocation);
dtSplitUp_Exit:
......@@ -1369,9 +1368,10 @@ static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
return -EIO;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
......@@ -1892,6 +1892,7 @@ static int dtSplitRoot(tid_t tid,
struct dt_lock *dtlck;
struct tlock *tlck;
struct lv *lv;
int rc;
/* get split root page */
smp = split->mp;
......@@ -1916,9 +1917,10 @@ static int dtSplitRoot(tid_t tid,
rp = rmp->data;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
BT_MARK_DIRTY(rmp, ip);
......@@ -2287,7 +2289,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&fp->header.self);
/* Free quota allocation. */
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(fmp);
......@@ -2363,7 +2365,7 @@ static int dtDeleteUp(tid_t tid, struct inode *ip,
xlen = lengthPXD(&p->header.self);
/* Free quota allocation */
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
/* free/invalidate its buffer page */
discard_metapage(mp);
......
......@@ -141,10 +141,11 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
}
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, nxlen)) {
rc = dquot_alloc_block(ip, nxlen);
if (rc) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
return rc;
}
/* determine the value of the extent flag */
......@@ -164,7 +165,7 @@ extAlloc(struct inode *ip, s64 xlen, s64 pno, xad_t * xp, bool abnr)
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
}
......@@ -256,10 +257,11 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
goto exit;
/* Allocat blocks to quota. */
if (vfs_dq_alloc_block(ip, nxlen)) {
rc = dquot_alloc_block(ip, nxlen);
if (rc) {
dbFree(ip, nxaddr, (s64) nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
return rc;
}
delta = nxlen - xlen;
......@@ -297,7 +299,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
goto exit;
}
} else {
......@@ -308,7 +310,7 @@ int extRealloc(struct inode *ip, s64 nxlen, xad_t * xp, bool abnr)
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
vfs_dq_free_block(ip, nxlen);
dquot_free_block(ip, nxlen);
goto exit;
}
}
......
......@@ -585,10 +585,10 @@ int xtInsert(tid_t tid, /* transaction id */
hint = addressXAD(xad) + lengthXAD(xad) - 1;
} else
hint = 0;
if ((rc = vfs_dq_alloc_block(ip, xlen)))
if ((rc = dquot_alloc_block(ip, xlen)))
goto out;
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
goto out;
}
}
......@@ -617,7 +617,7 @@ int xtInsert(tid_t tid, /* transaction id */
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
vfs_dq_free_block(ip, xlen);
dquot_free_block(ip, xlen);
}
return rc;
}
......@@ -985,10 +985,9 @@ xtSplitPage(tid_t tid, struct inode *ip,
rbn = addressPXD(pxd);
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = -EDQUOT;
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc)
goto clean_up;
}
quota_allocation += lengthPXD(pxd);
......@@ -1195,7 +1194,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
/* Rollback quota allocation. */
if (quota_allocation)
vfs_dq_free_block(ip, quota_allocation);
dquot_free_block(ip, quota_allocation);
return (rc);
}
......@@ -1235,6 +1234,7 @@ xtSplitRoot(tid_t tid,
struct pxdlist *pxdlist;
struct tlock *tlck;
struct xtlock *xtlck;
int rc;
sp = &JFS_IP(ip)->i_xtroot;
......@@ -1252,9 +1252,10 @@ xtSplitRoot(tid_t tid,
return -EIO;
/* Allocate blocks to quota. */
if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) {
rc = dquot_alloc_block(ip, lengthPXD(pxd));
if (rc) {
release_metapage(rmp);
return -EDQUOT;
return rc;
}
jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
......@@ -3680,7 +3681,7 @@ s64 xtTruncate(tid_t tid, struct inode *ip, s64 newsize, int flag)
ip->i_size = newsize;
/* update quota allocation to reflect freed blocks */
vfs_dq_free_block(ip, nfreed);
dquot_free_block(ip, nfreed);
/*
* free tlock of invalidated pages
......
......@@ -260,14 +260,14 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;
/* Allocate new blocks to quota. */
if (vfs_dq_alloc_block(ip, nblocks)) {
return -EDQUOT;
}
rc = dquot_alloc_block(ip, nblocks);
if (rc)
return rc;
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
/*Rollback quota allocation. */
vfs_dq_free_block(ip, nblocks);
dquot_free_block(ip, nblocks);
return rc;
}
......@@ -332,7 +332,7 @@ static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
failed:
/* Rollback quota allocation. */
vfs_dq_free_block(ip, nblocks);
dquot_free_block(ip, nblocks);
dbFree(ip, blkno, nblocks);
return rc;
......@@ -538,7 +538,8 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (blocks_needed > current_blocks) {
/* Allocate new blocks to quota. */
if (vfs_dq_alloc_block(inode, blocks_needed))
rc = dquot_alloc_block(inode, blocks_needed);
if (rc)
return -EDQUOT;
quota_allocation = blocks_needed;
......@@ -602,7 +603,7 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
clean_up:
/* Rollback quota allocation */
if (quota_allocation)
vfs_dq_free_block(inode, quota_allocation);
dquot_free_block(inode, quota_allocation);
return (rc);
}
......@@ -677,7 +678,7 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
/* If old blocks exist, they must be removed from quota allocation. */
if (old_blocks)
vfs_dq_free_block(inode, old_blocks);
dquot_free_block(inode, old_blocks);
inode->i_ctime = CURRENT_TIME;
......
......@@ -5712,7 +5712,7 @@ int ocfs2_remove_btree_range(struct inode *inode,
goto out;
}
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(inode->i_sb, len));
ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
......@@ -6935,7 +6935,7 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
goto bail;
}
vfs_dq_free_space_nodirty(inode,
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, clusters_to_del));
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
......@@ -7300,11 +7300,10 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,