Commit ef38ff9d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (49 commits)
  [GFS2] fix assertion in log_refund()
  [GFS2] fix GFP_KERNEL misuses
  [GFS2] test for IS_ERR rather than 0
  [GFS2] Invalidate cache at correct point
  [GFS2] fs/gfs2/recovery.c: suppress warnings
  [GFS2] Faster gfs2_bitfit algorithm
  [GFS2] Streamline quota lock/check for no-quota case
  [GFS2] Remove drop of module ref where not needed
  [GFS2] gfs2_adjust_quota has broken unstuffing code
  [GFS2] possible null pointer dereference fixup
  [GFS2] Need to ensure that sector_t is 64bits for GFS2
  [GFS2] re-support special inode
  [GFS2] remove gfs2_dev_iops
  [GFS2] fix file_system_type leak on gfs2meta mount
  [GFS2] Allow bmap to allocate extents
  [GFS2] Fix a page lock / glock deadlock
  [GFS2] proper extern for gfs2/locking/dlm/mount.c:gdlm_ops
  [GFS2] gfs2/ops_file.c should #include "ops_inode.h"
  [GFS2] be*_add_cpu conversion
  [GFS2] Fix bug where we called drop_bh incorrectly
  ...
parents fda31d7d 62be1f71
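Many of the hunks below swap GFP_KERNEL for GFP_NOFS (the "fix GFS2] fix GFP_KERNEL misuses" commit in the list above): an allocation made while GFS2 holds glocks or an open transaction must not let the allocator recurse into filesystem writeback to reclaim memory, or it can deadlock against the locks already held. A minimal sketch of the pattern; the helper name is hypothetical and not taken from this diff:

#include <linux/slab.h>

/* Hypothetical helper, illustration only: allocate a scratch buffer on a
 * path that may already hold GFS2 locks or a transaction.  GFP_NOFS lets
 * the allocator sleep but forbids it from re-entering filesystem code to
 * reclaim memory; GFP_KERNEL would allow that and risk deadlock. */
static void *gfs2_scratch_alloc(size_t len)
{
	return kmalloc(len, GFP_NOFS);
}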
config GFS2_FS
tristate "GFS2 file system support"
depends on EXPERIMENTAL
depends on EXPERIMENTAL && (64BIT || (LSF && LBD))
select FS_POSIX_ACL
select CRC32
help
......
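The tightened dependency, 64BIT || (LSF && LBD), corresponds to the "Need to ensure that sector_t is 64bits for GFS2" commit: on 32-bit kernels it is CONFIG_LBD that widens sector_t to 64 bits (and CONFIG_LSF the related blkcnt_t). The same requirement could be expressed as a compile-time assertion; this is an illustration only, not part of the diff:

#include <linux/types.h>
#include <linux/kernel.h>

/* Illustration only: GFS2 block numbers are 64-bit on disk, so the
 * in-kernel sector_t must also be 64 bits wide.  The Kconfig dependency
 * above enforces this at configuration time instead. */
static inline void gfs2_require_64bit_sectors(void)
{
	BUILD_BUG_ON(sizeof(sector_t) != sizeof(u64));
}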
obj-$(CONFIG_GFS2_FS) += gfs2.o
gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \
glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
ops_fstype.o ops_inode.o ops_super.o quota.o \
recovery.o rgrp.o super.o sys.o trans.o util.o
......
......@@ -116,7 +116,7 @@ static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl,
goto out;
er.er_data_len = GFS2_EA_DATA_LEN(el->el_ea);
er.er_data = kmalloc(er.er_data_len, GFP_KERNEL);
er.er_data = kmalloc(er.er_data_len, GFP_NOFS);
error = -ENOMEM;
if (!er.er_data)
goto out;
......@@ -222,7 +222,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
return error;
}
clone = posix_acl_clone(acl, GFP_KERNEL);
clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)
goto out;
......@@ -272,7 +272,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
if (!acl)
return gfs2_setattr_simple(ip, attr);
clone = posix_acl_clone(acl, GFP_KERNEL);
clone = posix_acl_clone(acl, GFP_NOFS);
error = -ENOMEM;
if (!clone)
goto out;
......
......@@ -159,6 +159,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
unsigned int o;
int copied = 0;
int error = 0;
int new = 0;
if (!size)
return 0;
......@@ -183,7 +184,6 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
while (copied < size) {
unsigned int amount;
struct buffer_head *bh;
int new = 0;
amount = size - copied;
if (amount > sdp->sd_sb.sb_bsize - o)
......@@ -757,7 +757,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf;
unsigned hsize = 1 << ip->i_di.di_depth;
unsigned hsize = 1 << ip->i_depth;
unsigned index;
u64 ln;
if (hsize * sizeof(u64) != ip->i_di.di_size) {
......@@ -765,7 +765,7 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
return ERR_PTR(-EIO);
}
index = name->hash >> (32 - ip->i_di.di_depth);
index = name->hash >> (32 - ip->i_depth);
error = get_first_leaf(ip, index, &bh);
if (error)
return ERR_PTR(error);
......@@ -803,14 +803,15 @@ got_dent:
static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
struct gfs2_inode *ip = GFS2_I(inode);
u64 bn = gfs2_alloc_meta(ip);
unsigned int n = 1;
u64 bn = gfs2_alloc_block(ip, &n);
struct buffer_head *bh = gfs2_meta_new(ip->i_gl, bn);
struct gfs2_leaf *leaf;
struct gfs2_dirent *dent;
struct qstr name = { .name = "", .len = 0, .hash = 0 };
if (!bh)
return NULL;
gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
leaf = (struct gfs2_leaf *)bh->b_data;
......@@ -905,12 +906,11 @@ static int dir_make_exhash(struct inode *inode)
*lp = cpu_to_be64(bn);
dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
dip->i_di.di_blocks++;
gfs2_set_inode_blocks(&dip->i_inode);
gfs2_add_inode_blocks(&dip->i_inode, 1);
dip->i_di.di_flags |= GFS2_DIF_EXHASH;
for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
dip->i_di.di_depth = y;
dip->i_depth = y;
gfs2_dinode_out(dip, dibh->b_data);
......@@ -941,7 +941,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
int x, moved = 0;
int error;
index = name->hash >> (32 - dip->i_di.di_depth);
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
if (error)
return error;
......@@ -952,7 +952,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
return error;
oleaf = (struct gfs2_leaf *)obh->b_data;
if (dip->i_di.di_depth == be16_to_cpu(oleaf->lf_depth)) {
if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
brelse(obh);
return 1; /* can't split */
}
......@@ -967,10 +967,10 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
bn = nbh->b_blocknr;
/* Compute the start and len of leaf pointers in the hash table. */
len = 1 << (dip->i_di.di_depth - be16_to_cpu(oleaf->lf_depth));
len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1;
if (!half_len) {
printk(KERN_WARNING "di_depth %u lf_depth %u index %u\n", dip->i_di.di_depth, be16_to_cpu(oleaf->lf_depth), index);
printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
gfs2_consist_inode(dip);
error = -EIO;
goto fail_brelse;
......@@ -997,7 +997,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
kfree(lp);
/* Compute the divider */
divider = (start + half_len) << (32 - dip->i_di.di_depth);
divider = (start + half_len) << (32 - dip->i_depth);
/* Copy the entries */
dirent_first(dip, obh, &dent);
......@@ -1021,13 +1021,13 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
new->de_inum = dent->de_inum; /* No endian worries */
new->de_type = dent->de_type; /* No endian worries */
nleaf->lf_entries = cpu_to_be16(be16_to_cpu(nleaf->lf_entries)+1);
be16_add_cpu(&nleaf->lf_entries, 1);
dirent_del(dip, obh, prev, dent);
if (!oleaf->lf_entries)
gfs2_consist_inode(dip);
oleaf->lf_entries = cpu_to_be16(be16_to_cpu(oleaf->lf_entries)-1);
be16_add_cpu(&oleaf->lf_entries, -1);
if (!prev)
prev = dent;
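The two lf_entries changes just above come from the "be*_add_cpu conversion" commit: the open-coded convert, add, convert-back sequence on a big-endian on-disk counter is replaced by a single helper. Roughly equivalent forms, sketched with a standalone counter for illustration:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustration only: bump a big-endian on-disk counter by one. */
static void bump_counter(__be16 *ctr)
{
	/* Open-coded form being removed: */
	/* *ctr = cpu_to_be16(be16_to_cpu(*ctr) + 1); */

	/* Helper form being introduced; be16_add_cpu() performs the same
	 * byte-order round trip internally. */
	be16_add_cpu(ctr, 1);
}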
......@@ -1044,8 +1044,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
error = gfs2_meta_inode_buffer(dip, &dibh);
if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
gfs2_trans_add_bh(dip->i_gl, dibh, 1);
dip->i_di.di_blocks++;
gfs2_set_inode_blocks(&dip->i_inode);
gfs2_add_inode_blocks(&dip->i_inode, 1);
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
}
......@@ -1082,7 +1081,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
int x;
int error = 0;
hsize = 1 << dip->i_di.di_depth;
hsize = 1 << dip->i_depth;
if (hsize * sizeof(u64) != dip->i_di.di_size) {
gfs2_consist_inode(dip);
return -EIO;
......@@ -1090,7 +1089,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
/* Allocate both the "from" and "to" buffers in one big chunk */
buf = kcalloc(3, sdp->sd_hash_bsize, GFP_KERNEL | __GFP_NOFAIL);
buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
error = gfs2_dir_read_data(dip, (char *)buf,
......@@ -1125,7 +1124,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
error = gfs2_meta_inode_buffer(dip, &dibh);
if (!gfs2_assert_withdraw(sdp, !error)) {
dip->i_di.di_depth++;
dip->i_depth++;
gfs2_dinode_out(dip, dibh->b_data);
brelse(dibh);
}
......@@ -1370,16 +1369,16 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
int error = 0;
unsigned depth = 0;
hsize = 1 << dip->i_di.di_depth;
hsize = 1 << dip->i_depth;
if (hsize * sizeof(u64) != dip->i_di.di_size) {
gfs2_consist_inode(dip);
return -EIO;
}
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_di.di_depth);
index = hash >> (32 - dip->i_depth);
lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
if (!lp)
return -ENOMEM;
......@@ -1405,7 +1404,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
if (error)
break;
len = 1 << (dip->i_di.di_depth - depth);
len = 1 << (dip->i_depth - depth);
index = (index & ~(len - 1)) + len;
}
......@@ -1444,7 +1443,7 @@ int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
error = -ENOMEM;
/* 96 is max number of dirents which can be stuffed into an inode */
darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_KERNEL);
darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
if (darr) {
g.pdent = darr;
g.offset = 0;
......@@ -1549,7 +1548,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
u32 index;
u64 bn;
index = name->hash >> (32 - ip->i_di.di_depth);
index = name->hash >> (32 - ip->i_depth);
error = get_first_leaf(ip, index, &obh);
if (error)
return error;
......@@ -1579,8 +1578,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, 1);
gfs2_dinode_out(ip, bh->b_data);
brelse(bh);
return 0;
......@@ -1616,7 +1614,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
dent->de_type = cpu_to_be16(type);
if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
leaf = (struct gfs2_leaf *)bh->b_data;
leaf->lf_entries = cpu_to_be16(be16_to_cpu(leaf->lf_entries) + 1);
be16_add_cpu(&leaf->lf_entries, 1);
}
brelse(bh);
error = gfs2_meta_inode_buffer(ip, &bh);
......@@ -1641,7 +1639,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
continue;
if (error < 0)
break;
if (ip->i_di.di_depth < GFS2_DIR_MAX_DEPTH) {
if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
error = dir_double_exhash(ip);
if (error)
break;
......@@ -1785,13 +1783,13 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
u64 leaf_no;
int error = 0;
hsize = 1 << dip->i_di.di_depth;
hsize = 1 << dip->i_depth;
if (hsize * sizeof(u64) != dip->i_di.di_size) {
gfs2_consist_inode(dip);
return -EIO;
}
lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
if (!lp)
return -ENOMEM;
......@@ -1817,7 +1815,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
if (error)
goto out;
leaf = (struct gfs2_leaf *)bh->b_data;
len = 1 << (dip->i_di.di_depth - be16_to_cpu(leaf->lf_depth));
len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
brelse(bh);
error = lc(dip, index, len, leaf_no, data);
......@@ -1866,15 +1864,18 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
ht = kzalloc(size, GFP_KERNEL);
ht = kzalloc(size, GFP_NOFS);
if (!ht)
return -ENOMEM;
gfs2_alloc_get(dip);
if (!gfs2_alloc_get(dip)) {
error = -ENOMEM;
goto out;
}
error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
goto out;
goto out_put;
error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
if (error)
......@@ -1894,7 +1895,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++;
}
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd;
......@@ -1921,11 +1922,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
brelse(bh);
gfs2_free_meta(dip, blk, 1);
if (!dip->i_di.di_blocks)
gfs2_consist_inode(dip);
dip->i_di.di_blocks--;
gfs2_set_inode_blocks(&dip->i_inode);
gfs2_add_inode_blocks(&dip->i_inode, -1);
}
error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
......@@ -1952,8 +1949,9 @@ out_rlist:
gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
out_qs:
gfs2_quota_unhold(dip);
out:
out_put:
gfs2_alloc_put(dip);
out:
kfree(ht);
return error;
}
......
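Both this file and the extended-attribute code that follows replace the open-coded block accounting (bump or drop i_di.di_blocks, check for underflow, then resync the VFS view with gfs2_set_inode_blocks()) with one signed-delta call, gfs2_add_inode_blocks(). Its internals are not shown in this diff; the sketch below is an assumed shape only, built from the calls visible in these hunks and named with a _sketch suffix to make clear it is not the tree's version. It would live in fs/gfs2 alongside its internal headers.

/* Assumed shape, illustration only: apply a signed block-count change to
 * the inode and keep the underflow check the old open-coded sites had. */
static inline void gfs2_add_inode_blocks_sketch(struct inode *inode, s64 change)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (change < 0 && ip->i_di.di_blocks < -change)
		gfs2_consist_inode(ip);		/* underflow: flag corruption */
	ip->i_di.di_blocks += change;
	gfs2_set_inode_blocks(&ip->i_inode);	/* mirror into the VFS inode */
}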
......@@ -277,10 +277,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
}
*dataptrs = 0;
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
......@@ -321,6 +318,8 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
......@@ -449,7 +448,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
unsigned int x;
int error = 0;
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
if (!bh)
return -ENOMEM;
......@@ -582,10 +581,11 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_header *ea;
unsigned int n = 1;
u64 block;
block = gfs2_alloc_meta(ip);
block = gfs2_alloc_block(ip, &n);
gfs2_trans_add_unrevoke(sdp, block, 1);
*bhp = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
......@@ -597,8 +597,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
ea->ea_flags = GFS2_EAFLAG_LAST;
ea->ea_num_ptrs = 0;
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, 1);
return 0;
}
......@@ -642,15 +641,15 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
struct buffer_head *bh;
u64 block;
int mh_size = sizeof(struct gfs2_meta_header);
unsigned int n = 1;
block = gfs2_alloc_meta(ip);
block = gfs2_alloc_block(ip, &n);
gfs2_trans_add_unrevoke(sdp, block, 1);
bh = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, 1);
copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
data_len;
......@@ -684,15 +683,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
error = gfs2_quota_lock_check(ip);
if (error)
goto out;
error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
if (error)
goto out_gunlock_q;
al->al_requested = blks;
error = gfs2_inplace_reserve(ip);
......@@ -966,9 +963,9 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
} else {
u64 blk;
blk = gfs2_alloc_meta(ip);
unsigned int n = 1;
blk = gfs2_alloc_block(ip, &n);
gfs2_trans_add_unrevoke(sdp, blk, 1);
indbh = gfs2_meta_new(ip->i_gl, blk);
gfs2_trans_add_bh(ip->i_gl, indbh, 1);
gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
......@@ -978,8 +975,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
*eablk = cpu_to_be64(ip->i_di.di_eattr);
ip->i_di.di_eattr = blk;
ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
ip->i_di.di_blocks++;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, 1);
eablk++;
}
......@@ -1210,7 +1206,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
unsigned int x;
int error;
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
if (!bh)
return -ENOMEM;
......@@ -1347,7 +1343,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else
goto out;
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd;
......@@ -1387,10 +1383,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
}
*eablk = 0;
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart)
gfs2_free_meta(ip, bstart, blen);
......@@ -1442,10 +1435,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
ip->i_di.di_eattr = 0;
if (!ip->i_di.di_blocks)
gfs2_consist_inode(ip);
ip->i_di.di_blocks--;
gfs2_set_inode_blocks(&ip->i_inode);
gfs2_add_inode_blocks(&ip->i_inode, -1);
error = gfs2_meta_inode_buffer(ip, &dibh);
if (!error) {
......@@ -1474,6 +1464,8 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
int error;
al = gfs2_alloc_get(ip);
if (!al)
return -ENOMEM;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
......
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
......@@ -35,7 +35,6 @@
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
......@@ -183,7 +182,8 @@ static void glock_free(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct inode *aspace = gl->gl_aspace;
gfs2_lm_put_lock(sdp, gl->gl_lock);
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
if (aspace)
gfs2_aspace_put(aspace);
......@@ -197,7 +197,7 @@ static void glock_free(struct gfs2_glock *gl)
*
*/
void gfs2_glock_hold(struct gfs2_glock *gl)
static void gfs2_glock_hold(struct gfs2_glock *gl)
{
atomic_inc(&gl->gl_ref);
}
......@@ -293,6 +293,16 @@ static void glock_work_func(struct work_struct *work)
gfs2_glock_put(gl);
}
static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
void **lockp)
{
int error = -EIO;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
sdp->sd_lockstruct.ls_lockspace, name, lockp);
return error;
}
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
......@@ -338,8 +348,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_ip = 0;
gl->gl_ops = glops;
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
......@@ -595,11 +603,12 @@ static void run_queue(struct gfs2_glock *gl)
blocked = rq_mutex(gh);
} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
blocked = rq_demote(gl);
if (gl->gl_waiters2 && !blocked) {
if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
!blocked) {
set_bit(GLF_DEMOTE, &gl->gl_flags);
gl->gl_demote_state = LM_ST_UNLOCKED;
}
gl->gl_waiters2 = 0;
clear_bit(GLF_WAITERS2, &gl->gl_flags);
} else if (!list_empty(&gl->gl_waiters3)) {
gh = list_entry(gl->gl_waiters3.next,
struct gfs2_holder, gh_list);
......@@ -710,7 +719,7 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != state) {
if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
gl->gl_waiters2 = 1;
set_bit(GLF_WAITERS2, &gl->gl_flags);
else
gl->gl_demote_state = LM_ST_UNLOCKED;
}
......@@ -742,6 +751,43 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl->gl_tchange = jiffies;
}
/**
* drop_bh - Called after a lock module unlock completes
* @gl: the glock
* @ret: the return status
*
* Doesn't wake up the process waiting on the struct gfs2_holder (if any)
* Doesn't drop the reference on the glock the top half took out
*
*/
static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct gfs2_holder *gh = gl->gl_req_gh;
gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
gfs2_assert_warn(sdp, !ret);