author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2007-10-12 21:27:47 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2007-10-12 21:27:47 -0400
commit     b981d8b3f5e008ff10d993be633ad00564fc22cd (patch)
tree       e292dc07b22308912cf6a58354a608b9e5e8e1fd /fs/afs
parent     b11d2127c4893a7315d1e16273bc8560049fa3ca (diff)
parent     2b9e0aae1d50e880c58d46788e5e3ebd89d75d62 (diff)

Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	drivers/macintosh/adbhid.c
Diffstat (limited to 'fs/afs')
 -rw-r--r--   fs/afs/flock.c        129
 -rw-r--r--   fs/afs/mntpt.c          2
 -rw-r--r--   fs/afs/netdevices.c     5
 -rw-r--r--   fs/afs/rxrpc.c         21
 -rw-r--r--   fs/afs/super.c          3

 5 files changed, 96 insertions, 64 deletions
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 8f07f8d1bfa9..af6952e39a18 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -19,6 +19,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);
static struct workqueue_struct *afs_lock_manager;
+static DEFINE_MUTEX(afs_lock_manager_mutex);
static struct file_lock_operations afs_lock_ops = {
.fl_copy_lock = afs_fl_copy_lock,
@@ -30,12 +31,20 @@ static struct file_lock_operations afs_lock_ops = {
*/
static int afs_init_lock_manager(void)
{
+ int ret;
+
+ ret = 0;
if (!afs_lock_manager) {
- afs_lock_manager = create_singlethread_workqueue("kafs_lockd");
- if (!afs_lock_manager)
- return -ENOMEM;
+ mutex_lock(&afs_lock_manager_mutex);
+ if (!afs_lock_manager) {
+ afs_lock_manager =
+ create_singlethread_workqueue("kafs_lockd");
+ if (!afs_lock_manager)
+ ret = -ENOMEM;
+ }
+ mutex_unlock(&afs_lock_manager_mutex);
}
- return 0;
+ return ret;
}
/*
@@ -68,6 +77,29 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
}
/*
+ * grant one or more locks (readlocks are allowed to jump the queue if the
+ * first lock in the queue is itself a readlock)
+ * - the caller must hold the vnode lock
+ */
+static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
+{
+ struct file_lock *p, *_p;
+
+ list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+ if (fl->fl_type == F_RDLCK) {
+ list_for_each_entry_safe(p, _p, &vnode->pending_locks,
+ fl_u.afs.link) {
+ if (p->fl_type == F_RDLCK) {
+ p->fl_u.afs.state = AFS_LOCK_GRANTED;
+ list_move_tail(&p->fl_u.afs.link,
+ &vnode->granted_locks);
+ wake_up(&p->fl_wait);
+ }
+ }
+ }
+}
+
+/*
* do work for a lock, including:
* - probing for a lock we're waiting on but didn't get immediately
* - extending a lock that's close to timing out
@@ -172,8 +204,7 @@ void afs_lock_work(struct work_struct *work)
struct file_lock, fl_u.afs.link) == fl) {
fl->fl_u.afs.state = ret;
if (ret == AFS_LOCK_GRANTED)
- list_move_tail(&fl->fl_u.afs.link,
- &vnode->granted_locks);
+ afs_grant_locks(vnode, fl);
else
list_del_init(&fl->fl_u.afs.link);
wake_up(&fl->fl_wait);
@@ -258,49 +289,50 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
spin_lock(&vnode->lock);
- if (list_empty(&vnode->pending_locks)) {
- /* if there's no-one else with a lock on this vnode, then we
- * need to ask the server for a lock */
- if (list_empty(&vnode->granted_locks)) {
- _debug("not locked");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- set_bit(AFS_VNODE_LOCKING, &vnode->flags);
- spin_unlock(&vnode->lock);
+ /* if we've already got a readlock on the server then we can instantly
+ * grant another readlock, irrespective of whether there are any
+ * pending writelocks */
+ if (type == AFS_LOCK_READ &&
+ vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+ _debug("instant readlock");
+ ASSERTCMP(vnode->flags &
+ ((1 << AFS_VNODE_LOCKING) |
+ (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+ ASSERT(!list_empty(&vnode->granted_locks));
+ goto sharing_existing_lock;
+ }
- ret = afs_vnode_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
- switch (ret) {
- case 0:
- goto acquired_server_lock;
- case -EWOULDBLOCK:
- spin_lock(&vnode->lock);
- ASSERT(list_empty(&vnode->granted_locks));
- ASSERTCMP(vnode->pending_locks.next, ==,
- &fl->fl_u.afs.link);
- goto wait;
- default:
- spin_lock(&vnode->lock);
- list_del_init(&fl->fl_u.afs.link);
- spin_unlock(&vnode->lock);
- goto error;
- }
- }
+ /* if there's no-one else with a lock on this vnode, then we need to
+ * ask the server for a lock */
+ if (list_empty(&vnode->pending_locks) &&
+ list_empty(&vnode->granted_locks)) {
+ _debug("not locked");
+ ASSERTCMP(vnode->flags &
+ ((1 << AFS_VNODE_LOCKING) |
+ (1 << AFS_VNODE_READLOCKED) |
+ (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+ list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
+ set_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ spin_unlock(&vnode->lock);
- /* if we've already got a readlock on the server and no waiting
- * writelocks, then we might be able to instantly grant another
- * readlock */
- if (type == AFS_LOCK_READ &&
- vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
- _debug("instant readlock");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- ASSERT(!list_empty(&vnode->granted_locks));
- goto sharing_existing_lock;
+ ret = afs_vnode_set_lock(vnode, key, type);
+ clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ switch (ret) {
+ case 0:
+ _debug("acquired");
+ goto acquired_server_lock;
+ case -EWOULDBLOCK:
+ _debug("would block");
+ spin_lock(&vnode->lock);
+ ASSERT(list_empty(&vnode->granted_locks));
+ ASSERTCMP(vnode->pending_locks.next, ==,
+ &fl->fl_u.afs.link);
+ goto wait;
+ default:
+ spin_lock(&vnode->lock);
+ list_del_init(&fl->fl_u.afs.link);
+ spin_unlock(&vnode->lock);
+ goto error;
}
}
@@ -456,7 +488,8 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
/* check local lock records first */
ret = 0;
- if (posix_test_lock(file, fl) == 0) {
+ posix_test_lock(file, fl);
+ if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
ret = afs_vnode_fetch_status(vnode, NULL, key);
if (ret < 0)
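
The flock.c changes do several things: serialize creation of the "kafs_lockd" workqueue behind a new mutex, add afs_grant_locks() so that granting one readlock also grants any other queued readlocks, reorder afs_do_setlk() so the "instant readlock" short-cut is tested before the empty-queue path, and make afs_do_getlk() check fl->fl_type for F_UNLCK after calling posix_test_lock() rather than testing the call's return value. Read as plain code rather than a diff, the new workqueue initialisation is a classic double-checked pattern; the sketch below is reassembled from the hunks above and is illustrative, not a verbatim copy of the resulting file:

#include <linux/mutex.h>
#include <linux/workqueue.h>

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static int afs_init_lock_manager(void)
{
	int ret = 0;

	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		/* re-check under the mutex so two racing callers cannot
		 * both create the workqueue */
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}
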
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a3684dcc76e7..6f8c96fb29eb 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
switch (err) {
case 0:
- mntput(nd->mnt);
dput(nd->dentry);
+ mntput(nd->mnt);
nd->mnt = newmnt;
nd->dentry = dget(newmnt->mnt_root);
schedule_delayed_work(&afs_mntpt_expiry_timer,
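
The mntpt.c hunk only swaps the order of the two reference drops in the success path of afs_mntpt_follow_link(): the old dentry should be released before the vfsmount that keeps its filesystem pinned, otherwise the final mntput() could tear the filesystem down while a dentry reference is still held. A minimal sketch of that ordering rule, using a hypothetical helper name rather than the AFS code itself:

#include <linux/dcache.h>
#include <linux/mount.h>

/* Hypothetical helper, not part of the patch: drop a dentry/vfsmount
 * pair in the safe order -- dentry first, then the mount pinning it. */
static void drop_path_refs(struct dentry *dentry, struct vfsmount *mnt)
{
	dput(dentry);
	mntput(mnt);
}
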
diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c
index fc27d4b52e5f..49f189423063 100644
--- a/fs/afs/netdevices.c
+++ b/fs/afs/netdevices.c
@@ -8,6 +8,7 @@
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
+#include <net/net_namespace.h>
#include "internal.h"
/*
@@ -23,7 +24,7 @@ int afs_get_MAC_address(u8 *mac, size_t maclen)
BUG();
rtnl_lock();
- dev = __dev_getfirstbyhwtype(ARPHRD_ETHER);
+ dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER);
if (dev) {
memcpy(mac, dev->dev_addr, maclen);
ret = 0;
@@ -47,7 +48,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs,
ASSERT(maxbufs > 0);
rtnl_lock();
- for_each_netdev(dev) {
+ for_each_netdev(&init_net, dev) {
if (dev->type == ARPHRD_LOOPBACK && !wantloopback)
continue;
idev = __in_dev_get_rtnl(dev);
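
The netdevices.c hunk tracks the network-namespace work pulled in by the same merge: __dev_getfirstbyhwtype() and for_each_netdev() now take a struct net argument, and kAFS simply passes &init_net, the initial namespace. A self-contained sketch of the new iteration style, with a hypothetical function name (everything else is the in-tree API of this era):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>

/* Hypothetical example: walk the devices of the initial namespace under
 * the RTNL lock, skipping loopback, much as afs_get_ipv4_interfaces()
 * now does. */
static void example_walk_netdevs(void)
{
	struct net_device *dev;

	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		if (dev->type == ARPHRD_LOOPBACK)
			continue;
		/* inspect dev->name, dev->dev_addr, ... */
	}
	rtnl_unlock();
}
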
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 1b36f45076ad..8ccee9ee1d9d 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -792,6 +792,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
{
struct msghdr msg;
struct iovec iov[1];
+ int n;
_enter("");
@@ -806,22 +807,20 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
msg.msg_flags = 0;
call->state = AFS_CALL_AWAIT_ACK;
- switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
- case 0:
+ n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
+ if (n >= 0) {
_leave(" [replied]");
return;
-
- case -ENOMEM:
+ }
+ if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
- default:
- rxrpc_kernel_end_call(call->rxcall);
- call->rxcall = NULL;
- call->type->destructor(call);
- afs_free_call(call);
- _leave(" [error]");
- return;
}
+ rxrpc_kernel_end_call(call->rxcall);
+ call->rxcall = NULL;
+ call->type->destructor(call);
+ afs_free_call(call);
+ _leave(" [error]");
}
/*
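
The rxrpc.c hunk replaces the switch on rxrpc_kernel_send_data() with explicit return-value tests so that every failure, not just -ENOMEM, falls through to the common call teardown; -ENOMEM additionally aborts the call first, and a non-negative return is treated as a successful reply. Flattened out of the diff, the rewritten tail of afs_send_simple_reply() reads roughly as follows (reassembled from the hunk, not a verbatim copy of the file):

	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
	if (n >= 0) {
		_leave(" [replied]");
		return;
	}
	if (n == -ENOMEM) {
		_debug("oom");
		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
	}
	rxrpc_kernel_end_call(call->rxcall);
	call->rxcall = NULL;
	call->type->destructor(call);
	afs_free_call(call);
	_leave(" [error]");
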
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 993cdf1cce3a..b8808b40f82b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -89,8 +89,7 @@ int __init afs_fs_init(void)
sizeof(struct afs_vnode),
0,
SLAB_HWCACHE_ALIGN,
- afs_i_init_once,
- NULL);
+ afs_i_init_once);
if (!afs_inode_cachep) {
printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
return ret;
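
The super.c hunk adapts to the slab API of this kernel, where kmem_cache_create() takes only a constructor and no longer accepts a destructor argument. The resulting call looks roughly like the sketch below; the cache name string lies outside the hunk shown above, so "afs_inode_cache" here is only a placeholder:

	/* sketch of the ctor-only kmem_cache_create() call shape;
	 * "afs_inode_cache" is a placeholder, the real name string is
	 * not part of the hunk shown above */
	afs_inode_cachep = kmem_cache_create("afs_inode_cache",
					     sizeof(struct afs_vnode),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     afs_i_init_once);
	/* NULL check and error handling as in the hunk above */
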