From 275afcac9953ece0828972edeab9684cfe1a5ef3 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Thu, 19 Jul 2007 01:50:35 -0700
Subject: afs build fix

Bruce and David's patches clashed.

fs/afs/flock.c: In function 'afs_do_getlk':
fs/afs/flock.c:459: error: void value not ignored as it ought to be

Cc: "J. Bruce Fields"
Acked-by: David Howells
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/afs/flock.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'fs/afs')

diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 8f07f8d1bfa9..4f77f3caee97 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -456,7 +456,8 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
 
 	/* check local lock records first */
 	ret = 0;
-	if (posix_test_lock(file, fl) == 0) {
+	posix_test_lock(file, fl);
+	if (fl->fl_type == F_UNLCK) {
 		/* no local locks; consult the server */
 		ret = afs_vnode_fetch_status(vnode, NULL, key);
 		if (ret < 0)
--
cgit v1.2.3

From 20c2df83d25c6a95affe6157a4c9cac4cf5ffaac Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 20 Jul 2007 10:11:58 +0900
Subject: mm: Remove slab destructors from kmem_cache_create().

Slab destructors were no longer supported after Christoph's
c59def9f222d44bb7e2f0a559f2906191a0862d7 change.  They've been BUGs for
both slab and slub, and slob never supported them either.

This rips out support for the dtor pointer from kmem_cache_create()
completely and fixes up every single callsite in the kernel (there were
about 224, not including the slab allocator definitions themselves, or the
documentation references).

Signed-off-by: Paul Mundt
---
 fs/afs/super.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'fs/afs')

diff --git a/fs/afs/super.c b/fs/afs/super.c
index 993cdf1cce3a..b8808b40f82b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -89,8 +89,7 @@ int __init afs_fs_init(void)
 					     sizeof(struct afs_vnode),
 					     0,
 					     SLAB_HWCACHE_ALIGN,
-					     afs_i_init_once,
-					     NULL);
+					     afs_i_init_once);
 	if (!afs_inode_cachep) {
 		printk(KERN_NOTICE "kAFS: Failed to allocate inode cache\n");
 		return ret;
--
cgit v1.2.3
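For reference, a kmem_cache_create() caller after this change supplies only an
init-once constructor and no destructor.  Below is a minimal sketch of that
pattern, not taken from the kernel tree: example_cachep, struct example_obj and
example_init_once are made-up names, and the constructor prototype shown is the
three-argument form used around 2.6.23 (later kernels trimmed it down to
void (*)(void *)).

#include <linux/slab.h>
#include <linux/errno.h>

struct example_obj {
	int	val;
};

static struct kmem_cache *example_cachep;

/* runs once when a new object is first carved out of a slab page */
static void example_init_once(void *_obj, struct kmem_cache *cachep,
			      unsigned long flags)
{
	struct example_obj *obj = _obj;

	obj->val = 0;
}

static int example_cache_init(void)
{
	/* note: no destructor argument any more */
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_obj),
					   0, SLAB_HWCACHE_ALIGN,
					   example_init_once);
	return example_cachep ? 0 : -ENOMEM;
}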
From bd6dc742a4b1945861795a66dc27c65365c5f28e Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 20 Jul 2007 10:59:41 +0100
Subject: AFS: Use patched rxrpc_kernel_send_data() correctly

Fix afs_send_simple_reply() to accept a greater-than-zero return value from
rxrpc_kernel_send_data() as being a successful return rather than thinking
it an error and aborting the call.

rxrpc_kernel_send_data() previously returned zero incorrectly when it worked
successfully, but has been patched to return the number of bytes it
transmitted.

Signed-off-by: David Howells
Signed-off-by: Linus Torvalds
---
 fs/afs/rxrpc.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

(limited to 'fs/afs')

diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 1b36f45076ad..8ccee9ee1d9d 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -792,6 +792,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 {
 	struct msghdr msg;
 	struct iovec iov[1];
+	int n;
 
 	_enter("");
 
@@ -806,22 +807,20 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 	msg.msg_flags		= 0;
 
 	call->state = AFS_CALL_AWAIT_ACK;
-	switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
-	case 0:
+	n = rxrpc_kernel_send_data(call->rxcall, &msg, len);
+	if (n >= 0) {
 		_leave(" [replied]");
 		return;
-
-	case -ENOMEM:
+	}
+	if (n == -ENOMEM) {
 		_debug("oom");
 		rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
-	default:
-		rxrpc_kernel_end_call(call->rxcall);
-		call->rxcall = NULL;
-		call->type->destructor(call);
-		afs_free_call(call);
-		_leave(" [error]");
-		return;
 	}
+	rxrpc_kernel_end_call(call->rxcall);
+	call->rxcall = NULL;
+	call->type->destructor(call);
+	afs_free_call(call);
+	_leave(" [error]");
 }
 
 /*
--
cgit v1.2.3

From ff8e210a9550ad760a62e9803938cd04f9fb0851 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Tue, 31 Jul 2007 00:38:49 -0700
Subject: AFS: fix file locking

Fix file locking for AFS:

 (*) Start the lock manager thread under a mutex to avoid a race.

 (*) Made the locking non-fair: New readlocks will jump pending writelocks
     if there's a readlock currently granted on a file.  This makes the
     behaviour similar to Linux's VFS locking.

Signed-off-by: David Howells
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/afs/flock.c | 126 ++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 79 insertions(+), 47 deletions(-)

(limited to 'fs/afs')

diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 4f77f3caee97..af6952e39a18 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -19,6 +19,7 @@ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
 static void afs_fl_release_private(struct file_lock *fl);
 
 static struct workqueue_struct *afs_lock_manager;
+static DEFINE_MUTEX(afs_lock_manager_mutex);
 
 static struct file_lock_operations afs_lock_ops = {
 	.fl_copy_lock		= afs_fl_copy_lock,
@@ -30,12 +31,20 @@ static struct file_lock_operations afs_lock_ops = {
  */
 static int afs_init_lock_manager(void)
 {
+	int ret;
+
+	ret = 0;
 	if (!afs_lock_manager) {
-		afs_lock_manager = create_singlethread_workqueue("kafs_lockd");
-		if (!afs_lock_manager)
-			return -ENOMEM;
+		mutex_lock(&afs_lock_manager_mutex);
+		if (!afs_lock_manager) {
+			afs_lock_manager =
+				create_singlethread_workqueue("kafs_lockd");
+			if (!afs_lock_manager)
+				ret = -ENOMEM;
+		}
+		mutex_unlock(&afs_lock_manager_mutex);
 	}
-	return 0;
+	return ret;
 }
 
 /*
@@ -67,6 +76,29 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
 			   AFS_LOCKWAIT * HZ / 2);
 }
 
+/*
+ * grant one or more locks (readlocks are allowed to jump the queue if the
+ * first lock in the queue is itself a readlock)
+ * - the caller must hold the vnode lock
+ */
+static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
+{
+	struct file_lock *p, *_p;
+
+	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+	if (fl->fl_type == F_RDLCK) {
+		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
+					 fl_u.afs.link) {
+			if (p->fl_type == F_RDLCK) {
+				p->fl_u.afs.state = AFS_LOCK_GRANTED;
+				list_move_tail(&p->fl_u.afs.link,
+					       &vnode->granted_locks);
+				wake_up(&p->fl_wait);
+			}
+		}
+	}
+}
+
 /*
  * do work for a lock, including:
  *   - probing for a lock we're waiting on but didn't get immediately
@@ -172,8 +204,7 @@ void afs_lock_work(struct work_struct *work)
 				       struct file_lock, fl_u.afs.link) == fl) {
 			fl->fl_u.afs.state = ret;
 			if (ret == AFS_LOCK_GRANTED)
-				list_move_tail(&fl->fl_u.afs.link,
-					       &vnode->granted_locks);
+				afs_grant_locks(vnode, fl);
 			else
 				list_del_init(&fl->fl_u.afs.link);
 			wake_up(&fl->fl_wait);
@@ -258,49 +289,50 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
 
 	spin_lock(&vnode->lock);
 
-	if (list_empty(&vnode->pending_locks)) {
-		/* if there's no-one else with a lock on this vnode, then we
-		 * need to ask the server for a lock */
-		if (list_empty(&vnode->granted_locks)) {
-			_debug("not locked");
-			ASSERTCMP(vnode->flags &
-				  ((1 << AFS_VNODE_LOCKING) |
-				   (1 << AFS_VNODE_READLOCKED) |
-				   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-			list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-			set_bit(AFS_VNODE_LOCKING, &vnode->flags);
-			spin_unlock(&vnode->lock);
+	/* if we've already got a readlock on the server then we can instantly
+	 * grant another readlock, irrespective of whether there are any
+	 * pending writelocks */
+	if (type == AFS_LOCK_READ &&
+	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+		_debug("instant readlock");
+		ASSERTCMP(vnode->flags &
+			  ((1 << AFS_VNODE_LOCKING) |
+			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+		ASSERT(!list_empty(&vnode->granted_locks));
+		goto sharing_existing_lock;
+	}
 
-			ret = afs_vnode_set_lock(vnode, key, type);
-			clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
-			switch (ret) {
-			case 0:
-				goto acquired_server_lock;
-			case -EWOULDBLOCK:
-				spin_lock(&vnode->lock);
-				ASSERT(list_empty(&vnode->granted_locks));
-				ASSERTCMP(vnode->pending_locks.next, ==,
-					  &fl->fl_u.afs.link);
-				goto wait;
-			default:
-				spin_lock(&vnode->lock);
-				list_del_init(&fl->fl_u.afs.link);
-				spin_unlock(&vnode->lock);
-				goto error;
-			}
-		}
+	/* if there's no-one else with a lock on this vnode, then we need to
+	 * ask the server for a lock */
+	if (list_empty(&vnode->pending_locks) &&
+	    list_empty(&vnode->granted_locks)) {
+		_debug("not locked");
+		ASSERTCMP(vnode->flags &
+			  ((1 << AFS_VNODE_LOCKING) |
+			   (1 << AFS_VNODE_READLOCKED) |
+			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
+		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
+		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
+		spin_unlock(&vnode->lock);
 
-		/* if we've already got a readlock on the server and no waiting
-		 * writelocks, then we might be able to instantly grant another
-		 * readlock */
-		if (type == AFS_LOCK_READ &&
-		    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
-			_debug("instant readlock");
-			ASSERTCMP(vnode->flags &
-				  ((1 << AFS_VNODE_LOCKING) |
-				   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
-			ASSERT(!list_empty(&vnode->granted_locks));
-			goto sharing_existing_lock;
+		ret = afs_vnode_set_lock(vnode, key, type);
+		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+		switch (ret) {
+		case 0:
+			_debug("acquired");
+			goto acquired_server_lock;
+		case -EWOULDBLOCK:
+			_debug("would block");
+			spin_lock(&vnode->lock);
+			ASSERT(list_empty(&vnode->granted_locks));
+			ASSERTCMP(vnode->pending_locks.next, ==,
+				  &fl->fl_u.afs.link);
+			goto wait;
+		default:
+			spin_lock(&vnode->lock);
+			list_del_init(&fl->fl_u.afs.link);
+			spin_unlock(&vnode->lock);
+			goto error;
 		}
 	}
--
cgit v1.2.3
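The race fix in afs_init_lock_manager() above is the usual lazy-creation
pattern: an unlocked fast-path check, then a recheck under a mutex so only one
caller can create the workqueue.  A stand-alone sketch of that pattern follows;
example_wq, example_wq_mutex and the "example" queue name are illustrative, and
the unlocked first check is only safe because the pointer is never cleared once
it has been set.

#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;
static DEFINE_MUTEX(example_wq_mutex);

static int example_init_wq(void)
{
	int ret = 0;

	if (!example_wq) {			/* unlocked fast path */
		mutex_lock(&example_wq_mutex);
		if (!example_wq) {		/* recheck under the mutex */
			example_wq =
				create_singlethread_workqueue("example");
			if (!example_wq)
				ret = -ENOMEM;
		}
		mutex_unlock(&example_wq_mutex);
	}
	return ret;
}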
From 1a1a1a758bf0107d1f78ff1d622f45987803d894 Mon Sep 17 00:00:00 2001
From: Andreas Gruenbacher
Date: Tue, 11 Sep 2007 15:23:37 -0700
Subject: afs: mntput called before dput

dput must be called before mntput here.

Signed-off-by: Andreas Gruenbacher
Acked-By: David Howells
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/afs/mntpt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs/afs')

diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index a3684dcc76e7..6f8c96fb29eb 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
 	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
 	switch (err) {
 	case 0:
-		mntput(nd->mnt);
 		dput(nd->dentry);
+		mntput(nd->mnt);
 		nd->mnt = newmnt;
 		nd->dentry = dget(newmnt->mnt_root);
 		schedule_delayed_work(&afs_mntpt_expiry_timer,
--
cgit v1.2.3
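The rule being enforced above is that a dentry reference has to be dropped
before the reference on the vfsmount it was looked up on, since dropping the
mount first could let the underlying filesystem go away while the dentry is
still held.  A minimal sketch of handing a 2.6.23-era nameidata over to a
freshly added mount (example_switch_to_mount and newmnt are illustrative names,
and error handling is omitted):

#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>

static void example_switch_to_mount(struct nameidata *nd,
				    struct vfsmount *newmnt)
{
	dput(nd->dentry);	/* release the dentry first ... */
	mntput(nd->mnt);	/* ... then the mount it was reached through */

	/* the caller hands over its reference on newmnt */
	nd->mnt = newmnt;
	nd->dentry = dget(newmnt->mnt_root);
}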
From 881d966b48b035ab3f3aeaae0f3d3f9b584f45b2 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Mon, 17 Sep 2007 11:56:21 -0700
Subject: [NET]: Make the device list and device lookups per namespace.

This patch makes most of the generic device layer network namespace safe.
This patch makes dev_base_head a network namespace variable, and then it
picks up a few associated variables.  The functions:

	dev_getbyhwaddr
	dev_getfirstbyhwtype
	dev_get_by_flags
	dev_get_by_name
	__dev_get_by_name
	dev_get_by_index
	__dev_get_by_index
	dev_ioctl
	dev_ethtool
	dev_load
	wireless_process_ioctl

were modified to take a network namespace argument, and deal with it.

vlan_ioctl_set and brioctl_set were modified so their hooks will receive a
network namespace argument.

So basically anything in the core of the network stack that was affected by
the change of dev_base was modified to handle multiple network namespaces.
The rest of the network stack was simply modified to explicitly use
&init_net, the initial network namespace.  This can be fixed when those
components of the network stack are modified to handle multiple network
namespaces.

For now the ifindex generator is left global.  Fundamentally ifindex numbers
are per namespace, or else we will have corner case problems with migration
when we get that far.  At the same time there are assumptions in the network
stack that the ifindex of a network device won't change.  Making the ifindex
number global seems a good compromise until the network stack can cope with
ifindex changes when you change namespaces, and the like.

Signed-off-by: Eric W. Biederman
Signed-off-by: David S. Miller
---
 fs/afs/netdevices.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'fs/afs')

diff --git a/fs/afs/netdevices.c b/fs/afs/netdevices.c
index fc27d4b52e5f..49f189423063 100644
--- a/fs/afs/netdevices.c
+++ b/fs/afs/netdevices.c
@@ -8,6 +8,7 @@
 #include <linux/inetdevice.h>
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
+#include <net/net_namespace.h>
 #include "internal.h"
 
 /*
@@ -23,7 +24,7 @@ int afs_get_MAC_address(u8 *mac, size_t maclen)
 		BUG();
 
 	rtnl_lock();
-	dev = __dev_getfirstbyhwtype(ARPHRD_ETHER);
+	dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER);
 	if (dev) {
 		memcpy(mac, dev->dev_addr, maclen);
 		ret = 0;
@@ -47,7 +48,7 @@ int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs,
 	ASSERT(maxbufs > 0);
 
 	rtnl_lock();
-	for_each_netdev(dev) {
+	for_each_netdev(&init_net, dev) {
 		if (dev->type == ARPHRD_LOOPBACK && !wantloopback)
 			continue;
 		idev = __in_dev_get_rtnl(dev);
--
cgit v1.2.3
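After this change every device lookup or list walk names its namespace
explicitly, and unconverted code simply passes &init_net, the initial
namespace.  A minimal sketch of walking that namespace's device list under the
RTNL, in the same style as the fs/afs/netdevices.c hunk above
(example_count_netdevs is an illustrative name):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

static int example_count_netdevs(void)
{
	struct net_device *dev;
	int n = 0;

	rtnl_lock();
	for_each_netdev(&init_net, dev)		/* walk one namespace's list */
		n++;
	rtnl_unlock();

	return n;
}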