Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
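For reference, the pattern applied by the scripts is sketched below (purely illustrative; "example_lock" is a placeholder name, not a symbol touched by this patch):

    /* Before: a semaphore (initialized to 1) used as a mutex */
    #include <asm/semaphore.h>

    static DECLARE_MUTEX(example_lock);

    down(&example_lock);              /* sleeps, not interruptible */
    /* ... critical section ... */
    up(&example_lock);

    /* After: a real mutex */
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    mutex_lock(&example_lock);        /* sleeps, not interruptible */
    /* ... critical section ... */
    mutex_unlock(&example_lock);

    /*
     * The interruptible and trylock variants map as:
     *   down_interruptible(&s)   ->  mutex_lock_interruptible(&m)
     *   down_trylock(&s) == 0    ->  mutex_trylock(&m) != 0
     * Note the inverted return convention of the trylock pair
     * (see the unix_gc() hunk below).
     */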
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/in6.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/dst.h>
extern u32 sysctl_xfrm_aevent_etime;
extern u32 sysctl_xfrm_aevent_rseqth;
-extern struct semaphore xfrm_cfg_sem;
+extern struct mutex xfrm_cfg_mutex;
/* Organization of SPD aka "XFRM rules"
------------------------------------
#include <linux/atmmpc.h>
#include <net/atmclip.h>
#include <linux/atmlec.h>
+#include <linux/mutex.h>
#include <asm/ioctls.h>
#include "resources.h"
#include "common.h"
-static DECLARE_MUTEX(ioctl_mutex);
+static DEFINE_MUTEX(ioctl_mutex);
static LIST_HEAD(ioctl_list);
void register_atm_ioctl(struct atm_ioctl *ioctl)
{
- down(&ioctl_mutex);
+ mutex_lock(&ioctl_mutex);
list_add_tail(&ioctl->list, &ioctl_list);
- up(&ioctl_mutex);
+ mutex_unlock(&ioctl_mutex);
}
void deregister_atm_ioctl(struct atm_ioctl *ioctl)
{
- down(&ioctl_mutex);
+ mutex_lock(&ioctl_mutex);
list_del(&ioctl->list);
- up(&ioctl_mutex);
+ mutex_unlock(&ioctl_mutex);
}
EXPORT_SYMBOL(register_atm_ioctl);
error = -ENOIOCTLCMD;
- down(&ioctl_mutex);
+ mutex_lock(&ioctl_mutex);
list_for_each(pos, &ioctl_list) {
struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list);
if (try_module_get(ic->owner)) {
break;
}
}
- up(&ioctl_mutex);
+ mutex_unlock(&ioctl_mutex);
if (error != -ENOIOCTLCMD)
goto done;
#include <linux/wait.h>
#include <linux/device.h>
#include <linux/net.h>
+#include <linux/mutex.h>
+
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
static struct task_struct *rfcomm_thread;
-static DECLARE_MUTEX(rfcomm_sem);
-#define rfcomm_lock() down(&rfcomm_sem);
-#define rfcomm_unlock() up(&rfcomm_sem);
+static DEFINE_MUTEX(rfcomm_mutex);
+#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
+#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
static unsigned long rfcomm_event;
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
* 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
*/
-static DECLARE_MUTEX(net_todo_run_mutex);
+static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
struct list_head list = LIST_HEAD_INIT(list);
/* Need to guard against multiple cpu's getting out of order. */
- down(&net_todo_run_mutex);
+ mutex_lock(&net_todo_run_mutex);
/* Not safe to do outside the semaphore. We must not return
* until all unregister events invoked by the local processor
}
out:
- up(&net_todo_run_mutex);
+ mutex_unlock(&net_todo_run_mutex);
}
/**
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
void flow_cache_flush(void)
{
struct flow_flush_info info;
- static DECLARE_MUTEX(flow_flush_sem);
+ static DEFINE_MUTEX(flow_flush_sem);
/* Don't want cpus going down or up during this. */
lock_cpu_hotplug();
- down(&flow_flush_sem);
+ mutex_lock(&flow_flush_sem);
atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion);
local_bh_enable();
wait_for_completion(&info.completion);
- up(&flow_flush_sem);
+ mutex_unlock(&flow_flush_sem);
unlock_cpu_hotplug();
}
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
+#include <linux/mutex.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/icmp.h>
int users;
};
-static DECLARE_MUTEX(ipcomp_resource_sem);
+static DEFINE_MUTEX(ipcomp_resource_mutex);
static void **ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);
}
/*
- * Must be protected by xfrm_cfg_sem. State and tunnel user references are
+ * Must be protected by xfrm_cfg_mutex. State and tunnel user references are
* always incremented on success.
*/
static int ipcomp_tunnel_attach(struct xfrm_state *x)
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
- down(&ipcomp_resource_sem);
+ mutex_lock(&ipcomp_resource_mutex);
ipcomp_free_data(ipcd);
- up(&ipcomp_resource_sem);
+ mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
}
if (x->props.mode)
x->props.header_len += sizeof(struct iphdr);
- down(&ipcomp_resource_sem);
+ mutex_lock(&ipcomp_resource_mutex);
if (!ipcomp_alloc_scratches())
goto error;
ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms)
goto error;
- up(&ipcomp_resource_sem);
+ mutex_unlock(&ipcomp_resource_mutex);
if (x->props.mode) {
err = ipcomp_tunnel_attach(x);
return err;
error_tunnel:
- down(&ipcomp_resource_sem);
+ mutex_lock(&ipcomp_resource_mutex);
error:
ipcomp_free_data(ipcd);
- up(&ipcomp_resource_sem);
+ mutex_unlock(&ipcomp_resource_mutex);
kfree(ipcd);
goto out;
}
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/route.h>
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl;
static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
struct sk_buff *skb;
unsigned int qlen;
- down(&ipqnl_sem);
+ mutex_lock(&ipqnl_mutex);
for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
skb = skb_dequeue(&sk->sk_receive_queue);
kfree_skb(skb);
}
- up(&ipqnl_sem);
+ mutex_unlock(&ipqnl_mutex);
}
static int
cleanup_ipqnl:
sock_release(ipqnl->sk_socket);
- down(&ipqnl_sem);
- up(&ipqnl_sem);
+ mutex_lock(&ipqnl_mutex);
+ mutex_unlock(&ipqnl_mutex);
cleanup_netlink_notifier:
netlink_unregister_notifier(&ipq_nl_notifier);
#include <linux/skbuff.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/protocol.h>
}
static struct xfrm_tunnel *ipip_handler;
-static DECLARE_MUTEX(xfrm4_tunnel_sem);
+static DEFINE_MUTEX(xfrm4_tunnel_mutex);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler)
{
int ret;
- down(&xfrm4_tunnel_sem);
+ mutex_lock(&xfrm4_tunnel_mutex);
ret = 0;
if (ipip_handler != NULL)
ret = -EINVAL;
if (!ret)
ipip_handler = handler;
- up(&xfrm4_tunnel_sem);
+ mutex_unlock(&xfrm4_tunnel_mutex);
return ret;
}
{
int ret;
- down(&xfrm4_tunnel_sem);
+ mutex_lock(&xfrm4_tunnel_mutex);
ret = 0;
if (ipip_handler != handler)
ret = -EINVAL;
if (!ret)
ipip_handler = NULL;
- up(&xfrm4_tunnel_sem);
+ mutex_unlock(&xfrm4_tunnel_mutex);
synchronize_net();
#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
+#include <linux/mutex.h>
struct ipcomp6_tfms {
struct list_head list;
int users;
};
-static DECLARE_MUTEX(ipcomp6_resource_sem);
+static DEFINE_MUTEX(ipcomp6_resource_mutex);
static void **ipcomp6_scratches;
static int ipcomp6_scratch_users;
static LIST_HEAD(ipcomp6_tfms_list);
if (!ipcd)
return;
xfrm_state_delete_tunnel(x);
- down(&ipcomp6_resource_sem);
+ mutex_lock(&ipcomp6_resource_mutex);
ipcomp6_free_data(ipcd);
- up(&ipcomp6_resource_sem);
+ mutex_unlock(&ipcomp6_resource_mutex);
kfree(ipcd);
xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
if (x->props.mode)
x->props.header_len += sizeof(struct ipv6hdr);
- down(&ipcomp6_resource_sem);
+ mutex_lock(&ipcomp6_resource_mutex);
if (!ipcomp6_alloc_scratches())
goto error;
ipcd->tfms = ipcomp6_alloc_tfms(x->calg->alg_name);
if (!ipcd->tfms)
goto error;
- up(&ipcomp6_resource_sem);
+ mutex_unlock(&ipcomp6_resource_mutex);
if (x->props.mode) {
err = ipcomp6_tunnel_attach(x);
out:
return err;
error_tunnel:
- down(&ipcomp6_resource_sem);
+ mutex_lock(&ipcomp6_resource_mutex);
error:
ipcomp6_free_data(ipcd);
- up(&ipcomp6_resource_sem);
+ mutex_unlock(&ipcomp6_resource_mutex);
kfree(ipcd);
goto out;
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl;
static LIST_HEAD(queue_list);
-static DECLARE_MUTEX(ipqnl_sem);
+static DEFINE_MUTEX(ipqnl_mutex);
static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
struct sk_buff *skb;
unsigned int qlen;
- down(&ipqnl_sem);
+ mutex_lock(&ipqnl_mutex);
for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
skb = skb_dequeue(&sk->sk_receive_queue);
kfree_skb(skb);
}
- up(&ipqnl_sem);
+ mutex_unlock(&ipqnl_mutex);
}
static int
cleanup_ipqnl:
sock_release(ipqnl->sk_socket);
- down(&ipqnl_sem);
- up(&ipqnl_sem);
+ mutex_lock(&ipqnl_mutex);
+ mutex_unlock(&ipqnl_mutex);
cleanup_netlink_notifier:
netlink_unregister_notifier(&ipq_nl_notifier);
#include <net/protocol.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
+#include <linux/mutex.h>
#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG
# define X6TDEBUG 3
}
static struct xfrm6_tunnel *xfrm6_tunnel_handler;
-static DECLARE_MUTEX(xfrm6_tunnel_sem);
+static DEFINE_MUTEX(xfrm6_tunnel_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler)
{
int ret;
- down(&xfrm6_tunnel_sem);
+ mutex_lock(&xfrm6_tunnel_mutex);
ret = 0;
if (xfrm6_tunnel_handler != NULL)
ret = -EINVAL;
if (!ret)
xfrm6_tunnel_handler = handler;
- up(&xfrm6_tunnel_sem);
+ mutex_unlock(&xfrm6_tunnel_mutex);
return ret;
}
{
int ret;
- down(&xfrm6_tunnel_sem);
+ mutex_lock(&xfrm6_tunnel_mutex);
ret = 0;
if (xfrm6_tunnel_handler != handler)
ret = -EINVAL;
if (!ret)
xfrm6_tunnel_handler = NULL;
- up(&xfrm6_tunnel_sem);
+ mutex_unlock(&xfrm6_tunnel_mutex);
synchronize_net();
if (!hdr)
goto out;
- down(&xfrm_cfg_sem);
+ mutex_lock(&xfrm_cfg_mutex);
err = pfkey_process(sk, skb, hdr);
- up(&xfrm_cfg_sem);
+ mutex_unlock(&xfrm_cfg_mutex);
out:
if (err && hdr && pfkey_error(hdr, err, sk) == 0)
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include "nf_internals.h"
/* Sockopts only registered and called from user context, so
net locking would be overkill. Also, [gs]etsockopt calls may
sleep. */
-static DECLARE_MUTEX(nf_sockopt_mutex);
+static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts);
/* Do exclusive ranges overlap? */
struct list_head *i;
int ret = 0;
- if (down_interruptible(&nf_sockopt_mutex) != 0)
+ if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR;
list_for_each(i, &nf_sockopts) {
list_add(&reg->list, &nf_sockopts);
out:
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);
{
/* No point being interruptible: we're probably in cleanup_module() */
restart:
- down(&nf_sockopt_mutex);
+ mutex_lock(&nf_sockopt_mutex);
if (reg->use != 0) {
/* To be woken by nf_sockopt call... */
/* FIXME: Stuart Young's name appears gratuitously. */
set_current_state(TASK_UNINTERRUPTIBLE);
reg->cleanup_task = current;
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
schedule();
goto restart;
}
list_del(&reg->list);
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);
struct nf_sockopt_ops *ops;
int ret;
- if (down_interruptible(&nf_sockopt_mutex) != 0)
+ if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR;
list_for_each(i, &nf_sockopts) {
if (val >= ops->get_optmin
&& val < ops->get_optmax) {
ops->use++;
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
ret = ops->get(sk, val, opt, len);
goto out;
}
if (val >= ops->set_optmin
&& val < ops->set_optmax) {
ops->use++;
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
ret = ops->set(sk, val, opt, *len);
goto out;
}
}
}
}
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
return -ENOPROTOOPT;
out:
- down(&nf_sockopt_mutex);
+ mutex_lock(&nf_sockopt_mutex);
ops->use--;
if (ops->cleanup_task)
wake_up_process(ops->cleanup_task);
- up(&nf_sockopt_mutex);
+ mutex_unlock(&nf_sockopt_mutex);
return ret;
}
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <linux/wanrouter.h>
#include <linux/if_bridge.h>
#include <linux/if_frad.h>
* with module unload.
*/
-static DECLARE_MUTEX(br_ioctl_mutex);
+static DEFINE_MUTEX(br_ioctl_mutex);
static int (*br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
void brioctl_set(int (*hook)(unsigned int, void __user *))
{
- down(&br_ioctl_mutex);
+ mutex_lock(&br_ioctl_mutex);
br_ioctl_hook = hook;
- up(&br_ioctl_mutex);
+ mutex_unlock(&br_ioctl_mutex);
}
EXPORT_SYMBOL(brioctl_set);
-static DECLARE_MUTEX(vlan_ioctl_mutex);
+static DEFINE_MUTEX(vlan_ioctl_mutex);
static int (*vlan_ioctl_hook)(void __user *arg);
void vlan_ioctl_set(int (*hook)(void __user *))
{
- down(&vlan_ioctl_mutex);
+ mutex_lock(&vlan_ioctl_mutex);
vlan_ioctl_hook = hook;
- up(&vlan_ioctl_mutex);
+ mutex_unlock(&vlan_ioctl_mutex);
}
EXPORT_SYMBOL(vlan_ioctl_set);
-static DECLARE_MUTEX(dlci_ioctl_mutex);
+static DEFINE_MUTEX(dlci_ioctl_mutex);
static int (*dlci_ioctl_hook)(unsigned int, void __user *);
void dlci_ioctl_set(int (*hook)(unsigned int, void __user *))
{
- down(&dlci_ioctl_mutex);
+ mutex_lock(&dlci_ioctl_mutex);
dlci_ioctl_hook = hook;
- up(&dlci_ioctl_mutex);
+ mutex_unlock(&dlci_ioctl_mutex);
}
EXPORT_SYMBOL(dlci_ioctl_set);
if (!br_ioctl_hook)
request_module("bridge");
- down(&br_ioctl_mutex);
+ mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook)
err = br_ioctl_hook(cmd, argp);
- up(&br_ioctl_mutex);
+ mutex_unlock(&br_ioctl_mutex);
break;
case SIOCGIFVLAN:
case SIOCSIFVLAN:
if (!vlan_ioctl_hook)
request_module("8021q");
- down(&vlan_ioctl_mutex);
+ mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook)
err = vlan_ioctl_hook(argp);
- up(&vlan_ioctl_mutex);
+ mutex_unlock(&vlan_ioctl_mutex);
break;
case SIOCGIFDIVERT:
case SIOCSIFDIVERT:
request_module("dlci");
if (dlci_ioctl_hook) {
- down(&dlci_ioctl_mutex);
+ mutex_lock(&dlci_ioctl_mutex);
err = dlci_ioctl_hook(cmd, argp);
- up(&dlci_ioctl_mutex);
+ mutex_unlock(&dlci_ioctl_mutex);
}
break;
default:
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
*/
static DEFINE_SPINLOCK(queue_lock);
-static DECLARE_MUTEX(queue_io_sem);
+static DEFINE_MUTEX(queue_io_mutex);
struct cache_queue {
struct list_head list;
if (count == 0)
return 0;
- down(&queue_io_sem); /* protect against multiple concurrent
+ mutex_lock(&queue_io_mutex); /* protect against multiple concurrent
* readers on this file */
again:
spin_lock(&queue_lock);
}
if (rp->q.list.next == &cd->queue) {
spin_unlock(&queue_lock);
- up(&queue_io_sem);
+ mutex_unlock(&queue_io_mutex);
BUG_ON(rp->offset);
return 0;
}
}
if (err == -EAGAIN)
goto again;
- up(&queue_io_sem);
+ mutex_unlock(&queue_io_mutex);
return err ? err : count;
}
-static char write_buf[8192]; /* protected by queue_io_sem */
+static char write_buf[8192]; /* protected by queue_io_mutex */
static ssize_t
cache_write(struct file *filp, const char __user *buf, size_t count,
if (count >= sizeof(write_buf))
return -EINVAL;
- down(&queue_io_sem);
+ mutex_lock(&queue_io_mutex);
if (copy_from_user(write_buf, buf, count)) {
- up(&queue_io_sem);
+ mutex_unlock(&queue_io_mutex);
return -EFAULT;
}
write_buf[count] = '\0';
else
err = -EINVAL;
- up(&queue_io_sem);
+ mutex_unlock(&queue_io_mutex);
return err ? err : count;
}
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>
/*
* rpciod-related stuff
*/
-static DECLARE_MUTEX(rpciod_sema);
+static DEFINE_MUTEX(rpciod_mutex);
static unsigned int rpciod_users;
static struct workqueue_struct *rpciod_workqueue;
struct workqueue_struct *wq;
int error = 0;
- down(&rpciod_sema);
+ mutex_lock(&rpciod_mutex);
dprintk("rpciod_up: users %d\n", rpciod_users);
rpciod_users++;
if (rpciod_workqueue)
rpciod_workqueue = wq;
error = 0;
out:
- up(&rpciod_sema);
+ mutex_unlock(&rpciod_mutex);
return error;
}
void
rpciod_down(void)
{
- down(&rpciod_sema);
+ mutex_lock(&rpciod_mutex);
dprintk("rpciod_down sema %d\n", rpciod_users);
if (rpciod_users) {
if (--rpciod_users)
destroy_workqueue(rpciod_workqueue);
rpciod_workqueue = NULL;
out:
- up(&rpciod_sema);
+ mutex_unlock(&rpciod_mutex);
}
#ifdef RPC_DEBUG
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
+#include <linux/mutex.h>
#include <net/sock.h>
#include <net/af_unix.h>
void unix_gc(void)
{
- static DECLARE_MUTEX(unix_gc_sem);
+ static DEFINE_MUTEX(unix_gc_sem);
int i;
struct sock *s;
struct sk_buff_head hitlist;
* Avoid a recursive GC.
*/
- if (down_trylock(&unix_gc_sem))
+ if (!mutex_trylock(&unix_gc_sem))
return;
spin_lock(&unix_table_lock);
*/
__skb_queue_purge(&hitlist);
- up(&unix_gc_sem);
+ mutex_unlock(&unix_gc_sem);
}
#include <net/xfrm.h>
#include <net/ip.h>
-DECLARE_MUTEX(xfrm_cfg_sem);
-EXPORT_SYMBOL(xfrm_cfg_sem);
+DEFINE_MUTEX(xfrm_cfg_mutex);
+EXPORT_SYMBOL(xfrm_cfg_mutex);
static DEFINE_RWLOCK(xfrm_policy_lock);
unsigned int qlen = 0;
do {
- down(&xfrm_cfg_sem);
+ mutex_lock(&xfrm_cfg_mutex);
netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
- up(&xfrm_cfg_sem);
+ mutex_unlock(&xfrm_cfg_mutex);
} while (qlen);
}