Semaphore to mutex conversion.
The conversion was generated via scripts, and the result was validated
automatically via a script as well.
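The pattern the scripts apply is mechanical; for reference, a minimal
sketch of the before/after idiom (the 'demo' lock is hypothetical, not
taken from any file touched below):

	/* before: a semaphore used as a mutex */
	static DECLARE_MUTEX(demo);	/* a semaphore initialized to 1 */

	down(&demo);			/* sleeps uninterruptibly */
	/* ... critical section ... */
	up(&demo);

	/* after: a real mutex */
	static DEFINE_MUTEX(demo);

	mutex_lock(&demo);
	/* ... critical section ... */
	mutex_unlock(&demo);

down_interruptible() maps to mutex_lock_interruptible() in the same way.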
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
struct list_head sk_deferred; /* deferred requests that need to
* be revisited */
- struct semaphore sk_sem; /* to serialize sending data */
+ struct mutex sk_mutex; /* to serialize sending data */
int (*sk_recvfrom)(struct svc_rqst *rqstp);
int (*sk_sendto)(struct svc_rqst *rqstp);
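The field conversion above needs a matching initializer change, which the
patch makes further down (sema_init(&svsk->sk_sem, 1) becomes
mutex_init(&svsk->sk_mutex)). A minimal sketch of the embedded-mutex
shape, using hypothetical demo_* names:

	#include <linux/mutex.h>

	struct demo_sock {
		struct mutex	d_mutex;	/* serializes sending */
	};

	static void demo_sock_init(struct demo_sock *ds)
	{
		mutex_init(&ds->d_mutex);	/* was sema_init(&..., 1) */
	}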
#include <linux/config.h>
#include <linux/socket.h>
#include <linux/un.h>
+#include <linux/mutex.h>
#include <net/sock.h>
extern void unix_inflight(struct file *fp);
struct unix_address *addr;
struct dentry *dentry;
struct vfsmount *mnt;
- struct semaphore readsem;
+ struct mutex readlock;
struct sock *peer;
struct sock *other;
struct sock *gc_tree;
dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf);
} else {
dev = NULL;
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
dev = list_entry(atm_devs.next, struct atm_dev, dev_list);
atm_dev_hold(dev);
}
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
}
if (!dev)
return -ENODEV;
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/delay.h>
+#include <linux/mutex.h>
+
#include <net/sock.h> /* for struct sock */
#include "common.h"
LIST_HEAD(atm_devs);
-DECLARE_MUTEX(atm_dev_mutex);
+DEFINE_MUTEX(atm_dev_mutex);
static struct atm_dev *__alloc_atm_dev(const char *type)
{
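Note the rename: DECLARE_MUTEX(), despite its name, defined a struct
semaphore initialized to 1, whereas DEFINE_MUTEX() defines a struct mutex.
A sketch of the definition idiom with hypothetical names (atm_dev_mutex
itself stays non-static, since a header hunk below declares it extern):

	#include <linux/mutex.h>

	static LIST_HEAD(demo_devs);		/* hypothetical device list */
	static DEFINE_MUTEX(demo_dev_mutex);	/* protects demo_devs */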
{
struct atm_dev *dev;
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
dev = __atm_dev_lookup(number);
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
return dev;
}
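The converted lookup paths share one shape: take the mutex, find and
reference the object, drop the mutex before returning, so the caller ends
up holding a refcount rather than the lock. A condensed sketch built from
the two atm_dev hunks above (demo_* names are hypothetical):

	static struct demo_dev *demo_dev_get_first(void)
	{
		struct demo_dev *dev = NULL;

		mutex_lock(&demo_dev_mutex);
		if (!list_empty(&demo_devs)) {
			dev = list_entry(demo_devs.next, struct demo_dev,
					 dev_list);
			demo_dev_hold(dev);	/* take the ref under the lock */
		}
		mutex_unlock(&demo_dev_mutex);
		return dev;		/* caller holds a reference, no lock */
	}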
type);
return NULL;
}
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
if (number != -1) {
if ((inuse = __atm_dev_lookup(number))) {
atm_dev_put(inuse);
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
kfree(dev);
return NULL;
}
printk(KERN_ERR "atm_dev_register: "
"atm_proc_dev_register failed for dev %s\n",
type);
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
kfree(dev);
return NULL;
}
list_add_tail(&dev->dev_list, &atm_devs);
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
return dev;
}
* with the same number can appear, so we need to deregister proc,
* release all vccs asynchronously and remove them from the vccs list too
*/
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
list_del(&dev->dev_list);
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
atm_dev_release_vccs(dev);
atm_proc_dev_deregister(dev);
return -EFAULT;
if (get_user(len, &iobuf->length))
return -EFAULT;
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
list_for_each(p, &atm_devs)
size += sizeof(int);
if (size > len) {
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
return -E2BIG;
}
tmp_buf = kmalloc(size, GFP_ATOMIC);
if (!tmp_buf) {
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
return -ENOMEM;
}
tmp_p = tmp_buf;
dev = list_entry(p, struct atm_dev, dev_list);
*tmp_p++ = dev->number;
}
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
error = ((copy_to_user(buf, tmp_buf, size)) ||
put_user(size, &iobuf->length))
? -EFAULT : 0;
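The ioctl path keeps a useful property through the conversion: the list
is sized and snapshotted into a temporary buffer with the mutex held, but
copy_to_user() runs only after the unlock, so no user-space fault is taken
under the lock. A condensed sketch with hypothetical names (the original
keeps GFP_ATOMIC; under a mutex a sleeping GFP_KERNEL allocation would
also be legal):

	mutex_lock(&demo_dev_mutex);
	list_for_each(p, &demo_devs)
		size += sizeof(int);
	tmp_buf = kmalloc(size, GFP_KERNEL);	/* sleeping is fine here */
	if (!tmp_buf) {
		mutex_unlock(&demo_dev_mutex);
		return -ENOMEM;
	}
	/* ... fill tmp_buf from the list ... */
	mutex_unlock(&demo_dev_mutex);
	error = copy_to_user(buf, tmp_buf, size) ? -EFAULT : 0;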
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
- down(&atm_dev_mutex);
+ mutex_lock(&atm_dev_mutex);
return *pos ? dev_get_idx(*pos) : (void *) 1;
}
void atm_dev_seq_stop(struct seq_file *seq, void *v)
{
- up(&atm_dev_mutex);
+ mutex_unlock(&atm_dev_mutex);
}
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
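The seq_file callbacks hold the mutex across the whole iteration:
->start() takes it and ->stop() releases it, and seq_file guarantees that
->stop() is called after every successful ->start(). A minimal sketch with
hypothetical demo_* names (SEQ_START_TOKEN expands to the (void *) 1 the
original returns):

	static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
	{
		mutex_lock(&demo_dev_mutex);	/* held until ->stop() */
		return *pos ? demo_get_idx(*pos) : SEQ_START_TOKEN;
	}

	static void demo_seq_stop(struct seq_file *seq, void *v)
	{
		mutex_unlock(&demo_dev_mutex);
	}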
#include <linux/config.h>
#include <linux/atmdev.h>
+#include <linux/mutex.h>
extern struct list_head atm_devs;
-extern struct semaphore atm_dev_mutex;
+extern struct mutex atm_dev_mutex;
int atm_dev_ioctl(unsigned int cmd, void __user *arg);
#define ASSERT_READ_LOCK(x)
#define ASSERT_WRITE_LOCK(x)
#include <linux/netfilter_ipv4/listhelp.h>
+#include <linux/mutex.h>
#if 0
/* use this for remote debugging
-static DECLARE_MUTEX(ebt_mutex);
+static DEFINE_MUTEX(ebt_mutex);
static LIST_HEAD(ebt_tables);
static LIST_HEAD(ebt_targets);
static LIST_HEAD(ebt_matches);
/* If it succeeds, returns the element and locks the mutex */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
- struct semaphore *mutex)
+ struct mutex *mutex)
{
void *ret;
- *error = down_interruptible(mutex);
+ *error = mutex_lock_interruptible(mutex);
if (*error != 0)
return NULL;
ret = list_named_find(head, name);
if (!ret) {
*error = -ENOENT;
- up(mutex);
+ mutex_unlock(mutex);
}
return ret;
}
#else
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
- int *error, struct semaphore *mutex)
+ int *error, struct mutex *mutex)
{
void *ret;
#endif
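down_interruptible() and mutex_lock_interruptible() share the same
contract: 0 on success, -EINTR when a signal cuts the sleep short, so the
error handling above carries over unchanged. A condensed sketch of the
lock-and-find helper (demo_find_lock is a hypothetical stand-in):

	static void *demo_find_lock(struct list_head *head, const char *name,
				    int *error, struct mutex *mutex)
	{
		void *ret;

		*error = mutex_lock_interruptible(mutex);
		if (*error != 0)		/* -EINTR: interrupted */
			return NULL;

		ret = list_named_find(head, name);
		if (!ret) {
			*error = -ENOENT;
			mutex_unlock(mutex);	/* unlock only on failure */
		}
		return ret;	/* on success the mutex stays held */
	}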
static inline struct ebt_table *
-find_table_lock(const char *name, int *error, struct semaphore *mutex)
+find_table_lock(const char *name, int *error, struct mutex *mutex)
{
return find_inlist_lock(&ebt_tables, name, "ebtable_", error, mutex);
}
static inline struct ebt_match *
-find_match_lock(const char *name, int *error, struct semaphore *mutex)
+find_match_lock(const char *name, int *error, struct mutex *mutex)
{
return find_inlist_lock(&ebt_matches, name, "ebt_", error, mutex);
}
static inline struct ebt_watcher *
-find_watcher_lock(const char *name, int *error, struct semaphore *mutex)
+find_watcher_lock(const char *name, int *error, struct mutex *mutex)
{
return find_inlist_lock(&ebt_watchers, name, "ebt_", error, mutex);
}
static inline struct ebt_target *
-find_target_lock(const char *name, int *error, struct semaphore *mutex)
+find_target_lock(const char *name, int *error, struct mutex *mutex)
{
return find_inlist_lock(&ebt_targets, name, "ebt_", error, mutex);
}
return ret;
m->u.match = match;
if (!try_module_get(match->me)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return -ENOENT;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
if (match->check &&
match->check(name, hookmask, e, m->data, m->match_size) != 0) {
BUGPRINT("match->check failed\n");
return ret;
w->u.watcher = watcher;
if (!try_module_get(watcher->me)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return -ENOENT;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
if (watcher->check &&
watcher->check(name, hookmask, e, w->data, w->watcher_size) != 0) {
BUGPRINT("watcher->check failed\n");
if (!target)
goto cleanup_watchers;
if (!try_module_get(target->me)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
ret = -ENOENT;
goto cleanup_watchers;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
t->u.target = target;
if (t->u.target == &ebt_standard_target) {
t->private = newinfo;
write_unlock_bh(&t->lock);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
/* so, a user can change the chains while having messed up her counter
allocation. The only reason this is done is that this way the lock
is held only once, while this doesn't bring the kernel into a
return ret;
free_unlock:
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
free_iterate:
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
ebt_cleanup_entry, NULL);
{
int ret;
- ret = down_interruptible(&ebt_mutex);
+ ret = mutex_lock_interruptible(&ebt_mutex);
if (ret != 0)
return ret;
if (!list_named_insert(&ebt_targets, target)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return -EEXIST;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return 0;
}
void ebt_unregister_target(struct ebt_target *target)
{
- down(&ebt_mutex);
+ mutex_lock(&ebt_mutex);
LIST_DELETE(&ebt_targets, target);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
}
int ebt_register_match(struct ebt_match *match)
{
int ret;
- ret = down_interruptible(&ebt_mutex);
+ ret = mutex_lock_interruptible(&ebt_mutex);
if (ret != 0)
return ret;
if (!list_named_insert(&ebt_matches, match)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return -EEXIST;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return 0;
}
void ebt_unregister_match(struct ebt_match *match)
{
- down(&ebt_mutex);
+ mutex_lock(&ebt_mutex);
LIST_DELETE(&ebt_matches, match);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
}
int ebt_register_watcher(struct ebt_watcher *watcher)
{
int ret;
- ret = down_interruptible(&ebt_mutex);
+ ret = mutex_lock_interruptible(&ebt_mutex);
if (ret != 0)
return ret;
if (!list_named_insert(&ebt_watchers, watcher)) {
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return -EEXIST;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return 0;
}
void ebt_unregister_watcher(struct ebt_watcher *watcher)
{
- down(&ebt_mutex);
+ mutex_lock(&ebt_mutex);
LIST_DELETE(&ebt_watchers, watcher);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
}
int ebt_register_table(struct ebt_table *table)
table->private = newinfo;
rwlock_init(&table->lock);
- ret = down_interruptible(&ebt_mutex);
+ ret = mutex_lock_interruptible(&ebt_mutex);
if (ret != 0)
goto free_chainstack;
goto free_unlock;
}
list_prepend(&ebt_tables, table);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
return 0;
free_unlock:
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
free_chainstack:
if (newinfo->chainstack) {
for_each_cpu(i)
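The registration error paths keep the usual goto unwind: everything after
a successful mutex_lock_interruptible() funnels through a label that drops
the mutex before freeing. A condensed sketch (demo names and the collision
check are hypothetical):

	ret = mutex_lock_interruptible(&demo_mutex);
	if (ret != 0)
		goto free_chainstack;		/* lock was never taken */
	if (demo_table_exists(table)) {		/* hypothetical check */
		ret = -EEXIST;
		goto free_unlock;
	}
	list_prepend(&demo_tables, table);
	mutex_unlock(&demo_mutex);
	return 0;

free_unlock:
	mutex_unlock(&demo_mutex);		/* drop the lock first */
free_chainstack:
	/* ... free the per-cpu chainstacks ... */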
BUGPRINT("Request to unregister NULL table!!!\n");
return;
}
- down(&ebt_mutex);
+ mutex_lock(&ebt_mutex);
LIST_DELETE(&ebt_tables, table);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
vfree(table->private->entries);
if (table->private->chainstack) {
for_each_cpu(i)
write_unlock_bh(&t->lock);
ret = 0;
unlock_mutex:
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
free_tmp:
vfree(tmp);
return ret;
return 0;
}
-/* called with ebt_mutex down */
+/* called with ebt_mutex locked */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
int *len, int cmd)
{
case EBT_SO_GET_INIT_INFO:
if (*len != sizeof(struct ebt_replace)){
ret = -EINVAL;
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
break;
}
if (cmd == EBT_SO_GET_INFO) {
tmp.entries_size = t->table->entries_size;
tmp.valid_hooks = t->table->valid_hooks;
}
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
if (copy_to_user(user, &tmp, *len) != 0){
BUGPRINT("c2u Didn't work\n");
ret = -EFAULT;
case EBT_SO_GET_ENTRIES:
case EBT_SO_GET_INIT_ENTRIES:
ret = copy_everything_to_user(t, user, len, cmd);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
break;
default:
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
ret = -EINVAL;
}
{
int ret;
- down(&ebt_mutex);
+ mutex_lock(&ebt_mutex);
list_named_insert(&ebt_targets, &ebt_standard_target);
- up(&ebt_mutex);
+ mutex_unlock(&ebt_mutex);
if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
return ret;
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
#include <net/ip_vs.h>
/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
-static DECLARE_MUTEX(__ip_vs_app_mutex);
+static DEFINE_MUTEX(__ip_vs_app_mutex);
/*
{
int result;
- down(&__ip_vs_app_mutex);
+ mutex_lock(&__ip_vs_app_mutex);
result = ip_vs_app_inc_new(app, proto, port);
- up(&__ip_vs_app_mutex);
+ mutex_unlock(&__ip_vs_app_mutex);
return result;
}
/* increase the module use count */
ip_vs_use_count_inc();
- down(&__ip_vs_app_mutex);
+ mutex_lock(&__ip_vs_app_mutex);
list_add(&app->a_list, &ip_vs_app_list);
- up(&__ip_vs_app_mutex);
+ mutex_unlock(&__ip_vs_app_mutex);
return 0;
}
{
struct ip_vs_app *inc, *nxt;
- down(&__ip_vs_app_mutex);
+ mutex_lock(&__ip_vs_app_mutex);
list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
ip_vs_app_inc_release(inc);
list_del(&app->a_list);
- up(&__ip_vs_app_mutex);
+ mutex_unlock(&__ip_vs_app_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
- down(&__ip_vs_app_mutex);
+ mutex_lock(&__ip_vs_app_mutex);
return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
- up(&__ip_vs_app_mutex);
+ mutex_unlock(&__ip_vs_app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
#include <linux/init.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <asm/uaccess.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/cpumask.h>
DEFINE_RWLOCK(nf_ct_cache_lock);
/* This avoids calling kmem_cache_create() with the same name simultaneously */
-DECLARE_MUTEX(nf_ct_cache_mutex);
+static DEFINE_MUTEX(nf_ct_cache_mutex);
extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
struct nf_conntrack_protocol *
return -EINVAL;
}
- down(&nf_ct_cache_mutex);
+ mutex_lock(&nf_ct_cache_mutex);
write_lock_bh(&nf_ct_cache_lock);
/* e.g: multiple helpers are loaded */
ret = -EBUSY;
write_unlock_bh(&nf_ct_cache_lock);
- up(&nf_ct_cache_mutex);
+ mutex_unlock(&nf_ct_cache_mutex);
return ret;
}
write_unlock_bh(&nf_ct_cache_lock);
out_free_name:
kfree(cache_name);
out_up_mutex:
- up(&nf_ct_cache_mutex);
+ mutex_unlock(&nf_ct_cache_mutex);
return ret;
}
* slab cache.
*/
DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
- down(&nf_ct_cache_mutex);
+ mutex_lock(&nf_ct_cache_mutex);
write_lock_bh(&nf_ct_cache_lock);
if (--nf_ct_cache[features].use > 0) {
write_unlock_bh(&nf_ct_cache_lock);
- up(&nf_ct_cache_mutex);
+ mutex_unlock(&nf_ct_cache_mutex);
return;
}
cachep = nf_ct_cache[features].cachep;
kmem_cache_destroy(cachep);
kfree(name);
- up(&nf_ct_cache_mutex);
+ mutex_unlock(&nf_ct_cache_mutex);
}
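nf_conntrack splits the protection in two: nf_ct_cache_mutex serializes
the slow create/destroy path, where kmem_cache_create() can sleep and thus
needs a sleeping lock, while the nf_ct_cache_lock rwlock keeps guarding
the cache table itself for the fast path. A hedged sketch of that split
(demo_* names hypothetical):

	static DEFINE_MUTEX(demo_cache_mutex);	/* slow path, may sleep */
	static DEFINE_RWLOCK(demo_cache_lock);	/* fast path, atomic ctx */

	static int demo_register_cache(unsigned int features)
	{
		mutex_lock(&demo_cache_mutex);	/* serialize creators */
		write_lock_bh(&demo_cache_lock);
		/* ... install the new cache in the table ... */
		write_unlock_bh(&demo_cache_lock);
		mutex_unlock(&demo_cache_mutex);
		return 0;
	}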
int
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/mutex.h>
+
/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
* in ip_conntrack_core, since we don't want the protocols to autoload
xb->page_len +
xb->tail[0].iov_len;
- /* Grab svsk->sk_sem to serialize outgoing data. */
- down(&svsk->sk_sem);
+ /* Grab svsk->sk_mutex to serialize outgoing data. */
+ mutex_lock(&svsk->sk_mutex);
if (test_bit(SK_DEAD, &svsk->sk_flags))
len = -ENOTCONN;
else
len = svsk->sk_sendto(rqstp);
- up(&svsk->sk_sem);
+ mutex_unlock(&svsk->sk_mutex);
svc_sock_release(rqstp);
if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
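sk_mutex is held across sk_sendto(), which can sleep inside the network
stack; that is what makes a mutex (like the semaphore before it) the right
lock here, where a spinlock could not be used. The converted send path,
condensed with comments:

	mutex_lock(&svsk->sk_mutex);		/* one sender per socket */
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);	/* may sleep */
	mutex_unlock(&svsk->sk_mutex);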
svsk->sk_lastrecv = get_seconds();
INIT_LIST_HEAD(&svsk->sk_deferred);
INIT_LIST_HEAD(&svsk->sk_ready);
- sema_init(&svsk->sk_sem, 1);
+ mutex_init(&svsk->sk_mutex);
/* Initialize the socket */
if (sock->type == SOCK_DGRAM)
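The initializer mapping is uniform: sema_init(sem, 1) and init_MUTEX(sem)
are two spellings of the same binary-semaphore setup, and both become
mutex_init(). Both forms occur in this patch:

	/* before */
	sema_init(&svsk->sk_sem, 1);
	init_MUTEX(&u->readsem);

	/* after */
	mutex_init(&svsk->sk_mutex);
	mutex_init(&u->readlock);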
u->mnt = NULL;
spin_lock_init(&u->lock);
atomic_set(&u->inflight, sock ? 0 : -1);
- init_MUTEX(&u->readsem); /* single task reading lock */
+ mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
unix_insert_socket(unix_sockets_unbound, sk);
out:
struct unix_address * addr;
int err;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
err = 0;
if (u->addr)
spin_unlock(&unix_table_lock);
err = 0;
-out: up(&u->readsem);
+out: mutex_unlock(&u->readlock);
return err;
}
goto out;
addr_len = err;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
err = -EINVAL;
if (u->addr)
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
msg->msg_namelen = 0;
- down(&u->readsem);
+ mutex_lock(&u->readlock);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
out_free:
skb_free_datagram(sk,skb);
out_unlock:
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
out:
return err;
}
memset(&tmp_scm, 0, sizeof(tmp_scm));
}
- down(&u->readsem);
+ mutex_lock(&u->readlock);
do
{
err = -EAGAIN;
if (!timeo)
break;
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo);
err = sock_intr_errno(timeo);
goto out;
}
- down(&u->readsem);
+ mutex_lock(&u->readlock);
continue;
}
}
} while (size);
- up(&u->readsem);
+ mutex_unlock(&u->readlock);
scm_recv(sock, msg, siocb->scm, flags);
out:
return copied ? : err;
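The stream-receive loop above is the one non-trivial locking shape in the
patch: readlock is dropped before blocking in unix_stream_data_wait() and
retaken afterwards, so a sleeping reader never holds the lock while
waiting for data. A condensed sketch (the empty-queue test is paraphrased
as a hypothetical helper; 'out' is the function's existing exit label):

	mutex_lock(&u->readlock);
	do {
		if (demo_queue_empty(sk)) {	/* hypothetical check */
			err = -EAGAIN;
			if (!timeo)
				break;
			mutex_unlock(&u->readlock);  /* don't sleep locked */
			timeo = unix_stream_data_wait(sk, timeo);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			mutex_lock(&u->readlock);    /* retake and retry */
			continue;
		}
		/* ... copy a chunk to user space ... */
	} while (size);
	mutex_unlock(&u->readlock);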