* dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronizes shrink_icache_memory())
* inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
- * inotify_dev->sem (protects inotify_device and watches->d_list)
+ * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
+ * inotify_dev->mutex (protects inotify_device and watches->d_list)
*/
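/*
 * Illustrative sketch (not part of this patch): the nesting order the
 * hierarchy above implies once the conversion below lands. The lock names
 * mirror the real call sites later in this file; the function itself is
 * hypothetical.
 */
static void example_lock_order(struct inode *inode,
			       struct inotify_device *dev)
{
	mutex_lock(&inode->inotify_mutex);	/* outer: per-inode lock */
	mutex_lock(&dev->mutex);		/* inner: per-device lock */
	/* ... touch inode->inotify_watches and dev->watches here ... */
	mutex_unlock(&dev->mutex);
	mutex_unlock(&inode->inotify_mutex);
}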
/*
* struct inotify_device - represents an inotify instance
*
- * This structure is protected by the semaphore 'sem'.
+ * This structure is protected by the mutex 'mutex'.
*/
struct inotify_device {
wait_queue_head_t wq; /* wait queue for i/o */
struct idr idr; /* idr mapping wd -> watch */
- struct semaphore sem; /* protects this bad boy */
+ struct mutex mutex; /* protects this bad boy */
struct list_head events; /* list of queued events */
struct list_head watches; /* list of watches */
atomic_t count; /* reference count */
* device. In read(), this list is walked and all events that can fit in the
* buffer are returned.
*
- * Protected by dev->sem of the device in which we are queued.
+ * Protected by dev->mutex of the device in which we are queued.
*/
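/*
 * Userspace-facing consequence of the above (an illustrative sketch, not
 * from this patch): a single read() returns as many variable-length
 * events as fit in the caller's buffer, which is walked record by record:
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		handle_event(ev);
 *		p += sizeof(*ev) + ev->len;
 *	}
 *
 * handle_event() and inotify_fd are hypothetical placeholders.
 */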
struct inotify_kernel_event {
struct inotify_event event; /* the user-space event */
/*
* struct inotify_watch - represents a watch request on a specific inode
*
- * d_list is protected by dev->sem of the associated watch->dev.
- * i_list and mask are protected by inode->inotify_sem of the associated inode.
+ * d_list is protected by dev->mutex of the associated watch->dev.
+ * i_list and mask are protected by inode->inotify_mutex of the associated inode.
* dev, inode, and wd are never written to once the watch is created.
*/
struct inotify_watch {
/*
* inotify_dev_get_event - return the next event in the given dev's queue
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
/*
* inotify_dev_queue_event - add a new event to the given device
*
- * Caller must hold dev->sem. Can sleep (calls kernel_event()).
+ * Caller must hold dev->mutex. Can sleep (calls kernel_event()).
*/
static void inotify_dev_queue_event(struct inotify_device *dev,
struct inotify_watch *watch, u32 mask,
/*
* remove_kevent - cleans up and ultimately frees the given kevent
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static void remove_kevent(struct inotify_device *dev,
struct inotify_kernel_event *kevent)
/*
* inotify_dev_event_dequeue - destroy an event on the given device
*
- * Caller must hold dev->sem.
+ * Caller must hold dev->mutex.
*/
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
/*
* inotify_dev_get_wd - returns the next WD for use by the given dev
*
- * Callers must hold dev->sem. This function can sleep.
+ * Callers must hold dev->mutex. This function can sleep.
*/
static int inotify_dev_get_wd(struct inotify_device *dev,
struct inotify_watch *watch)
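/*
 * Hedged sketch of the 2.6-era idr pattern a function like this relies on
 * (the real body is elided in this excerpt): idr_pre_get() preallocates
 * and may sleep under GFP_KERNEL, which is why callers are warned above.
 * example_alloc_wd() is hypothetical.
 */
static int example_alloc_wd(struct inotify_device *dev,
			    struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;	/* no memory for an idr node */
		ret = idr_get_new(&dev->idr, watch, &watch->wd);
	} while (ret == -EAGAIN);	/* raced; preallocate and retry */

	return ret;
}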
/*
* create_watch - creates a watch on the given device.
*
- * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep.
+ * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep.
* Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
*/
static struct inotify_watch *create_watch(struct inotify_device *dev,
/*
 * inode_find_dev - find the watch associated with the given inode and dev
*
- * Callers must hold inode->inotify_sem.
+ * Callers must hold inode->inotify_mutex.
*/
static struct inotify_watch *inode_find_dev(struct inode *inode,
struct inotify_device *dev)
* the IN_IGNORED event to the given device signifying that the inode is no
* longer watched.
*
- * Callers must hold both inode->inotify_sem and dev->sem. We drop a
+ * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a
* reference to the inode before returning.
*
* The inode is not iput() so as to remain atomic. If the inode needs to be
if (!inotify_inode_watched(inode))
return;
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
u32 watch_mask = watch->mask;
if (watch_mask & mask) {
struct inotify_device *dev = watch->dev;
get_inotify_watch(watch);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, mask, cookie, name);
if (watch_mask & IN_ONESHOT)
remove_watch_no_event(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
put_inotify_watch(watch);
}
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
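/*
 * Hedged usage sketch: roughly how filesystem code of this era would
 * report a modification through the helper exported above. The wrapper
 * name is hypothetical; real callers live in the fsnotify glue.
 */
static inline void example_notify_modify(struct inode *inode)
{
	inotify_inode_queue_event(inode, IN_MODIFY, 0, NULL);
}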
iput(need_iput_tmp);
/* for each watch, send IN_UNMOUNT and then remove it */
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
watches = &inode->inotify_watches;
list_for_each_entry_safe(watch, next_w, watches, i_list) {
struct inotify_device *dev = watch->dev;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
remove_watch(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
iput(inode);
spin_lock(&inode_lock);
{
struct inotify_watch *watch, *next;
- down(&inode->inotify_sem);
+ mutex_lock(&inode->inotify_mutex);
list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
struct inotify_device *dev = watch->dev;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
remove_watch(watch, dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
}
- up(&inode->inotify_sem);
+ mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
int ret = 0;
poll_wait(file, &dev->wq, wait);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
if (!list_empty(&dev->events))
ret = POLLIN | POLLRDNORM;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return ret;
}
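/*
 * Userspace-side view of the poll support above (an illustrative sketch,
 * assuming a descriptor from the inotify_init() system call): POLLIN is
 * reported once at least one event sits on dev->events.
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_readable(inotify_fd);
 *
 * handle_readable() and inotify_fd are hypothetical placeholders.
 */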
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
events = !list_empty(&dev->events);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
if (events) {
ret = 0;
break;
if (ret)
return ret;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
while (1) {
struct inotify_kernel_event *kevent;
remove_kevent(dev, kevent);
}
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return ret;
}
* Destroy all of the watches on this device. Unfortunately, not very
* pretty. We cannot do a simple iteration over the list, because we
* do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_sem before dev->sem. The following works.
+ * hold inode->inotify_mutex before dev->mutex. The following works.
*/
while (1) {
struct inotify_watch *watch;
struct list_head *watches;
struct inode *inode;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
watches = &dev->watches;
if (list_empty(watches)) {
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
break;
}
watch = list_entry(watches->next, struct inotify_watch, d_list);
get_inotify_watch(watch);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
inode = watch->inode;
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
remove_watch_no_event(watch, dev);
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
}
/* destroy all of the events on this device */
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
while (!list_empty(&dev->events))
inotify_dev_event_dequeue(dev);
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
/* free this device: the put matching the get in inotify_init() */
put_inotify_dev(dev);
struct inotify_watch *watch;
struct inode *inode;
- down(&dev->sem);
+ mutex_lock(&dev->mutex);
watch = idr_find(&dev->idr, wd);
if (unlikely(!watch)) {
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
return -EINVAL;
}
get_inotify_watch(watch);
inode = watch->inode;
- up(&dev->sem);
+ mutex_unlock(&dev->mutex);
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
/* re-lookup the watch: it may have been removed while the locks were dropped */
watch = idr_find(&dev->idr, wd);
if (likely(watch))
remove_watch(watch, dev);
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
put_inotify_watch(watch);
return 0;
INIT_LIST_HEAD(&dev->events);
INIT_LIST_HEAD(&dev->watches);
init_waitqueue_head(&dev->wq);
- sema_init(&dev->sem, 1);
+ mutex_init(&dev->mutex);
dev->event_count = 0;
dev->queue_size = 0;
dev->max_events = inotify_max_queued_events;
inode = nd.dentry->d_inode;
dev = filp->private_data;
- down(&inode->inotify_sem);
- down(&dev->sem);
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&dev->mutex);
if (mask & IN_MASK_ADD)
mask_add = 1;
list_add(&watch->i_list, &inode->inotify_watches);
ret = watch->wd;
out:
- up(&dev->sem);
- up(&inode->inotify_sem);
+ mutex_unlock(&dev->mutex);
+ mutex_unlock(&inode->inotify_mutex);
path_release(&nd);
fput_and_out:
fput_light(filp, fput_needed);