diff --git a/fs/buffer.c b/fs/buffer.c
	return err;
}
-static void do_thaw_all(struct work_struct *work)
+static void do_thaw_one(struct super_block *sb, void *unused)
{
-	struct super_block *sb, *n;
	char b[BDEVNAME_SIZE];
+	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+		printk(KERN_WARNING "Emergency Thaw on %s\n",
+		       bdevname(sb->s_bdev, b));
+}
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
-			printk(KERN_WARNING "Emergency Thaw on %s\n",
-			       bdevname(sb->s_bdev, b));
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
-#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

-static void drop_pagecache_sb(struct super_block *sb)
+static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;
	iput(toput_inode);
}
-static void drop_pagecache(void)
-{
-	struct super_block *sb, *n;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			drop_pagecache_sb(sb);
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
-}
-
static void drop_slab(void)
{
	int nr_objects;
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (write) {
		if (sysctl_drop_caches & 1)
-			drop_pagecache();
+			iterate_supers(drop_pagecache_sb, NULL);
		if (sysctl_drop_caches & 2)
			drop_slab();
	}
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
-#include "../internal.h"

static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
	return security_quotactl(cmd, type, id, sb);
}

+static void quota_sync_one(struct super_block *sb, void *arg)
+{
+	if (sb->s_qcop && sb->s_qcop->quota_sync)
+		sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
+}
+
static int quota_sync_all(int type)
{
-	struct super_block *sb, *n;
	int ret;

	if (type >= MAXQUOTAS)
		return -EINVAL;
	ret = security_quotactl(Q_SYNC, type, 0, NULL);
-	if (ret)
-		return ret;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		if (!sb->s_qcop || !sb->s_qcop->quota_sync)
-			continue;
-
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-		down_read(&sb->s_umount);
-		if (sb->s_root)
-			sb->s_qcop->quota_sync(sb, type, 1);
-		up_read(&sb->s_umount);
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
-
-	return 0;
+	if (!ret)
+		iterate_supers(quota_sync_one, &type);
+	return ret;
}
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
diff --git a/fs/super.c b/fs/super.c
	spin_unlock(&sb_lock);
}
+/**
+ * iterate_supers - call a function for all active superblocks
+ * @f: function to call
+ * @arg: argument to pass to @f
+ *
+ * Scans the superblock list and calls the given function for every
+ * superblock that is still active (i.e. has a root dentry), passing it
+ * the superblock, with ->s_umount held for read, together with the
+ * given argument.
+ */
+void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
+{
+	struct super_block *sb, *n;
+
+	spin_lock(&sb_lock);
+	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
+		if (list_empty(&sb->s_instances))
+			continue;
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+
+		down_read(&sb->s_umount);
+		if (sb->s_root)
+			f(sb, arg);
+		up_read(&sb->s_umount);
+
+		spin_lock(&sb_lock);
+		__put_super(sb);
+	}
+	spin_unlock(&sb_lock);
+}
+
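For readers following the conversion pattern: a caller of the new helper supplies only the per-superblock work, and the helper keeps all of the locking, refcounting and list-walking in one place. The sketch below is illustrative only and not part of the patch (count_bdev_supers() and count_one_sb() are hypothetical names), but it follows the same convention as sync_one_sb() and quota_sync_one() above: the callback runs with ->s_umount held for read, only for superblocks that still have a root dentry, and receives the opaque pointer handed to iterate_supers().

/* Illustrative sketch only -- not part of this patch. */
#include <linux/fs.h>

/* Callback: invoked once per active superblock, under down_read(&sb->s_umount). */
static void count_one_sb(struct super_block *sb, void *arg)
{
	int *count = arg;	/* the pointer passed to iterate_supers() below */

	if (sb->s_bdev)		/* count only block-device backed filesystems */
		(*count)++;
}

/* Hypothetical caller: count the bdev-backed superblocks currently mounted. */
static int count_bdev_supers(void)
{
	int count = 0;

	iterate_supers(count_one_sb, &count);
	return count;
}

Passing a typed value through the void pointer, as with &count here, is the same trick the converted callers use to pass wait to sync_one_sb() and type to quota_sync_one().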
/**
* get_super - get the superblock of a device
* @bdev: device to get the superblock for
diff --git a/fs/sync.c b/fs/sync.c
}
EXPORT_SYMBOL_GPL(sync_filesystem);

+static void sync_one_sb(struct super_block *sb, void *arg)
+{
+	if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
+		__sync_filesystem(sb, *(int *)arg);
+}
/*
* Sync all the data for all the filesystems (called by sys_sync() and
* emergency sync)
*/
static void sync_filesystems(int wait)
{
-	struct super_block *sb, *n;
-
-	spin_lock(&sb_lock);
-	list_for_each_entry_safe(sb, n, &super_blocks, s_list) {
-		if (list_empty(&sb->s_instances))
-			continue;
-		sb->s_count++;
-		spin_unlock(&sb_lock);
-
-		down_read(&sb->s_umount);
-		if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
-			__sync_filesystem(sb, wait);
-		up_read(&sb->s_umount);
-
-		/* restart only when sb is no longer on the list */
-		spin_lock(&sb_lock);
-		__put_super(sb);
-	}
-	spin_unlock(&sb_lock);
+	iterate_supers(sync_one_sb, &wait);
}
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
extern struct super_block *get_active_super(struct block_device *bdev);
extern struct super_block *user_get_super(dev_t);
extern void drop_super(struct super_block *sb);
+extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);