#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
+#include <linux/poll.h>
#include <linux/init.h>
static int start_readonly;
+/*
+ * We have a system-wide 'event count' that is incremented
+ * on any 'interesting' event, and readers of /proc/mdstat
+ * can use 'poll' or 'select' to find out when the event
+ * count increases.
+ *
+ * Events are:
+ * start array, stop array, error, add device, remove device,
+ * start build, activate spare
+ */
+static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
+static atomic_t md_event_count;
+static void md_new_event(mddev_t *mddev)
+{
+ atomic_inc(&md_event_count);
+ wake_up(&md_event_waiters);
+}
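+
+/*
+ * A minimal sketch of a hypothetical userspace consumer (illustrative
+ * only, not part of this patch): drain /proc/mdstat to EOF so the
+ * per-open-file snapshot is refreshed, block in poll() until the event
+ * count moves, then rewind and re-read the new state.
+ *
+ *	#include <fcntl.h>
+ *	#include <poll.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		char buf[4096];
+ *		struct pollfd pfd;
+ *
+ *		pfd.fd = open("/proc/mdstat", O_RDONLY);
+ *		pfd.events = POLLPRI;
+ *		for (;;) {
+ *			while (read(pfd.fd, buf, sizeof(buf)) > 0)
+ *				;		// drain to EOF
+ *			poll(&pfd, 1, -1);	// returns when md_new_event() fires
+ *			lseek(pfd.fd, 0, SEEK_SET);
+ *		}
+ *	}
+ */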
+
/*
* Enables iteration over all existing md arrays.
* all_mddevs_lock protects this list.
mddev->queue->make_request_fn = mddev->pers->make_request;
mddev->changed = 1;
+ md_new_event(mddev);
return 0;
}
printk(KERN_INFO "md: %s switched to read-only mode.\n",
mdname(mddev));
err = 0;
+ md_new_event(mddev);
out:
return err;
}
kick_rdev_from_array(rdev);
md_update_sb(mddev);
+ md_new_event(mddev);
return 0;
busy:
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
-
+ md_new_event(mddev);
return 0;
abort_unbind_export:
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ md_new_event(mddev);
}
/* seq_file implementation for /proc/mdstat */
mddev_put(mddev);
}
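+/* Per-open-file state for /proc/mdstat readers: the event count this
+ * reader last saw, compared with md_event_count in mdstat_poll().
+ */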
+struct mdstat_info {
+ int event;
+};
+
static int md_seq_show(struct seq_file *seq, void *v)
{
mddev_t *mddev = v;
sector_t size;
struct list_head *tmp2;
mdk_rdev_t *rdev;
+ struct mdstat_info *mi = seq->private;
int i;
struct bitmap *bitmap;
spin_unlock(&pers_lock);
seq_printf(seq, "\n");
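+ /* output is complete: record how far this reader has seen */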
+ mi->event = atomic_read(&md_event_count);
return 0;
}
if (v == (void*)2) {
static int md_seq_open(struct inode *inode, struct file *file)
{
int error;
+ struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (mi == NULL)
+ return -ENOMEM;
error = seq_open(file, &md_seq_ops);
+ if (error)
+ kfree(mi);
+ else {
+ struct seq_file *p = file->private_data;
+ p->private = mi;
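+ /* take an initial snapshot so poll() only reports later events */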
+ mi->event = atomic_read(&md_event_count);
+ }
return error;
}
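+/* free our private info before seq_release() tears down the seq_file */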
+static int md_seq_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct mdstat_info *mi = m->private;
+ m->private = NULL;
+ kfree(mi);
+ return seq_release(inode, file);
+}
+
+static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+{
+ struct seq_file *m = filp->private_data;
+ struct mdstat_info *mi = m->private;
+ unsigned int mask;
+
+ poll_wait(filp, &md_event_waiters, wait);
+
+ /* always allow read */
+ mask = POLLIN | POLLRDNORM;
+
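+ /* report urgent data if events have occurred since this reader last
+ * saw a complete /proc/mdstat; select()/poll() callers should re-read
+ */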
+ if (mi->event != atomic_read(&md_event_count))
+ mask |= POLLERR | POLLPRI;
+ return mask;
+}
+
static struct file_operations md_seq_fops = {
.open = md_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = md_seq_release,
+ .poll = mdstat_poll,
};
int register_md_personality(int pnum, mdk_personality_t *p)
j += sectors;
if (j>1) mddev->curr_resync = j;
-
+ if (last_check == 0)
+ /* this is the earliest that the rebuild will be
+ * visible in /proc/mdstat
+ */
+ md_new_event(mddev);
if (last_check + window > io_sectors || j == max_sectors)
continue;
mddev->recovery = 0;
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_new_event(mddev);
goto unlock;
}
/* Clear some bits that don't mean anything, but
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
spares++;
+ md_new_event(mddev);
} else
break;
}
mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
mddev->recovery = 0;
- } else {
+ } else
md_wakeup_thread(mddev->sync_thread);
- }
+ md_new_event(mddev);
}
unlock:
mddev_unlock(mddev);