--- a/fs/select.c
+++ b/fs/select.c
void poll_initwait(struct poll_wqueues *pwq)
{
init_poll_funcptr(&pwq->pt, __pollwait);
+	pwq->polling_task = current;
+	pwq->triggered = 0;	/* pwq lives on the caller's stack, so clear the flag explicitly */
pwq->error = 0;
pwq->table = NULL;
pwq->inline_index = 0;
}
-
EXPORT_SYMBOL(poll_initwait);
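/*
 * Annotation (not part of the patch): the usual lifecycle of a
 * poll_wqueues instance, as in do_select()/do_poll() further below, is:
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	// loop: drive ->poll() on each fd, sleep via poll_schedule_timeout()
 *	poll_freewait(&table);
 */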
static void free_poll_entry(struct poll_table_entry *entry)
free_page((unsigned long) old);
}
}
-
EXPORT_SYMBOL(poll_freewait);
-static struct poll_table_entry *poll_get_entry(poll_table *_p)
+static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
- struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
struct poll_table_page *table = p->table;
if (p->inline_index < N_INLINE_POLL_ENTRIES)
new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
if (!new_table) {
p->error = -ENOMEM;
- __set_current_state(TASK_RUNNING);
return NULL;
}
new_table->entry = new_table->entries;
return table->entry++;
}
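/*
 * Annotation (not part of the patch): the __set_current_state(TASK_RUNNING)
 * fixup removed above is no longer needed because the polling task's state
 * is now only manipulated around the actual sleep in poll_schedule_timeout(),
 * not across the ->poll() calls that can reach poll_get_entry().
 */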
+static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct poll_wqueues *pwq = wait->private;
+ DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
+
+	/*
+	 * Although this function is called under the waitqueue lock, LOCK
+	 * doesn't imply a write barrier, and users expect write-barrier
+	 * semantics from wakeup functions.  The following smp_wmb() is
+	 * equivalent to the smp_wmb() in try_to_wake_up() and is paired
+	 * with the set_mb() in poll_schedule_timeout().
+	 */
+ smp_wmb();
+ pwq->triggered = 1;
+
+	/*
+	 * Perform the default wake up operation using a dummy
+	 * waitqueue.
+	 *
+	 * TODO: This is hacky but there is currently no interface to
+	 * pass @sync in.  @sync is scheduled to be removed and once
+	 * that happens, wake_up_process() can be used directly.
+	 */
+ return default_wake_function(&dummy_wait, mode, sync, key);
+}
+
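/*
 * Annotation (not part of the patch): DECLARE_WAITQUEUE(dummy_wait,
 * pwq->polling_task) above expands to roughly
 *
 *	wait_queue_t dummy_wait = {
 *		.private = pwq->polling_task,
 *		.func	 = default_wake_function,
 *	};
 *
 * so default_wake_function(&dummy_wait, ...) wakes the recorded polling
 * task, while the real waitqueue entry's ->private has been repurposed
 * by __pollwait() below to carry the poll_wqueues pointer.
 */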
/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
poll_table *p)
{
- struct poll_table_entry *entry = poll_get_entry(p);
+ struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
+ struct poll_table_entry *entry = poll_get_entry(pwq);
if (!entry)
return;
get_file(filp);
entry->filp = filp;
entry->wait_address = wait_address;
- init_waitqueue_entry(&entry->wait, current);
+ init_waitqueue_func_entry(&entry->wait, pollwake);
+ entry->wait.private = pwq;
add_wait_queue(wait_address, &entry->wait);
}
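/*
 * Annotation (not part of the patch): because wakeups now go through
 * pollwake() and a dummy waitqueue entry, select/poll no longer depend on
 * the polling task's state while ->poll() runs, so a poll method may
 * sleep.  A hypothetical driver (foo_poll, foo_dev and its fields are
 * invented for illustration) could now safely do:
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		mutex_lock(&dev->lock);		// sleeping is now safe here
 *		poll_wait(file, &dev->waitq, wait);
 *		if (dev->data_ready)
 *			mask |= POLLIN | POLLRDNORM;
 *		mutex_unlock(&dev->lock);
 *		return mask;
 *	}
 */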
+int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
+ ktime_t *expires, unsigned long slack)
+{
+ int rc = -EINTR;
+
+ set_current_state(state);
+ if (!pwq->triggered)
+ rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+ __set_current_state(TASK_RUNNING);
+
+	/*
+	 * Prepare for the next iteration.
+	 *
+	 * The following set_mb() serves two purposes.  First, it's
+	 * the counterpart rmb to the wmb in pollwake() such that data
+	 * written before the wakeup is guaranteed to be visible after
+	 * the wakeup.  Second, the full barrier guarantees that the
+	 * clearing of ->triggered can't be reordered past the event
+	 * checks of the next iteration.  Note that this problem doesn't
+	 * exist for the first iteration, as add_wait_queue() has full
+	 * barrier semantics.
+	 */
+ set_mb(pwq->triggered, 0);
+
+ return rc;
+}
+EXPORT_SYMBOL(poll_schedule_timeout);
+
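/*
 * Annotation (not part of the patch): the intended calling pattern,
 * condensed from the do_select()/do_poll() hunks below (fd iteration,
 * timeout setup and signal handling elided):
 *
 *	for (;;) {
 *		// call ->poll() on each fd, counting ready ones in count
 *		if (count || timed_out)
 *			break;
 *		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *					   to, slack))
 *			timed_out = 1;
 *	}
 */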
/**
* poll_select_set_timeout - helper function to setup the timeout value
* @to: pointer to timespec variable for the final timeout
for (;;) {
unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
- set_current_state(TASK_INTERRUPTIBLE);
-
inp = fds->in; outp = fds->out; exp = fds->ex;
rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
to = &expire;
}
- if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+ if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
+ to, slack))
timed_out = 1;
}
- __set_current_state(TASK_RUNNING);
poll_freewait(&table);
for (;;) {
struct poll_list *walk;
- set_current_state(TASK_INTERRUPTIBLE);
for (walk = list; walk != NULL; walk = walk->next) {
struct pollfd * pfd, * pfd_end;
to = &expire;
}
- if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+ if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
timed_out = 1;
}
- __set_current_state(TASK_RUNNING);
return count;
}
}
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
struct poll_table_entry {
- struct file * filp;
+ struct file *filp;
wait_queue_t wait;
- wait_queue_head_t * wait_address;
+ wait_queue_head_t *wait_address;
};
/*
 * Structures and helpers for sys_select/sys_poll
 */
struct poll_wqueues {
poll_table pt;
- struct poll_table_page * table;
+ struct poll_table_page *table;
+	struct task_struct *polling_task;	/* task doing the poll, woken by pollwake() */
+	int triggered;				/* set by pollwake(), cleared in poll_schedule_timeout() */
int error;
int inline_index;
struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
+extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
+ ktime_t *expires, unsigned long slack);
+
+static inline int poll_schedule(struct poll_wqueues *pwq, int state)
+{
+ return poll_schedule_timeout(pwq, state, NULL, 0);
+}
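/*
 * Annotation (not part of the patch): with a NULL @expires,
 * schedule_hrtimeout_range() never times out, so poll_schedule() blocks
 * until pollwake() or a signal wakes the task and always returns -EINTR
 * rather than 0; the caller then re-checks its events and loops.
 */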
/*
* Scaleable version of the fd_set.