*/
#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/kernel.h>
return 0;
}
-
-/**
- * max1363_poll_func_th() - th of trigger launched polling to ring buffer
- *
- * As sampling only occurs on i2c comms occurring, leave timestamping until
- * then. Some triggers will generate their own time stamp. Currently
- * there is no way of notifying them when no one cares.
- **/
-static void max1363_poll_func_th(struct iio_dev *indio_dev, s64 time)
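+/**
+ * max1363_trigger_handler() - bh of trigger launched polling to ring buffer
+ * @irq: trigger consumer irq
+ * @p: the iio_poll_func associated with this device
+ *
+ * Reads the currently enabled channels over i2c and pushes the resulting
+ * scan, together with a timestamp, into the software ring buffer before
+ * notifying the trigger that this consumer has finished.
+ **/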
+static irqreturn_t max1363_trigger_handler(int irq, void *p)
{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->private_data;
struct max1363_state *st = iio_priv(indio_dev);
-
- schedule_work(&st->poll_work);
-
- return;
-}
-/**
- * max1363_poll_bh_to_ring() - bh of trigger launched polling to ring buffer
- * @work_s: the work struct through which this was scheduled
- *
- * Currently there is no option in this driver to disable the saving of
- * timestamps within the ring.
- * I think the one copy of this at a time was to avoid problems if the
- * trigger was set far too high and the reads then locked up the computer.
- **/
-static void max1363_poll_bh_to_ring(struct work_struct *work_s)
-{
- struct max1363_state *st = container_of(work_s, struct max1363_state,
- poll_work);
- struct iio_dev *indio_dev = iio_priv_to_dev(st);
struct iio_sw_ring_buffer *sw_ring = iio_to_sw_ring(indio_dev->ring);
s64 time_ns;
__u8 *rxbuf;
if (d_size % sizeof(s64))
d_size += sizeof(s64) - (d_size % sizeof(s64));
- /* Ensure only one copy of this function running at a time */
- if (atomic_inc_return(&st->protect_ring) > 1)
- return;
-
/* Monitor mode prevents reading. Whilst not currently implemented
* might as well have this test in here in the meantime as it does
* no harm.
*/
if (numvals == 0)
- return;
+ return IRQ_HANDLED;
rxbuf = kmalloc(d_size, GFP_KERNEL);
if (rxbuf == NULL)
- return;
+ goto done;
if (st->chip_info->bits != 8)
b_sent = i2c_master_recv(st->client, rxbuf, numvals*2);
else
indio_dev->ring->access.store_to(&sw_ring->buf, rxbuf, time_ns);
done:
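+ /* Let the trigger know this consumer has finished with the event */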
+ iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
- atomic_dec(&st->protect_ring);
+
+ return IRQ_HANDLED;
}
}
/* Effectively select the ring buffer implementation */
iio_ring_sw_register_funcs(&indio_dev->ring->access);
- ret = iio_alloc_pollfunc(indio_dev, NULL, &max1363_poll_func_th);
- if (ret)
+ indio_dev->pollfunc = kzalloc(sizeof(*indio_dev->pollfunc), GFP_KERNEL);
+ if (indio_dev->pollfunc == NULL) {
+ ret = -ENOMEM;
goto error_deallocate_sw_rb;
+ }
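+ /* Configure the poll function run as a one-shot thread off the trigger */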
+ indio_dev->pollfunc->private_data = indio_dev;
+ indio_dev->pollfunc->thread = &max1363_trigger_handler;
+ indio_dev->pollfunc->type = IRQF_ONESHOT;
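+ /* Name identifying this device as a consumer of the trigger */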
+ indio_dev->pollfunc->name =
+ kasprintf(GFP_KERNEL, "%s_consumer%d",
+ st->client->name, indio_dev->id);
+ if (indio_dev->pollfunc->name == NULL) {
+ ret = -ENOMEM;
+ goto error_free_pollfunc;
+ }
/* Ring buffer functions - here trigger setup related */
indio_dev->ring->postenable = &iio_triggered_ring_postenable;
indio_dev->ring->preenable = &max1363_ring_preenable;
indio_dev->ring->predisable = &iio_triggered_ring_predisable;
- INIT_WORK(&st->poll_work, &max1363_poll_bh_to_ring);
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_RING_TRIGGERED;
+
return 0;
+error_free_pollfunc:
+ kfree(indio_dev->pollfunc);
error_deallocate_sw_rb:
iio_sw_rb_free(indio_dev->ring);
error_ret:
iio_trigger_dettach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
+ kfree(indio_dev->pollfunc->name);
kfree(indio_dev->pollfunc);
iio_sw_rb_free(indio_dev->ring);
}