int
spu_irq_class_0_bottom(struct spu *spu)
{
- unsigned long stat;
+ unsigned long stat, mask;
spu->class_0_pending = 0;
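+ /* only handle class 0 events that are currently enabled in the interrupt mask */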
+ mask = in_be64(&spu->priv1->int_mask_class0_RW);
stat = in_be64(&spu->priv1->int_stat_class0_RW);
+ stat &= mask;
+
if (stat & 1) /* invalid MFC DMA */
__spu_trap_invalid_dma(spu);
{
struct spu *spu;
unsigned long stat;
+ unsigned long mask;
spu = data;
stat = in_be64(&spu->priv1->int_stat_class2_RW);
+ mask = in_be64(&spu->priv1->int_mask_class2_RW);
- pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
- in_be64(&spu->priv1->int_mask_class2_RW));
+ pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
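+ /* ignore class 2 events that are currently masked off */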
+ stat &= mask;
if (stat & 1) /* PPC core mailbox */
__spu_trap_mailbox(spu);
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
+#include <linux/poll.h>
#include <asm/io.h>
#include <asm/spu.h>
return ctx->csa.prob.mb_stat_R;
}
+static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
+ unsigned int events)
+{
+ int ret;
+ u32 stat;
+
+ ret = 0;
+ spin_lock_irq(&ctx->csa.register_lock);
+ stat = ctx->csa.prob.mb_stat_R;
+
+ /* if the requested event is there, return the poll
+ mask, otherwise enable the interrupt to get notified,
+ but first mark any pending interrupts as done so
+ we don't get woken up unnecessarily */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (stat & 0xff0000)
+ ret |= POLLIN | POLLRDNORM;
+ else {
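+ /* mark the saved mailbox interrupt as done and enable it in the saved class 2 mask */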
+ ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
+ ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+ }
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ if (stat & 0x00ff00)
+ ret |= POLLOUT | POLLWRNORM;
+ else {
+ ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
+ ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ }
+ }
+ spin_unlock_irq(&ctx->csa.register_lock);
+ return ret;
+}
+
static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
{
int ret;
struct spu_context_ops spu_backing_ops = {
.mbox_read = spu_backing_mbox_read,
.mbox_stat_read = spu_backing_mbox_stat_read,
+ .mbox_stat_poll = spu_backing_mbox_stat_poll,
.ibox_read = spu_backing_ibox_read,
.wbox_write = spu_backing_wbox_write,
.signal1_read = spu_backing_signal1_read,
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_stat;
unsigned int mask;
- spu_acquire(ctx);
-
- mbox_stat = ctx->ops->mbox_stat_read(ctx);
-
- spu_release(ctx);
-
poll_wait(file, &ctx->ibox_wq, wait);
- mask = 0;
- if (mbox_stat & 0xff0000)
- mask |= POLLIN | POLLRDNORM;
+ spu_acquire(ctx);
+ mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
+ spu_release(ctx);
return mask;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_stat;
unsigned int mask;
- spu_acquire(ctx);
- mbox_stat = ctx->ops->mbox_stat_read(ctx);
- spu_release(ctx);
-
poll_wait(file, &ctx->wbox_wq, wait);
- mask = 0;
- if (mbox_stat & 0x00ff00)
- mask = POLLOUT | POLLWRNORM;
+ spu_acquire(ctx);
+ mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
+ spu_release(ctx);
return mask;
}
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
+#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
return in_be32(&ctx->spu->problem->mb_stat_R);
}
+static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
+ unsigned int events)
+{
+ struct spu *spu = ctx->spu;
+ struct spu_priv1 __iomem *priv1 = spu->priv1;
+ int ret = 0;
+ u32 stat;
+
+ spin_lock_irq(&spu->register_lock);
+ stat = in_be32(&spu->problem->mb_stat_R);
+
+ /* if the requested event is there, return the poll
+ mask, otherwise enable the interrupt to get notified,
+ but first mark any pending interrupts as done so
+ we don't get woken up unnecessarily */
+
+ if (events & (POLLIN | POLLRDNORM)) {
+ if (stat & 0xff0000)
+ ret |= POLLIN | POLLRDNORM;
+ else {
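+ /* ack the pending mailbox interrupt and unmask it so we
+ get notified when new mailbox data arrives */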
+ out_be64(&priv1->int_stat_class2_RW, 0x1);
+ out_be64(&priv1->int_mask_class2_RW,
+ in_be64(&priv1->int_mask_class2_RW) | 0x1);
+ }
+ }
+ if (events & (POLLOUT | POLLWRNORM)) {
+ if (stat & 0x00ff00)
+ ret |= POLLOUT | POLLWRNORM;
+ else {
+ out_be64(&priv1->int_stat_class2_RW, 0x10);
+ out_be64(&priv1->int_mask_class2_RW,
+ in_be64(&priv1->int_mask_class2_RW) | 0x10);
+ }
+ }
+ spin_unlock_irq(&spu->register_lock);
+ return ret;
+}
+
static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
{
struct spu *spu = ctx->spu;
struct spu_context_ops spu_hw_ops = {
.mbox_read = spu_hw_mbox_read,
.mbox_stat_read = spu_hw_mbox_stat_read,
+ .mbox_stat_poll = spu_hw_mbox_stat_poll,
.ibox_read = spu_hw_ibox_read,
.wbox_write = spu_hw_wbox_write,
.signal1_read = spu_hw_signal1_read,
struct spu_context_ops {
int (*mbox_read) (struct spu_context * ctx, u32 * data);
u32(*mbox_stat_read) (struct spu_context * ctx);
+ unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
+ unsigned int events);
int (*ibox_read) (struct spu_context * ctx, u32 * data);
int (*wbox_write) (struct spu_context * ctx, u32 data);
u32(*signal1_read) (struct spu_context * ctx);
CLASS0_ENABLE_SPU_ERROR_INTR;
csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
CLASS1_ENABLE_STORAGE_FAULT_INTR;
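+ /* mailbox interrupts are left disabled by default; the
+ mbox_stat_poll callbacks enable them on demand */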
- csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_MAILBOX_INTR |
- CLASS2_ENABLE_SPU_STOP_INTR | CLASS2_ENABLE_SPU_HALT_INTR;
+ csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
+ CLASS2_ENABLE_SPU_HALT_INTR;
}
static void init_priv2(struct spu_state *csa)