{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
struct uart_port *port = &sirfport->port;
+ spin_lock(&port->lock);
if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
uart_handle_cts_change(port,
!gpio_get_value(sirfport->cts_gpio));
+ spin_unlock(&port->lock);
return IRQ_HANDLED;
}
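A note on the lock variants used throughout this change (editorial, not part of the diff): the modem-status handler above runs in hard-IRQ context, so a plain spin_lock()/spin_unlock() on port->lock is enough, while code paths that can run with interrupts enabled (the tasklets and DMA completion callbacks below) take the same lock with spin_lock_irqsave(). A minimal sketch of the two patterns, using hypothetical demo_* names for illustration only:

/* Sketch, not driver code: the same port->lock taken from two contexts. */
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;

	/* hard-IRQ context: local IRQs are already off, plain lock is fine */
	spin_lock(&port->lock);
	/* ... touch port state ... */
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}

static void demo_tasklet(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	unsigned long flags;

	/* softirq context: disable IRQs so demo_isr() cannot deadlock us */
	spin_lock_irqsave(&port->lock, flags);
	/* ... touch port state ... */
	spin_unlock_irqrestore(&port->lock, flags);
}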
sirfport->rx_io_count += rx_count;
port->icount.rx += rx_count;
- spin_unlock(&port->lock);
- tty_flip_buffer_push(&port->state->port);
- spin_lock(&port->lock);
-
return rx_count;
}
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
+ spin_lock_irqsave(&port->lock, flags);
xmit->tail = (xmit->tail + sirfport->transfer_size) &
(UART_XMIT_SIZE - 1);
port->icount.tx += sirfport->transfer_size;
if (sirfport->tx_dma_addr)
dma_unmap_single(port->dev, sirfport->tx_dma_addr,
sirfport->transfer_size, DMA_TO_DEVICE);
- spin_lock_irqsave(&sirfport->tx_lock, flags);
sirfport->tx_dma_state = TX_DMA_IDLE;
sirfsoc_uart_tx_with_dma(sirfport);
- spin_unlock_irqrestore(&sirfport->tx_lock, flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
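For readers unfamiliar with the circular xmit buffer consumed in the TX DMA callback above: UART_XMIT_SIZE is a power of two, so ANDing with (UART_XMIT_SIZE - 1) is a cheap modulo that wraps the tail index. A standalone sketch of that bookkeeping, with a hypothetical demo_advance_tail() helper:

/* Sketch: advance the circular xmit tail after n bytes were sent.
 * Power-of-two size makes the AND act as a modulo. */
#include <linux/circ_buf.h>
#include <linux/serial_core.h>

static void demo_advance_tail(struct uart_port *port, unsigned int n)
{
	struct circ_buf *xmit = &port->state->xmit;

	/* caller must hold port->lock, as in the callback above */
	xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1);
	port->icount.tx += n;
}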
static void sirfsoc_uart_insert_rx_buf_to_tty(
inserted = tty_insert_flip_string(tport,
sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
port->icount.rx += inserted;
- tty_flip_buffer_push(tport);
}
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
unsigned long flags;
struct dma_tx_state tx_state;
- spin_lock_irqsave(&sirfport->rx_lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
- spin_lock(&port->lock);
sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
- spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
- spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
else
wr_regl(port, SIRFUART_INT_EN_CLR,
uint_en->sirfsoc_rx_done_en);
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
-
sirfsoc_uart_start_next_rx_dma(port);
} else {
- spin_lock_irqsave(&sirfport->rx_lock, flags);
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
if (!sirfport->is_marco)
else
wr_regl(port, ureg->sirfsoc_int_en_reg,
uint_en->sirfsoc_rx_done_en);
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_flip_buffer_push(&port->state->port);
}
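The reordering above (drop port->lock, then tty_flip_buffer_push()) is the central pattern of this change: pushing the flip buffer can take tty-layer locks and may call back into the serial core, so it should not run while port->lock is held. A minimal sketch of the intended ordering, with a hypothetical demo_rx_work() name:

/* Sketch: update port state under port->lock, push the flip buffer
 * only after the lock is dropped. Not driver code. */
#include <linux/serial_core.h>
#include <linux/spinlock.h>
#include <linux/tty_flip.h>

static void demo_rx_work(struct uart_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* ... copy received bytes into the flip buffer,
	 *     update port->icount.rx, re-arm interrupts/DMA ... */
	spin_unlock_irqrestore(&port->lock, flags);

	/* push outside the lock: this may take tty-layer locks */
	tty_flip_buffer_push(&port->state->port);
}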
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
struct dma_tx_state tx_state;
- spin_lock(&sirfport->rx_lock);
-
dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
dmaengine_terminate_all(sirfport->rx_dma_chan);
else
wr_regl(port, SIRFUART_INT_EN_CLR,
uint_en->sirfsoc_rx_timeout_en);
- spin_unlock(&sirfport->rx_lock);
tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}
intr_status &= port->read_status_mask;
uart_insert_char(port, intr_status,
uint_en->sirfsoc_rx_oflow_en, 0, flag);
- tty_flip_buffer_push(&state->port);
}
recv_char:
if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
sirfsoc_uart_pio_rx_chars(port,
SIRFSOC_UART_IO_RX_MAX_CNT);
}
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(&state->port);
+ spin_lock(&port->lock);
if (intr_status & uint_st->sirfsoc_txfifo_empty) {
if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
}
}
spin_unlock(&port->lock);
+
return IRQ_HANDLED;
}
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
struct dma_tx_state tx_state;
- spin_lock_irqsave(&sirfport->rx_lock, flags);
+ spin_lock_irqsave(&port->lock, flags);
while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_unlock_irqrestore(&port->lock, flags);
+ tty_flip_buffer_push(&port->state->port);
}
static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
- spin_lock(&sirfport->rx_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sirfport->port.lock, flags);
sirfport->rx_issued++;
sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
- spin_unlock(&sirfport->rx_lock);
tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
+ spin_unlock_irqrestore(&sirfport->port.lock, flags);
}
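dmaengine completion callbacks such as the one above may run in interrupt context, hence the irqsave variant on sirfport->port.lock; the callback only advances the ring index and defers the real work to a tasklet. A sketch of that shape, with hypothetical demo_* names:

/* Sketch: a DMA-complete callback that bumps an index under the port
 * lock and defers processing to a tasklet. Not driver code. */
#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

struct demo_port {
	struct uart_port port;
	unsigned int rx_issued;
	struct tasklet_struct rx_tasklet;
};

static void demo_rx_dma_complete(void *param)
{
	struct demo_port *dp = param;
	unsigned long flags;

	/* dmaengine callbacks may run in IRQ context: use irqsave */
	spin_lock_irqsave(&dp->port.lock, flags);
	dp->rx_issued++;
	tasklet_schedule(&dp->rx_tasklet);
	spin_unlock_irqrestore(&dp->port.lock, flags);
}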
/* submit rx dma task into dmaengine */
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
- unsigned long flags;
int i;
- spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_IO_MODE);
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
sirfsoc_rx_submit_one_dma_desc(port, i);
sirfport->rx_completed = sirfport->rx_issued = 0;
- spin_lock_irqsave(&sirfport->rx_lock, flags);
if (!sirfport->is_marco)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
else
wr_regl(port, ureg->sirfsoc_int_en_reg,
SIRFUART_RX_DMA_INT_EN(port, uint_en));
- spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
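With its local rx_lock usage removed, this function now appears to rely on its callers serializing via port->lock (the rx-timeout tasklet above calls it with the lock held). One way to document such a convention, not part of this patch and purely illustrative, is a lockdep assertion:

/* Sketch: documenting a "caller holds port->lock" convention.
 * lockdep_assert_held() is a no-op unless lockdep is enabled. */
#include <linux/lockdep.h>
#include <linux/serial_core.h>

static void demo_start_next_rx_dma(struct uart_port *port)
{
	lockdep_assert_held(&port->lock);
	/* ... program the RX DMA descriptors and enable interrupts ... */
}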
static void sirfsoc_uart_start_rx(struct uart_port *port)
ret = -EFAULT;
goto err;
}
- spin_lock_init(&sirfport->rx_lock);
- spin_lock_init(&sirfport->tx_lock);
tasklet_init(&sirfport->rx_dma_complete_tasklet,
sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
tasklet_init(&sirfport->rx_tmo_process_tasklet,