Documentation/networking/netconsole.rst for an
alternative.
+ <DEVNAME>:<n>.<n>[,options]
+ Use the specified serial port on the serial core bus.
+ The addressing uses DEVNAME of the physical serial port
+ device, followed by the serial core controller instance,
+ and the serial port instance. The options are the same
+ as documented for the ttyS addressing above.
+
+ The mapping of the serial ports to the tty instances
+ can be viewed with:
+
+ $ ls -d /sys/bus/serial-base/devices/*:*.*/tty/*
+ /sys/bus/serial-base/devices/00:04:0.0/tty/ttyS0
+
+ In the above example, the console can be addressed with
+ console=00:04:0.0. Note that a console addressed this
+ way is only added once the related device driver is
+ ready, so an earlycon parameter may also be needed to
+ get console output early during boot.
+
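As a hypothetical illustration based on the mapping shown above, the same port could then be selected together with the usual serial options, for example:

    console=00:04:0.0,115200n8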
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio16,<addr>[,options]
retbleed=off [X86]
spec_rstack_overflow=off [X86]
spec_store_bypass_disable=off [X86,PPC]
+ spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
sonypi.*= [HW] Sony Programmable I/O Control Device driver
See Documentation/admin-guide/laptops/sonypi.rst
+ spectre_bhi= [X86] Control mitigation of Branch History Injection
+ (BHI) vulnerability. This setting affects the
+ deployment of the HW BHI control and the SW BHB
+ clearing sequence.
+
+ on - (default) Enable the HW or SW mitigation
+ as needed.
+ off - Disable the mitigation.
+
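On kernels that have this knob, the resulting BHI mitigation state is expected to be reported alongside the existing Spectre v2 status, e.g.:

    $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2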
spectre_v2= [X86,EARLY] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
The default operation protects the kernel from
(void)p->serial_in(p, UART_RX);
}
-static void dw8250_check_lcr(struct uart_port *p, int value)
+static void dw8250_check_lcr(struct uart_port *p, int offset, int value)
{
- void __iomem *offset = p->membase + (UART_LCR << p->regshift);
+ struct dw8250_data *d = to_dw8250_data(p->private_data);
+ void __iomem *addr = p->membase + (offset << p->regshift);
int tries = 1000;
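+	/* Only LCR writes need the read-back check below, and only on IP that is not fully 16550-compatible */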
+ if (offset != UART_LCR || d->uart_16550_compatible)
+ return;
+
/* Make sure LCR write wasn't ignored */
while (tries--) {
- unsigned int lcr = p->serial_in(p, UART_LCR);
+ unsigned int lcr = p->serial_in(p, offset);
if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
#ifdef CONFIG_64BIT
if (p->type == PORT_OCTEON)
- __raw_writeq(value & 0xff, offset);
+ __raw_writeq(value & 0xff, addr);
else
#endif
if (p->iotype == UPIO_MEM32)
- writel(value, offset);
+ writel(value, addr);
else if (p->iotype == UPIO_MEM32BE)
- iowrite32be(value, offset);
+ iowrite32be(value, addr);
else
- writeb(value, offset);
+ writeb(value, addr);
}
/*
* FIXME: this deadlocks if port->lock is already held
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
writeb(value, p->membase + (offset << p->regshift));
-
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_check_lcr(p, offset, value);
}
static void dw8250_serial_out38x(struct uart_port *p, int offset, int value)
#ifdef CONFIG_64BIT
static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
{
- unsigned int value;
-
- value = (u8)__raw_readq(p->membase + (offset << p->regshift));
+ u8 value = __raw_readq(p->membase + (offset << p->regshift));
return dw8250_modify_msr(p, offset, value);
}
static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
value &= 0xff;
__raw_writeq(value, p->membase + (offset << p->regshift));
/* Read back to ensure register write ordering. */
__raw_readq(p->membase + (UART_LCR << p->regshift));
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_check_lcr(p, offset, value);
}
#endif /* CONFIG_64BIT */
static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
writel(value, p->membase + (offset << p->regshift));
-
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_check_lcr(p, offset, value);
}
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
static void dw8250_serial_out32be(struct uart_port *p, int offset, int value)
{
- struct dw8250_data *d = to_dw8250_data(p->private_data);
-
iowrite32be(value, p->membase + (offset << p->regshift));
-
- if (offset == UART_LCR && !d->uart_16550_compatible)
- dw8250_check_lcr(p, value);
+ dw8250_check_lcr(p, offset, value);
}
static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
long rate;
int ret;
+ clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0 && p->uartclk != rate) {
- clk_disable_unprepare(d->clk);
+ if (rate > 0) {
/*
* Note that any clock-notifier worker will block in
* serial8250_update_uartclk() until we are done.
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
- clk_prepare_enable(d->clk);
}
+ clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
static void dma_tx_callback(void *param)
{
struct mxs_auart_port *s = param;
- struct circ_buf *xmit = &s->port.state->xmit;
+ struct tty_port *tport = &s->port.state->port;
dma_unmap_sg(s->dev, &s->tx_sgl, 1, DMA_TO_DEVICE);
smp_mb__after_atomic();
/* wake up the possible processes. */
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&s->port);
mxs_auart_tx_chars(s);
static void mxs_auart_tx_chars(struct mxs_auart_port *s)
{
- struct circ_buf *xmit = &s->port.state->xmit;
+ struct tty_port *tport = &s->port.state->port;
bool pending;
u8 ch;
if (auart_dma_enabled(s)) {
u32 i = 0;
- int size;
void *buffer = s->tx_dma_buf;
if (test_and_set_bit(MXS_AUART_DMA_TX_SYNC, &s->flags))
return;
- while (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
- size = min_t(u32, UART_XMIT_SIZE - i,
- CIRC_CNT_TO_END(xmit->head,
- xmit->tail,
- UART_XMIT_SIZE));
- memcpy(buffer + i, xmit->buf + xmit->tail, size);
- xmit->tail = (xmit->tail + size) & (UART_XMIT_SIZE - 1);
-
- i += size;
- if (i >= UART_XMIT_SIZE)
- break;
- }
-
if (uart_tx_stopped(&s->port))
mxs_auart_stop_tx(&s->port);
+ else
+ i = kfifo_out(&tport->xmit_fifo, buffer,
+ UART_XMIT_SIZE);
if (i) {
mxs_auart_dma_tx(s, i);
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
- return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
return true;
}
static void pmz_transmit_chars(struct uart_pmac_port *uap)
{
- struct circ_buf *xmit;
+ struct tty_port *tport;
+ unsigned char ch;
if (ZS_IS_CONS(uap)) {
unsigned char status = read_zsreg(uap, R0);
if (uap->port.state == NULL)
goto ack_tx_int;
- xmit = &uap->port.state->xmit;
- if (uart_circ_empty(xmit)) {
+ tport = &uap->port.state->port;
+ if (kfifo_is_empty(&tport->xmit_fifo)) {
uart_write_wakeup(&uap->port);
goto ack_tx_int;
}
goto ack_tx_int;
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
- write_zsdata(uap, xmit->buf[xmit->tail]);
+ WARN_ON(!uart_fifo_get(&uap->port, &ch));
+ write_zsdata(uap, ch);
zssync(uap);
- uart_xmit_advance(&uap->port, 1);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
return;
port->icount.tx++;
port->x_char = 0;
} else {
- struct circ_buf *xmit = &port->state->xmit;
+ struct tty_port *tport = &port->state->port;
+ unsigned char ch;
- if (uart_circ_empty(xmit))
+ if (!uart_fifo_get(&uap->port, &ch))
return;
- write_zsdata(uap, xmit->buf[xmit->tail]);
+ write_zsdata(uap, ch);
zssync(uap);
- uart_xmit_advance(port, 1);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
}
}
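The circ_buf to kfifo conversions above all follow the same shape. As a rough sketch (not taken from any of these drivers), a PIO transmit path on top of the kfifo-backed xmit buffer could look like the following, where hw_tx_ready() and hw_tx_byte() are hypothetical stand-ins for the driver's register accessors:

/* Assumes <linux/serial_core.h>; hw_tx_ready()/hw_tx_byte() are hypothetical. */
static void example_tx_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	unsigned char ch;

	while (hw_tx_ready(port)) {
		/* uart_fifo_get() pops one byte and bumps port->icount.tx */
		if (!uart_fifo_get(port, &ch))
			break;
		hw_tx_byte(port, ch);
	}

	/* Wake up writers once the fifo has drained below the threshold */
	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}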
memset(uap, 0, sizeof(struct uart_pmac_port));
}
-static int __init pmz_attach(struct platform_device *pdev)
+static int pmz_attach(struct platform_device *pdev)
{
struct uart_pmac_port *uap;
int i;
return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
-static void __exit pmz_detach(struct platform_device *pdev)
+static void pmz_detach(struct platform_device *pdev)
{
struct uart_pmac_port *uap = platform_get_drvdata(pdev);
#else
static struct platform_driver pmz_driver = {
- .remove_new = __exit_p(pmz_detach),
+ .probe = pmz_attach,
+ .remove_new = pmz_detach,
.driver = {
.name = "scc",
},
#ifdef CONFIG_PPC_PMAC
return macio_register_driver(&pmz_driver);
#else
- return platform_driver_probe(&pmz_driver, pmz_attach);
+ return platform_driver_register(&pmz_driver);
#endif
}
struct serial_port_device {
struct device dev;
struct uart_port *port;
+ unsigned int tx_enabled:1;
};
int serial_base_ctrl_init(void);
int serial_base_port_init(void);
void serial_base_port_exit(void);
+ void serial_base_port_startup(struct uart_port *port);
+ void serial_base_port_shutdown(struct uart_port *port);
+
int serial_base_driver_register(struct device_driver *driver);
void serial_base_driver_unregister(struct device_driver *driver);
int serial_core_register_port(struct uart_driver *drv, struct uart_port *port);
void serial_core_unregister_port(struct uart_driver *drv, struct uart_port *port);
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+
+int serial_base_add_preferred_console(struct uart_driver *drv,
+ struct uart_port *port);
+
+#else
+
+static inline
+int serial_base_add_preferred_console(struct uart_driver *drv,
+ struct uart_port *port)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+int serial_base_add_isa_preferred_console(const char *name, int idx);
+
+#else
+
+static inline
+int serial_base_add_isa_preferred_console(const char *name, int idx)
+{
+ return 0;
+}
+
+#endif
* enabled, serial_port_runtime_resume() calls start_tx() again
* after enabling the device.
*/
- if (pm_runtime_active(&port_dev->dev))
+ if (!pm_runtime_enabled(port->dev) || pm_runtime_active(&port_dev->dev))
port->ops->start_tx(port);
pm_runtime_mark_last_busy(&port_dev->dev);
pm_runtime_put_autosuspend(&port_dev->dev);
uart_port_unlock_irq(uport);
}
-/*
- * Startup the port. This will be called once per open. All calls
- * will be serialised by the per-port mutex.
- */
-static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
- bool init_hw)
+static int uart_alloc_xmit_buf(struct tty_port *port)
{
- struct uart_port *uport = uart_port_check(state);
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
unsigned long flags;
unsigned long page;
- int retval = 0;
-
- if (uport->type == PORT_UNKNOWN)
- return 1;
-
- /*
- * Make sure the device is in D0 state.
- */
- uart_change_pm(state, UART_PM_STATE_ON);
/*
* Initialise and allocate the transmit and temporary
if (!page)
return -ENOMEM;
- uart_port_lock(state, flags);
- if (!state->xmit.buf) {
- state->xmit.buf = (unsigned char *) page;
- uart_circ_clear(&state->xmit);
+ uport = uart_port_lock(state, flags);
+ if (!state->port.xmit_buf) {
+ state->port.xmit_buf = (unsigned char *)page;
+ kfifo_init(&state->port.xmit_fifo, state->port.xmit_buf,
+ PAGE_SIZE);
uart_port_unlock(uport, flags);
} else {
uart_port_unlock(uport, flags);
/*
* Do not free() the page under the port lock, see
- * uart_shutdown().
+ * uart_free_xmit_buf().
*/
free_page(page);
}
+ return 0;
+}
+
+static void uart_free_xmit_buf(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
+ unsigned long flags;
+ char *xmit_buf;
+
+ /*
+ * Do not free() the transmit buffer page under the port lock since
+ * this can create various circular locking scenarios. For instance,
+ * console driver may need to allocate/free a debug object, which
+ * can end up in printk() recursion.
+ */
+ uport = uart_port_lock(state, flags);
+ xmit_buf = port->xmit_buf;
+ port->xmit_buf = NULL;
+ INIT_KFIFO(port->xmit_fifo);
+ uart_port_unlock(uport, flags);
+
+ free_page((unsigned long)xmit_buf);
+}
+
+/*
+ * Startup the port. This will be called once per open. All calls
+ * will be serialised by the per-port mutex.
+ */
+static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+ bool init_hw)
+{
+ struct uart_port *uport = uart_port_check(state);
+ int retval;
+
+ if (uport->type == PORT_UNKNOWN)
+ return 1;
+
+ /*
+ * Make sure the device is in D0 state.
+ */
+ uart_change_pm(state, UART_PM_STATE_ON);
+
+ retval = uart_alloc_xmit_buf(&state->port);
+ if (retval)
+ return retval;
+
retval = uport->ops->startup(uport);
if (retval == 0) {
if (uart_console(uport) && uport->cons->cflag) {
bool init_hw)
{
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int retval;
if (tty_port_initialized(port))
- return 0;
+ goto out_base_port_startup;
retval = uart_port_startup(tty, state, init_hw);
- if (retval)
+ if (retval) {
set_bit(TTY_IO_ERROR, &tty->flags);
+ return retval;
+ }
- return retval;
+ out_base_port_startup:
+ uport = uart_port_check(state);
+ if (!uport)
+ return -EIO;
+
+ serial_base_port_startup(uport);
+
+ return 0;
}
/*
{
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
- unsigned long flags;
- char *xmit_buf = NULL;
/*
* Set the TTY IO error marker
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
+ if (uport)
+ serial_base_port_shutdown(uport);
+
if (tty_port_initialized(port)) {
tty_port_set_initialized(port, false);
*/
tty_port_set_suspended(port, false);
- /*
- * Do not free() the transmit buffer page under the port lock since
- * this can create various circular locking scenarios. For instance,
- * console driver may need to allocate/free a debug object, which
- * can endup in printk() recursion.
- */
- uart_port_lock(state, flags);
- xmit_buf = state->xmit.buf;
- state->xmit.buf = NULL;
- uart_port_unlock(uport, flags);
-
- free_page((unsigned long)xmit_buf);
+ uart_free_xmit_buf(port);
}
/**
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- struct circ_buf *circ;
unsigned long flags;
int ret = 0;
- circ = &state->xmit;
port = uart_port_lock(state, flags);
- if (!circ->buf) {
+ if (!state->port.xmit_buf) {
uart_port_unlock(port, flags);
return 0;
}
- if (port && uart_circ_chars_free(circ) != 0) {
- circ->buf[circ->head] = c;
- circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
- ret = 1;
- }
+ if (port)
+ ret = kfifo_put(&state->port.xmit_fifo, c);
uart_port_unlock(port, flags);
return ret;
}
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
- struct circ_buf *circ;
unsigned long flags;
- int c, ret = 0;
+ int ret = 0;
/*
* This means you called this function _after_ the port was
return -EL3HLT;
port = uart_port_lock(state, flags);
- circ = &state->xmit;
- if (!circ->buf) {
+ if (WARN_ON_ONCE(!state->port.xmit_buf)) {
uart_port_unlock(port, flags);
return 0;
}
- while (port) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
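+	/* kfifo_in() queues at most count bytes and returns how many were actually copied */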
+ if (port)
+ ret = kfifo_in(&state->port.xmit_fifo, buf, count);
__uart_start(state);
uart_port_unlock(port, flags);
unsigned int ret;
port = uart_port_lock(state, flags);
- ret = uart_circ_chars_free(&state->xmit);
+ ret = kfifo_avail(&state->port.xmit_fifo);
uart_port_unlock(port, flags);
return ret;
}
unsigned int ret;
port = uart_port_lock(state, flags);
- ret = uart_circ_chars_pending(&state->xmit);
+ ret = kfifo_len(&state->port.xmit_fifo);
uart_port_unlock(port, flags);
return ret;
}
port = uart_port_lock(state, flags);
if (!port)
return;
- uart_circ_clear(&state->xmit);
+ kfifo_reset(&state->port.xmit_fifo);
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
* interrupt happens).
*/
if (uport->x_char ||
- ((uart_circ_chars_pending(&state->xmit) > 0) &&
+ (!kfifo_is_empty(&state->port.xmit_fifo) &&
!uart_tx_stopped(uport)))
result &= ~TIOCSER_TEMT;
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
- char *buf;
/*
* At this point, we stop accepting input. To do this, we
uport->ops->stop_rx(uport);
uart_port_unlock_irq(uport);
+ serial_base_port_shutdown(uport);
uart_port_shutdown(port);
/*
*/
tty_port_set_suspended(port, false);
- /*
- * Free the transmit buffer.
- */
- uart_port_lock_irq(uport);
- uart_circ_clear(&state->xmit);
- buf = state->xmit.buf;
- state->xmit.buf = NULL;
- uart_port_unlock_irq(uport);
-
- free_page((unsigned long)buf);
+ uart_free_xmit_buf(port);
uart_change_pm(state, UART_PM_STATE_OFF);
}
if (ret)
goto err_unregister_ctrl_dev;
+ ret = serial_base_add_preferred_console(drv, port);
+ if (ret)
+ goto err_unregister_port_dev;
+
ret = serial_core_add_one_port(drv, port);
if (ret)
goto err_unregister_port_dev;
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pnp.h>
#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
static int __serial_port_busy(struct uart_port *port)
{
return !uart_tx_stopped(port) &&
- uart_circ_chars_pending(&port->state->xmit);
+ !kfifo_is_empty(&port->state->port.xmit_fifo);
}
static int serial_port_runtime_resume(struct device *dev)
/* Flush any pending TX for the port */
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled)
+ goto unlock;
if (__serial_port_busy(port))
port->ops->start_tx(port);
+
+ unlock:
uart_port_unlock_irqrestore(port, flags);
out:
return 0;
uart_port_lock_irqsave(port, &flags);
+ if (!port_dev->tx_enabled) {
+ uart_port_unlock_irqrestore(port, flags);
+ return 0;
+ }
+
busy = __serial_port_busy(port);
if (busy)
port->ops->start_tx(port);
return busy ? -EBUSY : 0;
}
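+ /*
+  * tx_enabled is set by serial_base_port_startup() and cleared by
+  * serial_base_port_shutdown(), so the runtime PM callbacks above only
+  * kick start_tx() while the port is actually started up.
+  */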
+ static void serial_base_port_set_tx(struct uart_port *port,
+ struct serial_port_device *port_dev,
+ bool enabled)
+ {
+ unsigned long flags;
+
+ uart_port_lock_irqsave(port, &flags);
+ port_dev->tx_enabled = enabled;
+ uart_port_unlock_irqrestore(port, flags);
+ }
+
+ void serial_base_port_startup(struct uart_port *port)
+ {
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, true);
+ }
+
+ void serial_base_port_shutdown(struct uart_port *port)
+ {
+ struct serial_port_device *port_dev = port->port_dev;
+
+ serial_base_port_set_tx(port, port_dev, false);
+ }
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
serial_port_runtime_suspend,
serial_port_runtime_resume, NULL);
if (dev_is_platform(dev))
ret = platform_get_irq(to_platform_device(dev), 0);
- else
+ else if (dev_is_pnp(dev)) {
+ ret = pnp_irq(to_pnp_dev(dev), 0);
+ if (ret < 0)
+ ret = -ENXIO;
+ } else
ret = fwnode_irq_get(dev_fwnode(dev), 0);
if (ret == -EPROBE_DEFER)
return ret;
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct circ_buf *xmit = &port->state->xmit;
+ struct tty_port *tport = &port->state->port;
+
+ while (1) {
+ unsigned char ch;
- while (!uart_circ_empty(xmit)) {
/* Check that TDR is empty before filling FIFO */
if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
break;
- writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
- uart_xmit_advance(port, 1);
+
+ if (!uart_fifo_get(port, &ch))
+ break;
+
+ writel_relaxed(ch, port->membase + ofs->tdr);
}
/* rely on TXE irq (mask or unmask) for sending remaining data */
- if (uart_circ_empty(xmit))
+ if (kfifo_is_empty(&tport->xmit_fifo))
stm32_usart_tx_interrupt_disable(port);
else
stm32_usart_tx_interrupt_enable(port);
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
struct stm32_port *stm32port = to_stm32_port(port);
- struct circ_buf *xmit = &port->state->xmit;
+ struct tty_port *tport = &port->state->port;
struct dma_async_tx_descriptor *desc = NULL;
unsigned int count;
int ret;
return;
}
- count = uart_circ_chars_pending(xmit);
-
- if (count > TX_BUF_L)
- count = TX_BUF_L;
-
- if (xmit->tail < xmit->head) {
- memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
- } else {
- size_t one = UART_XMIT_SIZE - xmit->tail;
- size_t two;
-
- if (one > count)
- one = count;
- two = count - one;
-
- memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
- if (two)
- memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
- }
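+	/* kfifo_out_peek() copies up to TX_BUF_L bytes without removing them from the fifo */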
+ count = kfifo_out_peek(&tport->xmit_fifo, &stm32port->tx_buf[0],
+ TX_BUF_L);
desc = dmaengine_prep_slave_single(stm32port->tx_ch,
stm32port->tx_dma_buf,
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct circ_buf *xmit = &port->state->xmit;
+ struct tty_port *tport = &port->state->port;
u32 isr;
int ret;
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED &&
(port->x_char ||
- !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
+ !(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)))) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_enable(port);
}
return;
}
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) {
stm32_usart_tx_interrupt_disable(port);
return;
}
else
stm32_usart_transmit_chars_pio(port);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (kfifo_is_empty(&tport->xmit_fifo)) {
stm32_usart_tx_interrupt_disable(port);
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED) {
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
- struct circ_buf *xmit = &port->state->xmit;
+ struct tty_port *tport = &port->state->port;
- if (uart_circ_empty(xmit) && !port->x_char) {
+ if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) {
stm32_usart_rs485_rts_disable(port);
return;
}
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)