Using delayed-work for tty flip buffers ends up causing us to wait for
the next tick to complete some actions. That's usually not all that
noticeable, but for certain latency-critical workloads it ends up being
totally unacceptable.

As an extreme case of this, passing a token back-and-forth over a pty
will take two ticks per iteration, so even just a thousand iterations
will take 8 seconds assuming a common 250Hz configuration.
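(At 250Hz each tick is 4ms, so two ticks per iteration is 8ms, and a
thousand iterations works out to the 8 seconds above.)
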
Avoiding the whole delayed work issue brings that ping-pong test-case
down to 0.009s on my machine.

In more practical terms, this latency has been a performance problem for
things like dive computer simulators (simulating the serial interface
using the ptys) and for other environments (Alan mentions a CP/M emulator).

Reported-by: Jef Driesen <[email protected]>
Acked-by: Greg KH <[email protected]>
Acked-by: Alan Cox <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
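
To make the shape of the conversion explicit, here is a minimal sketch of
the same pattern outside the tty layer; the "struct demo" and demo_*()
names are made up for illustration and are not part of the patch:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct demo {
	struct work_struct work;	/* was: struct delayed_work work */
};

static void demo_flush(struct work_struct *work)
{
	/* was: container_of(work, struct demo, work.work) */
	struct demo *d = container_of(work, struct demo, work);

	pr_debug("demo: flushing %p\n", d);	/* stand-in for the real flush */
}

static void demo_init(struct demo *d)
{
	/* was: INIT_DELAYED_WORK(&d->work, demo_flush) */
	INIT_WORK(&d->work, demo_flush);
}

static void demo_kick(struct demo *d)
{
	/* was: schedule_delayed_work(&d->work, 1), i.e. wait for a tick */
	schedule_work(&d->work);
}

schedule_work() lets a workqueue thread pick the work up as soon as it
can, whereas schedule_delayed_work(..., 1) defers even queueing it until
the next timer tick (4ms at 250Hz), which is exactly the latency the
numbers above are about.
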
if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
spin_unlock_irqrestore(&tty->buf.lock, flags);
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_schedule_flip);
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_struct *tty =
- container_of(work, struct tty_struct, buf.work.work);
+ container_of(work, struct tty_struct, buf.work);
unsigned long flags;
struct tty_ldisc *disc;
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
if (!tty->receive_room || seen_tail) {
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
break;
}
if (count > tty->receive_room)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
- flush_delayed_work(&tty->buf.work);
+ flush_work(&tty->buf.work);
}
/**
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
- flush_to_ldisc(&tty->buf.work.work);
+ flush_to_ldisc(&tty->buf.work);
else
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
tty->buf.tail = NULL;
tty->buf.free = NULL;
tty->buf.memory_used = 0;
- INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
+ INIT_WORK(&tty->buf.work, flush_to_ldisc);
}
static int tty_ldisc_halt(struct tty_struct *tty)
{
clear_bit(TTY_LDISC, &tty->flags);
- return cancel_delayed_work_sync(&tty->buf.work);
+ return cancel_work_sync(&tty->buf.work);
}
/**
{
flush_work_sync(&tty->hangup_work);
flush_work_sync(&tty->SAK_work);
- flush_delayed_work_sync(&tty->buf.work);
+ flush_work_sync(&tty->buf.work);
}
/**
/* Restart the work queue in case no characters kick it off. Safe if
already running */
if (work)
- schedule_delayed_work(&tty->buf.work, 1);
+ schedule_work(&tty->buf.work);
if (o_work)
- schedule_delayed_work(&o_tty->buf.work, 1);
+ schedule_work(&o_tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
tty_unlock();
return retval;
/*
* this is like tty_ldisc_halt, but we need to give up
- * the BTM before calling cancel_delayed_work_sync,
- * which may need to wait for another function taking the BTM
+ * the BTM before calling cancel_work_sync, which may
+ * need to wait for another function taking the BTM
*/
clear_bit(TTY_LDISC, &tty->flags);
tty_unlock();
- cancel_delayed_work_sync(&tty->buf.work);
+ cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
tty_lock();
if (t->buf.tail != NULL)
t->buf.tail->commit = t->buf.tail->used;
spin_unlock_irqrestore(&t->buf.lock, flags);
- schedule_delayed_work(&t->buf.work, 0);
+ schedule_work(&t->buf.work);
}
#endif
struct tty_bufhead {
- struct delayed_work work;
+ struct work_struct work;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */