return max_cpu_index;
}
+CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
+
void cpu_list_add(CPUState *cpu)
{
QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
cpu->cpu_index = UNASSIGNED_CPU_INDEX;
}
+CPUState *qemu_get_cpu(int index)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ if (cpu->cpu_index == index) {
+ return cpu;
+ }
+ }
+
+ return NULL;
+}
+
+/* current CPU in the current thread. It is only valid inside cpu_exec() */
+__thread CPUState *current_cpu;
+
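The additions above keep every CPU on the global cpus tail queue (cpu_list_add() takes qemu_cpu_list_lock before touching it), let qemu_get_cpu() walk that list with CPU_FOREACH until the requested cpu_index is found, and make current_cpu a per-thread (__thread) pointer. As a minimal standalone sketch of the same shape, with a hand-rolled singly linked list standing in for the QTAILQ-based list and every toy_* name purely illustrative rather than QEMU API:

/*
 * Standalone sketch (not QEMU code): a global CPU list plus an index lookup,
 * mirroring the shape of qemu_get_cpu()/CPU_FOREACH and current_cpu above.
 * All toy_* names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct ToyCPU {
    int cpu_index;
    struct ToyCPU *next;                 /* the real list is a tail queue */
} ToyCPU;

static ToyCPU *toy_cpus;                 /* global CPU list, like 'cpus' */
static __thread ToyCPU *toy_current_cpu; /* per-thread, like current_cpu */

static void toy_cpu_list_add(ToyCPU *cpu)
{
    /* cpu_list_add() holds qemu_cpu_list_lock; the sketch skips locking. */
    cpu->next = toy_cpus;
    toy_cpus = cpu;
}

static ToyCPU *toy_get_cpu(int index)
{
    /* Same linear search as the CPU_FOREACH loop in qemu_get_cpu(). */
    for (ToyCPU *cpu = toy_cpus; cpu != NULL; cpu = cpu->next) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }
    return NULL;
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        ToyCPU *cpu = calloc(1, sizeof(*cpu));
        cpu->cpu_index = i;
        toy_cpu_list_add(cpu);
    }
    toy_current_cpu = toy_get_cpu(2);
    printf("cpu_index 2 %s\n", toy_current_cpu ? "found" : "missing");
    return 0;
}
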
struct qemu_work_item {
- struct qemu_work_item *next;
+ QSIMPLEQ_ENTRY(qemu_work_item) node;
run_on_cpu_func func;
run_on_cpu_data data;
bool free, exclusive, done;
};

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
qemu_mutex_lock(&cpu->work_mutex);
- if (cpu->queued_work_first == NULL) {
- cpu->queued_work_first = wi;
- } else {
- cpu->queued_work_last->next = wi;
- }
- cpu->queued_work_last = wi;
- wi->next = NULL;
+ QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
wi->done = false;
qemu_mutex_unlock(&cpu->work_mutex);
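The hunk above replaces the open-coded queued_work_first/queued_work_last bookkeeping with a single QSIMPLEQ_INSERT_TAIL on cpu->work_list, while the locking discipline stays the same: items are appended under work_mutex, and the drain loop below takes the mutex, pops one item at a time (QSIMPLEQ_FIRST + QSIMPLEQ_REMOVE_HEAD), and drops the mutex before running the callback. A minimal standalone sketch of that pattern using pthreads and a hand-rolled queue, with toy_* names that are illustrative rather than QEMU's API:

/*
 * Standalone sketch (not QEMU code): enqueue under a mutex, then drain by
 * popping one item at a time and releasing the mutex before running it.
 * A hand-rolled first/last queue stands in for QSIMPLEQ.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ToyWorkItem {
    void (*func)(void *data);
    void *data;
    struct ToyWorkItem *next;
} ToyWorkItem;

typedef struct {
    pthread_mutex_t work_mutex;
    ToyWorkItem *first, *last;          /* the pointers QSIMPLEQ replaces */
} ToyCPU;

static void toy_queue_work(ToyCPU *cpu, void (*func)(void *), void *data)
{
    ToyWorkItem *wi = calloc(1, sizeof(*wi));
    wi->func = func;
    wi->data = data;

    pthread_mutex_lock(&cpu->work_mutex);
    if (cpu->first == NULL) {           /* open-coded "insert at tail" */
        cpu->first = wi;
    } else {
        cpu->last->next = wi;
    }
    cpu->last = wi;
    pthread_mutex_unlock(&cpu->work_mutex);
}

static void toy_process_queued_work(ToyCPU *cpu)
{
    pthread_mutex_lock(&cpu->work_mutex);
    while (cpu->first != NULL) {
        ToyWorkItem *wi = cpu->first;   /* "first" + "remove head" */
        cpu->first = wi->next;
        if (cpu->first == NULL) {
            cpu->last = NULL;
        }
        pthread_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);             /* run with the mutex dropped */
        free(wi);
        pthread_mutex_lock(&cpu->work_mutex);
    }
    pthread_mutex_unlock(&cpu->work_mutex);
}

static void print_item(void *data)
{
    printf("work item: %s\n", (const char *)data);
}

int main(void)
{
    ToyCPU cpu;

    pthread_mutex_init(&cpu.work_mutex, NULL);
    cpu.first = cpu.last = NULL;
    toy_queue_work(&cpu, print_item, "first");
    toy_queue_work(&cpu, print_item, "second");
    toy_process_queued_work(&cpu);
    return 0;
}

The drain loop mirrors the rewritten function below: the emptiness check is redone with the mutex held after each callback, so work queued while a callback runs is also processed.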

void process_queued_cpu_work(CPUState *cpu)
{
struct qemu_work_item *wi;
- if (cpu->queued_work_first == NULL) {
+ qemu_mutex_lock(&cpu->work_mutex);
+ if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
+ qemu_mutex_unlock(&cpu->work_mutex);
return;
}
-
- qemu_mutex_lock(&cpu->work_mutex);
- while (cpu->queued_work_first != NULL) {
- wi = cpu->queued_work_first;
- cpu->queued_work_first = wi->next;
- if (!cpu->queued_work_first) {
- cpu->queued_work_last = NULL;
- }
+ while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
+ wi = QSIMPLEQ_FIRST(&cpu->work_list);
+ QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
qemu_mutex_unlock(&cpu->work_mutex);
if (wi->exclusive) {
/* Running work items outside the BQL avoids the following deadlock: