Git Repo - linux.git/commitdiff
virtio_fs: store actual queue index in mq_map
author	Max Gurtovoy <[email protected]>
	Sun, 6 Oct 2024 18:43:41 +0000 (21:43 +0300)
committer	Michael S. Tsirkin <[email protected]>
	Tue, 12 Nov 2024 23:07:46 +0000 (18:07 -0500)
Store the actual virtqueue index in mq_map rather than the relative
request-queue index. This eliminates the need to recalculate the index
(adding the VQ_REQUEST offset) on the request send fast path.
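
Concretely, the per-request lookup in virtio_fs_send_req() (see the last
hunk below) changes from

	queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];

to

	queue_id = fs->mq_map[raw_smp_processor_id()];

because mq_map[] now already contains the offset-adjusted virtqueue index.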

Signed-off-by: Max Gurtovoy <[email protected]>
Message-Id: <20241006184341[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
fs/fuse/virtio_fs.c

index 04d7d61cb2b2ce80ef07cde3ffd8e2cbb4da2a38..28c1864baeed91e267b6284568320239425a41fe 100644 (file)
@@ -242,7 +242,7 @@ static ssize_t cpu_list_show(struct kobject *kobj,
 
        qid = fsvq->vq->index;
        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
-               if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) {
+               if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) {
                        if (first)
                                ret = snprintf(buf + pos, size - pos, "%u", cpu);
                        else
@@ -871,23 +871,23 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
                        goto fallback;
 
                for_each_cpu(cpu, mask)
-                       fs->mq_map[cpu] = q;
+                       fs->mq_map[cpu] = q + VQ_REQUEST;
        }
 
        return;
 fallback:
        /* Attempt to map evenly in groups over the CPUs */
        masks = group_cpus_evenly(fs->num_request_queues);
-       /* If even this fails we default to all CPUs use queue zero */
+       /* If even this fails we default to all CPUs use first request queue */
        if (!masks) {
                for_each_possible_cpu(cpu)
-                       fs->mq_map[cpu] = 0;
+                       fs->mq_map[cpu] = VQ_REQUEST;
                return;
        }
 
        for (q = 0; q < fs->num_request_queues; q++) {
                for_each_cpu(cpu, &masks[q])
-                       fs->mq_map[cpu] = q;
+                       fs->mq_map[cpu] = q + VQ_REQUEST;
        }
        kfree(masks);
 }
@@ -1482,7 +1482,7 @@ static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req)
        clear_bit(FR_PENDING, &req->flags);
 
        fs = fiq->priv;
-       queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
+       queue_id = fs->mq_map[raw_smp_processor_id()];
 
        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
                 __func__, req->in.h.opcode, req->in.h.unique,
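
For reference, a minimal userspace model of the resulting mapping scheme.
This is a sketch only: NR_CPUS and NUM_REQUEST_QUEUES are made-up values,
the round-robin loop stands in for group_cpus_evenly(), and VQ_REQUEST is
assumed to be 1 (the index right after the driver's single high-priority
queue); none of this is the driver's actual code.

	#include <stdio.h>

	#define VQ_REQUEST		1	/* assumed: first request vq follows one hiprio vq */
	#define NR_CPUS			8	/* made-up value for the sketch */
	#define NUM_REQUEST_QUEUES	4	/* made-up value for the sketch */

	int main(void)
	{
		unsigned int mq_map[NR_CPUS];
		unsigned int cpu, q;

		/* Setup path: spread CPUs over request queues and store the
		 * actual virtqueue index (q + VQ_REQUEST), mirroring what
		 * virtio_fs_map_queues() does after this patch. The round-robin
		 * below is only a stand-in for group_cpus_evenly().
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			q = cpu % NUM_REQUEST_QUEUES;
			mq_map[cpu] = q + VQ_REQUEST;
		}

		/* Fast path: the stored value is already the queue id,
		 * no offset arithmetic per request.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %u -> queue_id %u\n", cpu, mq_map[cpu]);

		return 0;
	}

With this layout both the cpu_list_show() comparison and the send-path
lookup can use the stored value directly.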