// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017 Sagi Grimberg.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <rdma/ib_verbs.h>

/**
 * blk_mq_rdma_map_queues - provide a default queue mapping for an rdma device
 * @map: CPU to hardware queue map.
 * @dev: rdma device to provide a mapping for.
 * @first_vec: first interrupt vector to use for queues (usually 0)
 *
 * This function assumes the rdma device @dev has at least as many available
 * interrupt vectors as @map has queues. It will then query the affinity mask
 * of each vector and build a queue mapping that maps each queue to the CPUs
 * that have irq affinity for the corresponding vector.
 *
 * In case either the driver passed a @dev with fewer vectors than
 * @map->nr_queues, or @dev does not provide an affinity mask for a
 * vector, we fall back to the naive mapping.
 */
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
		struct ib_device *dev, int first_vec)
{
	const struct cpumask *mask;
	unsigned int queue, cpu;

	for (queue = 0; queue < map->nr_queues; queue++) {
		mask = ib_get_vector_affinity(dev, first_vec + queue);
		if (!mask)
			goto fallback;

		for_each_cpu(cpu, mask)
			map->mq_map[cpu] = map->queue_offset + queue;
	}

	return 0;

fallback:
	return blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
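
/*
 * Example (illustrative sketch only, kept out of the build): an RDMA block
 * driver would typically call blk_mq_rdma_map_queues() from its
 * blk_mq_ops->map_queues callback.  The controller structure and field
 * names below are hypothetical.
 */
#if 0
static void example_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct example_ctrl *ctrl = set->driver_data;	/* hypothetical driver data */

	/*
	 * Map the default hardware queues to the RDMA device's completion
	 * vectors, starting at vector 0; blk_mq_rdma_map_queues() falls back
	 * to blk_mq_map_queues() when no usable IRQ affinity is available.
	 */
	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
			       ctrl->ib_device, 0);
}
#endif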