Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-05 12:56:41 +07:00)
Merge branch 'virtio_net-Expand-affinity-to-arbitrary-numbers-of-cpu-and-vq'
Caleb Raitto says:

====================
virtio_net: Expand affinity to arbitrary numbers of cpu and vq

Virtio-net tries to pin each virtual queue rx and tx interrupt to a cpu if
there are as many queues as cpus. Expand this heuristic to configure a
reasonable affinity setting also when the number of cpus != the number of
virtual queues.

Patch 1 allows vqs to take an affinity mask with more than 1 cpu.
Patch 2 generalizes the algorithm in virtnet_set_affinity beyond the case
where #cpus == #vqs.

v2 changes:
Renamed "virtio_net: Make vp_set_vq_affinity() take a mask." to
"virtio: Make vp_set_vq_affinity() take a mask."

Tested:

[InstanceSetup]
set_multiqueue = false

$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list; done
0-15 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 11 12 12 13 13 14 14
15 15 0-15 0-15 0-15 0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001 0002 0004 0008 0010 0020 0040 0080 0100 0200 0400 0800 1000 2000 4000 8000

$ sudo ethtool -L eth0 combined 15
$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list; done
0-15 0-1 0-1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 11 12 12 13 13 14 14
15 15 15 15 0-15 0-15 0-15 0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 14` ; do sudo grep ".*" tx-$i/xps_cpus; done
0003 0004 0008 0010 0020 0040 0080 0100 0200 0400 0800 1000 2000 4000 8000

$ sudo ethtool -L eth0 combined 8
$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list; done
0-15 0-1 0-1 2-3 2-3 4-5 4-5 6-7 6-7 8-9 8-9 10-11 10-11 12-13 12-13 14-15
14-15 9 9 10 10 11 11 12 12 13 13 14 14 15 15 15 15 0-15 0-15 0-15 0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 7` ; do sudo grep ".*" tx-$i/xps_cpus; done
0003 000c 0030 00c0 0300 0c00 3000 c000

$ sudo ethtool -L eth0 combined 16
$ sudo sh -c "echo 0 > /sys/devices/system/cpu/cpu15/online"
$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list; done
0-15 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 11 11 12 12 13 13 14 14
0 0 0-15 0-15 0-15 0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001 0002 0004 0008 0010 0020 0040 0080 0100 0200 0400 0800 1000 2000 4000 0001

$ for i in `seq 8 15`; \
  do sudo sh -c "echo 0 > /sys/devices/system/cpu/cpu$i/online"; done
$ cd /proc/irq
$ for i in `seq 24 60` ; do sudo grep ".*" $i/smp_affinity_list; done
0-15 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7
0-15 0-15 0-15 0-15

$ cd /sys/class/net/eth0/queues/
$ for i in `seq 0 15` ; do sudo grep ".*" tx-$i/xps_cpus; done
0001 0002 0004 0008 0010 0020 0040 0080 0001 0002 0004 0008 0010 0020 0040 0080
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
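The grouping heuristic introduced by patch 2 gives each queue "stride" CPUs, with the first "stragglers" queues taking one extra CPU, wrapping around the online CPUs when there are more queues than CPUs. A minimal userspace sketch of that arithmetic (not part of the patch; plain ints stand in for kernel cpumasks, and all CPUs are assumed online and numbered 0..N-1):

/* Userspace sketch of the stride/straggler grouping used by the new
 * virtnet_set_affinity(); not kernel code.
 */
#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

static void show_grouping(int num_cpu, int num_queues)
{
	/* Same arithmetic as the patch: each queue gets 'stride' CPUs,
	 * and the first 'stragglers' queues get one extra CPU.
	 */
	int stride = max_int(num_cpu / num_queues, 1);
	int stragglers = num_cpu >= num_queues ? num_cpu % num_queues : 0;
	int cpu = 0;

	printf("%d CPUs over %d queues:\n", num_cpu, num_queues);
	for (int i = 0; i < num_queues; i++) {
		int group_size = stride + (i < stragglers ? 1 : 0);

		printf("  queue %2d -> cpus", i);
		for (int j = 0; j < group_size; j++) {
			printf(" %d", cpu);
			cpu = (cpu + 1) % num_cpu; /* wrap, like cpumask_next_wrap() */
		}
		printf("\n");
	}
}

int main(void)
{
	show_grouping(16, 16); /* 1:1 pinning, as before                    */
	show_grouping(16, 15); /* queue 0 gets cpus 0 and 1 (one straggler) */
	show_grouping(16, 8);  /* each queue gets a pair of cpus            */
	show_grouping(8, 16);  /* fewer cpus than queues: wrap around       */
	return 0;
}

Built with any C99 compiler, the 16-CPU/15-queue case prints queue 0 -> cpus 0 1 and one CPU per remaining queue, matching the "0-1" smp_affinity_list entries in the test log above; the xps_cpus values are hex bitmasks, so 0003 means CPUs 0 and 1.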
commit 29afde5051
@@ -146,7 +146,7 @@ static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_data_queues; i++)
-			virtqueue_set_affinity(vi->data_vq[i].vq, -1);
+			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
 
 		vi->affinity_hint_set = false;
 	}
@@ -173,7 +173,7 @@ static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
 	 *
	 */
	for_each_online_cpu(cpu) {
-		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpu);
+		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}
@@ -31,6 +31,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/netdevice.h>
+#include <linux/kernel.h>
 #include <linux/pci.h>
 #include <net/route.h>
 #include <net/xdp.h>
@@ -1878,8 +1879,8 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 	if (vi->affinity_hint_set) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
-			virtqueue_set_affinity(vi->rq[i].vq, -1);
-			virtqueue_set_affinity(vi->sq[i].vq, -1);
+			virtqueue_set_affinity(vi->rq[i].vq, NULL);
+			virtqueue_set_affinity(vi->sq[i].vq, NULL);
 		}
 
 		vi->affinity_hint_set = false;
@@ -1888,30 +1889,41 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
 
 static void virtnet_set_affinity(struct virtnet_info *vi)
 {
-	int i;
-	int cpu;
+	cpumask_var_t mask;
+	int stragglers;
+	int group_size;
+	int i, j, cpu;
+	int num_cpu;
+	int stride;
 
-	/* In multiqueue mode, when the number of cpu is equal to the number of
-	 * queue pairs, we let the queue pairs to be private to one cpu by
-	 * setting the affinity hint to eliminate the contention.
-	 */
-	if (vi->curr_queue_pairs == 1 ||
-	    vi->max_queue_pairs != num_online_cpus()) {
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 		virtnet_clean_affinity(vi, -1);
 		return;
 	}
 
-	i = 0;
-	for_each_online_cpu(cpu) {
-		const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
+	num_cpu = num_online_cpus();
+	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
+	stragglers = num_cpu >= vi->curr_queue_pairs ?
+			num_cpu % vi->curr_queue_pairs :
+			0;
+	cpu = cpumask_next(-1, cpu_online_mask);
 
-		virtqueue_set_affinity(vi->rq[i].vq, cpu);
-		virtqueue_set_affinity(vi->sq[i].vq, cpu);
-		__netif_set_xps_queue(vi->dev, mask, i, false);
-		i++;
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		group_size = stride + (i < stragglers ? 1 : 0);
+
+		for (j = 0; j < group_size; j++) {
+			cpumask_set_cpu(cpu, mask);
+			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
+						nr_cpu_ids, false);
+		}
+		virtqueue_set_affinity(vi->rq[i].vq, mask);
+		virtqueue_set_affinity(vi->sq[i].vq, mask);
+		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
+		cpumask_clear(mask);
 	}
 
 	vi->affinity_hint_set = true;
+	free_cpumask_var(mask);
 }
 
 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
@@ -421,7 +421,7 @@ const char *vp_bus_name(struct virtio_device *vdev)
  * - OR over all affinities for shared MSI
  * - ignore the affinity request if we're using INTX
  */
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 {
 	struct virtio_device *vdev = vq->vdev;
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
@@ -435,11 +435,10 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 	if (vp_dev->msix_enabled) {
 		mask = vp_dev->msix_affinity_masks[info->msix_vector];
 		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
-		if (cpu == -1)
+		if (!cpu_mask)
 			irq_set_affinity_hint(irq, NULL);
 		else {
-			cpumask_clear(mask);
-			cpumask_set_cpu(cpu, mask);
+			cpumask_copy(mask, cpu_mask);
 			irq_set_affinity_hint(irq, mask);
 		}
 	}
@@ -141,7 +141,7 @@ const char *vp_bus_name(struct virtio_device *vdev);
  * - OR over all affinities for shared MSI
  * - ignore the affinity request if we're using INTX
  */
-int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
+int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
 
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
 
@@ -79,7 +79,8 @@ struct virtio_config_ops {
 	u64 (*get_features)(struct virtio_device *vdev);
 	int (*finalize_features)(struct virtio_device *vdev);
 	const char *(*bus_name)(struct virtio_device *vdev);
-	int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+	int (*set_vq_affinity)(struct virtqueue *vq,
+			       const struct cpumask *cpu_mask);
 	const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
 			int index);
 };
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
  *
  */
 static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
 {
 	struct virtio_device *vdev = vq->vdev;
 	if (vdev->config->set_vq_affinity)
-		return vdev->config->set_vq_affinity(vq, cpu);
+		return vdev->config->set_vq_affinity(vq, cpu_mask);
 	return 0;
 }
 
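For drivers, the visible change is that virtqueue_set_affinity() now takes a cpumask pointer (or NULL to clear the hint) instead of a CPU number. A minimal sketch of the calling convention, with a hypothetical helper name and the usual virtio/cpumask headers assumed:

/* Hypothetical driver-side helper, not part of this series: pins a vq to a
 * single CPU via cpumask_of(), or drops the hint when cpu is negative.
 */
static void example_pin_vq(struct virtqueue *vq, int cpu)
{
	if (cpu < 0)
		virtqueue_set_affinity(vq, NULL);            /* clear affinity hint */
	else
		virtqueue_set_affinity(vq, cpumask_of(cpu)); /* single-CPU mask     */
}

This mirrors what virtio_crypto does above with cpumask_of(cpu), while virtio_net now builds a multi-CPU mask per queue pair before calling the same function.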