Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-04 02:06:43 +07:00)
vhost: fix OOB in get_rx_bufs()
After batched used ring updating was introduced in commit e2b3b35eb9
("vhost_net: batch used ring update in rx"), we tend to batch heads in
vq->heads for more than one packet. But the quota passed to get_rx_bufs()
was not correctly limited, which can result in an OOB write in vq->heads:

        headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
                    vhost_len, &in, vq_log, &log,
                    likely(mergeable) ? UIO_MAXIOV : 1);

UIO_MAXIOV was still used, which is wrong since we may already have batched
used heads in vq->heads. This causes an OOB write if the next buffer needs
more than 960 (1024 (UIO_MAXIOV) - 64 (VHOST_NET_BATCH)) heads after we've
batched 64 (VHOST_NET_BATCH) heads:

=============================================================================
BUG kmalloc-8k (Tainted: G B): Redzone overwritten
-----------------------------------------------------------------------------

INFO: 0x00000000fd93b7a2-0x00000000f0713384. First byte 0xa9 instead of 0xcc
INFO: Allocated in alloc_pd+0x22/0x60 age=3933677 cpu=2 pid=2674
        kmem_cache_alloc_trace+0xbb/0x140
        alloc_pd+0x22/0x60
        gen8_ppgtt_create+0x11d/0x5f0
        i915_ppgtt_create+0x16/0x80
        i915_gem_create_context+0x248/0x390
        i915_gem_context_create_ioctl+0x4b/0xe0
        drm_ioctl_kernel+0xa5/0xf0
        drm_ioctl+0x2ed/0x3a0
        do_vfs_ioctl+0x9f/0x620
        ksys_ioctl+0x6b/0x80
        __x64_sys_ioctl+0x11/0x20
        do_syscall_64+0x43/0xf0
        entry_SYSCALL_64_after_hwframe+0x44/0xa9
INFO: Slab 0x00000000d13e87af objects=3 used=3 fp=0x (null) flags=0x200000000010201
INFO: Object 0x0000000003278802 @offset=17064 fp=0x00000000e2e6652b

Fix this by allocating UIO_MAXIOV + VHOST_NET_BATCH iovs for vhost-net.
This is done by setting the limit through vhost_dev_init(), so that
set_owner can allocate the iov arrays in a per-device manner.

This fixes CVE-2018-16880.

Fixes: e2b3b35eb9 ("vhost_net: batch used ring update in rx")
Acked-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent  bfe2599dd2
commit  b46a0bf78a
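To make the arithmetic in the message concrete, here is a small stand-alone C sketch (illustration only, not kernel code; the constants and the assumption that done_idx has reached VHOST_NET_BATCH are taken from the commit message above). It shows why a quota of UIO_MAXIOV can push get_rx_bufs() past the end of a 1024-entry vq->heads, and why UIO_MAXIOV + VHOST_NET_BATCH entries are sufficient:

/* Stand-alone illustration of the overflow arithmetic, not kernel code.
 * Constants as quoted in the commit message. */
#include <stdio.h>

#define UIO_MAXIOV      1024
#define VHOST_NET_BATCH 64

int main(void)
{
        int done_idx = VHOST_NET_BATCH; /* heads already batched, not yet flushed */
        int quota = UIO_MAXIOV;         /* quota handle_rx() passed to get_rx_bufs() */

        /* get_rx_bufs() may fill up to 'quota' entries starting at done_idx,
         * so the highest index it can touch is: */
        int last = done_idx + quota - 1;

        printf("highest index written: %d\n", last);
        printf("old sizing (%d entries): %s\n", UIO_MAXIOV,
               last >= UIO_MAXIOV ? "out of bounds" : "ok");
        printf("new sizing (%d entries): %s\n", UIO_MAXIOV + VHOST_NET_BATCH,
               last >= UIO_MAXIOV + VHOST_NET_BATCH ? "out of bounds" : "ok");
        return 0;
}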
drivers/vhost/net.c
@@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		n->vqs[i].rx_ring = NULL;
 		vhost_net_buf_init(&n->vqs[i].rxq);
 	}
-	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
+	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
+		       UIO_MAXIOV + VHOST_NET_BATCH);
 
 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
drivers/vhost/scsi.c
@@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
 
 	vhost_scsi_init_inflight(vs, NULL);
 
drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 		vq->indirect = kmalloc_array(UIO_MAXIOV,
 					     sizeof(*vq->indirect),
 					     GFP_KERNEL);
-		vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log),
+		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
 					GFP_KERNEL);
-		vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads),
+		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
 					  GFP_KERNEL);
 		if (!vq->indirect || !vq->log || !vq->heads)
 			goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 }
 
 void vhost_dev_init(struct vhost_dev *dev,
-		    struct vhost_virtqueue **vqs, int nvqs)
+		    struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
 {
 	struct vhost_virtqueue *vq;
 	int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->iotlb = NULL;
 	dev->mm = NULL;
 	dev->worker = NULL;
+	dev->iov_limit = iov_limit;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
 	INIT_LIST_HEAD(&dev->read_list);
drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
 	struct list_head read_list;
 	struct list_head pending_list;
 	wait_queue_head_t wait;
+	int iov_limit;
 };
 
-void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
+void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
+		    int nvqs, int iov_limit);
 long vhost_dev_set_owner(struct vhost_dev *dev);
 bool vhost_dev_has_owner(struct vhost_dev *dev);
 long vhost_dev_check_owner(struct vhost_dev *);
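The vhost.h change above means every vhost backend now passes an explicit per-device iov limit to vhost_dev_init(). As a usage sketch, assuming a hypothetical backend (my_backend, my_backend_open and MY_BACKEND_MAX_VQ are invented names, not kernel code), a caller of the new four-argument prototype looks roughly like this:

/* Hypothetical vhost backend, for illustration only -- the names
 * my_backend, my_backend_open and MY_BACKEND_MAX_VQ are not in the kernel. */
#include <linux/slab.h>
#include <linux/uio.h>
#include "vhost.h"

#define MY_BACKEND_MAX_VQ 2

struct my_backend {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_BACKEND_MAX_VQ];
};

static int my_backend_open(struct my_backend *b)
{
	struct vhost_virtqueue **vqs;
	int i;

	vqs = kmalloc_array(MY_BACKEND_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;

	for (i = 0; i < MY_BACKEND_MAX_VQ; i++)
		vqs[i] = &b->vqs[i];

	/* This backend never batches used heads beyond one buffer, so
	 * UIO_MAXIOV entries per virtqueue are enough, as in vhost-scsi and
	 * vhost-vsock above.  A backend that batches up to N heads would
	 * pass UIO_MAXIOV + N, as vhost-net does. */
	vhost_dev_init(&b->dev, vqs, MY_BACKEND_MAX_VQ, UIO_MAXIOV);

	return 0;
}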
drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
 	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
 	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
 
-	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));
+	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);