@@ -1679,6 +1679,13 @@ static long vhost_net_set_owner(struct vhost_net *n)
return r;
}
+static void vhost_net_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ mutex_lock(&dev->mutex);
+ vhost_set_backend_features(dev, features);
+ mutex_unlock(&dev->mutex);
+}
+
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
@@ -1715,7 +1722,7 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
if (features & ~VHOST_NET_BACKEND_FEATURES)
return -EOPNOTSUPP;
- vhost_set_backend_features(&n->dev, features);
+ vhost_net_set_backend_features(&n->dev, features);
return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
@@ -344,6 +344,33 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
return 0;
}
+
+static long vhost_vdpa_get_backend_features(void __user *argp)
+{
+ u64 features = VHOST_VDPA_BACKEND_FEATURES;
+ u64 __user *featurep = argp;
+
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+
+ return 0;
+}
+static long vhost_vdpa_set_backend_features(struct vhost_vdpa *v, void __user *argp)
+{
+ u64 __user *featurep = argp;
+ u64 features;
+
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+
+ if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+
+ vhost_set_backend_features(&v->vdev, features);
+
+ return 0;
+}
+
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
void __user *argp)
{
@@ -353,8 +380,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
struct vdpa_callback cb;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
- u64 __user *featurep = argp;
- u64 features;
u32 idx;
long r;
@@ -381,18 +406,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
vq->last_avail_idx = vq_state.avail_index;
break;
- case VHOST_GET_BACKEND_FEATURES:
- features = VHOST_VDPA_BACKEND_FEATURES;
- if (copy_to_user(featurep, &features, sizeof(features)))
- return -EFAULT;
- return 0;
- case VHOST_SET_BACKEND_FEATURES:
- if (copy_from_user(&features, featurep, sizeof(features)))
- return -EFAULT;
- if (features & ~VHOST_VDPA_BACKEND_FEATURES)
- return -EOPNOTSUPP;
- vhost_set_backend_features(&v->vdev, features);
- return 0;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
@@ -476,6 +489,12 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
case VHOST_VDPA_SET_CONFIG_CALL:
r = vhost_vdpa_set_config_call(v, argp);
break;
+ case VHOST_SET_BACKEND_FEATURES:
+ r = vhost_vdpa_set_backend_features(v, argp);
+ break;
+ case VHOST_GET_BACKEND_FEATURES:
+ r = vhost_vdpa_get_backend_features(argp);
+ break;
default:
r = vhost_dev_ioctl(&v->vdev, cmd, argp);
if (r == -ENOIOCTLCMD)
@@ -2591,14 +2591,12 @@ void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
struct vhost_virtqueue *vq;
int i;
- mutex_lock(&dev->mutex);
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
mutex_lock(&vq->mutex);
vq->acked_backend_features = features;
mutex_unlock(&vq->mutex);
}
- mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);
This commit introduces vhost_vdpa_get/set_backend_features() to resolve two issues:
(1) In the vhost_vdpa VHOST_SET_BACKEND_FEATURES ioctl path, the current code
    tries to acquire the vhost dev mutex twice (it is first taken in
    vhost_vdpa_unlocked_ioctl), which can lead to a deadlock (sketched below
    the diffstat).
(2) VHOST_SET_BACKEND_FEATURES was handled in the vring ioctl path instead of
    the vdpa device ioctl path.

To resolve these issues, this commit:
(1) removes the mutex operations from vhost_set_backend_features();
(2) handles VHOST_GET/SET_BACKEND_FEATURES in the vdpa device ioctl;
(3) introduces a new helper, vhost_net_set_backend_features(), for vhost_net,
    which wraps vhost_set_backend_features() with the necessary mutex locking.

Signed-off-by: Zhu Lingshan <lingshan.zhu@intel.com>
---
 drivers/vhost/net.c   |  9 ++++++++-
 drivers/vhost/vdpa.c  | 47 +++++++++++++++++++++++++++++++++--------------
 drivers/vhost/vhost.c |  2 --
 3 files changed, 41 insertions(+), 17 deletions(-)
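
For reviewers, here is a minimal sketch of the pre-patch call chain behind issue (1).
The locking order in vhost_vdpa_unlocked_ioctl() is inferred from the description
above (the dev mutex being taken before the command is dispatched); the bodies are
abbreviated and error handling is elided, so this is an illustration rather than the
verbatim upstream code.

/*
 * Sketch only: abbreviated, not the verbatim upstream functions.
 * Before this patch, dispatching VHOST_SET_BACKEND_FEATURES from the
 * vring ioctl handler acquired dev->mutex twice.
 */
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	long r;

	mutex_lock(&d->mutex);			/* first acquisition */
	r = vhost_dev_ioctl(d, cmd, argp);
	if (r == -ENOIOCTLCMD)
		r = vhost_vdpa_vring_ioctl(v, cmd, argp);
	mutex_unlock(&d->mutex);

	return r;
}

/* The vring handler then called vhost_set_backend_features(), which did: */
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	mutex_lock(&dev->mutex);		/* second acquisition: deadlock */
	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_backend_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&dev->mutex);
}

With this patch, VHOST_GET/SET_BACKEND_FEATURES are dispatched from the switch in
vhost_vdpa_unlocked_ioctl() while (per the description above) the dev mutex is
already held, so vhost_set_backend_features() no longer takes it; vhost_net, whose
ioctl path does not hold dev->mutex at that point, keeps the locking through the
new vhost_net_set_backend_features() wrapper.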