@@ -310,6 +310,7 @@ struct adf_accel_dev {
u8 pf_compat_ver;
} vf;
};
+ struct mutex state_lock; /* protect state of the device */
bool is_vf;
u32 accel_id;
};
@@ -58,6 +58,9 @@ void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev);
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
+
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
@@ -223,6 +223,7 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
+ mutex_init(&accel_dev->state_lock);
unlock:
mutex_unlock(&table_lock);
return ret;
@@ -269,6 +270,7 @@ void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
}
}
unlock:
+ mutex_destroy(&accel_dev->state_lock);
list_del(&accel_dev->list);
mutex_unlock(&table_lock);
}
@@ -400,3 +400,67 @@ int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
return 0;
}
+
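+/*
+ * adf_dev_down() - take the given accel device down.
+ * Serialized against adf_dev_up() by state_lock. Returns -EINVAL if the
+ * device is already down. With @reconfig set, the device is taken down
+ * through adf_dev_shutdown_cache_cfg(); otherwise it is stopped and shut
+ * down via adf_dev_stop() and adf_dev_shutdown().
+ */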
+int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already down\n",
+ accel_dev->accel_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (reconfig) {
+ ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ goto out;
+ }
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_down);
+
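+/*
+ * adf_dev_up() - bring the given accel device up.
+ * Serialized against adf_dev_down() by state_lock. Returns -EALREADY if
+ * the device is already up. With @config set, the device is first
+ * configured through the hw_data dev_config() callback (if one is
+ * provided), then initialized and started via adf_dev_init() and
+ * adf_dev_start().
+ */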
+int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
+{
+ int ret = 0;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ mutex_lock(&accel_dev->state_lock);
+
+ if (adf_dev_started(accel_dev)) {
+ dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
+ accel_dev->accel_id);
+ ret = -EALREADY;
+ goto out;
+ }
+
+ if (config && GET_HW_DATA(accel_dev)->dev_config) {
+ ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = adf_dev_init(accel_dev);
+ if (unlikely(ret))
+ goto out;
+
+ ret = adf_dev_start(accel_dev);
+
+out:
+ mutex_unlock(&accel_dev->state_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_dev_up);
@@ -50,38 +50,21 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
switch (ret) {
case DEV_DOWN:
- if (!adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already down\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Stopping device qat_dev%d\n", accel_id);
- ret = adf_dev_shutdown_cache_cfg(accel_dev);
+ ret = adf_dev_down(accel_dev, true);
if (ret < 0)
return -EINVAL;
break;
case DEV_UP:
- if (adf_dev_started(accel_dev)) {
- dev_info(dev, "Device qat_dev%d already up\n",
- accel_id);
- return -EINVAL;
- }
-
dev_info(dev, "Starting device qat_dev%d\n", accel_id);
- ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
- if (!ret)
- ret = adf_dev_init(accel_dev);
- if (!ret)
- ret = adf_dev_start(accel_dev);
-
+ ret = adf_dev_up(accel_dev, true);
if (ret < 0) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
- adf_dev_shutdown_cache_cfg(accel_dev);
+ adf_dev_down(accel_dev, true);
return ret;
}
break;