@@ -916,45 +916,13 @@ static void nvme_class_init(ObjectClass *oc, void *data)
dc->vmsd = &nvme_vmstate;
}

-static void nvme_get_bootindex(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- NvmeCtrl *s = NVME(obj);
-
- visit_type_int32(v, &s->conf.bootindex, name, errp);
-}
-
-static void nvme_set_bootindex(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- NvmeCtrl *s = NVME(obj);
- int32_t boot_index;
- Error *local_err = NULL;
-
- visit_type_int32(v, &boot_index, name, &local_err);
- if (local_err) {
- goto out;
- }
- /* check whether bootindex is present in fw_boot_order list */
- check_boot_index(boot_index, &local_err);
- if (local_err) {
- goto out;
- }
- /* change bootindex to a new one */
- s->conf.bootindex = boot_index;
-
-out:
- if (local_err) {
- error_propagate(errp, local_err);
- }
-}
-
static void nvme_instance_init(Object *obj)
{
- object_property_add(obj, "bootindex", "int32",
- nvme_get_bootindex,
- nvme_set_bootindex, NULL, NULL, NULL);
- object_property_set_int(obj, -1, "bootindex", NULL);
+ NvmeCtrl *s = NVME(obj);
+
+ device_add_bootindex_property(obj, &s->conf.bootindex,
+ "bootindex", "/namespace@1,0",
+ DEVICE(obj), &error_abort);
}

static const TypeInfo nvme_info = {
Background on QEMU boot indices
-------------------------------

Normally, the "bootindex" property is configured for bootable devices
with:

  DEVICE_instance_init()
    device_add_bootindex_property(..., "bootindex", ...)
      object_property_add(..., device_get_bootindex,
                          device_set_bootindex, ...)

and when the bootindex is set on the QEMU command line, with

  -device DEVICE,...,bootindex=N

the setter that was configured above is invoked:

  device_set_bootindex()
    /* parse boot index */
    visit_type_int32()

    /* verify unicity */
    check_boot_index()

    /* store parsed boot index */
    ...

    /* insert device path to boot order */
    add_boot_device_path()

In the last step, add_boot_device_path() ensures that an OpenFirmware
device path will show up in the "bootorder" fw_cfg file, at a position
corresponding to the device's boot index. Thus guest firmware (SeaBIOS
and OVMF) can try to boot off the device with the right priority.

NVMe boot index
---------------

In QEMU commit 33739c712982,

  nvma: ide: add bootindex to qom property

the following generic setters / getters:

- device_set_bootindex()
- device_get_bootindex()

were open-coded for NVMe, under the names

- nvme_set_bootindex()
- nvme_get_bootindex()

In addition, nvme_instance_init() was added to configure the "bootindex"
property manually, designating the open-coded getter & setter, rather
than calling device_add_bootindex_property().

Crucially, nvme_set_bootindex() avoided the final add_boot_device_path()
call. This fact is spelled out in the message of commit 33739c712982,
and it was presumably the entire reason for all of the code duplication.

Now, Vladislav filed an RFE for OVMF
<https://github.com/tianocore/edk2/issues/48>; OVMF should boot off NVMe
devices. It is simple to build edk2's existing NvmExpressDxe driver into
OVMF, but the boot order matching logic in OVMF can only handle NVMe if
the "bootorder" fw_cfg file includes such devices.

Therefore this patch converts the NVMe device model to
device_set_bootindex() all the way.

Device paths
------------

device_set_bootindex() accepts an optional parameter called "suffix".
When present, it is expected to take the form of an OpenFirmware device
path node, and it gets appended as the last node to the otherwise
auto-generated OFW path.

For NVMe, the auto-generated part is

  /pci@i0cf8/pci8086,5845@6[,1]
  ^          ^            ^  ^
  |          |            PCI slot and (present when nonzero)
  |          |            function of the NVMe controller, both hex
  |          "driver name" component, built from PCI vendor & device IDs
  PCI root at system bus port, PIO

to which here we append the suffix

  /namespace@1,0
             ^ ^
             | big endian (MSB at lowest address) numeric interpretation
             | of the 64-bit IEEE Extended Unique Identifier, aka EUI-64,
             | hex
             32-bit NVMe namespace identifier, aka NSID, hex

resulting in the OFW device path

  /pci@i0cf8/pci8086,5845@6[,1]/namespace@1,0

The reason for including the NSID and the EUI-64 is that an NVMe device
can in theory produce several different namespaces (distinguished by
NSID). Additionally, each of those may (optionally) have an EUI-64
value.

For now, QEMU only provides namespace 1. Furthermore, QEMU doesn't even
represent the EUI-64 as a standalone field; it is embedded (and left
unused) inside the "NvmeIdNs.res30" array, at the last eight bytes.
(Which is fine, since EUI-64 can be left zero-filled if unsupported by
the device.)

Based on the above, we set the "unit address" part of the last
("namespace") node to the fixed value "1,0".
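As a purely illustrative aside (not part of this patch, which simply
hard-codes the string): the fixed unit address is what one would get by
formatting NSID 1 and an all-zero EUI-64 as hex. The sketch below, with
the made-up helper name nvme_ns_ofw_suffix, shows that relationship
using glib:

  /*
   * Illustration only; QEMU hard-codes "/namespace@1,0" because it
   * exposes a single namespace (NSID 1) and leaves the EUI-64 zero.
   * 'eui64' is assumed to already hold the big endian (MSB first)
   * numeric interpretation of the eight EUI-64 octets.
   */
  #include <glib.h>
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  static char *nvme_ns_ofw_suffix(uint32_t nsid, uint64_t eui64)
  {
      /* "namespace@<NSID hex>,<EUI-64 hex>" */
      return g_strdup_printf("/namespace@%" PRIx32 ",%" PRIx64,
                             nsid, eui64);
  }

  int main(void)
  {
      char *suffix = nvme_ns_ofw_suffix(1, 0);

      printf("%s\n", suffix);   /* prints "/namespace@1,0" */
      g_free(suffix);
      return 0;
  }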
OVMF will then map the above OFW device path to the following UEFI
device path fragment, for boot order processing:

  PciRoot(0x0)/Pci(0x6,0x1)/NVMe(0x1,00-00-00-00-00-00-00-00)
  ^                ^   ^    ^    ^   ^
  |                |   |    |    |   octets of the EUI-64 in address order
  |                |   |    |    NSID
  |                |   |    NVMe namespace messaging device path node
  |                PCI slot and function
  PCI root bridge

Cc: Keith Busch <keith.busch@intel.com> (supporter:nvme)
Cc: Kevin Wolf <kwolf@redhat.com> (supporter:Block layer core)
Cc: qemu-block@nongnu.org (open list:nvme)
Cc: Gonglei <arei.gonglei@huawei.com>
Cc: Vladislav Vovchenko <vladislav.vovchenko@sk.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Kevin O'Connor <kevin@koconnor.net>
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
---
 hw/block/nvme.c | 42 +++++-------------------------------------
 1 file changed, 5 insertions(+), 37 deletions(-)

-- 
1.8.3.1