diff --git a/MAINTAINERS b/MAINTAINERS index 2899f4b1d933311198d18839b030e762ae24b007..d9e6853315adc7974bf41e62d6fb93b1a35d2b8a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22665,6 +22665,14 @@ L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/platform/ +VFIO QAT PCI DRIVER +M: Xin Zeng +M: Giovanni Cabiddu +L: kvm@vger.kernel.org +L: qat-linux@intel.com +S: Supported +F: drivers/vfio/pci/qat/ + VGA_SWITCHEROO R: Lukas Wunner S: Maintained diff --git a/arch/x86/configs/tencent.config b/arch/x86/configs/tencent.config index 49044e27a0381887f6f9d03349c1e0747c080434..2c21fd0d5014902c8c929a5a9773e3e2f03ec224 100644 --- a/arch/x86/configs/tencent.config +++ b/arch/x86/configs/tencent.config @@ -1421,6 +1421,7 @@ CONFIG_VFIO=m CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_PCI=m CONFIG_MLX5_VFIO_PCI=m +CONFIG_QAT_VFIO_PCI=m CONFIG_VIRT_DRIVERS=y CONFIG_VBOXGUEST=m CONFIG_EFI_SECRET=m diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 1102c47f8293d572bf0a65836e72665b818e86e2..78f0ea49254dbbd814d5b9614c1ab24fa1b45f8a 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -10,12 +10,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -296,7 +298,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } @@ -487,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 927506cf271d00d37b619d6594e8d6f5aeef5689..9fd7ec53b9f3d82f356a71d6e838da9b0cd65806 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -10,12 +10,14 @@ #include #include #include +#include #include #include #include #include "adf_gen4_ras.h" #include #include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -208,7 +210,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } @@ -454,6 +456,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->bank_state_save = adf_gen4_bank_state_save; + hw_data->bank_state_restore = adf_gen4_bank_state_restore; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; @@ -469,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); 
adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 9762f2bf7727f1ce5288b00bd8bac502d09935cf..d26564cebdec4a1cc88bcef4ff40296efcaf134e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -197,7 +197,9 @@ module_pci_driver(adf_driver); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_FIRMWARE(ADF_4XXX_FW); +MODULE_FIRMWARE(ADF_402XX_FW); MODULE_FIRMWARE(ADF_4XXX_MMP); +MODULE_FIRMWARE(ADF_402XX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index a882e0ea2279629dc19d55d444340202afd3aa17..201f9412c5823034a2b1a8b8cca47579c1856f41 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c3xxx_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index 84d9486e04de6bc9438d7384af1e67d07af34232..a512ca4efd3f9caef8a4cb581c28a54ad9946331 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 48cf3eb7c73499f01dd56de7192b9586a199d67b..6b5b0cf9c7c7464cbde9178dab336dd4dfd8e7d3 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c62x_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 751d7aa57fc7f04cd95c7ac42792f0286b76c4c5..4aaaaf921734689ed5c86c0fd8f4bba670205012 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 5915cde8a7aa4d72f45e5b233dd65dc588a912ac..6f9266edc9f17e3358d4a0a1b11bdf36d7cf8d5f 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -14,16 +14,20 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ adf_sysfs.o \ adf_sysfs_ras_counters.o \ + adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ + adf_gen4_vf_mig.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ + adf_mstate_mgr.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ @@ -52,6 +56,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o 
\ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 08658c3a01e9bcde41ecef655fbdab0e8291ac99..7830ecb1a1f1585bbd21b7b42004e9d91287c1fc 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_rl.h" #include "adf_telemetry.h" @@ -140,6 +141,40 @@ struct admin_info { u32 mailbox_offset; }; +struct ring_config { + u64 base; + u32 config; + u32 head; + u32 tail; + u32 reserved0; +}; + +struct bank_state { + u32 ringstat0; + u32 ringstat1; + u32 ringuostat; + u32 ringestat; + u32 ringnestat; + u32 ringnfstat; + u32 ringfstat; + u32 ringcstat0; + u32 ringcstat1; + u32 ringcstat2; + u32 ringcstat3; + u32 iaintflagen; + u32 iaintflagreg; + u32 iaintflagsrcsel0; + u32 iaintflagsrcsel1; + u32 iaintcolen; + u32 iaintcolctl; + u32 iaintflagandcolen; + u32 ringexpstat; + u32 ringexpintenable; + u32 ringsrvarben; + u32 reserved0; + struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK]; +}; + struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size); u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank, @@ -150,22 +185,49 @@ struct adf_hw_csr_ops { u32 ring); void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank); u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr); + u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr, + u32 bank, u32 value); + u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr, + u32 bank); void 
(*write_csr_int_flag_and_col)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*get_int_col_ctl_enable_mask)(void); }; struct adf_cfg_device_data; @@ -197,6 +259,20 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct qat_migdev_ops { + int (*init)(struct qat_mig_dev *mdev); + void (*cleanup)(struct qat_mig_dev *mdev); + void (*reset)(struct qat_mig_dev *mdev); + int (*open)(struct qat_mig_dev *mdev); + void (*close)(struct qat_mig_dev *mdev); + int (*suspend)(struct qat_mig_dev *mdev); + int (*resume)(struct qat_mig_dev *mdev); + int (*save_state)(struct qat_mig_dev *mdev); + int (*save_setup)(struct qat_mig_dev *mdev); + int (*load_state)(struct qat_mig_dev *mdev); + int (*load_setup)(struct qat_mig_dev *mdev, int size); +}; + struct adf_dev_err_mask { u32 cppagentcmdpar_mask; u32 parerr_ath_cph_mask; @@ -244,6 +320,10 @@ struct adf_hw_device_data { void (*enable_ints)(struct adf_accel_dev *accel_dev); void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr); + int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); + int (*bank_state_restore)(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); @@ -260,6 +340,7 @@ struct adf_hw_device_data { struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; struct adf_tl_hw_data tl_data; + struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -316,6 +397,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops) #define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev @@ -330,11 +412,17 @@ struct adf_fw_loader_data { struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ + struct mutex pfvf_mig_lock; /* protects PFVF state for migration */ struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; bool restarting; u8 vf_compat_ver; + /* + * Private area used for device migration. + * Memory allocation and free is managed by migration driver. 
+ */ + void *mig_priv; }; struct adf_dc_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 9da2278bd5b7dc594076478abf5387ed7e7ddbe0..04260f61d04294b24d9bf77b68789955de414f01 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -130,8 +130,7 @@ static void adf_device_reset_worker(struct work_struct *work) if (adf_dev_restart(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - if (reset_data->mode == ADF_DEV_RESET_ASYNC || - completion_done(&reset_data->compl)) + if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; @@ -147,16 +146,8 @@ static void adf_device_reset_worker(struct work_struct *work) adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* - * The dev is back alive. Notify the caller if in sync mode - * - * If device restart will take a more time than expected, - * the schedule_reset() function can timeout and exit. This can be - * detected by calling the completion_done() function. In this case - * the reset_data structure needs to be freed here. - */ - if (reset_data->mode == ADF_DEV_RESET_ASYNC || - completion_done(&reset_data->compl)) + /* The dev is back alive. Notify the caller if in sync mode */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC) kfree(reset_data); else complete(&reset_data->compl); @@ -191,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, if (!timeout) { dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); + cancel_work_sync(&reset_data->reset_work); ret = -EFAULT; - } else { - kfree(reset_data); } + kfree(reset_data); return ret; } return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 57328249c89e7a6f4ae7c7754bf2bbb218c6f651..3bec9e20bad0a3e3583e8cc818a7f94a4bad3aa7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *etr; + + etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)]; + + return etr->virt_addr; +} + static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c new file mode 100644 index 0000000000000000000000000000000000000000..650c9edd8a6650524a24d165050058f1625260c9 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen2_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, 
+ u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring, u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h new file mode 100644 index 0000000000000000000000000000000000000000..55058b0f9e52b1e57136f32ad7a4d09ce862b7ef --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN2_HW_CSR_DATA_H_ +#define ADF_GEN2_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 
0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)((value) & 0xFFFFFFFF); \ + u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ +} while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * (index)), value) + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index d1884547b5a142cab337a51dede4eab2a65b11b9..1f64bf49b221c2b2f43a6fcd542adaefae20ff57 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev) } 
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints); -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, - u32 ring, u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); - u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 6bd341061de414118ee11ced3e5470dbb751d9b2..708e9186127bbdf3db4c8e602f8b6af993a3d927 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -6,78 +6,9 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" -/* Transport 
access */ -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 #define ADF_GEN2_RX_RINGS_OFFSET 8 #define ADF_GEN2_TX_RINGS_MASK 0xFF -#define BUILD_RING_BASE_ADDR(addr, size) \ - (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - u32 l_base = 0, u_base = 0; \ - l_base = (u32)((value) & 0xFFFFFFFF); \ - u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG, value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) - /* AE to function map */ #define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) #define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) @@ -106,12 +37,6 @@ do { \ #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_CONFIG 
(BIT(31) | BIT(6) | BIT(0)) -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * (index)), value) /* Power gating */ #define ADF_POWERGATE_DC BIT(23) @@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self); void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, int num_a_regs, int num_b_regs); -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c new file mode 100644 index 0000000000000000000000000000000000000000..6609c248aaba5da8683ad4900163d5642d03c272 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen4_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_STAT(csr_base_addr, bank); +} + +static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_UO_STAT(csr_base_addr, bank); +} + +static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NE_STAT(csr_base_addr, bank); +} + +static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NF_STAT(csr_base_addr, bank); +} + +static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_F_STAT(csr_base_addr, bank); +} + +static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_C_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_INT_EN(csr_base_addr, bank); +} + +static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + 
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_BASE(csr_base_addr, bank, ring); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_EN(csr_base_addr, bank); +} + +static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG(csr_base_addr, bank); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_EN(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_CTL(csr_base_addr, bank); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +static u32 get_int_col_ctl_enable_mask(void) +{ + return ADF_RING_CSR_INT_COL_CTL_ENABLE; +} + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_stat = read_csr_stat; + csr_ops->read_csr_uo_stat = read_csr_uo_stat; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->read_csr_ne_stat = read_csr_ne_stat; + csr_ops->read_csr_nf_stat = read_csr_nf_stat; + csr_ops->read_csr_f_stat = read_csr_f_stat; + csr_ops->read_csr_c_stat = read_csr_c_stat; + csr_ops->read_csr_exp_stat = read_csr_exp_stat; + csr_ops->read_csr_exp_int_en = read_csr_exp_int_en; + csr_ops->write_csr_exp_int_en = write_csr_exp_int_en; + csr_ops->read_csr_ring_config = read_csr_ring_config; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->read_csr_ring_base = 
read_csr_ring_base; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->read_csr_int_en = read_csr_int_en; + csr_ops->write_csr_int_en = write_csr_int_en; + csr_ops->read_csr_int_flag = read_csr_int_flag; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->read_csr_int_srcsel = read_csr_int_srcsel; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val; + csr_ops->read_csr_int_col_en = read_csr_int_col_en; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; + csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h new file mode 100644 index 0000000000000000000000000000000000000000..6f33e7c87c2c90e81a2bc5d1dd6549b1ce5ff27d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_HW_CSR_DATA_H_ +#define ADF_GEN4_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL +#define ADF_RING_CSR_RING_CONFIG 0x1000 +#define ADF_RING_CSR_RING_LBASE 0x1040 +#define ADF_RING_CSR_RING_UBASE 0x1080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_STAT 0x140 +#define ADF_RING_CSR_UO_STAT 0x148 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_NE_STAT 0x150 +#define ADF_RING_CSR_NF_STAT 0x154 +#define ADF_RING_CSR_F_STAT 0x158 +#define ADF_RING_CSR_C_STAT 0x15C +#define ADF_RING_CSR_INT_FLAG_EN 0x16C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_EXP_STAT 0x188 +#define ADF_RING_CSR_EXP_INT_EN 0x18C +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_CSR_ADDR_OFFSET 0x100000 +#define ADF_RING_BUNDLE_SIZE 0x2000 +#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_STAT) +#define READ_CSR_UO_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) 
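+/* Read-only ring status accessors (NE/NF/F/C/EXP below) feed the live migration bank state save flow */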
+#define READ_CSR_NE_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT) +#define READ_CSR_NF_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT) +#define READ_CSR_F_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT) +#define READ_CSR_C_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT) +#define READ_CSR_EXP_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT) +#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN) +#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_EXP_INT_EN, value) +#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2)) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + void __iomem *_csr_base_addr = csr_base_addr; \ + u32 _bank = bank; \ + u32 _ring = ring; \ + dma_addr_t _value = value; \ + u32 l_base = 0, u_base = 0; \ + l_base = lower_32_bits(_value); \ + u_base = upper_32_bits(_value); \ + ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ + ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ +} while (0) + +static inline u64 read_base(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + u32 l_base, u_base; + + /* + * Use special IO wrapper for ring base as LBASE and UBASE are + * not physically contiguous + */ + l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_LBASE + (ring << 2)); + u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_UBASE + (ring << 2)); + + return (u64)u_base << 32 | (u64)l_base; +} + +#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \ + read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring)) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define READ_CSR_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN) +#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + 
\ + ADF_RING_CSR_INT_FLAG_EN, (value)) +#define READ_CSR_INT_FLAG(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG, (value)) +#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) +#define WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, (value)) +#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_EN, (value)) +#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, (value)) + +#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN) +#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN, (value)) + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index d28e1921940a799b4bd145d82745ac957564cea3..41a0979e68c1774dc7a5ce4aafe915398fc05de9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include +#include #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" @@ -8,103 +9,6 @@ #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 
ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); - u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { return ADF_GEN4_ACCELERATORS_MASK; @@ -321,8 +225,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number) int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data); - void __iomem *csr; + void __iomem *csr = adf_get_etr_base(accel_dev); int ret; if (bank_number >= hw_data->num_banks) @@ -331,7 +234,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); - csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr; ret = reset_ring_pair(csr, bank_number); if (ret) dev_err(&GET_DEV(accel_dev), @@ -489,3 +391,281 @@ u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) return ring_to_svc_map; } 
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); + +/* + * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer + * @accel_dev: Pointer to the device structure + * @bank_idx: Offset to the bank within this device + * @timeout_ms: Timeout in milliseconds for the operation + * + * This function tries to quiesce the coalesced interrupt timer of a bank if + * it has been enabled and triggered. + * + * Returns 0 on success, error code otherwise + * + */ +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_misc = adf_get_pmisc_base(accel_dev); + void __iomem *csr_etr = adf_get_etr_base(accel_dev); + u32 int_col_ctl, int_col_mask, int_col_en; + u32 e_stat, intsrc; + u64 wait_us; + int ret; + + if (timeout_ms < 0) + return -EINVAL; + + int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx); + int_col_mask = csr_ops->get_int_col_ctl_enable_mask(); + if (!(int_col_ctl & int_col_mask)) + return 0; + + int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx); + int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX); + + e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx); + if (!(~e_stat & int_col_en)) + return 0; + + wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC; + do_div(wait_us, hw_data->clock_frequency); + wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC); + dev_dbg(&GET_DEV(accel_dev), + "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n", + bank_idx, wait_us, timeout_ms, e_stat, int_col_en); + + ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc, + ADF_COALESCED_POLL_DELAY_US, wait_us, true, + csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx)); + if (ret) + dev_warn(&GET_DEV(accel_dev), + "coalesced timer for bank %d expired (%llu us)\n", + bank_idx, wait_us); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer); + +static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us) +{ + u32 status; + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_DRAIN); + + return read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, timeout_us, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); +} + +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), + ADF_WQM_CSR_RPRESETSTS_STATUS); +} + +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number); + + ret = drain_bank(csr, bank_number, timeout_us); + if (ret) + dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n"); + + return ret; +} + +static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings) +{ + u32 i; + + state->ringstat0 = ops->read_csr_stat(base, bank); + state->ringuostat = ops->read_csr_uo_stat(base, bank); + state->ringestat = ops->read_csr_e_stat(base, bank); + state->ringnestat = ops->read_csr_ne_stat(base, bank); + state->ringnfstat = ops->read_csr_nf_stat(base, bank); + state->ringfstat = 
ops->read_csr_f_stat(base, bank); + state->ringcstat0 = ops->read_csr_c_stat(base, bank); + state->iaintflagen = ops->read_csr_int_en(base, bank); + state->iaintflagreg = ops->read_csr_int_flag(base, bank); + state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank); + state->iaintcolen = ops->read_csr_int_col_en(base, bank); + state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank); + state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank); + state->ringexpstat = ops->read_csr_exp_stat(base, bank); + state->ringexpintenable = ops->read_csr_exp_int_en(base, bank); + state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank); + + for (i = 0; i < num_rings; i++) { + state->rings[i].head = ops->read_csr_ring_head(base, bank, i); + state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i); + state->rings[i].config = ops->read_csr_ring_config(base, bank, i); + state->rings[i].base = ops->read_csr_ring_base(base, bank, i); + } +} + +#define CHECK_STAT(op, expect_val, name, args...) \ +({ \ + u32 __expect_val = (expect_val); \ + u32 actual_val = op(args); \ + (__expect_val == actual_val) ? 0 : \ + (pr_err("QAT: Failed to restore %s register. Expected 0x%x, actual 0x%x\n", \ + name, __expect_val, actual_val), -EINVAL); \ +}) + +static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings, + int tx_rx_gap) +{ + u32 val, tmp_val, i; + int ret; + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_base(base, bank, i, state->rings[i].base); + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_config(base, bank, i, state->rings[i].config); + + for (i = 0; i < num_rings / 2; i++) { + int tx = i * (tx_rx_gap + 1); + int rx = tx + tx_rx_gap; + + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail); + + /* + * The TX ring head needs to be updated again to make sure that + * the HW will not consider the ring as full when it is empty + * and the correct state flags are set to match the recovered state. + */ + if (state->ringestat & BIT(tx)) { + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK; + ops->write_csr_int_srcsel_w_val(base, bank, val); + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + } + + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + /* + * The RX ring tail needs to be updated again to make sure that + * the HW will not consider the ring as empty when it is full + * and the correct state flags are set to match the recovered state.
+ */ + if (state->ringfstat & BIT(rx)) + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + } + + ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen); + ops->write_csr_int_en(base, bank, state->iaintflagen); + ops->write_csr_int_col_en(base, bank, state->iaintcolen); + ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0); + ops->write_csr_exp_int_en(base, bank, state->ringexpintenable); + ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl); + ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben); + + /* Check that all ring statuses match the saved state. */ + ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat", + base, bank); + if (ret) + return ret; + + tmp_val = ops->read_csr_exp_stat(base, bank); + val = state->ringexpstat; + if (tmp_val && !val) { + pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val); + return -EINVAL; + } + + return 0; +} + +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number); + + bank_state_save(csr_ops, csr_base, bank_number, state, + hw_data->num_rings_per_bank); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save); + +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + int ret; + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number); + + ret = bank_state_restore(csr_ops, csr_base, bank_number, state, + hw_data->num_rings_per_bank, hw_data->tx_rx_gap); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Unable to restore state of bank %d\n", bank_number); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index c6e80df5a85a337c6fb6a3fa383fec3ac849c748..8b10926cedbac2d507ce3fd3f0f16dcc8972bd45 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ /* Copyright(c) 2020 Intel Corporation */ -#ifndef ADF_GEN4_HW_CSR_DATA_H_ -#define ADF_GEN4_HW_CSR_DATA_H_ +#ifndef ADF_GEN4_HW_DATA_H_ +#define ADF_GEN4_HW_DATA_H_ #include @@ -54,95 +54,6 @@ #define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 #define ADF_GEN4_MAILBOX_BASE_OFFSET 
0x600970 -/* Transport access */ -#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL -#define ADF_RING_CSR_RING_CONFIG 0x1000 -#define ADF_RING_CSR_RING_LBASE 0x1040 -#define ADF_RING_CSR_RING_UBASE 0x1080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_ADDR_OFFSET 0x100000 -#define ADF_RING_BUNDLE_SIZE 0x2000 - -#define BUILD_RING_BASE_ADDR(addr, size) \ - ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - void __iomem *_csr_base_addr = csr_base_addr; \ - u32 _bank = bank; \ - u32 _ring = ring; \ - dma_addr_t _value = value; \ - u32 l_base = 0, u_base = 0; \ - l_base = lower_32_bits(_value); \ - u_base = upper_32_bits(_value); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG, (value)) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_EN, (value)) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, 
(value)) - -/* Arbiter configuration */ -#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_SRV_ARB_EN, (value)) - /* Default ring mapping */ #define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \ @@ -166,10 +77,20 @@ do { \ #define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_RPRESET_POLL_DELAY_US 20 #define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) +/* Ring interrupt */ +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) +#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 +#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) +#define ADF_COALESCED_POLL_DELAY_US 1000 +#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12)) +#define ADF_WQM_CSR_RP_IDX_RX 1 + /* Error source registers */ #define ADF_GEN4_ERRSOU0 (0x41A200) #define ADF_GEN4_ERRSOU1 (0x41A204) @@ -197,6 +118,19 @@ do { \ /* Arbiter threads mask with error value */ #define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) +/* PF2VM communication channel */ +#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20) +#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20) +#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20) +#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20) +#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20) +#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20) + +struct adf_gen4_vfmig { + struct adf_mstate_mgr *mstate_mgr; + bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF]; +}; + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); enum icp_qat_gen4_slice_mask { @@ -230,11 +164,20 @@ u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); int adf_gen4_init_device(struct adf_accel_dev *accel_dev); -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms); +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us); +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number); +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c index 8e8efe93f3ee5c99ea6590e4fd9037090d55d8ae..21474d402d09dee26eda833d1d7db2bf853c1e47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c @@ -6,12 
+6,10 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pfvf.h" +#include "adf_gen4_hw_data.h" #include "adf_pfvf_pf_proto.h" #include "adf_pfvf_utils.h" -#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20)) -#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20)) - /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 @@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = { static u32 adf_gen4_pf_get_pf2vf_offset(u32 i) { - return ADF_4XXX_PF2VM_OFFSET(i); + return ADF_GEN4_PF2VM_OFFSET(i); } static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) { - return ADF_4XXX_VM2PF_OFFSET(i); + return ADF_GEN4_VM2PF_OFFSET(i); } static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c index 7fc7a77f6aed93e3d56efea23d94ae5a3b5a995f..c7ad8cf07863b16e05ab727f6f43b05feb69c106 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) tl_data->sl_exec_counters = sl_exec_counters; tl_data->rp_counters = rp_counters; tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); + tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE; } EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c new file mode 100644 index 0000000000000000000000000000000000000000..a62eb5e8dbe6a03f8bab55eaf662f21000ad38a1 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_pfvf_utils.h" +#include "adf_mstate_mgr.h" +#include "adf_gen4_vf_mig.h" + +#define ADF_GEN4_VF_MSTATE_SIZE 4096 +#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000 + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev); +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len); + +static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev) +{ + u8 *state; + + state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL); + if (!state) + return -ENOMEM; + + mdev->state = state; + mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev) +{ + kfree(mdev->state); + mdev->state = NULL; +} + +static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev) +{ + mdev->setup_size = 0; + mdev->remote_setup_size = 0; +} + +static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + + vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL); + if (!vfmig) + return -ENOMEM; + + vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size); + if (!vfmig->mstate_mgr) { + kfree(vfmig); + return -ENOMEM; + } + vf_info->mig_priv = vfmig; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void 
adf_gen4_vfmig_close_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + if (vf_info->mig_priv) { + vfmig = vf_info->mig_priv; + adf_mstate_mgr_destroy(vfmig->mstate_mgr); + kfree(vfmig); + vf_info->mig_priv = NULL; + } +} + +static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int ret, i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + /* Stop all inflight jobs */ + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr, + ADF_RPRESET_POLL_TIMEOUT_US); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to drain bank %d for vf_nr %d\n", i, + vf_nr); + return ret; + } + vf_mig->bank_stopped[i] = true; + + adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr, + ADF_COALESCED_POLL_TIMEOUT_US); + } + + return 0; +} + +static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + if (vf_mig->bank_stopped[i]) { + adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr); + vf_mig->bank_stopped[i] = false; + } + } + + return 0; +} + +struct adf_vf_bank_info { + struct adf_accel_dev *accel_dev; + u32 vf_nr; + u32 bank_nr; +}; + +struct mig_user_sla { + enum adf_base_services srv; + u64 rp_mask; + u32 cir; + u32 pir; +}; + +static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf, + u32 src_size, void *opaque) +{ + struct adf_mstate_vreginfo _sinfo = { src_buf, src_size }; + struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque; + u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla); + u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla); + struct mig_user_sla *src_slas = sinfo->addr; + struct mig_user_sla *dst_slas = dinfo->addr; + int i, j; + + for (i = 0; i < src_sla_cnt; i++) { + for (j = 0; j < dst_sla_cnt; j++) { + if (src_slas[i].srv != dst_slas[j].srv || + src_slas[i].rp_mask != dst_slas[j].rp_mask) + continue; + + if (src_slas[i].cir > dst_slas[j].cir || + src_slas[i].pir > dst_slas[j].pir) { + pr_err("QAT: DST VF rate limiting mismatch.\n"); + return -EINVAL; + } + break; + } + + if (j == dst_sla_cnt) { + pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n", + src_slas[i].srv, src_slas[i].rp_mask); + return -EINVAL; + } + } + + return 0; +} + +static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz) +{ + if (src_sz > max_sz || dst_sz > max_sz) + return -EINVAL; + else + return 0; +} + +static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr, + u8 *src_buf, u32 src_sz, void *opaque) +{ + struct adf_mstate_vreginfo *info = opaque; + u8 compat = 0; + u8 *pcompat; + + if (src_sz != info->size) { + pr_debug("QAT: State mismatch (compat version 
size), current %u, expected %u\n", + src_sz, info->size); + return -EINVAL; + } + + memcpy(info->addr, src_buf, info->size); + pcompat = info->addr; + if (*pcompat == 0) { + pr_warn("QAT: Unable to determine the version of VF\n"); + return 0; + } + + compat = adf_vf_compat_checker(*pcompat); + if (compat == ADF_PF2VF_VF_INCOMPATIBLE) { + pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + return -EINVAL; + } + + if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN) + pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + + return 0; +} + +/* + * adf_mstate_capmask_compare() - compare QAT device capability mask + * @sinfo: Pointer to source capability info + * @dinfo: Pointer to target capability info + * + * This function compares the capability mask between source VF and target VF + * + * Returns: 0 if target capability mask is identical to source capability mask, + * 1 if target mask can represent all the capabilities represented by source mask, + * -1 if target mask can't represent all the capabilities represented by source + * mask. + */ +static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo, + struct adf_mstate_vreginfo *dinfo) +{ + u64 src = 0, dst = 0; + + if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) { + pr_debug("QAT: Unexpected capability size %u %u %zu\n", + sinfo->size, dinfo->size, sizeof(u64)); + return -1; + } + + memcpy(&src, sinfo->addr, sinfo->size); + memcpy(&dst, dinfo->addr, dinfo->size); + + pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst); + + if (src == dst) + return 0; + + if ((src | dst) == dst) + return 1; + + return -1; +} + +static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) >= 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) == 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo *info = opa; + + if (size != info->size) { + pr_debug("QAT: Unexpected cap size %u %u\n", size, info->size); + return -EINVAL; + } + memcpy(info->addr, buf, info->size); + + return 0; +} + +static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr, + struct mig_user_sla *pmig_slas) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + u64 rp_mask, rp_index; + u32 max_num_sla; + u32 sla_cnt = 0; + int i, j; + + if (!accel_dev->rate_limiting) + return 0; + + rp_index = vf_nr * hw_data->num_banks_per_vf; + max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr); + + for (i = 0; i < max_num_sla; i++) { + if (!sla_type_arr[i]) + continue; + + rp_mask = 0; + for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++) + rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]); + + if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) { + pmig_slas->rp_mask = rp_mask; + pmig_slas->cir = sla_type_arr[i]->cir; + pmig_slas->pir = sla_type_arr[i]->pir; + pmig_slas->srv = sla_type_arr[i]->srv; + pmig_slas++; + sla_cnt++; 
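+ /* + * Note: the GENMASK_ULL(rp_index + 3, rp_index) overlap check above + * assumes four ring pairs per VF (ADF_GEN4_NUM_BANKS_PER_VF), i.e. an + * SLA is saved as soon as any of its ring pairs falls inside the ring + * pair window owned by this VF. + */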
+ } + } + + return sla_cnt; +} + +static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr, + u8 *state, u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr}; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to lookup sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_load_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to lookup sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 ofs; + } misc_states[] = { + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i 
< ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = &regv; + info.size = sizeof(regv); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + misc_states[i].id, + adf_mstate_set_vreg, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to load sec %s\n", misc_states[i].id); + return -EINVAL; + } + ADF_CSR_WR(csr, misc_states[i].ofs, regv); + } + + return 0; +} + +static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + u32 dst_sla_cnt; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == dst_slas) { + dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas); + gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + gen_states[i].id, + gen_states[i].action, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id, + setups[i].action, &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + 
setups[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state, + u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr; + pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + + ret = hw_data->bank_state_save(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return sizeof(struct bank_state); +} + +static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_vf_bank_info vf_bank_info; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + + subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + vf_bank_info.accel_dev = accel_dev; + vf_bank_info.vf_nr = vf_nr; + vf_bank_info.bank_nr = bank_nr; + l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_save_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 offset; + } misc_states[] = { + {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + 
{ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + ktime_t time_exp; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US); + while (!mutex_trylock(&vf_info->pfvf_mig_lock)) { + if (ktime_after(ktime_get(), time_exp)) { + dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n"); + return -ETIMEDOUT; + } + usleep_range(500, 1000); + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = &regv; + info.size = sizeof(regv); + regv = ADF_CSR_RD(csr, misc_states[i].offset); + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + misc_states[i].id, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + misc_states[i].id); + mutex_unlock(&vf_info->pfvf_mig_lock); + return -EINVAL; + } + } + + mutex_unlock(&vf_info->pfvf_mig_lock); + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + u32 src_sla_cnt; + struct { + char *id; + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, {src_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == src_slas) { + src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas); + gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + gen_states[i].id, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct { + char *id; + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, + 
{&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id, + &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + setups[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_save_setup(mdev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save setup for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size, + mdev->state_size - mdev->setup_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save generic state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_preamble_update(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, + mdev->state + mdev->remote_setup_size, + mdev->state_size - mdev->remote_setup_size, + NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load general state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if 
(mdev->setup_size) + return 0; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id); + if (ret) + return ret; + + adf_mstate_preamble_update(vfmig->mstate_mgr); + mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + u32 setup_size; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->remote_setup_size) + return 0; + + if (len < sizeof(struct adf_mstate_preh)) + return -EAGAIN; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr); + if (setup_size > mdev->state_size) + return -EINVAL; + + if (len < setup_size) + return -EAGAIN; + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state, + setup_size, NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n", + vf_nr); + return ret; + } + + mdev->remote_setup_size = setup_size; + + ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load config for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops) +{ + vfmig_ops->init = adf_gen4_vfmig_init_device; + vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device; + vfmig_ops->reset = adf_gen4_vfmig_reset_device; + vfmig_ops->open = adf_gen4_vfmig_open_device; + vfmig_ops->close = adf_gen4_vfmig_close_device; + vfmig_ops->suspend = adf_gen4_vfmig_suspend_device; + vfmig_ops->resume = adf_gen4_vfmig_resume_device; + vfmig_ops->save_state = adf_gen4_vfmig_save_state; + vfmig_ops->load_state = adf_gen4_vfmig_load_state; + vfmig_ops->load_setup = adf_gen4_vfmig_load_setup; + vfmig_ops->save_setup = adf_gen4_vfmig_save_setup; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h new file mode 100644 index 0000000000000000000000000000000000000000..72216d078ee1f62153d103bcc3562177f10b62f2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_VF_MIG_H_ +#define ADF_GEN4_VF_MIG_H_ + +#include "adf_accel_devices.h" + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..41cc763a74aa25ad44da62d912e472d44d1f0507 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include "adf_mstate_mgr.h" + +#define ADF_MSTATE_MAGIC 0xADF5CAEA +#define ADF_MSTATE_VERSION 0x1 + +struct adf_mstate_sect_h { + u8 id[ADF_MSTATE_ID_LEN]; + u32 size; + u32 sub_sects; + u8 state[]; +}; + +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr) +{ + return mgr->state - mgr->buf; +} + +static inline u32 
adf_mstate_avail_room(struct adf_mstate_mgr *mgr) +{ + return mgr->buf + mgr->size - mgr->state; +} + +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size) +{ + mgr->buf = buf; + mgr->state = buf; + mgr->size = size; + mgr->n_sects = 0; +} + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size) +{ + struct adf_mstate_mgr *mgr; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + adf_mstate_mgr_init(mgr, buf, size); + + return mgr; +} + +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr) +{ + kfree(mgr); +} + +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr) +{ + adf_mstate_mgr_init(mgr, p_mgr->state, + p_mgr->size - adf_mstate_state_size(p_mgr)); +} + +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect) +{ + adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size); + mgr->n_sects = p_sect->sub_sects; +} + +static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble) +{ + preamble->magic = ADF_MSTATE_MAGIC; + preamble->version = ADF_MSTATE_VERSION; + preamble->preh_len = sizeof(*preamble); + preamble->size = 0; + preamble->n_sects = 0; +} + +/* default preamble checker */ +static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble, + void *opaque) +{ + struct adf_mstate_mgr *mgr = opaque; + + if (preamble->magic != ADF_MSTATE_MAGIC || + preamble->version > ADF_MSTATE_VERSION || + preamble->preh_len > mgr->size) { + pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n", + preamble->magic, preamble->version, preamble->preh_len, + mgr->size); + return -EINVAL; + } + + return 0; +} + +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf; + + if (adf_mstate_avail_room(mgr) < sizeof(*pre)) { + pr_err("QAT: LM - Not enough space for preamble\n"); + return NULL; + } + + adf_mstate_preamble_init(pre); + mgr->state += pre->preh_len; + + return pre; +} + +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preamble = (struct adf_mstate_preh *)mgr->buf; + + preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len; + preamble->n_sects = mgr->n_sects; + + return 0; +} + +static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect, + const char *prefix) +{ + pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id); + print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect, + sizeof(*sect), true); + print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state, + sect->size, true); +} + +static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *sect, + u32 size, + u32 n_subsects) +{ + sect->size += size; + sect->sub_sects += n_subsects; + mgr->n_sects++; + mgr->state += sect->size; + + adf_mstate_dump_sect(sect, "Add"); +} + +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect) +{ + __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr), + curr_mgr->n_sects); +} + +static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr, + const char *id) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state); + + if (adf_mstate_avail_room(mgr) < sizeof(*sect)) { + pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id); + return NULL; + } + + strscpy(sect->id, 
id, sizeof(sect->id)); + sect->size = 0; + sect->sub_sects = 0; + mgr->state += sizeof(*sect); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info) +{ + struct adf_mstate_sect_h *sect; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (adf_mstate_avail_room(mgr) < info->size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, info->size); + return NULL; + } + + memcpy(sect->state, info->addr, info->size); + __adf_mstate_sect_update(mgr, sect, info->size, 0); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque) +{ + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *sect; + int avail_room, size; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (!populate) + return sect; + + avail_room = adf_mstate_avail_room(mgr); + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr); + + size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque); + if (size < 0) + return NULL; + + size += adf_mstate_state_size(&sub_sects_mgr); + if (avail_room < size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, size); + return NULL; + } + __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects); + + return sect; +} + +static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_sect_h *sect = start; + u64 end; + int i; + + end = (uintptr_t)mgr->buf + mgr->size; + for (i = 0; i < mgr->n_sects; i++) { + uintptr_t s_start = (uintptr_t)sect->state; + uintptr_t s_end = s_start + sect->size; + + if (s_end < s_start || s_end > end) { + pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n", + i, sect->size, mgr->size, mgr->n_sects); + return -EINVAL; + } + sect = (struct adf_mstate_sect_h *)s_end; + } + + pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n", + start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start), + mgr->size, mgr->n_sects); + + return 0; +} + +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf; + + return preh->preh_len + preh->size; +} + +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size, + adf_mstate_preamble_checker pre_checker, + void *opaque) +{ + struct adf_mstate_preh *pre; + int ret; + + adf_mstate_mgr_init(mgr, buf, size); + pre = (struct adf_mstate_preh *)(mgr->buf); + + pr_debug("QAT: LM - Dump state preambles\n"); + print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0); + + if (pre_checker) + ret = (*pre_checker)(pre, opaque); + else + ret = adf_mstate_preamble_def_checker(pre, mgr); + if (ret) + return ret; + + mgr->state = mgr->buf + pre->preh_len; + mgr->n_sects = pre->n_sects; + + return adf_mstate_sect_validate(mgr); +} + +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_mgr sub_sects_mgr; + int i, ret; + + for (i = 0; i < mgr->n_sects; i++) { + if (!strncmp(sect->id, id, sizeof(sect->id))) + 
goto found; + + sect = (struct adf_mstate_sect_h *)(sect->state + sect->size); + } + + return NULL; + +found: + adf_mstate_dump_sect(sect, "Found"); + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect); + if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr)) + return NULL; + + if (!action) + return sect; + + ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque); + if (ret) + return NULL; + + return sect; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h new file mode 100644 index 0000000000000000000000000000000000000000..81d263a596c56f571f6923c98d1a3dc03489e69e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ + +#ifndef ADF_MSTATE_MGR_H +#define ADF_MSTATE_MGR_H + +#define ADF_MSTATE_ID_LEN 8 + +#define ADF_MSTATE_ETRB_IDS "ETRBAR" +#define ADF_MSTATE_MISCB_IDS "MISCBAR" +#define ADF_MSTATE_EXTB_IDS "EXTBAR" +#define ADF_MSTATE_GEN_IDS "GENER" +#define ADF_MSTATE_CONFIG_IDS "CONFIG" +#define ADF_MSTATE_SECTION_NUM 5 + +#define ADF_MSTATE_BANK_IDX_IDS "bnk" + +#define ADF_MSTATE_ETR_REGS_IDS "mregs" +#define ADF_MSTATE_VINTSRC_IDS "visrc" +#define ADF_MSTATE_VINTMSK_IDS "vimsk" +#define ADF_MSTATE_SLA_IDS "sla" +#define ADF_MSTATE_IOV_INIT_IDS "iovinit" +#define ADF_MSTATE_COMPAT_VER_IDS "compver" +#define ADF_MSTATE_GEN_CAP_IDS "gencap" +#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap" +#define ADF_MSTATE_GEN_EXTDC_IDS "extdc" +#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv" +#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv" +#define ADF_MSTATE_VM2PF_IDS "vm2pf" +#define ADF_MSTATE_PF2VM_IDS "pf2vm" + +struct adf_mstate_mgr { + u8 *buf; + u8 *state; + u32 size; + u32 n_sects; +}; + +struct adf_mstate_preh { + u32 magic; + u32 version; + u16 preh_len; + u16 n_sects; + u32 size; +}; + +struct adf_mstate_vreginfo { + void *addr; + u32 size; +}; + +struct adf_mstate_sect_h; + +typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa); +typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa); +typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, + void *opa); + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size); +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr); +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size); +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr); +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect); +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, + u8 *buf, u32 size, + adf_mstate_preamble_checker checker, + void *opaque); +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr); +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr); +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect); +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info); +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque); +struct adf_mstate_sect_h 
*adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque); +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 9ab93fbfefde9408826c44d59eae5c01411179f6..b9b5e744a3f16356d1f8865f0eafba3dd07e7ab9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n", vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION); - if (vf_compat_ver == 0) - compat = ADF_PF2VF_VF_INCOMPATIBLE; - else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) - compat = ADF_PF2VF_VF_COMPATIBLE; - else - compat = ADF_PF2VF_VF_COMPAT_UNKNOWN; - + compat = adf_vf_compat_checker(vf_compat_ver); vf_info->vf_compat_ver = vf_compat_ver; resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h index 2be048e2287b7b7f80cff346c38f926e044d5d21..1a044297d8733829ac48d867a9fec1e67bdb61e8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h @@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg, const struct pfvf_csr_format *fmt); +static inline u8 adf_vf_compat_checker(u8 vf_compat_ver) +{ + if (vf_compat_ver == 0) + return ADF_PF2VF_VF_INCOMPATIBLE; + + if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) + return ADF_PF2VF_VF_COMPATIBLE; + + return ADF_PF2VF_VF_COMPAT_UNKNOWN; +} + #endif /* ADF_PFVF_UTILS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index d4f2db3c53d8c0c09f636b66281ab4d28f87549f..346ef8bee99d9f061d7b67ee752e6c88099a4d4e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s } /** - * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array * @rl_data: pointer to ratelimiting data * @type: SLA type * @sla_arr: pointer to variable where requested pointer will be stored * * Return: Max number of elements allowed for the returned array */ -static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, struct rl_sla ***sla_arr) { switch (type) { @@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) rp_in_use[sla->ring_pairs_ids[i]] = false; update_budget(sla, old_cir, true); - get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); assign_node_to_parent(rl_data->accel_dev, sla, true); adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); mark_rps_usage(sla, rl_data->rp_in_use, false); @@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, if (!is_update) { mark_rps_usage(sla, rl_data->rp_in_use, true); - get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, 
&sla_type_arr); sla_type_arr[sla->node_id] = sla; rl_data->sla[sla->sla_id] = sla; } @@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) /* Unregister and remove all SLAs */ for (j = RL_LEAF; j >= end_type; j--) { - max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr); for (i = 0; i < max_id; i++) { if (!sla_type_arr[i]) @@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev) } if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { - dev_info(&GET_DEV(accel_dev), "not supported\n"); + dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n"); ret = -EOPNOTSUPP; goto ret_free; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index 269c6656fb90eac27152892306adf4c211a6fbc4..bfe750ea0e83905f9cb7be9d98b44e70101b433e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -151,6 +151,8 @@ struct rl_sla { u16 ring_pairs_cnt; }; +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr); int adf_rl_add_sla(struct adf_accel_dev *accel_dev, struct adf_rl_sla_input_data *sla_in); int adf_rl_update_sla(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 87a70c00c41ee61c4e3dde0708fe4b3bcb4c8e16..8d645e7e04aa5534206875a7da8fe9e7f9f3b079 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work) u32 vf_nr = vf_info->vf_nr; bool ret; + mutex_lock(&vf_info->pfvf_mig_lock); ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr); if (ret) /* re-enable interrupt on PF from this VF */ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr); + mutex_unlock(&vf_info->pfvf_mig_lock); kfree(pf2vf_resp); } @@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) vf_info->vf_nr = i; mutex_init(&vf_info->pf2vf_lock); + mutex_init(&vf_info->pfvf_mig_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, ADF_VF2PF_RATELIMIT_INTERVAL, ADF_VF2PF_RATELIMIT_BURST); @@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) if (hw_data->configure_iov_threads) hw_data->configure_iov_threads(accel_dev, false); - for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { mutex_destroy(&vf->pf2vf_lock); + mutex_destroy(&vf->pfvf_mig_lock); + } if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { kfree(accel_dev->pf.vf_info); diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c index 2ff714d11bd2f6aa6a634b6d656ae03d83fe4a69..74fb0c2ed2412af543c2803d1b6ade84ffc95486 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data) return 0; } +static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count, + u8 max_slices_per_type) +{ + u8 *sl_counter = (u8 *)slice_count; + int i; + + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + if (sl_counter[i] > max_slices_per_type) + return -EINVAL; + } + + return 0; +} + static int adf_tl_alloc_mem(struct adf_accel_dev 
*accel_dev) { struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); @@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) return ret; } + ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt); + if (ret) { + dev_err(dev, "invalid value returned by FW\n"); + adf_send_admin_tl_stop(accel_dev); + return ret; + } + telemetry->hbuffs = state; atomic_set(&telemetry->state, state); diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h index 9be81cd3b886064ca24cd946346e0f344e1f4251..e54a406cc1b4aec9313fca253b1685f967880813 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -40,6 +40,7 @@ struct adf_tl_hw_data { u8 num_dev_counters; u8 num_rp_counters; u8 max_rp; + u8 max_sl_cnt; }; struct adf_telemetry { diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c index 630d0483c4e0a1a4134f9f73f0def8091e9e4a55..1efdf46490f147d463fcc5ef9ee14127fb1028ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_transport.c +++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c @@ -474,7 +474,6 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, int adf_init_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; u32 size; u32 num_banks = 0; @@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) } accel_dev->transport = etr_data; - i = hw_data->get_etr_bar_id(hw_data); - csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; + csr_addr = adf_get_etr_base(accel_dev); /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c index 4128200a903293ba66e5fe9c337407fddf93ebbe..85c682e248fb918a11642ef5b858e5e00eb4cce7 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c @@ -110,6 +110,8 @@ struct qat_dh_ctx { unsigned int p_size; bool g2; struct qat_crypto_instance *inst; + struct crypto_kpp *ftfm; + bool fallback; } __packed __aligned(64); struct qat_asym_request { @@ -381,6 +383,36 @@ static int qat_dh_compute_value(struct kpp_request *req) return ret; } +static int qat_dh_generate_public_key(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_generate_public_key(nreq); + } + + return qat_dh_compute_value(req); +} + +static int qat_dh_compute_shared_secret(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_compute_shared_secret(nreq); + } + + return qat_dh_compute_value(req); +} + static int qat_dh_check_params_length(unsigned int p_len) { switch (p_len) { @@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) struct qat_crypto_instance *inst = ctx->inst; struct device *dev = 
&GET_DEV(inst->accel_dev); - if (qat_dh_check_params_length(params->p_size << 3)) - return -EINVAL; - ctx->p_size = params->p_size; ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) @@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (qat_dh_check_params_length(params.p_size << 3)) { + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->ftfm, buf, len); + } + + ctx->fallback = false; + /* Free old secret if any */ qat_dh_clear_ctx(dev, ctx); @@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ftfm); + return ctx->p_size; } @@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(numa_node_id()); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; if (!inst) return -EINVAL; - kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return PTR_ERR(ctx->ftfm); + + crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm)); + + reqsize = max(sizeof(struct qat_asym_request) + 64, + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm)); + + kpp_set_reqsize(tfm, reqsize); ctx->p_size = 0; ctx->g2 = false; @@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); + if (ctx->ftfm) + crypto_free_kpp(ctx->ftfm); + qat_dh_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); } @@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = { static struct kpp_alg dh = { .set_secret = qat_dh_set_secret, - .generate_public_key = qat_dh_compute_value, - .compute_shared_secret = qat_dh_compute_value, + .generate_public_key = qat_dh_generate_public_key, + .compute_shared_secret = qat_dh_compute_shared_secret, .max_size = qat_dh_max_size, .init = qat_dh_init_tfm, .exit = qat_dh_exit_tfm, @@ -1276,6 +1329,7 @@ static struct kpp_alg dh = { .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_dh_ctx), + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 76baed0a76c0ee9386e9c14b60315026be6b532a..338acf29c487b6784abd04379df282d17309614c 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!bufl)) return -ENOMEM; } else { - bufl = &buf->sgl_src.sgl_hdr; + bufl = container_of(&buf->sgl_src.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(bufl, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_src_valid = true; } @@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!buflout)) goto err_in; } else { - buflout = &buf->sgl_dst.sgl_hdr; + buflout = container_of(&buf->sgl_dst.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(buflout, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_dst_valid = true; } diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index d87e4f35ac395c768dd4f57ecf25a85f862c9352..85bc32a9ec0eb32e32ef85f282fcefb53c9f734f 100644 --- 
a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -15,14 +15,17 @@ struct qat_alg_buf { } __packed; struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; + /* New members must be added within the __struct_group() macro below. */ + __struct_group(qat_alg_buf_list_hdr, hdr, __packed, + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + ); struct qat_alg_buf buffers[]; } __packed; struct qat_alg_fixed_buf_list { - struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf_list_hdr sgl_hdr; struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; } __packed __aligned(64); diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c new file mode 100644 index 0000000000000000000000000000000000000000..892c2283a50e5ce1841258f34ba511de668b49a3 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id) +{ + struct adf_accel_dev *accel_dev; + struct qat_migdev_ops *ops; + struct qat_mig_dev *mdev; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return ERR_PTR(-ENODEV); + + ops = GET_VFMIG_OPS(accel_dev); + if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open || + !ops->close || !ops->suspend || !ops->resume || !ops->save_state || + !ops->load_state || !ops->save_setup || !ops->load_setup) + return ERR_PTR(-EINVAL); + + mdev = kmalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->vf_id = vf_id; + mdev->parent_accel_dev = accel_dev; + + return mdev; +} +EXPORT_SYMBOL_GPL(qat_vfmig_create); + +int qat_vfmig_init(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->init(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_init); + +void qat_vfmig_cleanup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->cleanup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_cleanup); + +void qat_vfmig_reset(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->reset(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_reset); + +int qat_vfmig_open(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->open(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_open); + +void qat_vfmig_close(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + GET_VFMIG_OPS(accel_dev)->close(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_close); + +int qat_vfmig_suspend(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->suspend(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_suspend); + +int qat_vfmig_resume(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->resume(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_resume); + +int qat_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_state); + +int 
qat_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_setup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_setup); + +int qat_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_state); + +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_setup); + +void qat_vfmig_destroy(struct qat_mig_dev *mdev) +{ + kfree(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_destroy); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index af14090cc4be311a3d7fbe5ad6fb9b1074687e06..6e24d57e6b98e6248b1cef6421909661cdb09616 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include "adf_dh895xcc_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index 70e56cc16ecebb761f50961b64d47cbef0b8241e..f4ee4c2e00da82535e2fdd4aca14ac5f85f74a06 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d57c5adf932e3676ee9b790c582fd2f4ca8118fe..b49f20ab20d1029848f4d8532bb06db4e10f5473 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -7,6 +7,10 @@ config IOMMU_IOVA config IOMMU_API bool +config IOMMUFD_DRIVER + bool + default n + menuconfig IOMMU_SUPPORT bool "IOMMU Hardware Support" depends on MMU diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 9b5fc3356bf2d8ac1ca9e3a8398200ae8ccd2940..8bd4c3b183ec6e475b58a1990d7b5c33ab141120 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -10,6 +10,7 @@ config AMD_IOMMU select IOMMU_API select IOMMU_IOVA select IOMMU_IO_PGTABLE + select IOMMUFD_DRIVER if IOMMUFD depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE help With this option you can enable support for AMD IOMMU hardware in diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 7dc30c2b56b302d8bd3cd129f410610d536059e6..dec4e5c2b66b8236fcd6faeb8497fdc9b42dfe20 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -97,7 +97,9 @@ #define FEATURE_GATS_MASK (3ULL) #define FEATURE_GAM_VAPIC BIT_ULL(21) #define FEATURE_GIOSUP BIT_ULL(48) +#define FEATURE_HASUP BIT_ULL(49) #define FEATURE_EPHSUP BIT_ULL(50) +#define FEATURE_HDSUP BIT_ULL(52) #define FEATURE_SNP BIT_ULL(63) #define FEATURE_PASID_SHIFT 32 @@ -212,6 +214,7 @@ /* macros and definitions for device table entries */ #define DEV_ENTRY_VALID 0x00 #define DEV_ENTRY_TRANSLATION 0x01 +#define DEV_ENTRY_HAD 0x07 #define DEV_ENTRY_PPR 0x34 #define DEV_ENTRY_IR 0x3d #define DEV_ENTRY_IW 0x3e @@ -370,10 +373,16 @@ #define PTE_LEVEL_PAGE_SIZE(level) \ (1ULL << (12 + (9 * (level)))) +/* + * The IOPTE dirty bit + */ +#define IOMMU_PTE_HD_BIT (6) + /* * Bit value 
definition for I/O PTE fields */ #define IOMMU_PTE_PR BIT_ULL(0) +#define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT) #define IOMMU_PTE_U BIT_ULL(59) #define IOMMU_PTE_FC BIT_ULL(60) #define IOMMU_PTE_IR BIT_ULL(61) @@ -384,6 +393,7 @@ */ #define DTE_FLAG_V BIT_ULL(0) #define DTE_FLAG_TV BIT_ULL(1) +#define DTE_FLAG_HAD (3ULL << 7) #define DTE_FLAG_GIOV BIT_ULL(54) #define DTE_FLAG_GV BIT_ULL(55) #define DTE_GLX_SHIFT (56) @@ -413,6 +423,7 @@ #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) +#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD) #define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK)) #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) @@ -563,6 +574,7 @@ struct protection_domain { int nid; /* Node ID */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ + bool dirty_tracking; /* dirty tracking is enabled in the domain */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 2892aa1b4dc1db1771b9ebe5d418a14da9e5f456..6c0621f6f572a4c4c0fb72ea1bdb5abe9d504311 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -486,6 +486,73 @@ static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo return (__pte & ~offset_mask) | (iova & offset_mask); } +static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size, + unsigned long flags) +{ + bool test_only = flags & IOMMU_DIRTY_NO_CLEAR; + bool dirty = false; + int i, count; + + /* + * 2.2.3.2 Host Dirty Support + * When a non-default page size is used, software must OR the + * Dirty bits in all of the replicated host PTEs used to map + * the page. The IOMMU does not guarantee the Dirty bits are + * set in all of the replicated PTEs. Any portion of the page + * may have been written even if the Dirty bit is set in only + * one of the replicated PTEs. + */ + count = PAGE_SIZE_PTE_COUNT(size); + for (i = 0; i < count && test_only; i++) { + if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) { + dirty = true; + break; + } + } + + for (i = 0; i < count && !test_only; i++) { + if (test_and_clear_bit(IOMMU_PTE_HD_BIT, + (unsigned long *)&ptep[i])) { + dirty = true; + } + } + + return dirty; +} + +static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); + unsigned long end = iova + size - 1; + + do { + unsigned long pgsize = 0; + u64 *ptep, pte; + + ptep = fetch_pte(pgtable, iova, &pgsize); + if (ptep) + pte = READ_ONCE(*ptep); + if (!ptep || !IOMMU_PTE_PRESENT(pte)) { + pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0); + iova += pgsize; + continue; + } + + /* + * Mark the whole IOVA range as dirty even if only one of + * the replicated PTEs was marked dirty. 
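+ * When IOMMU_DIRTY_NO_CLEAR is passed, pte_test_and_clear_dirty() only + * tests the bits without clearing them, so the range remains dirty for a + * later pass.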
+ */ + if (pte_test_and_clear_dirty(ptep, pgsize, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + /* * ---------------------------------------------------- */ @@ -527,6 +594,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo pgtable->iop.ops.map_pages = iommu_v1_map_pages; pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages; pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; + pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty; return &pgtable->iop; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 95bd7c25ba6f366b5db2582e8cb5318491cbb523..caad10f9cee3f903d38a30df988bd7fa78551655 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "amd_iommu.h" #include "../dma-iommu.h" @@ -65,6 +66,7 @@ LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); const struct iommu_ops amd_iommu_ops; +const struct iommu_dirty_ops amd_dirty_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; @@ -1610,6 +1612,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid, pte_root |= 1ULL << DEV_ENTRY_PPR; } + if (domain->dirty_tracking) + pte_root |= DTE_FLAG_HAD; + if (domain->flags & PD_IOMMUV2_MASK) { u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); u64 glx = domain->glx; @@ -2155,28 +2160,76 @@ static inline u64 dma_max_address(void) return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); } -static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) +static bool amd_iommu_hd_support(struct amd_iommu *iommu) +{ + return iommu && (iommu->features & FEATURE_HDSUP); +} + +static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, + struct device *dev, u32 flags) { + bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; struct protection_domain *domain; + struct amd_iommu *iommu = NULL; + + if (dev) { + iommu = rlookup_amd_iommu(dev); + if (!iommu) + return ERR_PTR(-ENODEV); + } /* * Since DTE[Mode]=0 is prohibited on SNP-enabled system, * default to use IOMMU_DOMAIN_DMA[_FQ]. 
*/ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY)) - return NULL; + return ERR_PTR(-EINVAL); + + if (dirty_tracking && !amd_iommu_hd_support(iommu)) + return ERR_PTR(-EOPNOTSUPP); domain = protection_domain_alloc(type); if (!domain) - return NULL; + return ERR_PTR(-ENOMEM); domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; + if (iommu) { + domain->domain.type = type; + domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; + domain->domain.ops = iommu->iommu.ops->default_domain_ops; + + if (dirty_tracking) + domain->domain.dirty_ops = &amd_dirty_ops; + } + return &domain->domain; } +static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) +{ + struct iommu_domain *domain; + + domain = do_iommu_domain_alloc(type, NULL, 0); + if (IS_ERR(domain)) + return NULL; + + return domain; +} + +static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev, + u32 flags) +{ + unsigned int type = IOMMU_DOMAIN_UNMANAGED; + + if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) + return ERR_PTR(-EOPNOTSUPP); + + return do_iommu_domain_alloc(type, dev, flags); +} + static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; @@ -2214,6 +2267,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, dev_data->defer_attach = false; + /* + * Restrict to devices with compatible IOMMU hardware support + * when enforcement of dirty tracking is enabled. + */ + if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) + return -EINVAL; + if (dev_data->domain) detach_device(dev); @@ -2332,6 +2392,11 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return true; case IOMMU_CAP_DEFERRED_FLUSH: return true; + case IOMMU_CAP_DIRTY_TRACKING: { + struct amd_iommu *iommu = rlookup_amd_iommu(dev); + + return amd_iommu_hd_support(iommu); + } default: break; } @@ -2339,6 +2404,73 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return false; } +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct dev_table_entry *dev_table; + struct iommu_dev_data *dev_data; + bool domain_flush = false; + struct amd_iommu *iommu; + unsigned long flags; + u64 pte_root; + + spin_lock_irqsave(&pdomain->lock, flags); + if (!(pdomain->dirty_tracking ^ enable)) { + spin_unlock_irqrestore(&pdomain->lock, flags); + return 0; + } + + list_for_each_entry(dev_data, &pdomain->dev_list, list) { + iommu = rlookup_amd_iommu(dev_data->dev); + if (!iommu) + continue; + + dev_table = get_dev_table(iommu); + pte_root = dev_table[dev_data->devid].data[0]; + + pte_root = (enable ? 
pte_root | DTE_FLAG_HAD : + pte_root & ~DTE_FLAG_HAD); + + /* Flush device DTE */ + dev_table[dev_data->devid].data[0] = pte_root; + device_flush_dte(dev_data); + domain_flush = true; + } + + /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ + if (domain_flush) { + amd_iommu_domain_flush_tlb_pde(pdomain); + amd_iommu_domain_flush_complete(pdomain); + } + pdomain->dirty_tracking = enable; + spin_unlock_irqrestore(&pdomain->lock, flags); + + return 0; +} + +static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct io_pgtable_ops *ops = &pdomain->iop.iop.ops; + unsigned long lflags; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + spin_lock_irqsave(&pdomain->lock, lflags); + if (!pdomain->dirty_tracking && dirty->bitmap) { + spin_unlock_irqrestore(&pdomain->lock, lflags); + return -EINVAL; + } + spin_unlock_irqrestore(&pdomain->lock, lflags); + + return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); +} + static void amd_iommu_get_resv_regions(struct device *dev, struct list_head *head) { @@ -2461,9 +2593,15 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) return true; } +const struct iommu_dirty_ops amd_dirty_ops = { + .set_dirty_tracking = amd_iommu_set_dirty_tracking, + .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, +}; + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, + .domain_alloc_user = amd_iommu_domain_alloc_user, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 2e56bd79f589d30c2787e695b5c93750fcb480e0..f5348b80652b65bdc043c2a01168789c65a2e626 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -15,6 +15,7 @@ config INTEL_IOMMU select DMA_OPS select IOMMU_API select IOMMU_IOVA + select IOMMUFD_DRIVER if IOMMUFD select NEED_DMA_MAP_STATE select DMAR_TABLE select SWIOTLB diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 744e4e6b8d72d350c75ea72355eddd446c0d59d4..4f296b8a022a955517ebb158533a65cc69c7cc11 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -300,6 +300,7 @@ static int iommu_skip_te_disable; #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; +static const struct iommu_dirty_ops intel_dirty_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) { @@ -4061,6 +4062,48 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return NULL; } +static struct iommu_domain * +intel_iommu_domain_alloc_user(struct device *dev, u32 flags) +{ + struct iommu_domain *domain; + struct intel_iommu *iommu; + bool dirty_tracking; + + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + + iommu = device_to_iommu(dev, NULL, NULL); + if (!iommu) + return ERR_PTR(-ENODEV); + + if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !nested_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + + dirty_tracking = (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING); + if (dirty_tracking && !ssads_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + + /* + * domain_alloc_user op needs to fully initialize a domain + * before return, so uses iommu_domain_alloc() here for + * 
simplicity. + */ + domain = iommu_domain_alloc(dev->bus); + if (!domain) + domain = ERR_PTR(-ENOMEM); + + if (!IS_ERR(domain) && dirty_tracking) { + if (to_dmar_domain(domain)->use_first_level) { + iommu_domain_free(domain); + return ERR_PTR(-EOPNOTSUPP); + } + domain->dirty_ops = &intel_dirty_ops; + } + + return domain; +} + static void intel_iommu_domain_free(struct iommu_domain *domain) { if (domain != &si_domain->domain && domain != &blocking_domain) @@ -4081,6 +4124,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; + if (domain->dirty_ops && !ssads_supported(iommu)) + return -EINVAL; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4336,6 +4382,8 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) return dmar_platform_optin(); case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: return ecap_sc_support(info->iommu->ecap); + case IOMMU_CAP_DIRTY_TRACKING: + return ssads_supported(info->iommu); default: return false; } @@ -4733,6 +4781,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; + if (domain->dirty_ops) + return -EINVAL; + if (context_copied(iommu, info->bus, info->devfn)) return -EBUSY; @@ -4791,10 +4842,88 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type) return vtd; } +static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct device_domain_info *info; + int ret; + + spin_lock(&dmar_domain->lock); + if (dmar_domain->dirty_tracking == enable) + goto out_unlock; + + list_for_each_entry(info, &dmar_domain->devices, link) { + ret = intel_pasid_setup_dirty_tracking(info->iommu, + info->domain, info->dev, + IOMMU_NO_PASID, enable); + if (ret) + goto err_unwind; + } + + dmar_domain->dirty_tracking = enable; +out_unlock: + spin_unlock(&dmar_domain->lock); + + return 0; + +err_unwind: + list_for_each_entry(info, &dmar_domain->devices, link) + intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain, + info->dev, IOMMU_NO_PASID, + dmar_domain->dirty_tracking); + spin_unlock(&dmar_domain->lock); + return ret; +} + +static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long end = iova + size - 1; + unsigned long pgsize; + + /* + * IOMMUFD core calls into a dirty tracking disabled domain without an + * IOVA bitmap set in order to clear any dirty bits that may have been + * set in the PTEs while dirty tracking was stopped. This ensures that + * we never inherit dirtied bits from a previous cycle. 
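+ * (iopt_clear_dirty_data() makes exactly such a call, with a NULL + * bitmap, just before dirty tracking is enabled.)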
+ */ + if (!dmar_domain->dirty_tracking && dirty->bitmap) + return -EINVAL; + + do { + struct dma_pte *pte; + int lvl = 0; + + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl, + GFP_ATOMIC); + pgsize = level_size(lvl) << VTD_PAGE_SHIFT; + if (!pte || !dma_pte_present(pte)) { + iova += pgsize; + continue; + } + + if (dma_sl_pte_test_and_clear_dirty(pte, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + +static const struct iommu_dirty_ops intel_dirty_ops = { + .set_dirty_tracking = intel_iommu_set_dirty_tracking, + .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, +}; + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, .domain_alloc = intel_iommu_domain_alloc, + .domain_alloc_user = intel_iommu_domain_alloc_user, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index e6a3e70656166a081b0184f9f8c281d86d8f27b1..49ea164cb006f54a480c7204f13c05b3f48a7882 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -48,6 +48,9 @@ #define DMA_FL_PTE_DIRTY BIT_ULL(6) #define DMA_FL_PTE_XD BIT_ULL(63) +#define DMA_SL_PTE_DIRTY_BIT 9 +#define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) + #define ADDR_WIDTH_5LEVEL (57) #define ADDR_WIDTH_4LEVEL (48) @@ -539,6 +542,10 @@ enum { #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) +#define ssads_supported(iommu) (sm_supported(iommu) && \ + ecap_slads((iommu)->ecap)) +#define nested_supported(iommu) (sm_supported(iommu) && \ + ecap_nest((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; @@ -595,6 +602,7 @@ struct dmar_domain { u8 has_mappings:1; /* Has mappings configured through * iommu_map() interface. */ + u8 dirty_tracking:1; /* Dirty tracking is enabled */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -784,6 +792,16 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte, + unsigned long flags) +{ + if (flags & IOMMU_DIRTY_NO_CLEAR) + return (pte->val & DMA_SL_PTE_DIRTY) != 0; + + return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT, + (unsigned long *)&pte->val); +} + static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 8faa93cffac45d179d201c6bba776572d9b8dee6..06ea2dd5354215a28f7a3bb2eabbf0cc08bc7ace 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -277,6 +277,11 @@ static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits) WRITE_ONCE(*ptr, (old & ~mask) | bits); } +static inline u64 pasid_get_bits(u64 *ptr) +{ + return READ_ONCE(*ptr); +} + /* * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode * PASID entry. @@ -335,6 +340,36 @@ static inline void pasid_set_fault_enable(struct pasid_entry *pe) pasid_set_bits(&pe->val[0], 1 << 1, 0); } +/* + * Enable second level A/D bits by setting the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. 
+ */ +static inline void pasid_set_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9); +} + +/* + * Disable second level A/D bits by clearing the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_clear_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 0); +} + +/* + * Check whether the second level A/D bits are enabled, i.e. whether the + * SLADE (Second Level Access Dirty Enable) field (Bit 9) of a scalable + * mode PASID entry is set. + */ +static inline bool pasid_get_ssade(struct pasid_entry *pe) +{ + return pasid_get_bits(&pe->val[0]) & (1 << 9); +} + /* * Setup the WPE(Write Protect Enable) field (Bit 132) of a * scalable mode PASID entry. @@ -630,6 +665,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); pasid_set_fault_enable(pte); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (domain->dirty_tracking) + pasid_set_ssade(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); @@ -639,6 +676,78 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, return 0; } +/* + * Set up dirty tracking on a second-level-only or nested translation type. + */ +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled) +{ + struct pasid_entry *pte; + u16 did, pgtt; + + spin_lock(&iommu->lock); + + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, "Failed to get pasid entry of PASID %d\n", pasid); + return -ENODEV; + } + + did = domain_id_iommu(domain, iommu); + pgtt = pasid_pte_get_pgtt(pte); + if (pgtt != PASID_ENTRY_PGTT_SL_ONLY && + pgtt != PASID_ENTRY_PGTT_NESTED) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, + "Dirty tracking not supported on translation type %d\n", + pgtt); + return -EOPNOTSUPP; + } + + if (pasid_get_ssade(pte) == enabled) { + spin_unlock(&iommu->lock); + return 0; + } + + if (enabled) + pasid_set_ssade(pte); + else + pasid_clear_ssade(pte); + spin_unlock(&iommu->lock); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * From VT-d spec table 25 "Guidance to Software for Invalidations": + * + * - PASID-selective-within-Domain PASID-cache invalidation + * If (PGTT=SS or Nested) + * - Domain-selective IOTLB invalidation + * Else + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + + /* Device IOTLB doesn't need to be flushed in caching mode. */ + if (!cap_caching_mode(iommu->cap)) + devtlb_invalidation_with_pasid(iommu, dev, pasid); + + return 0; +} + /* * Set up the scalable mode pasid entry for passthrough translation type. 
*/ diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 4e9e68c3c3888f6acd4c3ecff8ebc90f1db39955..958050b093aa24df0d8cebcb327b00371b64cb01 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -106,6 +106,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled); int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index 8aeba81800c512dc9e9eb1c32b3240080b503db1..34b446146961c29e7b24dc5cc890a5aa557a6ce8 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -11,3 +11,4 @@ iommufd-y := \ iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o obj-$(CONFIG_IOMMUFD) += iommufd.o +obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index ce78c3671539c77d27059d3aa11c7367f493eeff..2a41fd2b6ef8e13b0c39e512dd484ec5c9ebab21 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -540,7 +540,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, } hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, - immediate_attach); + 0, immediate_attach); if (IS_ERR(hwpt)) { destroy_hwpt = ERR_CAST(hwpt); goto out_unlock; @@ -1185,6 +1185,10 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) */ cmd->data_len = data_len; + cmd->out_capabilities = 0; + if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) + cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_free: kfree(data); diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index cf2c1504e20d843a6c00741f52c4fa6f66b01a90..72a5269984b0183ba262421486a09c0ccebf1077 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -5,6 +5,7 @@ #include #include +#include "../iommu-priv.h" #include "iommufd_private.h" void iommufd_hw_pagetable_destroy(struct iommufd_object *obj) @@ -60,6 +61,7 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) * @ictx: iommufd context * @ioas: IOAS to associate the domain with * @idev: Device to get an iommu_domain for + * @flags: Flags from userspace * @immediate_attach: True if idev should be attached to the hwpt * * Allocate a new iommu_domain and return it as a hw_pagetable. 
The HWPT @@ -72,13 +74,18 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) */ struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach) + struct iommufd_device *idev, u32 flags, + bool immediate_attach) { + const struct iommu_ops *ops = dev_iommu_ops(idev->dev); struct iommufd_hw_pagetable *hwpt; int rc; lockdep_assert_held(&ioas->mutex); + if (flags && !ops->domain_alloc_user) + return ERR_PTR(-EOPNOTSUPP); + hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE); if (IS_ERR(hwpt)) return hwpt; @@ -88,10 +95,19 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, refcount_inc(&ioas->obj.users); hwpt->ioas = ioas; - hwpt->domain = iommu_domain_alloc(idev->dev->bus); - if (!hwpt->domain) { - rc = -ENOMEM; - goto out_abort; + if (ops->domain_alloc_user) { + hwpt->domain = ops->domain_alloc_user(idev->dev, flags); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; + goto out_abort; + } + } else { + hwpt->domain = iommu_domain_alloc(idev->dev->bus); + if (!hwpt->domain) { + rc = -ENOMEM; + goto out_abort; + } } /* @@ -141,7 +157,9 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) struct iommufd_ioas *ioas; int rc; - if (cmd->flags || cmd->__reserved) + if ((cmd->flags & ~(IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_ALLOC_DIRTY_TRACKING)) || + cmd->__reserved) return -EOPNOTSUPP; idev = iommufd_get_device(ucmd, cmd->dev_id); @@ -155,7 +173,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) } mutex_lock(&ioas->mutex); - hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false); + hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, + idev, cmd->flags, false); if (IS_ERR(hwpt)) { rc = PTR_ERR(hwpt); goto out_unlock; @@ -177,3 +196,50 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) iommufd_put_object(&idev->obj); return rc; } + +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + bool enable; + + if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE) + return rc; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE; + + rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt->domain, enable); + + iommufd_put_object(&hwpt->obj); + return rc; +} + +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + + if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) || + cmd->__reserved) + return -EOPNOTSUPP; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + rc = iopt_read_and_clear_dirty_data(&ioas->iopt, hwpt->domain, + cmd->flags, cmd); + + iommufd_put_object(&hwpt->obj); + return rc; +} diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 2d22c027aa59825a4e82c404485f6071319ffbeb..05fd9d3abf1b809614cced9e9387679797866103 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "io_pagetable.h" #include "double_span.h" @@ -424,6 +425,177 @@ int 
iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, return 0; } +struct iova_bitmap_fn_arg { + unsigned long flags; + struct io_pagetable *iopt; + struct iommu_domain *domain; + struct iommu_dirty_bitmap *dirty; +}; + +static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap, + unsigned long iova, size_t length, + void *opaque) +{ + struct iopt_area *area; + struct iopt_area_contig_iter iter; + struct iova_bitmap_fn_arg *arg = opaque; + struct iommu_domain *domain = arg->domain; + struct iommu_dirty_bitmap *dirty = arg->dirty; + const struct iommu_dirty_ops *ops = domain->dirty_ops; + unsigned long last_iova = iova + length - 1; + unsigned long flags = arg->flags; + int ret; + + iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) { + unsigned long last = min(last_iova, iopt_area_last_iova(area)); + + ret = ops->read_and_clear_dirty(domain, iter.cur_iova, + last - iter.cur_iova + 1, flags, + dirty); + if (ret) + return ret; + } + + if (!iopt_area_contig_done(&iter)) + return -EINVAL; + return 0; +} + +static int +iommu_read_and_clear_dirty(struct iommu_domain *domain, + struct io_pagetable *iopt, unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iova_bitmap_fn_arg arg; + struct iova_bitmap *iter; + int ret = 0; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + iter = iova_bitmap_alloc(bitmap->iova, bitmap->length, + bitmap->page_size, + u64_to_user_ptr(bitmap->data)); + if (IS_ERR(iter)) + return -ENOMEM; + + iommu_dirty_bitmap_init(&dirty, iter, &gather); + + arg.flags = flags; + arg.iopt = iopt; + arg.domain = domain; + arg.dirty = &dirty; + iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty); + + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) + iommu_iotlb_sync(domain, &gather); + + iova_bitmap_free(iter); + + return ret; +} + +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + size_t iommu_pgsize = iopt->iova_alignment; + u64 last_iova; + + if (check_add_overflow(bitmap->iova, bitmap->length - 1, &last_iova)) + return -EOVERFLOW; + + if (bitmap->iova > ULONG_MAX || last_iova > ULONG_MAX) + return -EOVERFLOW; + + if ((bitmap->iova & (iommu_pgsize - 1)) || + ((last_iova + 1) & (iommu_pgsize - 1))) + return -EINVAL; + + if (!bitmap->page_size) + return -EINVAL; + + if ((bitmap->iova & (bitmap->page_size - 1)) || + ((last_iova + 1) & (bitmap->page_size - 1))) + return -EINVAL; + + return 0; +} + +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + int ret; + + ret = iommufd_check_iova_range(iopt, bitmap); + if (ret) + return ret; + + down_read(&iopt->iova_rwsem); + ret = iommu_read_and_clear_dirty(domain, iopt, flags, bitmap); + up_read(&iopt->iova_rwsem); + + return ret; +} + +static int iopt_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iopt_area *area; + int ret = 0; + + lockdep_assert_held_read(&iopt->iova_rwsem); + + iommu_dirty_bitmap_init(&dirty, NULL, &gather); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + if (!area->pages) + continue; + + ret = 
ops->read_and_clear_dirty(domain, iopt_area_iova(area), + iopt_area_length(area), 0, + &dirty); + if (ret) + break; + } + + iommu_iotlb_sync(domain, &gather); + return ret; +} + +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + int ret = 0; + + if (!ops) + return -EOPNOTSUPP; + + down_read(&iopt->iova_rwsem); + + /* Clear dirty bits from PTEs to ensure a clean snapshot */ + if (enable) { + ret = iopt_clear_dirty_data(iopt, domain); + if (ret) + goto out_unlock; + } + + ret = ops->set_dirty_tracking(domain, enable); + +out_unlock: + up_read(&iopt->iova_rwsem); + return ret; +} + int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova, unsigned long length, struct list_head *pages_list) { diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 2c58670011fe979b6da6687a9904408bba5f8a9a..034129130db3757ef58ee8a789747dcaa3586d7e 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -8,6 +8,9 @@ #include #include #include +#include +#include +#include struct iommu_domain; struct iommu_group; @@ -70,6 +73,13 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, unsigned long length, unsigned long *unmapped); int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap); +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable); + void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, unsigned long length); int iopt_table_add_domain(struct io_pagetable *iopt, @@ -222,6 +232,8 @@ int iommufd_option_rlimit_mode(struct iommu_option *cmd, struct iommufd_ctx *ictx); int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap); /* * A HW pagetable is called an iommu_domain inside the kernel. 
This user object @@ -240,9 +252,20 @@ struct iommufd_hw_pagetable { struct list_head hwpt_item; }; +static inline struct iommufd_hw_pagetable * +iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_HW_PAGETABLE), + struct iommufd_hw_pagetable, obj); +} +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd); +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd); + struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach); + struct iommufd_device *idev, u32 flags, + bool immediate_attach); int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt); int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev); diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 3f3644375bf13c8fa78600f1f9e15d893195af65..1f2e93d3d4e87f758f0eb67e93343bf86f813f16 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -19,6 +19,8 @@ enum { IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE, IOMMU_TEST_OP_ACCESS_REPLACE_IOAS, + IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + IOMMU_TEST_OP_DIRTY, }; enum { @@ -40,6 +42,10 @@ enum { MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES = 1 << 0, }; +enum { + MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, +}; + struct iommu_test_cmd { __u32 size; __u32 op; @@ -56,6 +62,13 @@ struct iommu_test_cmd { /* out_idev_id is the standard iommufd_bind object */ __u32 out_idev_id; } mock_domain; + struct { + __u32 out_stdev_id; + __u32 out_hwpt_id; + __u32 out_idev_id; + /* Expand mock_domain to set mock device flags */ + __u32 dev_flags; + } mock_domain_flags; struct { __u32 pt_id; } mock_domain_replace; @@ -95,6 +108,14 @@ struct iommu_test_cmd { struct { __u32 ioas_id; } access_replace_ioas; + struct { + __u32 flags; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 uptr; + __aligned_u64 out_nr_dirty; + } dirty; }; __u32 last; }; diff --git a/drivers/vfio/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c similarity index 98% rename from drivers/vfio/iova_bitmap.c rename to drivers/iommu/iommufd/iova_bitmap.c index 7af5b204990bb52d6747506cac55b5414cde748c..a365e18128da52240b4ef649b9d6c607a060a2af 100644 --- a/drivers/vfio/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -269,6 +269,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, iova_bitmap_free(bitmap); return ERR_PTR(rc); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_alloc, IOMMUFD); /** * iova_bitmap_free() - Frees an IOVA bitmap object @@ -290,6 +291,7 @@ void iova_bitmap_free(struct iova_bitmap *bitmap) kfree(bitmap); } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_free, IOMMUFD); /* * Returns the remaining bitmap indexes from mapped_total_index to process for @@ -388,6 +390,7 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, return ret; } +EXPORT_SYMBOL_NS_GPL(iova_bitmap_for_each, IOMMUFD); /** * iova_bitmap_set() - Records an IOVA range in bitmap @@ -425,4 +428,4 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, cur_bit += nbits; } while (cur_bit <= last_bit); } -EXPORT_SYMBOL_GPL(iova_bitmap_set); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index e71523cbd0de4352479aadeb8f33dd6d2ba87df8..d50f42a730aa36c63b408be1174d8f553880467b 100644 --- 
a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -307,6 +307,8 @@ union ucmd_buffer { struct iommu_destroy destroy; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; + struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap; + struct iommu_hwpt_set_dirty_tracking set_dirty_tracking; struct iommu_ioas_alloc alloc; struct iommu_ioas_allow_iovas allow_iovas; struct iommu_ioas_copy ioas_copy; @@ -342,6 +344,10 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, __reserved), + IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap, + struct iommu_hwpt_get_dirty_bitmap, data), + IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking, + struct iommu_hwpt_set_dirty_tracking, __reserved), IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, struct iommu_ioas_alloc, out_ioas_id), IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, @@ -552,5 +558,6 @@ MODULE_ALIAS_MISCDEV(VFIO_MINOR); MODULE_ALIAS("devname:vfio/vfio"); #endif MODULE_IMPORT_NS(IOMMUFD_INTERNAL); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 56506d5753f15c9f7079a661773404636d975c9e..22f9fcdfc55afc04031787500948b79934e8f513 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -24,6 +24,7 @@ static struct platform_device *selftest_iommu_dev; size_t iommufd_test_memory_limit = 65536; enum { + MOCK_DIRTY_TRACK = 1, MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, /* @@ -36,6 +37,7 @@ enum { _MOCK_PFN_START = MOCK_PFN_MASK + 1, MOCK_PFN_START_IOVA = _MOCK_PFN_START, MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, + MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, }; /* @@ -86,6 +88,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, } struct mock_iommu_domain { + unsigned long flags; struct iommu_domain domain; struct xarray pfns; }; @@ -96,6 +99,7 @@ enum selftest_obj_type { struct mock_dev { struct device dev; + unsigned long flags; }; struct selftest_obj { @@ -118,6 +122,11 @@ static void mock_domain_blocking_free(struct iommu_domain *domain) static int mock_domain_nop_attach(struct iommu_domain *domain, struct device *dev) { + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + return -EINVAL; + return 0; } @@ -146,6 +155,69 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) return info; } +static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long flags = mock->flags; + + if (enable && !domain->dirty_ops) + return -EINVAL; + + /* No change? */ + if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK))) + return 0; + + flags = (enable ? 
flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK); + + mock->flags = flags; + return 0; +} + +static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long i, max = size / MOCK_IO_PAGE_SIZE; + void *ent, *old; + + if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap) + return -EINVAL; + + for (i = 0; i < max; i++) { + unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE; + + ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); + if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) { + /* Clear dirty */ + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { + unsigned long val; + + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, + cur / MOCK_IO_PAGE_SIZE, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + } + iommu_dirty_bitmap_record(dirty, cur, + MOCK_IO_PAGE_SIZE); + } + } + + return 0; +} + +const struct iommu_dirty_ops dirty_ops = { + .set_dirty_tracking = mock_domain_set_dirty_tracking, + .read_and_clear_dirty = mock_domain_read_and_clear_dirty, +}; + +static const struct iommu_ops mock_ops; + static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) { struct mock_iommu_domain *mock; @@ -162,10 +234,34 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) mock->domain.geometry.aperture_start = MOCK_APERTURE_START; mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; + mock->domain.ops = mock_ops.default_domain_ops; + mock->domain.type = iommu_domain_type; xa_init(&mock->pfns); return &mock->domain; } +static struct iommu_domain * +mock_domain_alloc_user(struct device *dev, u32 flags) +{ + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + struct iommu_domain *domain; + + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + + if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) && + (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + return ERR_PTR(-EOPNOTSUPP); + + domain = mock_domain_alloc(IOMMU_DOMAIN_UNMANAGED); + if (domain && !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + domain->dirty_ops = &dirty_ops; + if (!domain) + domain = ERR_PTR(-ENOMEM); + return domain; +} + static void mock_domain_free(struct iommu_domain *domain) { struct mock_iommu_domain *mock = @@ -243,7 +339,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) { ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); - WARN_ON(!ent); + /* * iommufd generates unmaps that must be a strict * superset of the maps performed. So every starting * /ending IOVA should have been an iova passed * to map_pages */ if (first) { - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_START_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_START_IOVA)); first = false; } if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_LAST_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_LAST_IOVA)); iova += MOCK_IO_PAGE_SIZE; ret += MOCK_IO_PAGE_SIZE; @@ -283,7 +379,18 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) { - return cap == IOMMU_CAP_CACHE_COHERENCY; + struct 
mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_DIRTY_TRACKING: + return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY); + default: + break; + } + + return false; } static void mock_domain_set_plaform_dma_ops(struct device *dev) @@ -307,6 +414,7 @@ static const struct iommu_ops mock_ops = { .pgsize_bitmap = MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, .domain_alloc = mock_domain_alloc, + .domain_alloc_user = mock_domain_alloc_user, .capable = mock_domain_capable, .set_platform_dma_ops = mock_domain_set_plaform_dma_ops, .device_group = generic_device_group, @@ -362,16 +470,20 @@ static void mock_dev_release(struct device *dev) kfree(mdev); } -static struct mock_dev *mock_dev_create(void) +static struct mock_dev *mock_dev_create(unsigned long dev_flags) { struct mock_dev *mdev; int rc; + if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) + return ERR_PTR(-EINVAL); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return ERR_PTR(-ENOMEM); device_initialize(&mdev->dev); + mdev->flags = dev_flags; mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; @@ -407,6 +519,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, struct iommufd_device *idev; struct selftest_obj *sobj; u32 pt_id = cmd->id; + u32 dev_flags = 0; u32 idev_id; int rc; @@ -417,7 +530,10 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, sobj->idev.ictx = ucmd->ictx; sobj->type = TYPE_IDEV; - sobj->idev.mock_dev = mock_dev_create(); + if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS) + dev_flags = cmd->mock_domain_flags.dev_flags; + + sobj->idev.mock_dev = mock_dev_create(dev_flags); if (IS_ERR(sobj->idev.mock_dev)) { rc = PTR_ERR(sobj->idev.mock_dev); goto out_sobj; @@ -977,6 +1093,73 @@ static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == __IOMMUFD_ACCESS_RW_SLOW_PATH); +static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, + unsigned long iova, size_t length, + unsigned long page_size, void __user *uptr, + u32 flags) +{ + unsigned long bitmap_size, i, max; + struct iommu_test_cmd *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct mock_iommu_domain *mock; + int rc, count = 0; + void *tmp; + + if (!page_size || !length || iova % page_size || length % page_size || + !uptr) + return -EINVAL; + + hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + if (!(mock->flags & MOCK_DIRTY_TRACK)) { + rc = -EINVAL; + goto out_put; + } + + max = length / page_size; + bitmap_size = max / BITS_PER_BYTE; + + tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + if (!tmp) { + rc = -ENOMEM; + goto out_put; + } + + if (copy_from_user(tmp, uptr, bitmap_size)) { + rc = -EFAULT; + goto out_free; + } + + for (i = 0; i < max; i++) { + unsigned long cur = iova + i * page_size; + void *ent, *old; + + if (!test_bit(i, (unsigned long *)tmp)) + continue; + + ent = xa_load(&mock->pfns, cur / page_size); + if (ent) { + unsigned long val; + + val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / page_size, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + count++; + } + } + + cmd->dirty.out_nr_dirty = count; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); +out_free: + kvfree(tmp); +out_put: + iommufd_put_object(&hwpt->obj); + return rc; +} + void iommufd_selftest_destroy(struct iommufd_object *obj) { struct 
selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); @@ -1000,6 +1183,7 @@ int iommufd_test(struct iommufd_ucmd *ucmd) cmd->add_reserved.start, cmd->add_reserved.length); case IOMMU_TEST_OP_MOCK_DOMAIN: + case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS: return iommufd_test_mock_domain(ucmd, cmd); case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: return iommufd_test_mock_domain_replace( @@ -1041,6 +1225,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd) return -EINVAL; iommufd_test_memory_limit = cmd->memory_limit.limit; return 0; + case IOMMU_TEST_OP_DIRTY: + return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova, + cmd->dirty.length, + cmd->dirty.page_size, + u64_to_user_ptr(cmd->dirty.uptr), + cmd->dirty.flags); default: return -EOPNOTSUPP; } diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index c82ea032d3521268138811a1cc1b718755c90c26..68c05705200fce8fc9824a8521bbe554e5c130f7 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VFIO) += vfio.o -vfio-y += vfio_main.o \ - iova_bitmap.o +vfio-y += vfio_main.o vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o vfio-$(CONFIG_VFIO_GROUP) += group.o vfio-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 8125e5f37832c40adbf25a6868389c78639e42cc..04ac975432a3024fc2c74c649e3325679811a430 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -65,4 +65,6 @@ source "drivers/vfio/pci/hisilicon/Kconfig" source "drivers/vfio/pci/pds/Kconfig" +source "drivers/vfio/pci/qat/Kconfig" + endmenu diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile index 45167be462d8f601c2da3924fd6848ef6c059cf9..52aa7423e6df69bc0c0c67ac5d79a48ba51b7b67 100644 --- a/drivers/vfio/pci/Makefile +++ b/drivers/vfio/pci/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/ obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/ obj-$(CONFIG_PDS_VFIO_PCI) += pds/ + +obj-$(CONFIG_QAT_VFIO_PCI) += qat/ diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig index 7088edc4fb28d88f5603e8f68462993123eece46..c3ced56b7787650ce8b82039b419413e81deedfa 100644 --- a/drivers/vfio/pci/mlx5/Kconfig +++ b/drivers/vfio/pci/mlx5/Kconfig @@ -3,6 +3,7 @@ config MLX5_VFIO_PCI tristate "VFIO support for MLX5 PCI devices" depends on MLX5_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides migration support for MLX5 devices using the VFIO framework. diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 42ec574a86221074b2b201d4546e116adfd79179..5cf2b491d15a01467cc82a5df624ffc494da8b20 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -1376,6 +1376,7 @@ static struct pci_driver mlx5vf_pci_driver = { module_pci_driver(mlx5vf_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Gurtovoy "); MODULE_AUTHOR("Yishai Hadas "); diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 6eceef7b028aae9b8b7a8cb49614e88525f4bade..fec9b167c7b9ac98ae24dddd9265e30d95942e7d 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -5,6 +5,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" depends on PDS_CORE && PCI_IOV select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides generic PCI support for PDS devices using the VFIO framework. 
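Note: the mlx5 and pds Kconfig hunks above add "select IOMMUFD_DRIVER", and the drivers/vfio/Makefile hunk drops iova_bitmap.o from vfio.o, because the iova_bitmap helpers now live in the iommufd module and are exported in the IOMMUFD symbol namespace, which is why MODULE_IMPORT_NS(IOMMUFD) appears throughout this series. Below is a minimal sketch of how a variant driver could consume that API to report dirty pages; example_tracker, example_report_dirty() and example_report_range() are hypothetical names, while the iova_bitmap_*() signatures follow the include/linux/iova_bitmap.h changes later in this patch.

#include <linux/err.h>
#include <linux/iova_bitmap.h>

/* Hypothetical per-device dirty log; a real driver reads its own HW state. */
struct example_tracker {
	unsigned long dirty_iova;	/* one dirtied IOVA, for illustration */
	unsigned long page_size;
};

/* iova_bitmap_fn_t callback, invoked for each chunk of the user bitmap. */
static int example_report_dirty(struct iova_bitmap *bitmap, unsigned long iova,
				size_t length, void *opaque)
{
	struct example_tracker *t = opaque;

	/* Mark the tracked page if it falls inside [iova, iova + length). */
	if (t->dirty_iova >= iova && t->dirty_iova < iova + length)
		iova_bitmap_set(bitmap, t->dirty_iova, t->page_size);
	return 0;
}

static int example_report_range(struct example_tracker *t, unsigned long iova,
				size_t length, u64 __user *data)
{
	struct iova_bitmap *bitmap;
	int ret;

	/* Wraps the user-supplied bitmap so the driver can set bits in it. */
	bitmap = iova_bitmap_alloc(iova, length, t->page_size, data);
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	ret = iova_bitmap_for_each(bitmap, t, example_report_dirty);
	iova_bitmap_free(bitmap);
	return ret;
}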
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c index caffa1a2cf591e9428f4cf960f2d9265ee86c3a9..a34dda5166293583337372fc0059129c726a9f45 100644 --- a/drivers/vfio/pci/pds/pci_drv.c +++ b/drivers/vfio/pci/pds/pci_drv.c @@ -204,6 +204,7 @@ static struct pci_driver pds_vfio_pci_driver = { module_pci_driver(pds_vfio_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION); MODULE_AUTHOR("Brett Creeley "); MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/pci/qat/Kconfig b/drivers/vfio/pci/qat/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..bf52cfa4b595c3fdee279ec760bc7bade3142187 --- /dev/null +++ b/drivers/vfio/pci/qat/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config QAT_VFIO_PCI + tristate "VFIO support for QAT VF PCI devices" + select VFIO_PCI_CORE + depends on CRYPTO_DEV_QAT_4XXX + help + This provides migration support for Intel(R) QAT Virtual Functions + using the VFIO framework. + + To compile this as a module, choose M here: the module + will be called qat_vfio_pci. If you don't know what to do here, + say N. diff --git a/drivers/vfio/pci/qat/Makefile b/drivers/vfio/pci/qat/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..5fe5c4ec19d3022b054c9d1d6159e536d62505a5 --- /dev/null +++ b/drivers/vfio/pci/qat/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QAT_VFIO_PCI) += qat_vfio_pci.o +qat_vfio_pci-y := main.o diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c new file mode 100644 index 0000000000000000000000000000000000000000..e36740a282e7bcbe2c69479704744d553fefa6ac --- /dev/null +++ b/drivers/vfio/pci/qat/main.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The migration data of each Intel QAT VF device is encapsulated in a + * 4096-byte block. The data consists of two parts. + * The first is a pre-configured set of attributes of the VF being migrated, + * which are only set when it is created. This part can be migrated during the + * pre-copy stage and is used for a device compatibility check. + * The second is the VF state. This includes the required MMIO regions and + * the shadow states maintained by the QAT PF driver. This part can only be + * saved when the VF is fully quiesced and is migrated during the stop-copy + * stage. + * Both parts of the data are saved in hierarchical structures consisting of + * a preamble section and several raw state sections. + * When the pre-configured part of the migration data has been fully retrieved + * from user space, the preamble section is used to validate the correctness + * of the data blocks and to check the version compatibility. The raw state + * sections are then used to perform a device compatibility check. + * When the device transitions from the RESUMING state, the VF states are + * extracted from the raw state sections of the VF state part of the migration + * data and then loaded into the device.
+ */ + +struct qat_vf_migration_file { + struct file *filp; + /* protects migration region context */ + struct mutex lock; + bool disabled; + struct qat_vf_core_device *qat_vdev; + ssize_t filled_size; +}; + +struct qat_vf_core_device { + struct vfio_pci_core_device core_device; + struct qat_mig_dev *mdev; + /* protects migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + struct qat_vf_migration_file *resuming_migf; + struct qat_vf_migration_file *saving_migf; +}; + +static int qat_vf_pci_open_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = + container_of(core_vdev, struct qat_vf_core_device, + core_device.vdev); + struct vfio_pci_core_device *vdev = &qat_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + ret = qat_vfmig_open(qat_vdev->mdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; + } + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static void qat_vf_disable_fd(struct qat_vf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->disabled = true; + migf->filp->f_pos = 0; + migf->filled_size = 0; + mutex_unlock(&migf->lock); +} + +static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev) +{ + if (qat_vdev->resuming_migf) { + qat_vf_disable_fd(qat_vdev->resuming_migf); + fput(qat_vdev->resuming_migf->filp); + qat_vdev->resuming_migf = NULL; + } + + if (qat_vdev->saving_migf) { + qat_vf_disable_fd(qat_vdev->saving_migf); + fput(qat_vdev->saving_migf->filp); + qat_vdev->saving_migf = NULL; + } +} + +static void qat_vf_pci_close_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_close(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); + vfio_pci_core_close_device(core_vdev); +} + +static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_vf_core_device *qat_vdev = migf->qat_vdev; + struct qat_mig_dev *mig_dev = qat_vdev->mdev; + struct vfio_precopy_info info; + loff_t *pos = &filp->f_pos; + unsigned long minsz; + int ret = 0; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&qat_vdev->state_mutex); + if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + mutex_unlock(&qat_vdev->state_mutex); + return -EINVAL; + } + + mutex_lock(&migf->lock); + if (migf->disabled) { + ret = -ENODEV; + goto out; + } + + if (*pos > mig_dev->setup_size) { + ret = -EINVAL; + goto out; + } + + info.dirty_bytes = 0; + info.initial_bytes = mig_dev->setup_size - *pos; + +out: + mutex_unlock(&migf->lock); + mutex_unlock(&qat_vdev->state_mutex); + if (ret) + return ret; + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; +} + +static ssize_t qat_vf_save_read(struct file *filp, char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + ssize_t done = 0; + loff_t *offs; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + mutex_lock(&migf->lock); + if (*offs > migf->filled_size || *offs < 0) { + done = -EINVAL; + goto out_unlock; + } + + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + len = min_t(size_t, migf->filled_size - *offs, len); + if (len) { + ret = copy_to_user(buf, mig_dev->state + *offs, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + done = len; + } + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static int qat_vf_release_file(struct inode *inode, struct file *filp) +{ + struct qat_vf_migration_file *migf = filp->private_data; + + qat_vf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + + return 0; +} + +static const struct file_operations qat_vf_save_fops = { + .owner = THIS_MODULE, + .read = qat_vf_save_read, + .unlocked_ioctl = qat_vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_state(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->state_size; + + return 0; +} + +static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_setup(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->setup_size; + + return 0; +} + +/* + * Allocate a file handler for user space and then save the migration data for + * the device being migrated. If this is called in the pre-copy stage, save the + * pre-configured device data. Otherwise, if this is called in the stop-copy + * stage, save the device state. In both cases, update the data size which can + * then be read from user space. 
+ */ +static struct qat_vf_migration_file * +qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops, + migf, O_RDONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + if (pre_copy) + ret = qat_vf_save_setup(qat_vdev, migf); + else + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) { + fput(migf->filp); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + + return migf; +} + +static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + loff_t end, *offs; + ssize_t done = 0; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + if (*offs < 0 || + check_add_overflow((loff_t)len, *offs, &end)) + return -EOVERFLOW; + + if (end > mig_dev->state_size) + return -ENOMEM; + + mutex_lock(&migf->lock); + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + ret = copy_from_user(mig_dev->state + *offs, buf, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + migf->filled_size += len; + + /* + * Load the pre-configured device data first to check if the target + * device is compatible with the source device. + */ + ret = qat_vfmig_load_setup(mig_dev, migf->filled_size); + if (ret && ret != -EAGAIN) { + done = ret; + goto out_unlock; + } + done = len; + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static const struct file_operations qat_vf_resume_fops = { + .owner = THIS_MODULE, + .write = qat_vf_resume_write, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static struct qat_vf_migration_file * +qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + migf->filled_size = 0; + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + return migf; +} + +static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev) +{ + return qat_vfmig_load_state(qat_vdev->mdev); +} + +static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new) +{ + u32 cur = qat_vdev->mig_state; + int ret; + + /* + * As the device is not capable of just stopping P2P DMAs, suspend the + * device completely once any of the P2P states are reached. + * When it is suspended, all its MMIO registers can still be accessed + * correctly; jobs submitted through the rings are queued but none are + * processed by the device. The MMIO state can be safely migrated to + * the target VF during the stop-copy stage and restored correctly on + * the target VF. All queued jobs can then be resumed.
+ */ + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + ret = qat_vfmig_suspend(qat_vdev->mdev); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { + qat_vfmig_resume(qat_vdev->mdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P)) + return NULL; + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_resume_device_data(qat_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->resuming_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) { + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf = qat_vdev->saving_migf; + + if (!migf) + return ERR_PTR(-EINVAL); + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + ret = qat_vf_load_device_data(qat_vdev); + if (ret) + return ERR_PTR(ret); + + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + /* vfio_mig_get_next_state() does not use arcs other than the above */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev) +{ + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + qat_vfmig_reset(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); +} + +static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&qat_vdev->state_mutex); + while (new_state != qat_vdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(ret); + break; + } + res = qat_vf_pci_step_device_state(qat_vdev, next_state); + if (IS_ERR(res)) + break; + qat_vdev->mig_state = next_state; + if (WARN_ON(res && new_state != qat_vdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + mutex_unlock(&qat_vdev->state_mutex); + + return 
res; +} + +static int qat_vf_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *curr_state = qat_vdev->mig_state; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static int qat_vf_pci_get_data_size(struct vfio_device *vdev, + unsigned long *stop_copy_length) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *stop_copy_length = qat_vdev->mdev->state_size; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static const struct vfio_migration_ops qat_vf_pci_mig_ops = { + .migration_set_state = qat_vf_pci_set_device_state, + .migration_get_state = qat_vf_pci_get_device_state, + .migration_get_data_size = qat_vf_pci_get_data_size, +}; + +static void qat_vf_pci_release_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_cleanup(qat_vdev->mdev); + qat_vfmig_destroy(qat_vdev->mdev); + mutex_destroy(&qat_vdev->state_mutex); + vfio_pci_core_release_dev(core_vdev); +} + +static int qat_vf_pci_init_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + struct qat_mig_dev *mdev; + struct pci_dev *parent; + int ret, vf_id; + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY; + core_vdev->mig_ops = &qat_vf_pci_mig_ops; + + ret = vfio_pci_core_init_dev(core_vdev); + if (ret) + return ret; + + mutex_init(&qat_vdev->state_mutex); + + parent = pci_physfn(qat_vdev->core_device.pdev); + vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev); + if (vf_id < 0) { + ret = -ENODEV; + goto err_rel; + } + + mdev = qat_vfmig_create(parent, vf_id); + if (IS_ERR(mdev)) { + ret = PTR_ERR(mdev); + goto err_rel; + } + + ret = qat_vfmig_init(mdev); + if (ret) + goto err_destroy; + + qat_vdev->mdev = mdev; + + return 0; + +err_destroy: + qat_vfmig_destroy(mdev); +err_rel: + vfio_pci_core_release_dev(core_vdev); + return ret; +} + +static const struct vfio_device_ops qat_vf_pci_ops = { + .name = "qat-vf-vfio-pci", + .init = qat_vf_pci_init_dev, + .release = qat_vf_pci_release_dev, + .open_device = qat_vf_pci_open_device, + .close_device = qat_vf_pci_close_device, + .ioctl = vfio_pci_core_ioctl, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev); + + return container_of(core_device, struct qat_vf_core_device, core_device); +} + +static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + if (!qat_vdev->mdev) + return; + + mutex_lock(&qat_vdev->state_mutex); + qat_vf_reset_done(qat_vdev); + mutex_unlock(&qat_vdev->state_mutex); +} + +static int +qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = 
&pdev->dev; + struct qat_vf_core_device *qat_vdev; + int ret; + + qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops); + if (IS_ERR(qat_vdev)) + return PTR_ERR(qat_vdev); + + pci_set_drvdata(pdev, &qat_vdev->core_device); + ret = vfio_pci_core_register_device(&qat_vdev->core_device); + if (ret) + goto out_put_device; + + return 0; + +out_put_device: + vfio_put_device(&qat_vdev->core_device.vdev); + return ret; +} + +static void qat_vf_vfio_pci_remove(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + vfio_pci_core_unregister_device(&qat_vdev->core_device); + vfio_put_device(&qat_vdev->core_device.vdev); +} + +static const struct pci_device_id qat_vf_vfio_pci_table[] = { + /* Intel QAT GEN4 4xxx VF device */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) }, + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) }, + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) }, + {} +}; +MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table); + +static const struct pci_error_handlers qat_vf_err_handlers = { + .reset_done = qat_vf_pci_aer_reset_done, + .error_detected = vfio_pci_core_aer_err_detected, +}; + +static struct pci_driver qat_vf_vfio_pci_driver = { + .name = "qat_vfio_pci", + .id_table = qat_vf_vfio_pci_table, + .probe = qat_vf_vfio_pci_probe, + .remove = qat_vf_vfio_pci_remove, + .err_handler = &qat_vf_err_handlers, + .driver_managed_dma = true, +}; +module_pci_driver(qat_vf_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xin Zeng "); +MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 40732e8ed4c6fb018ddfb27fbeaff37775dcc233..a96d97da367daa87a9e5920f36216d64f2c1afc0 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -1693,6 +1693,7 @@ static void __exit vfio_cleanup(void) module_init(vfio_init); module_exit(vfio_cleanup); +MODULE_IMPORT_NS(IOMMUFD); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b35616c7d00cb383425c72fe10ee079ff1..25142a0e2fc2c51d4c7807a1fb87cc21b16a163b 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -166,6 +166,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index eaa67e1f1ea96bdf0db7f70eedfce7756b6378ff..6d9211049d88e44bbc3ea6617f62918561e862ce 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IOMMU_READ (1 << 0) @@ -37,6 +38,7 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -95,6 +97,8 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; @@ -133,6 +137,7 @@ enum 
iommu_cap { * usefully support the non-strict DMA flush queue. */ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -227,6 +232,35 @@ struct iommu_iotlb_gather { bool queued; }; +/** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with each bit representing a page. + * Reads the dirty PTE bits and clears them from the + * IO page tables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); +}; + /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability @@ -234,7 +268,15 @@ struct iommu_iotlb_gather { * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain on success; otherwise + * NULL is returned. The domain is not fully initialized until + * the caller, iommu_domain_alloc(), returns. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before returning. + * Upon success, a domain is returned. Upon failure, + * ERR_PTR must be returned.
* @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -267,6 +309,7 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)(struct device *dev, u32 flags); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -634,6 +677,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ @@ -740,6 +805,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -972,6 +1039,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h index c006cf0a25f3daac2ccc39c67c9a3193245a4077..1c338f5e5b7a62027290b44ad47c4a74d84706ac 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include +#include struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..dbbb6a063dd25cb1509a756b5dc437cc7e1439f7 
--- /dev/null +++ b/include/linux/qat/qat_mig_dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef QAT_MIG_DEV_H_ +#define QAT_MIG_DEV_H_ + +struct pci_dev; + +struct qat_mig_dev { + void *parent_accel_dev; + u8 *state; + u32 setup_size; + u32 remote_setup_size; + u32 state_size; + s32 vf_id; +}; + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id); +int qat_vfmig_init(struct qat_mig_dev *mdev); +void qat_vfmig_cleanup(struct qat_mig_dev *mdev); +void qat_vfmig_reset(struct qat_mig_dev *mdev); +int qat_vfmig_open(struct qat_mig_dev *mdev); +void qat_vfmig_close(struct qat_mig_dev *mdev); +int qat_vfmig_suspend(struct qat_mig_dev *mdev); +int qat_vfmig_resume(struct qat_mig_dev *mdev); +int qat_vfmig_save_state(struct qat_mig_dev *mdev); +int qat_vfmig_save_setup(struct qat_mig_dev *mdev); +int qat_vfmig_load_state(struct qat_mig_dev *mdev); +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size); +void qat_vfmig_destroy(struct qat_mig_dev *mdev); + +#endif /*QAT_MIG_DEV_H_*/ diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cbab6b8b1562fa359d34f9835a9cde757..c44eecf5d318e520a3c78c69e60b2d55a4d1618b 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,8 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -347,10 +349,22 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for * @pt_id: The IOAS to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT @@ -404,6 +418,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -415,6 +443,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. 
* @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +469,81 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/** + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for controlling dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed when the next operation + * is expected to be an unmap of the + * same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the first bit in the bitmap + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where the dirty bits are set. Each bit in the bitmap + * represents one @page_size unit of IOVA, counting from @iova. + * + * Checking whether a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE.
+ */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 33d08600be13d6c23cf7b36ef6ef8ab6145b4ecc..de61447b9558484288c10b21eee5b7de81a59ef3 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -86,12 +86,13 @@ TEST_F(iommufd, cmd_fail) TEST_F(iommufd, cmd_length) { -#define TEST_LENGTH(_struct, _ioctl) \ +#define TEST_LENGTH(_struct, _ioctl, _last) \ { \ + size_t min_size = offsetofend(struct _struct, _last); \ struct { \ struct _struct cmd; \ uint8_t extra; \ - } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \ + } cmd = { .cmd = { .size = min_size - 1 }, \ .extra = UINT8_MAX }; \ int old_errno; \ int rc; \ @@ -112,16 +113,19 @@ TEST_F(iommufd, cmd_length) } \ } - TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); - TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO); - TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); - TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); - TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); - TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP); - TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY); - TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP); - TEST_LENGTH(iommu_option, IOMMU_OPTION); - TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS); + TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id); + TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved); + TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved); + TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id); + TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES, + out_iova_alignment); + TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS, + allowed_iovas); + TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova); + TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova); + TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length); + TEST_LENGTH(iommu_option, IOMMU_OPTION, val64); + TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved); #undef TEST_LENGTH } @@ -1404,16 +1408,242 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) int i; for (i = 0; i != variant->mock_domains; i++) { + uint32_t hwpt_id[2]; uint32_t stddev_id; - uint32_t hwpt_id; - test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id); - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_hwpt_alloc(EOPNOTSUPP, + self->idev_ids[i], self->ioas_id, + ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + 0, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]); + + /* Do a hw_pagetable rotation test */ + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0])); + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1])); + test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id); + test_ioctl_destroy(hwpt_id[1]); + + test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL); test_ioctl_destroy(stddev_id); - test_ioctl_destroy(hwpt_id); + test_ioctl_destroy(hwpt_id[0]); } } +FIXTURE(iommufd_dirty_tracking) +{ + int fd; + uint32_t ioas_id; + uint32_t hwpt_id; + 
uint32_t stdev_id; + uint32_t idev_id; + unsigned long page_size; + unsigned long bitmap_size; + void *bitmap; + void *buffer; +}; + +FIXTURE_VARIANT(iommufd_dirty_tracking) +{ + unsigned long buffer_size; +}; + +FIXTURE_SETUP(iommufd_dirty_tracking) +{ + void *vrc; + int rc; + + self->fd = open("/dev/iommu", O_RDWR); + ASSERT_NE(-1, self->fd); + + rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size); + if (rc || !self->buffer) { + SKIP(return, "Skipping buffer_size=%lu due to errno=%d", + variant->buffer_size, rc); + } + + assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); + vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + assert(vrc == self->buffer); + + self->page_size = MOCK_PAGE_SIZE; + self->bitmap_size = + variant->buffer_size / self->page_size / BITS_PER_BYTE; + + /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */ + rc = posix_memalign(&self->bitmap, PAGE_SIZE, + self->bitmap_size + MOCK_PAGE_SIZE); + assert(!rc); + assert(self->bitmap); + assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); + + test_ioctl_ioas_alloc(&self->ioas_id); + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id, + &self->idev_id); +} + +FIXTURE_TEARDOWN(iommufd_dirty_tracking) +{ + munmap(self->buffer, variant->buffer_size); + munmap(self->bitmap, self->bitmap_size); + teardown_iommufd(self->fd, _metadata); +} + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +{ + /* one u32 index bitmap */ + .buffer_size = 128UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +{ + /* one u64 index bitmap */ + .buffer_size = 256UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +{ + /* two u64 index and trailing end bitmap */ + .buffer_size = 640UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +{ + /* 4K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +{ + /* 8K bitmap (256M IOVA range) */ + .buffer_size = 256UL * 1024UL * 1024UL, +}; + +TEST_F(iommufd_dirty_tracking, enforce_dirty) +{ + uint32_t ioas_id, stddev_id, idev_id; + uint32_t hwpt_id, _hwpt_id; + uint32_t dev_flags; + + /* Regular case */ + dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY; + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id, + NULL); + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); + + /* IOMMU device does not support dirty tracking */ + test_ioctl_ioas_alloc(&ioas_id); + test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id, + &idev_id); + test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_ioctl_destroy(stddev_id); +} + +TEST_F(iommufd_dirty_tracking, set_dirty_tracking) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_set_dirty_tracking(hwpt_id, true); + test_cmd_set_dirty_tracking(hwpt_id, false); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, device_dirty_capability) +{ + uint32_t caps = 0; + uint32_t stddev_id; + uint32_t hwpt_id; + + 
test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_get_hw_capabilities(self->idev_id, caps, + IOMMU_HW_CAP_DIRTY_TRACKING); + ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING, + caps & IOMMU_HW_CAP_DIRTY_TRACKING); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, 0, _metadata); + + /* PAGE_SIZE unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, 0, _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + /* Unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index a220ca2a689d160c95129d0bc281ed63dfb38c73..1fcd69cb0e416718c85657f5278e75824a0d7da4 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -612,10 +612,10 @@ TEST_FAIL_NTH(basic_fail_nth, device) &idev_id)) return -1; - if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info))) + if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) return -1; - if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id)) + if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id)) return -1; if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL)) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e0753d03ecaa8576005120cf86dd47a529df1f94..70d558e0f0c747ea887d33805c2e7ff78fc6b8fb 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -16,6 +16,25 @@ /* Hack to make assertions more readable */ #define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD +/* Imported from include/asm-generic/bitops/generic-non-atomic.h */ +#define BITS_PER_BYTE 8 +#define BITS_PER_LONG 
__BITS_PER_LONG +#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) + +static inline void set_bit(unsigned int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline bool test_bit(unsigned int nr, unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1))); +} + static void *buffer; static unsigned long BUFFER_SIZE; @@ -74,6 +93,38 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ stdev_id, hwpt_id, NULL)) +static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id, + __u32 stdev_flags, __u32 *stdev_id, + __u32 *hwpt_id, __u32 *idev_id) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + .id = ioas_id, + .mock_domain_flags = { .dev_flags = stdev_flags }, + }; + int ret; + + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); + if (ret) + return ret; + if (stdev_id) + *stdev_id = cmd.mock_domain_flags.out_stdev_id; + assert(cmd.id != 0); + if (hwpt_id) + *hwpt_id = cmd.mock_domain_flags.out_hwpt_id; + if (idev_id) + *idev_id = cmd.mock_domain_flags.out_idev_id; + return 0; +} +#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \ + ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, idev_id)) +#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, NULL)) + static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, __u32 *hwpt_id) { @@ -103,10 +154,11 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, pt_id, NULL)) static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, - __u32 *hwpt_id) + __u32 flags, __u32 *hwpt_id) { struct iommu_hwpt_alloc cmd = { .size = sizeof(cmd), + .flags = flags, .dev_id = device_id, .pt_id = pt_id, }; @@ -120,8 +172,12 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, return 0; } -#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id)) +#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) +#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \ + EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, unsigned int ioas_id) @@ -142,6 +198,125 @@ static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, #define test_cmd_access_replace_ioas(access_id, ioas_id) \ ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id)) +static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) +{ + struct iommu_hwpt_set_dirty_tracking cmd = { + .size = sizeof(cmd), + .flags = enabled ? 
IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0, + .hwpt_id = hwpt_id, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd); + if (ret) + return -errno; + return 0; +} +#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \ + ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled)) + +static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u32 flags) +{ + struct iommu_hwpt_get_dirty_bitmap cmd = { + .size = sizeof(cmd), + .hwpt_id = hwpt_id, + .flags = flags, + .iova = iova, + .length = length, + .page_size = page_size, + .data = (uintptr_t)bitmap, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd); + if (ret) + return ret; + return 0; +} + +#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \ + bitmap, flags) \ + ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \ + page_size, bitmap, flags)) + +static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u64 *dirty) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_DIRTY, + .id = hwpt_id, + .dirty = { + .iova = iova, + .length = length, + .page_size = page_size, + .uptr = (uintptr_t)bitmap, + } + }; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd); + if (ret) + return -ret; + if (dirty) + *dirty = cmd.dirty.out_nr_dirty; + return 0; +} + +#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \ + bitmap, nr) \ + ASSERT_EQ(0, \ + _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \ + page_size, bitmap, nr)) + +static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, __u64 *bitmap, + __u64 bitmap_size, __u32 flags, + struct __test_metadata *_metadata) +{ + unsigned long i, nbits = bitmap_size * BITS_PER_BYTE; + unsigned long nr = nbits / 2; + __u64 out_dirty = 0; + + /* Mark all even bits as dirty in the mock domain */ + for (i = 0; i < nbits; i += 2) + set_bit(i, (unsigned long *)bitmap); + + test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, + bitmap, &out_dirty); + ASSERT_EQ(nr, out_dirty); + + /* Expect all even bits as dirty in the user bitmap */ + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + /* Beware ASSERT_EQ() is two statements -- braces are not redundant! 
*/ + for (i = 0; i < nbits; i++) { + ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); + } + + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); + + /* It was read already -- expect all zeroes unless NO_CLEAR was set */ + for (i = 0; i < nbits; i++) { + ASSERT_EQ(!(i % 2) && (flags & + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), + test_bit(i, (unsigned long *)bitmap)); + } + + return 0; +} +#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \ + bitmap_size, flags, _metadata) \ + ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ + page_size, bitmap, bitmap_size, \ + flags, _metadata)) + static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) { @@ -266,6 +441,17 @@ static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer, IOMMU_IOAS_MAP_READABLE)); \ }) +#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \ + ({ \ + __u64 __iova = iova; \ + ASSERT_EQ(0, \ + _test_ioctl_ioas_map( \ + self->fd, ioas_id, buffer, length, &__iova, \ + IOMMU_IOAS_MAP_FIXED_IOVA | \ + IOMMU_IOAS_MAP_WRITEABLE | \ + IOMMU_IOAS_MAP_READABLE)); \ + }) + #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \ ({ \ __u64 __iova = iova; \ @@ -354,8 +540,8 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata) #endif /* @data can be NULL */ -static int _test_cmd_get_hw_info(int fd, __u32 device_id, - void *data, size_t data_len) +static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, + size_t data_len, uint32_t *capabilities) { struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data; struct iommu_hw_info cmd = { @@ -363,6 +549,7 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, .dev_id = device_id, .data_len = data_len, .data_uptr = (uint64_t)data, + .out_capabilities = 0, }; int ret; @@ -399,14 +586,19 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, assert(!info->flags); } + if (capabilities) + *capabilities = cmd.out_capabilities; + return 0; } -#define test_cmd_get_hw_info(device_id, data, data_len) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_info(device_id, data, data_len) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) + +#define test_err_get_hw_info(_errno, device_id, data, data_len) \ + EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) -#define test_err_get_hw_info(_errno, device_id, data, data_len) \ - EXPECT_ERRNO(_errno, \ - _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_capabilities(device_id, caps, mask) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps))
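Note: to tie the uapi and selftest pieces together, here is a minimal user-space sketch of the two new ioctls, mirroring what _test_cmd_set_dirty_tracking() and _test_cmd_get_dirty_bitmap() above do. The function names hwpt_track_and_fetch() and iova_is_dirty() are illustrative; the caller is assumed to already hold an iommufd file descriptor, a HWPT allocated with IOMMU_HWPT_ALLOC_DIRTY_TRACKING, and a zeroed bitmap buffer of at least length / page_size / 8 bytes.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int hwpt_track_and_fetch(int iommufd, uint32_t hwpt_id, uint64_t iova,
				uint64_t length, uint64_t page_size,
				uint64_t *bitmap)
{
	struct iommu_hwpt_set_dirty_tracking set = {
		.size = sizeof(set),
		.flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
		.hwpt_id = hwpt_id,
	};
	struct iommu_hwpt_get_dirty_bitmap get = {
		.size = sizeof(get),
		.hwpt_id = hwpt_id,
		.iova = iova,
		.length = length,
		.page_size = page_size,
		.data = (uintptr_t)bitmap,
	};

	if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &set))
		return -errno;
	/* Reads and, by default, clears the dirty bits in the IOPTEs. */
	if (ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_BITMAP, &get))
		return -errno;
	return 0;
}

/*
 * Bit test from the iommu_hwpt_get_dirty_bitmap kdoc, with the bit index
 * taken relative to the queried base IOVA (bit 0 of the bitmap corresponds
 * to @iova as passed to the ioctl).
 */
static int iova_is_dirty(const uint64_t *bitmap, uint64_t iova, uint64_t base,
			 uint64_t page_size)
{
	uint64_t bit = (iova - base) / page_size;

	return !!(bitmap[bit / 64] & (1ULL << (bit % 64)));
}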
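Note: on the QAT driver side, the pre-copy data exposed by qat_vf_precopy_ioctl() is consumed through the standard VFIO migration uapi. A rough sketch follows, assuming mig_fd is the saving migration fd returned when entering PRE_COPY and buf is a caller-provided scratch buffer; drain_precopy() is an illustrative name, not part of this series.

#include <errno.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>

static int drain_precopy(int mig_fd, void *buf, size_t buf_size)
{
	struct vfio_precopy_info info = {
		.argsz = sizeof(info),
	};

	if (ioctl(mig_fd, VFIO_MIG_GET_PRECOPY_INFO, &info))
		return -errno;

	/* For QAT VFs, initial_bytes counts the not-yet-read setup data. */
	while (info.initial_bytes) {
		ssize_t n = read(mig_fd, buf,
				 info.initial_bytes < buf_size ?
					info.initial_bytes : buf_size);

		if (n <= 0)
			return n ? -errno : -EIO;
		/* Forward buf[0..n) to the target's resuming fd here. */
		info.initial_bytes -= n;
	}
	return 0;
}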