diff --git a/MAINTAINERS b/MAINTAINERS index aed432c2b8e9c5b271846c1b126f9ba0cd10d41c..6df3375e7a21f225b673f0718ad05ca3aaa838d9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9531,6 +9531,11 @@ S: Maintained F: Documentation/ABI/testing/debugfs-hisi-zip F: drivers/crypto/hisilicon/zip/ +HISILICON HNS3 PTP SYNC DRIVER +M: Yonglong Liu +S: Supported +F: drivers/ptp/ptp_hisi.c + HMM - Heterogeneous Memory Management M: Jérôme Glisse L: linux-mm@kvack.org diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index cccfc23b08cb2e821265404bd8fda780f0e1ae96..11ede604a739095cfde19163daa43aced6c91d80 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -899,6 +899,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_XGENE_SLIMPRO=m CONFIG_I2C_SLAVE=y CONFIG_SPI=y +CONFIG_PTP_HISI=m CONFIG_GPIO_SYSFS=y CONFIG_GPIO_DWAPB=y CONFIG_POWER_RESET_HISI=y diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 3312e1d93c3b5120d4fccb087369caf415679cc0..14bd24316d419b537e787335dc0a321e2b2164ba 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -92,6 +92,7 @@ config HNS3 depends on PCI select NET_DEVLINK select PAGE_POOL + select PAGE_POOL_STATS help This selects the framework support for Hisilicon Network Subsystem 3. 
This layer facilitates clients like ENET, RoCE and user-space ethernet diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index e214bfaece1f3aca3d0603400f9d779af1f1edf4..cc9158097e933d87ef4c7e9cca3893a976f59a61 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_HNS3) += hnae3.o obj-$(CONFIG_HNS3_ENET) += hns3.o hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o +hns3-objs += hns3_ext.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o @@ -24,6 +25,6 @@ obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o - +hclge-objs += hns3pf/hclge_ext.o hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index abcd7877f7d2a585df38e017f1d86107b96a1253..0de9b83c9d4ecfd32b21c2639112e651ba7e1378 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ + HCLGE_MBX_SET_QB = 0x28, /* (VF -> PF) set queue bonding */ + HCLGE_MBX_PUSH_QB_STATE, /* (PF -> VF) push qb state */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ @@ -77,6 +79,12 @@ enum hclge_mbx_tbl_cfg_subcode { HCLGE_MBX_VPORT_LIST_CLEAR, }; +enum hclge_mbx_qb_cfg_subcode { + HCLGE_MBX_QB_CHECK_CAPS = 0, /* query whether support qb */ + 
HCLGE_MBX_QB_ENABLE, /* request pf enable qb */ + HCLGE_MBX_QB_GET_STATE /* query whether qb enabled */ +}; + #define HCLGE_MBX_MAX_MSG_SIZE 14 #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index aaf1f42624a79b609e617b2700b0af82a2c5d81b..6d0321e07f9204048ae4242d64e6cdf14a6a021e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -43,6 +43,7 @@ #define HNAE3_DEVICE_VERSION_V1 0x00020 #define HNAE3_DEVICE_VERSION_V2 0x00021 #define HNAE3_DEVICE_VERSION_V3 0x00030 +#define HNAE3_DEVICE_VERSION_V4 0x00032 #define HNAE3_PCI_REVISION_BIT_SIZE 8 @@ -103,6 +104,9 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_LANE_NUM_B, HNAE3_DEV_SUPPORT_WOL_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B, + HNAE3_DEV_SUPPORT_VF_FAULT_B, + HNAE3_DEV_SUPPORT_NOTIFY_PKT_B, + HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, }; #define hnae3_ae_dev_fd_supported(ae_dev) \ @@ -177,6 +181,15 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_tm_flush_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps) +#define hnae3_ae_dev_vf_fault_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps) + +#define hnae3_ae_dev_notify_pkt_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_NOTIFY_PKT_B, (ae_dev)->caps) + +#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -271,6 +284,7 @@ enum hnae3_reset_type { HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, HNAE3_NONE_RESET, + HNAE3_VF_EXP_RESET, HNAE3_MAX_RESET, }; @@ -357,6 +371,15 @@ struct hnae3_vector_info { #define HNAE3_FW_VERSION_BYTE0_SHIFT 0 #define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0) +#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24 +#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24) +#define 
HNAE3_SCC_VERSION_BYTE2_SHIFT 16 +#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16) +#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8 +#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8) +#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0 +#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0) + struct hnae3_ring_chain_node { struct hnae3_ring_chain_node *next; u32 tqp_index; @@ -383,6 +406,7 @@ struct hnae3_dev_specs { u16 mc_mac_size; u32 mac_stats_num; u8 tnl_num; + u8 hilink_version; }; struct hnae3_client_ops { @@ -563,6 +587,10 @@ struct hnae3_ae_dev { * Check if any cls flower rule exist * dbg_read_cmd * Execute debugfs read command. + * request_flush_qb_config + * Request to update queue bonding configuration + * query_fd_qb_state + * Query whether hw queue bonding enabled * set_tx_hwts_info * Save information for 1588 tx packet * get_rx_hwts @@ -762,6 +790,8 @@ struct hnae3_ae_ops { struct ethtool_link_ksettings *cmd); int (*set_phy_link_ksettings)(struct hnae3_handle *handle, const struct ethtool_link_ksettings *cmd); + void (*request_flush_qb_config)(struct hnae3_handle *handle); + bool (*query_fd_qb_state)(struct hnae3_handle *handle); bool (*set_tx_hwts_info)(struct hnae3_handle *handle, struct sk_buff *skb); void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb, @@ -777,6 +807,8 @@ struct hnae3_ae_ops { struct ethtool_wolinfo *wol); int (*set_wol)(struct hnae3_handle *handle, struct ethtool_wolinfo *wol); + int (*priv_ops)(struct hnae3_handle *handle, int opcode, + void *data, size_t length); }; struct hnae3_dcb_ops { @@ -814,6 +846,7 @@ struct hnae3_tc_info { u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; + bool mqprio_destroy; bool dcb_ets_active; }; @@ -874,6 +907,7 @@ struct hnae3_roce_private_info { enum hnae3_pflag { HNAE3_PFLAG_LIMIT_PROMISC, + HNAE3_PFLAG_FD_QB_ENABLE, HNAE3_PFLAG_MAX }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3_ext.h 
b/drivers/net/ethernet/hisilicon/hns3/hnae3_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..7e0463c320bda19a6ef910e1b2ef2b14c4ba6fbd --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3_ext.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2023 Hisilicon Limited. + +#ifndef __HNAE3_EXT_H +#define __HNAE3_EXT_H + +enum hnae3_event_type_custom { + HNAE3_VF_RESET_CUSTOM, + HNAE3_VF_FUNC_RESET_CUSTOM, + HNAE3_VF_PF_FUNC_RESET_CUSTOM, + HNAE3_VF_FULL_RESET_CUSTOM, + HNAE3_FLR_RESET_CUSTOM, + HNAE3_FUNC_RESET_CUSTOM, + HNAE3_GLOBAL_RESET_CUSTOM, + HNAE3_IMP_RESET_CUSTOM, + HNAE3_UNKNOWN_RESET_CUSTOM, + HNAE3_NONE_RESET_CUSTOM, + HNAE3_PORT_FAULT, + HNAE3_RESET_DONE_CUSTOM, + HNAE3_FUNC_RESET_FAIL_CUSTOM, + HNAE3_GLOBAL_RESET_FAIL_CUSTOM, + HNAE3_IMP_RESET_FAIL_CUSTOM, + HNAE3_PPU_POISON_CUSTOM, + HNAE3_IMP_RD_POISON_CUSTOM, + HNAE3_ROCEE_AXI_RESP_CUSTOM, + HNAE3_INVALID_EVENT_CUSTOM, +}; + +enum hnae3_ext_opcode { + HNAE3_EXT_OPC_RESET, + HNAE3_EXT_OPC_EVENT_CALLBACK, + HNAE3_EXT_OPC_GET_PFC_STORM_PARA, + HNAE3_EXT_OPC_SET_PFC_STORM_PARA, + HNAE3_EXT_OPC_SET_NOTIFY_PARAM, + HNAE3_EXT_OPC_SET_NOTIFY_START, + HNAE3_EXT_OPC_SET_TORUS_PARAM, + HNAE3_EXT_OPC_GET_TORUS_PARAM, + HNAE3_EXT_OPC_CLEAN_STATS64, + HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO, + HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO, + HNAE3_EXT_OPC_GET_PORT_NUM, + HNAE3_EXT_OPC_GET_PRESENT, + HNAE3_EXT_OPC_SET_SFP_STATE, + HNAE3_EXT_OPC_DISABLE_LANE, + HNAE3_EXT_OPC_GET_LANE_STATUS, + HNAE3_EXT_OPC_DISABLE_CLOCK, + HNAE3_EXT_OPC_SET_PFC_TIME, + HNAE3_EXT_OPC_GET_HILINK_REF_LOS, + HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS, + HNAE3_EXT_OPC_GET_PORT_TYPE, + HNAE3_EXT_OPC_SET_MAC_STATE, + HNAE3_EXT_OPC_SET_LED, + HNAE3_EXT_OPC_GET_LED_SIGNAL, + HNAE3_EXT_OPC_GET_PHY_REG, + HNAE3_EXT_OPC_SET_PHY_REG, +}; + +struct hnae3_led_state_para { + u32 type; + u32 status; +}; + +struct hnae3_phy_para { + u32 page_select_addr; + u32 reg_addr; + u16 page; + u16 data; +}; + +struct 
hnae3_lamp_signal { + u8 error; + u8 locate; + u8 activity; +}; + +struct hnae3_pfc_storm_para { + u32 dir; + u32 enable; + u32 period_ms; + u32 times; + u32 recovery_period_ms; +}; + +enum hnae3_port_fault_type { + HNAE3_FAULT_TYPE_CDR_FLASH, + HNAE3_FAULT_TYPE_9545_ERR, + HNAE3_FAULT_TYPE_CDR_CORE, + HNAE3_FAULT_TYPE_HILINK_REF_LOS, + HNAE3_FAULT_TYPE_INVALID +}; + +struct hnae3_port_fault { + u32 fault_type; + u32 fault_status; +}; + +struct hnae3_notify_pkt_param { + u32 ipg; /* inter-packet gap of sending, the unit is one cycle of clock */ + u16 num; /* packet number of sending */ + u8 enable; /* send enable, 0=Disable, 1=Enable */ + u8 init; /* initialization flag, product does not need to set value */ + u8 data[64]; /* note packet data */ +}; + +struct hnae3_torus_param { + u32 enable; /* 1d torus mode enable */ + u32 mac_id; /* export mac id of port */ + u8 is_node0; /* if current node is node0 */ +}; + +struct hane3_port_ext_id_info { + u32 chip_id; + u32 mac_id; + u32 io_die_id; +}; + +struct hane3_port_ext_num_info { + u32 chip_num; + u32 io_die_num; +}; +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index dcecb23daac6e15c240d9834c7eb29c2b8cb5d4a..4f4b5292f3588897a6528d0e27eeac40f4e6ea12 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -157,6 +157,9 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B}, {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B}, {HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B}, + {HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B}, + {HCLGE_COMM_CAP_NOTIFY_PKT_B, HNAE3_DEV_SUPPORT_NOTIFY_PKT_B}, + {HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { 
@@ -350,7 +353,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw) static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) { static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = { - {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS}, + {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT}, }; u32 i; @@ -469,10 +472,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw, int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, int num) { + bool is_special = hclge_comm_is_special_opcode(desc->opcode); struct hclge_comm_cmq_ring *csq = &hw->cmq.csq; int ret; int ntc; + if (hw->cmq.ops.trace_cmd_send) + hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special); + spin_lock_bh(&hw->cmq.csq.lock); if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) { @@ -506,6 +513,9 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, spin_unlock_bh(&hw->cmq.csq.lock); + if (hw->cmq.ops.trace_cmd_get) + hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special); + return ret; } @@ -583,6 +593,17 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) return ret; } +void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, + const struct hclge_comm_cmq_ops *ops) +{ + struct hclge_comm_cmq *cmdq = &hw->cmq; + + if (ops) { + cmdq->ops.trace_cmd_send = ops->trace_cmd_send; + cmdq->ops.trace_cmd_get = ops->trace_cmd_get; + } +} + int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, unsigned long reset_pending) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 2b7197ce0ae8fcae190ed04ad510faf0dd1e32d3..e3d72b21aba86573e8e3149c8a23b85578319779 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -55,7 +55,7 @@ 
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3 #define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024 #define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000 -#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000 +#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT 1000000 enum hclge_opcode_type { /* Generic commands */ @@ -91,8 +91,10 @@ enum hclge_opcode_type { HCLGE_OPC_DFX_RCB_REG = 0x004D, HCLGE_OPC_DFX_TQP_REG = 0x004E, HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + HCLGE_OPC_DFX_GEN_REG = 0x7038, HCLGE_OPC_QUERY_DEV_SPECS = 0x0050, + HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, @@ -245,6 +247,9 @@ enum hclge_opcode_type { HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + /* SCC commands */ + HCLGE_OPC_QUERY_SCC_VER = 0x1A84, + /* Mailbox command */ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001, @@ -309,6 +314,23 @@ enum hclge_opcode_type { /* Query link diagnosis info command */ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, + + /* EXT command */ + HCLGE_OPC_CONFIG_NIC_CLOCK = 0x0060, + HCLGE_OPC_CONFIG_SWITCH_PARAM = 0x1033, + HCLGE_OPC_CONFIG_VLAN_FILTER = 0x1100, + HCLGE_OPC_SET_NOTIFY_PKT = 0x180A, + HCLGE_OPC_CONFIG_1D_TORUS = 0x2300, + HCLGE_OPC_CHIP_ID_GET = 0x7003, + HCLGE_OPC_GET_CHIP_NUM = 0x7005, + HCLGE_OPC_GET_PORT_NUM = 0x7006, + HCLGE_OPC_SET_LED = 0x7007, + HCLGE_OPC_DISABLE_NET_LANE = 0x7008, + HCLGE_OPC_CFG_PAUSE_STORM_PARA = 0x7019, + HCLGE_OPC_CFG_GET_HILINK_REF_LOS = 0x701B, + HCLGE_OPC_GET_PORT_FAULT_STATUS = 0x7023, + HCLGE_OPC_SFP_GET_PRESENT = 0x7101, + HCLGE_OPC_SFP_SET_STATUS = 0x7102, }; enum hclge_comm_cmd_return_status { @@ -348,9 +370,12 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_GRO_B = 20, HCLGE_COMM_CAP_FD_B = 21, HCLGE_COMM_CAP_FEC_STATS_B = 25, + HCLGE_COMM_CAP_VF_FAULT_B = 26, HCLGE_COMM_CAP_LANE_NUM_B = 27, HCLGE_COMM_CAP_WOL_B = 28, + HCLGE_COMM_CAP_NOTIFY_PKT_B = 29, HCLGE_COMM_CAP_TM_FLUSH_B = 31, + HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32, }; enum HCLGE_COMM_API_CAP_BITS { @@ -390,6 
+415,11 @@ struct hclge_comm_query_version_cmd { __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */ }; +struct hclge_comm_query_scc_cmd { + __le32 scc_version; + u8 rsv[20]; +}; + #define HCLGE_DESC_DATA_LEN 6 struct hclge_desc { __le16 opcode; @@ -421,11 +451,22 @@ enum hclge_comm_cmd_status { HCLGE_COMM_ERR_CSQ_ERROR = -3, }; +struct hclge_comm_hw; +struct hclge_comm_cmq_ops { + void (*trace_cmd_send)(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int num, bool is_special); + void (*trace_cmd_get)(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int num, bool is_special); +}; + struct hclge_comm_cmq { struct hclge_comm_cmq_ring csq; struct hclge_comm_cmq_ring crq; u16 tx_timeout; enum hclge_comm_cmd_status last_status; + struct hclge_comm_cmq_ops ops; }; struct hclge_comm_hw { @@ -472,5 +513,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw); int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, unsigned long reset_pending); - +void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, + const struct hclge_comm_cmq_ops *ops); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 4f385a18d288e4a28e46253380336e10187fe10f..48192fb9f75bd9052a2c027db7d69f048e83cbd2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -414,6 +414,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = { }, { .name = "support tm flush", .cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B, + }, { + .name = "support vf fault detect", + .cap_bit = HNAE3_DEV_SUPPORT_VF_FAULT_B, } }; @@ -959,7 +962,7 @@ static const struct hns3_dbg_item tx_bd_info_items[] = { { "OT_VLAN_TAG", 3 }, { "TV", 5 }, { "OLT_VLAN_LEN", 2 }, - { "PAYLEN_OL4CS", 2 }, + { "PAYLEN_FDOP_OL4CS", 2 }, { "BD_FE_SC_VLD", 2 }, { "MSS_HW_CSUM", 0 }, }; @@ -978,7 +981,7 @@ static 
void hns3_dump_tx_bd_info(struct hns3_desc *desc, char **result, int idx) sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); sprintf(result[j++], "%u", le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); - sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); + sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_fdop_ol4cs)); sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); } @@ -1094,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) *pos += scnprintf(buf + *pos, len - *pos, "TX timeout threshold: %d seconds\n", dev->watchdog_timeo / HZ); + *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n", + dev_specs->hilink_version); } static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 677cfaa5fe08cd73f773893fefdff3bca5e97ae4..52b1ae1e57e5a3c5c19bf44a4dde755a9175c964 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -26,6 +26,7 @@ #include #include "hnae3.h" +#include "hnae3_ext.h" #include "hns3_enet.h" /* All hns3 tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with @@ -1541,6 +1542,73 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, return 0; } +static bool hns3_query_fd_qb_state(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (!test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags)) + return false; + + if (!ops->query_fd_qb_state) + return false; + + return ops->query_fd_qb_state(handle); +} + +/* fd_op is the field of tx bd indicates hw whether to add or delete + * a qb rule or do nothing. 
+ */ +static u8 hns3_fd_qb_handle(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + struct hnae3_handle *handle = ring->tqp->handle; + union l4_hdr_info l4; + union l3_hdr_info l3; + u8 l4_proto_tmp = 0; + __be16 frag_off; + u8 ip_version; + u8 fd_op = 0; + + if (!hns3_query_fd_qb_state(handle)) + return 0; + + if (skb->encapsulation) { + ip_version = inner_ip_hdr(skb)->version; + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip_version = ip_hdr(skb)->version; + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + } + + if (ip_version == IP_VERSION_IPV6) { + unsigned char *exthdr; + + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (ip_version == IP_VERSION_IPV4) { + l4_proto_tmp = l3.v4->protocol; + } + + if (l4_proto_tmp != IPPROTO_TCP) + return 0; + + ring->fd_qb_tx_sample++; + if (l4.tcp->fin || l4.tcp->rst) { + hnae3_set_bit(fd_op, HNS3_TXD_FD_DEL_B, 1); + ring->fd_qb_tx_sample = 0; + } else if (l4.tcp->syn || + ring->fd_qb_tx_sample >= HNS3_FD_QB_FORCE_CNT_MAX) { + hnae3_set_bit(fd_op, HNS3_TXD_FD_ADD_B, 1); + ring->fd_qb_tx_sample = 0; + } + + return fd_op; +} + /* check if the hardware is capable of checksum offloading */ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) { @@ -1558,7 +1626,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) } struct hns3_desc_param { - u32 paylen_ol4cs; + u32 paylen_fdop_ol4cs; u32 ol_type_vlan_len_msec; u32 type_cs_vlan_tso; u16 mss_hw_csum; @@ -1568,7 +1636,7 @@ struct hns3_desc_param { static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) { - pa->paylen_ol4cs = skb->len; + pa->paylen_fdop_ol4cs = skb->len; pa->ol_type_vlan_len_msec = 0; pa->type_cs_vlan_tso = 0; pa->mss_hw_csum = 0; @@ -1636,7 +1704,7 @@ static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, return ret; 
} - ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, + ret = hns3_set_tso(skb, ¶m->paylen_fdop_ol4cs, ¶m->mss_hw_csum, ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); if (unlikely(ret < 0)) { hns3_ring_stats_update(ring, tx_tso_err); @@ -1650,6 +1718,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, struct hns3_desc_cb *desc_cb) { struct hns3_desc_param param; + u8 fd_op; int ret; hns3_init_desc_data(skb, ¶m); @@ -1665,11 +1734,15 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, return ret; } + fd_op = hns3_fd_qb_handle(ring, skb); + hnae3_set_field(param.paylen_fdop_ol4cs, HNS3_TXD_FD_OP_M, + HNS3_TXD_FD_OP_S, fd_op); + /* Set txbd */ desc->tx.ol_type_vlan_len_msec = cpu_to_le32(param.ol_type_vlan_len_msec); desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); - desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.paylen_fdop_ol4cs = cpu_to_le32(param.paylen_fdop_ol4cs); desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); @@ -4960,6 +5033,11 @@ static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) } } +bool hns3_is_page_pool_enabled(void) +{ + return page_pool_enabled; +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -5390,6 +5468,9 @@ static int hns3_client_init(struct hnae3_handle *handle) hns3_state_init(handle); + if (test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps)) + set_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->supported_pflags); + ret = register_netdev(netdev); if (ret) { dev_err(priv->dev, "probe register netdev fail!\n"); @@ -5943,12 +6024,16 @@ static void hns3_process_hw_error(struct hnae3_handle *handle, if (hns3_hw_err[i].type == type) { dev_err(&handle->pdev->dev, "Detected %s!\n", hns3_hw_err[i].msg); + if (handle->ae_algo->ops->priv_ops) + handle->ae_algo->ops->priv_ops(handle, + HNAE3_EXT_OPC_EVENT_CALLBACK, &type, + sizeof(type)); break; } 
} } -static const struct hnae3_client_ops client_ops = { +const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index acd756b0c7c9a4134a0d7c47a50b8112dc881ee1..3afab7db17eba2d47b719af0596b9cde6a95c590 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -179,6 +179,11 @@ enum hns3_nic_state { #define HNS3_TXD_DECTTL_S 12 #define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) +#define HNS3_TXD_FD_ADD_B 1 +#define HNS3_TXD_FD_DEL_B 0 +#define HNS3_TXD_FD_OP_M GENMASK(21, 20) +#define HNS3_TXD_FD_OP_S 20 + #define HNS3_TXD_OL4CS_B 22 #define HNS3_TXD_MSS_S 0 @@ -214,6 +219,8 @@ enum hns3_nic_state { #define HNS3_CQ_MODE_EQE 1U #define HNS3_CQ_MODE_CQE 0U +#define HNS3_FD_QB_FORCE_CNT_MAX 20 + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, @@ -285,7 +292,7 @@ struct __packed hns3_desc { }; }; - __le32 paylen_ol4cs; + __le32 paylen_fdop_ol4cs; __le16 bdtp_fe_sc_vld_ra_ri; __le16 mss_hw_csum; } tx; @@ -398,6 +405,9 @@ enum hns3_pkt_ol4type { HNS3_OL4_TYPE_UNKNOWN }; +#define IP_VERSION_IPV4 0x4 +#define IP_VERSION_IPV6 0x6 + struct hns3_rx_ptype { u32 ptype : 8; u32 csum_level : 2; @@ -754,4 +764,5 @@ void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, void hns3_external_lb_prepare(struct net_device *ndev, bool if_running); void hns3_external_lb_restore(struct net_device *ndev, bool if_running); +bool hns3_is_page_pool_enabled(void); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 78181eea93c1c155c7ccf0a5ce6b63a73d937a91..86cfb4f8ac501367d0c992f38a4640cb2325b102 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c 
@@ -5,6 +5,7 @@ #include #include #include +#include #include "hns3_enet.h" #include "hns3_ethtool.h" @@ -481,22 +482,38 @@ static void hns3_update_limit_promisc_mode(struct net_device *netdev, hns3_request_update_promisc_mode(handle); } +static void hns3_update_fd_qb_state(struct net_device *netdev, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (!handle->ae_algo->ops->request_flush_qb_config) + return; + + handle->ae_algo->ops->request_flush_qb_config(handle); +} + static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = { - { "limit_promisc", hns3_update_limit_promisc_mode } + { "limit_promisc", hns3_update_limit_promisc_mode }, + { "qb_enable", hns3_update_fd_qb_state }, }; static int hns3_get_sset_count(struct net_device *netdev, int stringset) { struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; + int pp_stats_count = 0; if (!ops->get_sset_count) return -EOPNOTSUPP; switch (stringset) { case ETH_SS_STATS: +#ifdef CONFIG_PAGE_POOL_STATS + if (hns3_is_page_pool_enabled()) + pp_stats_count = page_pool_ethtool_stats_get_count(); +#endif return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + - ops->get_sset_count(h, stringset)); + ops->get_sset_count(h, stringset) + pp_stats_count); case ETH_SS_TEST: return ops->get_sset_count(h, stringset); @@ -564,6 +581,10 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: +#ifdef CONFIG_PAGE_POOL_STATS + if (hns3_is_page_pool_enabled()) + buff = page_pool_ethtool_stats_get_strings(buff); +#endif buff = hns3_get_strings_tqps(h, buff); ops->get_strings(h, stringset, (u8 *)buff); break; @@ -611,6 +632,28 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) return data; } +#ifdef CONFIG_PAGE_POOL_STATS +static u64 *hns3_ethtool_pp_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *priv = handle->priv; + int ring_num = 
handle->kinfo.num_tqps; + struct page_pool_stats stats = {0}; + struct page_pool *page_pool; + int i; + + if (!hns3_is_page_pool_enabled()) + return data; + + for (i = 0; i < ring_num; i++) { + page_pool = priv->ring[i + ring_num].page_pool; + if (page_pool) + page_pool_get_stats(page_pool, &stats); + } + + return page_pool_ethtool_stats_get(data, &stats); +} +#endif + /* hns3_get_stats - get detail statistics. * @netdev: net device * @stats: statistics info. @@ -632,6 +675,10 @@ static void hns3_get_stats(struct net_device *netdev, return; } +#ifdef CONFIG_PAGE_POOL_STATS + p = hns3_ethtool_pp_stats(h, p); +#endif + h->ae_algo->ops->update_stats(h); /* get per-queue stats */ @@ -1798,6 +1845,14 @@ static int hns3_get_module_info(struct net_device *netdev, modinfo->type = ETH_MODULE_SFF_8636; modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; break; + case SFF8024_ID_QSFP_DD: + case SFF8024_ID_QSFP_PLUS_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + if (sfp_type.flat_mem & HNS3_CMIS_FLAT_MEMORY) + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + else + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; default: netdev_err(netdev, "Optical module unknown: %#x\n", sfp_type.type); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h index da207d1d9aa93d1ce1bc95b58bf4aa9fe49d7f20..34504ed2c0868cf81156c5933aa345c20807846b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -12,9 +12,11 @@ struct hns3_stats { int stats_offset; }; +#define HNS3_CMIS_FLAT_MEMORY BIT(7) struct hns3_sfp_type { u8 type; u8 ext_type; + u8 flat_mem; }; struct hns3_pflag_desc { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ext.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ext.c new file mode 100644 index 0000000000000000000000000000000000000000..9220418e67b633ee471aae0b1fbbc49661bcd5c3 --- /dev/null +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3_ext.c @@ -0,0 +1,543 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2023 Hisilicon Limited. + +#include "hns3_ext.h" + +int nic_netdev_match_check(struct net_device *ndev) +{ +#define HNS3_DRIVER_NAME_LEN 5 + + struct ethtool_drvinfo drv_info; + struct hnae3_handle *h; + + if (!ndev || !ndev->ethtool_ops || + !ndev->ethtool_ops->get_drvinfo) + return -EINVAL; + + ndev->ethtool_ops->get_drvinfo(ndev, &drv_info); + + if (strncmp(drv_info.driver, "hns3", HNS3_DRIVER_NAME_LEN)) + return -EINVAL; + + h = hns3_get_handle(ndev); + if (h->flags & HNAE3_SUPPORT_VF) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(nic_netdev_match_check); + +static int nic_invoke_pri_ops(struct net_device *ndev, int opcode, + void *data, size_t length) + +{ + struct hnae3_handle *h; + int ret; + + if (nic_netdev_match_check(ndev)) + return -ENODEV; + + if ((!data && length) || (data && !length)) { + netdev_err(ndev, "failed to check data and length"); + return -EINVAL; + } + + h = hns3_get_handle(ndev); + if (!h->ae_algo->ops->priv_ops) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->priv_ops(h, opcode, data, length); + if (ret) + netdev_err(ndev, + "failed to invoke pri ops, opcode = %#x, ret = %d\n", + opcode, ret); + + return ret; +} + +void nic_chip_recover_handler(struct net_device *ndev, + enum hnae3_event_type_custom event_t) +{ + if (nic_netdev_match_check(ndev)) + return; + + dev_info(&ndev->dev, "reset type is %d!!\n", event_t); + + if (event_t == HNAE3_PPU_POISON_CUSTOM) + event_t = HNAE3_FUNC_RESET_CUSTOM; + + if (event_t != HNAE3_FUNC_RESET_CUSTOM && + event_t != HNAE3_GLOBAL_RESET_CUSTOM && + event_t != HNAE3_IMP_RESET_CUSTOM) { + dev_err(&ndev->dev, "reset type err!!\n"); + return; + } + + nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_RESET, &event_t, sizeof(event_t)); +} +EXPORT_SYMBOL(nic_chip_recover_handler); + +static int nic_check_pfc_storm_para(u32 dir, u32 enable, u32 period_ms, + u32 times, u32 recovery_period_ms) +{ + 
if ((dir != HNS3_PFC_STORM_PARA_DIR_RX && + dir != HNS3_PFC_STORM_PARA_DIR_TX) || + (enable != HNS3_PFC_STORM_PARA_DISABLE && + enable != HNS3_PFC_STORM_PARA_ENABLE)) + return -EINVAL; + + if (period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN || + period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX || + recovery_period_ms < HNS3_PFC_STORM_PARA_PERIOD_MIN || + recovery_period_ms > HNS3_PFC_STORM_PARA_PERIOD_MAX || + times <= 0) + return -EINVAL; + + return 0; +} + +int nic_set_pfc_storm_para(struct net_device *ndev, u32 dir, u32 enable, + u32 period_ms, u32 times, u32 recovery_period_ms) +{ + struct hnae3_pfc_storm_para para; + + if (nic_check_pfc_storm_para(dir, enable, period_ms, times, + recovery_period_ms)) { + pr_err("set pfc storm para failed because invalid input param.\n"); + return -EINVAL; + } + + para.dir = dir; + para.enable = enable; + para.period_ms = period_ms; + para.times = times; + para.recovery_period_ms = recovery_period_ms; + + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PFC_STORM_PARA, + ¶, sizeof(para)); +} +EXPORT_SYMBOL(nic_set_pfc_storm_para); + +int nic_get_pfc_storm_para(struct net_device *ndev, u32 dir, u32 *enable, + u32 *period_ms, u32 *times, u32 *recovery_period_ms) +{ + struct hnae3_pfc_storm_para para; + int ret; + + if (!enable || !period_ms || !times || !recovery_period_ms || + (dir != HNS3_PFC_STORM_PARA_DIR_RX && + dir != HNS3_PFC_STORM_PARA_DIR_TX)) { + pr_err("get pfc storm para failed because invalid input param.\n"); + return -EINVAL; + } + + para.dir = dir; + ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PFC_STORM_PARA, + ¶, sizeof(para)); + if (ret) + return ret; + + *enable = para.enable; + *period_ms = para.period_ms; + *times = para.times; + *recovery_period_ms = para.recovery_period_ms; + return 0; +} +EXPORT_SYMBOL(nic_get_pfc_storm_para); + +int nic_set_notify_pkt_param(struct net_device *ndev, + struct hnae3_notify_pkt_param *param) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_PARAM, + param, 
sizeof(*param)); +} +EXPORT_SYMBOL(nic_set_notify_pkt_param); + +int nic_set_notify_pkt_start(struct net_device *ndev) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_NOTIFY_START, NULL, 0); +} +EXPORT_SYMBOL(nic_set_notify_pkt_start); + +int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param) +{ + if (!param || (param->enable != 0 && param->enable != 1)) + return -EINVAL; + + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_TORUS_PARAM, + param, sizeof(*param)); +} +EXPORT_SYMBOL(nic_set_torus_param); + +int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_TORUS_PARAM, + param, sizeof(*param)); +} +EXPORT_SYMBOL(nic_get_torus_param); + +int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct hnae3_knic_private_info *kinfo; + struct hns3_enet_ring *ring; + struct hns3_nic_priv *priv; + struct hnae3_handle *h; + int i, ret; + + if (nic_netdev_match_check(ndev)) + return -ENODEV; + + priv = netdev_priv(ndev); + h = hns3_get_handle(ndev); + kinfo = &h->kinfo; + + rtnl_lock(); + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { + ret = -EBUSY; + goto end_unlock; + } + + ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_CLEAN_STATS64, + NULL, 0); + if (ret) + goto end_unlock; + + for (i = 0; i < kinfo->num_tqps; i++) { + ring = &priv->ring[i]; + memset(&ring->stats, 0, sizeof(struct ring_stats)); + ring = &priv->ring[i + kinfo->num_tqps]; + memset(&ring->stats, 0, sizeof(struct ring_stats)); + } + + memset(&ndev->stats, 0, sizeof(struct net_device_stats)); + netdev_info(ndev, "clean stats succ\n"); + +end_unlock: + rtnl_unlock(); + return ret; +} +EXPORT_SYMBOL(nic_clean_stats64); + +int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask) +{ + struct hns3_enet_tqp_vector *tqp_vector; + struct hns3_nic_priv *priv; + int ret = 0; + u16 i; + + if 
(nic_netdev_match_check(ndev)) + return -ENODEV; + + if (!affinity_mask) { + netdev_err(ndev, + "Invalid input param when set ethernet cpu affinity\n"); + return -EINVAL; + } + + priv = netdev_priv(ndev); + rtnl_lock(); + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { + ret = -EBUSY; + goto err_unlock; + } + + if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { + netdev_err(ndev, + "ethernet is down, not support cpu affinity set\n"); + ret = -ENETDOWN; + goto err_unlock; + } + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + if (tqp_vector->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + tqp_vector->affinity_mask = *affinity_mask; + + ret = irq_set_affinity_hint(tqp_vector->vector_irq, NULL); + if (ret) { + netdev_err(ndev, + "failed to reset affinity hint, ret = %d\n", ret); + goto err_unlock; + } + + ret = irq_set_affinity_hint(tqp_vector->vector_irq, + &tqp_vector->affinity_mask); + if (ret) { + netdev_err(ndev, + "failed to set affinity hint, ret = %d\n", ret); + goto err_unlock; + } + } + + netdev_info(ndev, "set nic cpu affinity %*pb succeed\n", + cpumask_pr_args(affinity_mask)); + +err_unlock: + rtnl_unlock(); + return ret; +} +EXPORT_SYMBOL(nic_set_cpu_affinity); + +static int nic_get_ext_id_info(struct net_device *ndev, + struct hane3_port_ext_id_info *id_info) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO, + id_info, sizeof(*id_info)); +} + +int nic_get_chipid(struct net_device *ndev, u32 *chip_id) +{ + struct hane3_port_ext_id_info info; + int ret; + + if (!chip_id) + return -EINVAL; + + ret = nic_get_ext_id_info(ndev, &info); + if (ret) + return ret; + + *chip_id = info.chip_id; + return 0; +} +EXPORT_SYMBOL(nic_get_chipid); + +int nic_get_mac_id(struct net_device *ndev, u32 *mac_id) +{ + struct hane3_port_ext_id_info info; + int ret; + + if (!mac_id) + return -EINVAL; + + ret = nic_get_ext_id_info(ndev, &info); + if (ret) + return 
ret; + + *mac_id = info.mac_id; + return 0; +} +EXPORT_SYMBOL(nic_get_mac_id); + +int nic_get_io_die_id(struct net_device *ndev, u32 *io_die_id) +{ + struct hane3_port_ext_id_info info; + int ret; + + if (!io_die_id) + return -EINVAL; + + ret = nic_get_ext_id_info(ndev, &info); + if (ret) + return ret; + + *io_die_id = info.io_die_id; + return 0; +} +EXPORT_SYMBOL(nic_get_io_die_id); + +static int nic_get_ext_num_info(struct net_device *ndev, + struct hane3_port_ext_num_info *num_info) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO, + num_info, sizeof(*num_info)); +} + +int nic_get_chip_num(struct net_device *ndev, u32 *chip_num) +{ + struct hane3_port_ext_num_info info; + int ret; + + if (!chip_num) + return -EINVAL; + + ret = nic_get_ext_num_info(ndev, &info); + if (ret) + return ret; + + *chip_num = info.chip_num; + return 0; +} +EXPORT_SYMBOL(nic_get_chip_num); + +int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num) +{ + struct hane3_port_ext_num_info info; + int ret; + + if (!io_die_num) + return -EINVAL; + + ret = nic_get_ext_num_info(ndev, &info); + if (ret) + return ret; + + *io_die_num = info.io_die_num; + return 0; +} +EXPORT_SYMBOL(nic_get_io_die_num); + +int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_NUM, + port_num, sizeof(*port_num)); +} +EXPORT_SYMBOL(nic_get_port_num_of_die); + +int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num) +{ + return nic_get_port_num_of_die(ndev, port_num); +} +EXPORT_SYMBOL(nic_get_port_num_per_chip); + +int nic_set_tx_timeout(struct net_device *ndev, int tx_timeout) +{ + if (nic_netdev_match_check(ndev)) + return -ENODEV; + + if (tx_timeout <= 0 || tx_timeout > HNS3_MAX_TX_TIMEOUT) + return -EINVAL; + + ndev->watchdog_timeo = tx_timeout * HZ; + + return 0; +} +EXPORT_SYMBOL(nic_set_tx_timeout); + +int nic_get_sfp_present(struct net_device *ndev, int *present) +{ + return 
nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PRESENT, + present, sizeof(*present)); +} +EXPORT_SYMBOL(nic_get_sfp_present); + +int nic_set_sfp_state(struct net_device *ndev, bool en) +{ + u32 state = en ? 1 : 0; + + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_SFP_STATE, + &state, sizeof(state)); +} +EXPORT_SYMBOL(nic_set_sfp_state); + +int nic_disable_net_lane(struct net_device *ndev) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_DISABLE_LANE, NULL, 0); +} +EXPORT_SYMBOL(nic_disable_net_lane); + +int nic_get_net_lane_status(struct net_device *ndev, u32 *status) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_LANE_STATUS, + status, sizeof(*status)); +} +EXPORT_SYMBOL(nic_get_net_lane_status); + +int nic_disable_clock(struct net_device *ndev) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_DISABLE_CLOCK, + NULL, 0); +} +EXPORT_SYMBOL(nic_disable_clock); + +int nic_set_pfc_time_cfg(struct net_device *ndev, u16 time) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PFC_TIME, + &time, sizeof(time)); +} +EXPORT_SYMBOL(nic_set_pfc_time_cfg); + +int nic_get_port_fault_status(struct net_device *ndev, u32 fault_type, u32 *status) +{ + int opcode = HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS; + struct hnae3_port_fault fault_para; + int ret; + + if (!status) + return -EINVAL; + + if (fault_type == HNAE3_FAULT_TYPE_HILINK_REF_LOS) + opcode = HNAE3_EXT_OPC_GET_HILINK_REF_LOS; + + fault_para.fault_type = fault_type; + ret = nic_invoke_pri_ops(ndev, opcode, &fault_para, sizeof(fault_para)); + if (ret) + return ret; + + *status = fault_para.fault_status; + return 0; +} +EXPORT_SYMBOL(nic_get_port_fault_status); + +int nic_get_port_wire_type(struct net_device *ndev, u32 *wire_type) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PORT_TYPE, + wire_type, sizeof(*wire_type)); +} +EXPORT_SYMBOL(nic_get_port_wire_type); + +int nic_set_mac_state(struct net_device *ndev, int enable) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_MAC_STATE, + &enable, 
sizeof(enable)); +} +EXPORT_SYMBOL(nic_set_mac_state); + +int nic_set_led(struct net_device *ndev, u32 type, u32 status) +{ + struct hnae3_led_state_para para; + + para.status = status; + para.type = type; + + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_LED, + ¶, sizeof(para)); +} +EXPORT_SYMBOL(nic_set_led); + +int nic_get_led_signal(struct net_device *ndev, struct hnae3_lamp_signal *signal) +{ + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_LED_SIGNAL, + signal, sizeof(*signal)); +} +EXPORT_SYMBOL(nic_get_led_signal); + +int nic_get_phy_reg(struct net_device *ndev, u32 page_select_addr, + u16 page, u32 reg_addr, u16 *data) +{ + struct hnae3_phy_para para; + int ret; + + if (!data) + return -EINVAL; + + para.page_select_addr = page_select_addr; + para.page = page; + para.reg_addr = reg_addr; + ret = nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_GET_PHY_REG, + ¶, sizeof(para)); + if (ret) + return ret; + + *data = para.data; + return 0; +} +EXPORT_SYMBOL(nic_get_phy_reg); + +int nic_set_phy_reg(struct net_device *ndev, u32 page_select_addr, + u16 page, u32 reg_addr, u16 data) +{ + struct hnae3_phy_para para; + + para.page_select_addr = page_select_addr; + para.page = page; + para.reg_addr = reg_addr; + para.data = data; + return nic_invoke_pri_ops(ndev, HNAE3_EXT_OPC_SET_PHY_REG, + ¶, sizeof(para)); +} +EXPORT_SYMBOL(nic_set_phy_reg); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ext.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..3f2e0d2f01490f14539dc86d4e6e9d91f459847a --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ext.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2023 Hisilicon Limited. 
*/ + +#ifndef __HNS3_EXT_H +#define __HNS3_EXT_H +#include +#include "hns3_enet.h" +#include "hnae3_ext.h" + +#define HNS3_PFC_STORM_PARA_DIR_RX 0 +#define HNS3_PFC_STORM_PARA_DIR_TX 1 +#define HNS3_PFC_STORM_PARA_DISABLE 0 +#define HNS3_PFC_STORM_PARA_ENABLE 1 +#define HNS3_PFC_STORM_PARA_PERIOD_MIN 5 +#define HNS3_PFC_STORM_PARA_PERIOD_MAX 2000 +#define HNS3_MAX_TX_TIMEOUT 600 + +#define nic_set_8211_phy_reg nic_set_phy_reg +#define nic_get_8211_phy_reg nic_get_phy_reg +#define nic_set_8521_phy_reg(ndev, page, reg_addr, data) \ + nic_set_phy_reg(ndev, 0, page, reg_addr, data) +#define nic_get_8521_phy_reg(ndev, page, reg_addr, data) \ + nic_get_phy_reg(ndev, 0, page, reg_addr, data) + +#define nic_get_cdr_flash_status(ndev, status) \ + nic_get_port_fault_status(ndev, HNAE3_FAULT_TYPE_CDR_FLASH, status) +#define nic_get_hilink_ref_los(ndev, status) \ + nic_get_port_fault_status(ndev, HNAE3_FAULT_TYPE_HILINK_REF_LOS, status) + +int nic_netdev_match_check(struct net_device *netdev); +void nic_chip_recover_handler(struct net_device *ndev, + enum hnae3_event_type_custom event_t); +int nic_set_pfc_storm_para(struct net_device *ndev, u32 dir, u32 enable, + u32 period_ms, u32 times, u32 recovery_period_ms); +int nic_get_pfc_storm_para(struct net_device *ndev, u32 dir, u32 *enable, + u32 *period_ms, u32 *times, u32 *recovery_period_ms); +int nic_set_notify_pkt_param(struct net_device *ndev, + struct hnae3_notify_pkt_param *param); +int nic_set_notify_pkt_start(struct net_device *ndev); +int nic_set_torus_param(struct net_device *ndev, struct hnae3_torus_param *param); +int nic_get_torus_param(struct net_device *ndev, struct hnae3_torus_param *param); +int nic_clean_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); +int nic_set_cpu_affinity(struct net_device *ndev, cpumask_t *affinity_mask); +int nic_get_chipid(struct net_device *ndev, u32 *chip_id); +int nic_get_mac_id(struct net_device *ndev, u32 *mac_id); +int nic_get_io_die_id(struct net_device *ndev, 
u32 *io_die_id); +int nic_get_chip_num(struct net_device *ndev, u32 *chip_num); +int nic_get_io_die_num(struct net_device *ndev, u32 *io_die_num); +int nic_get_port_num_of_die(struct net_device *ndev, u32 *port_num); +int nic_get_port_num_per_chip(struct net_device *ndev, u32 *port_num); +int nic_set_tx_timeout(struct net_device *ndev, int tx_timeout); +int nic_get_sfp_present(struct net_device *ndev, int *present); +int nic_set_sfp_state(struct net_device *ndev, bool en); +int nic_disable_net_lane(struct net_device *ndev); +int nic_get_net_lane_status(struct net_device *ndev, u32 *status); +int nic_disable_clock(struct net_device *ndev); +int nic_set_pfc_time_cfg(struct net_device *ndev, u16 time); +int nic_get_port_fault_status(struct net_device *ndev, u32 fault_type, u32 *status); +int nic_get_port_wire_type(struct net_device *ndev, u32 *wire_type); +int nic_set_mac_state(struct net_device *ndev, int enable); +int nic_set_led(struct net_device *ndev, u32 type, u32 status); +int nic_get_led_signal(struct net_device *ndev, struct hnae3_lamp_signal *signal); +int nic_get_phy_reg(struct net_device *ndev, u32 page_select_addr, + u16 page, u32 reg_addr, u16 *data); +int nic_set_phy_reg(struct net_device *ndev, u32 page_select_addr, + u16 page, u32 reg_addr, u16 data); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 4d15eb73b972857f6a1760e3c414488d2af347cf..9ccf95a6295e9f66c1803b0b4d6bf7e7e1a2d1b2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -354,6 +354,12 @@ struct hclge_sfp_info_cmd { u8 rsv[6]; }; +struct hclge_port_fault_cmd { + __le32 fault_status; + __le32 port_type; + u8 rsv[16]; +}; + #define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0 #define HCLGE_MAC_CFG_FEC_MODE_S 1 #define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1) @@ -727,11 +733,11 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_DROP_B 
0 #define HCLGE_FD_AD_DIRECT_QID_B 1 -#define HCLGE_FD_AD_QID_S 2 -#define HCLGE_FD_AD_QID_M GENMASK(11, 2) +#define HCLGE_FD_AD_QID_L_S 2 +#define HCLGE_FD_AD_QID_L_M GENMASK(11, 2) #define HCLGE_FD_AD_USE_COUNTER_B 12 -#define HCLGE_FD_AD_COUNTER_NUM_S 13 -#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13) +#define HCLGE_FD_AD_COUNTER_NUM_L_S 13 +#define HCLGE_FD_AD_COUNTER_NUM_L_M GENMASK(19, 13) #define HCLGE_FD_AD_NXT_STEP_B 20 #define HCLGE_FD_AD_NXT_KEY_S 21 #define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21) @@ -741,6 +747,8 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_TC_OVRD_B 16 #define HCLGE_FD_AD_TC_SIZE_S 17 #define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) +#define HCLGE_FD_AD_QID_H_B 21 +#define HCLGE_FD_AD_COUNTER_NUM_H_B 26 struct hclge_fd_ad_config_cmd { u8 stage; @@ -758,6 +766,24 @@ struct hclge_fd_ad_cnt_read_cmd { u8 rsv2[8]; }; +struct hclge_fd_qb_cfg_cmd { + u8 en; + u8 vf_id; + u8 rsv[22]; +}; + +#define HCLGE_FD_QB_AD_RULE_ID_VLD_B 0 +#define HCLGE_FD_QB_AD_COUNTER_VLD_B 1 +struct hclge_fd_qb_ad_cmd { + u8 vf_id; + u8 rsv1; + u8 ad_sel; + u8 rsv2; + __le16 hit_rule_id; + u8 counter_id; + u8 rsv3[17]; +}; + #define HCLGE_FD_USER_DEF_OFT_S 0 #define HCLGE_FD_USER_DEF_OFT_M GENMASK(14, 0) #define HCLGE_FD_USER_DEF_EN_B 15 @@ -828,7 +854,8 @@ struct hclge_dev_specs_1_cmd { __le16 mc_mac_size; u8 rsv1[6]; u8 tnl_num; - u8 rsv2[5]; + u8 hilink_version; + u8 rsv2[4]; }; /* mac speed type defined in firmware command */ @@ -867,11 +894,17 @@ struct hclge_phy_link_ksetting_1_cmd { u8 rsv[22]; }; +#define HCLGE_PHY_RW_DIRECTLY 0 +#define HCLGE_PHY_RW_WITH_PAGE 1 struct hclge_phy_reg_cmd { __le16 reg_addr; u8 rsv0[2]; __le16 reg_val; - u8 rsv1[18]; + u8 rsv1[2]; + u8 type; + u8 dev_addr; + __le16 page; + u8 rsv2[12]; }; struct hclge_wol_cfg_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 
b98301e205f7fff69b21f36a96e93ec358a371c1..eabbacb1c714087a53d0381514b140e802c409b8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h, return ret; } + kinfo->tc_info.mqprio_destroy = !tc; + ret = hclge_notify_down_uinit(hdev); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index ff3f8f424ad9004bbd549bc32cc43abca3ea2610..177841b85084aa1ee5fb775fc7f7b307fcbfcb80 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -161,10 +161,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset, return 0; } -static int hclge_dbg_cmd_send(struct hclge_dev *hdev, - struct hclge_desc *desc_src, - int index, int bd_num, - enum hclge_opcode_type cmd) +int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src, + int index, int bd_num, enum hclge_opcode_type cmd) { struct hclge_desc *desc = desc_src; int ret, i; @@ -1510,8 +1508,7 @@ static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len) #define HCLGE_DBG_TCAM_BUF_SIZE 256 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, - char *tcam_buf, - struct hclge_dbg_tcam_msg tcam_msg) + char *tcam_buf, u8 stage, u32 loc) { struct hclge_fd_tcam_config_1_cmd *req1; struct hclge_fd_tcam_config_2_cmd *req2; @@ -1531,9 +1528,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; - req1->stage = tcam_msg.stage; + req1->stage = stage; req1->xy_sel = sel_x ? 
1 : 0; - req1->index = cpu_to_le32(tcam_msg.loc); + req1->index = cpu_to_le32(loc); ret = hclge_cmd_send(&hdev->hw, desc, 3); if (ret) @@ -1541,7 +1538,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x, pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos, "read result tcam key %s(%u):\n", sel_x ? "x" : "y", - tcam_msg.loc); + loc); /* tcam_data0 ~ tcam_data1 */ req = (__le32 *)req1->tcam_data; @@ -1586,7 +1583,6 @@ static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs) static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) { u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; - struct hclge_dbg_tcam_msg tcam_msg; int i, ret, rule_cnt; u16 *rule_locs; char *tcam_buf; @@ -1621,10 +1617,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) ret = 0; for (i = 0; i < rule_cnt; i++) { - tcam_msg.stage = HCLGE_FD_STAGE_1; - tcam_msg.loc = rule_locs[i]; - - ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg); + ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, HCLGE_FD_STAGE_1, rule_locs[i]); if (ret) { dev_err(&hdev->pdev->dev, "failed to get fd tcam key x, ret = %d\n", ret); @@ -1633,7 +1626,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); - ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg); + ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, HCLGE_FD_STAGE_1, rule_locs[i]); if (ret) { dev_err(&hdev->pdev->dev, "failed to get fd tcam key y, ret = %d\n", ret); @@ -1649,6 +1642,86 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len) return ret; } +static int hclge_query_rules_valid(struct hclge_dev *hdev, u8 stage, u32 loc) +{ +#define HCLGE_TCAM_SELECTION_X 1 + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int 
ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = HCLGE_TCAM_SELECTION_X; + req1->index = cpu_to_le32(loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to read tcam status, ret = %d\n", ret); + return ret; + } + + return req1->entry_vld; +} + +static int hclge_dbg_dump_qb_tcam(struct hclge_dev *hdev, char *buf, int len) +{ + char *tcam_buf; + int pos = 0; + int ret = 0; + u32 i; + + if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { + dev_err(&hdev->pdev->dev, + "Only FD-supported dev supports dump fd tcam\n"); + return -EOPNOTSUPP; + } + + tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL); + if (!tcam_buf) + return -ENOMEM; + + for (i = 0; i < hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; i++) { + if (hclge_query_rules_valid(hdev, HCLGE_FD_STAGE_1, i) <= 0) + continue; + + ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, + HCLGE_FD_STAGE_1, i); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qb tcam key x, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + + ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, + HCLGE_FD_STAGE_1, i); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qb tcam key y, ret = %d\n", ret); + goto out; + } + + pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf); + } + +out: + kfree(tcam_buf); + return ret; +} + static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) { u8 func_num = pci_num_vf(hdev->pdev) + 
1; /* pf and enabled vf num */ @@ -2400,6 +2473,14 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len) return 0; } +static int hclge_dbg_dump_tcam(struct hclge_dev *hdev, char *buf, int len) +{ + if (test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state)) + return hclge_dbg_dump_qb_tcam(hdev, buf, len); + else + return hclge_dbg_dump_fd_tcam(hdev, buf, len); +}; + static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len) { hclge_dbg_dump_mac_list(hdev, buf, len, true); @@ -2539,14 +2620,14 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = { .cmd = HNAE3_DBG_CMD_REG_DCB, .dbg_dump = hclge_dbg_dump_dcb, }, - { - .cmd = HNAE3_DBG_CMD_FD_TCAM, - .dbg_dump = hclge_dbg_dump_fd_tcam, - }, { .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS, .dbg_dump = hclge_dbg_dump_mac_tnl_status, }, + { + .cmd = HNAE3_DBG_CMD_FD_TCAM, + .dbg_dump = hclge_dbg_dump_tcam, + }, { .cmd = HNAE3_DBG_CMD_SERV_INFO, .dbg_dump = hclge_dbg_dump_serv_info, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index 724052928b884a301967e8bc9c778c76460fd428..3fe78e3c2368e224cfe5337be2055c9a40dff14c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -69,11 +69,6 @@ struct hclge_dbg_reg_common_msg { enum hclge_opcode_type cmd; }; -struct hclge_dbg_tcam_msg { - u8 stage; - u32 loc; -}; - #define HCLGE_DBG_MAX_DFX_MSG_LEN 60 struct hclge_dbg_dfx_message { int flag; @@ -771,4 +766,7 @@ struct hclge_dbg_vlan_cfg { u8 pri_only2; }; +int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src, + int index, int bd_num, enum hclge_opcode_type cmd); + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c index 9a939c0b217f717746924af4f651e384e2f0035a..a1571c1086788b434f19363b095ce00baa71cae9 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -5,6 +5,34 @@ #include "hclge_devlink.h" +static int hclge_devlink_scc_info_get(struct devlink *devlink, + struct devlink_info_req *req) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char scc_version[HCLGE_DEVLINK_FW_SCC_LEN]; + struct hclge_dev *hdev = priv->hdev; + u32 scc_version_tmp; + int ret; + + ret = hclge_query_scc_version(hdev, &scc_version_tmp); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get scc version, ret = %d\n", ret); + return ret; + } + + snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu", + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + return devlink_info_version_running_put(req, "fw.scc", scc_version); +} + static int hclge_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) @@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink, struct hclge_devlink_priv *priv = devlink_priv(devlink); char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; struct hclge_dev *hdev = priv->hdev; + int ret; snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, @@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink, hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, HNAE3_FW_VERSION_BYTE0_SHIFT)); - return devlink_info_version_running_put(req, - DEVLINK_INFO_VERSION_GENERIC_FW, - version_str); + ret = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); + if 
(ret) { + dev_err(&hdev->pdev->dev, "failed to set running version of fw\n"); + return ret; + } + + if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2) + ret = hclge_devlink_scc_info_get(devlink, req); + + return ret; } static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h index 918be04507a59b38b63cf9d003b6a75b102df53c..148effa5ea897217949c0abf09e3ecd672e21304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -6,6 +6,8 @@ #include "hclge_main.h" +#define HCLGE_DEVLINK_FW_SCC_LEN 32 + struct hclge_devlink_priv { struct hclge_dev *hdev; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 3f35227ef1fab3e555b684a1d43cec5a8185bacf..db1811f7578cc7708c24cdf3fe36e92dee0eaf84 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1198,6 +1198,426 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { } }; +static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = { + { + .reg_name = "SSU_BP_STATUS_0~5", + .reg_offset_group = {5, 6, 7, 8, 9, 10}, + .group_size = 6 + }, { + .reg_name = "LO_PRI_UNICAST_CUR_CNT", + .reg_offset_group = {54}, + .group_size = 1 + }, { + .reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT", + .reg_offset_group = {55, 56}, + .group_size = 2 + }, { + .reg_name = "SSU_MB_RD_RLT_DROP_CNT", + .reg_offset_group = {29}, + .group_size = 1 + }, { + .reg_name = "SSU_PPP_MAC_KEY_NUM", + .reg_offset_group = {31, 30}, + .group_size = 2 + }, { + .reg_name = "SSU_PPP_HOST_KEY_NUM", + .reg_offset_group = {33, 32}, + .group_size = 2 + }, { + .reg_name = "PPP_SSU_MAC/HOST_RLT_NUM", + .reg_offset_group = {35, 34, 37, 36}, + .group_size = 4 + }, { + 
.reg_name = "FULL/PART_DROP_NUM", + .reg_offset_group = {18, 19}, + .group_size = 2 + }, { + .reg_name = "PPP_KEY/RLT_DROP_NUM", + .reg_offset_group = {20, 21}, + .group_size = 2 + }, { + .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT", + .reg_offset_group = {48, 49}, + .group_size = 2 + }, { + .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX", + .reg_offset_group = {50, 51}, + .group_size = 2 + }, +}; + +static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = { + { + .reg_name = "RX_PACKET_IN/OUT_CNT", + .reg_offset_group = {13, 12, 15, 14}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_IN/OUT_CNT", + .reg_offset_group = {17, 16, 19, 18}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC0_IN/OUT_CNT", + .reg_offset_group = {25, 24, 41, 40}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC1_IN/OUT_CNT", + .reg_offset_group = {27, 26, 43, 42}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC2_IN/OUT_CNT", + .reg_offset_group = {29, 28, 45, 44}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC3_IN/OUT_CNT", + .reg_offset_group = {31, 30, 47, 46}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC4_IN/OUT_CNT", + .reg_offset_group = {33, 32, 49, 48}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC5_IN/OUT_CNT", + .reg_offset_group = {35, 34, 51, 50}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC6_IN/OUT_CNT", + .reg_offset_group = {37, 36, 53, 52}, + .group_size = 4 + }, { + .reg_name = "RX_PACKET_TC7_IN/OUT_CNT", + .reg_offset_group = {39, 38, 55, 54}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC0_IN/OUT_CNT", + .reg_offset_group = {57, 56, 73, 72}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC1_IN/OUT_CNT", + .reg_offset_group = {59, 58, 75, 74}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC2_IN/OUT_CNT", + .reg_offset_group = {61, 60, 77, 76}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC3_IN/OUT_CNT", + .reg_offset_group = {63, 62, 79, 78}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC4_IN/OUT_CNT", 
+ .reg_offset_group = {65, 64, 81, 80}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC5_IN/OUT_CNT", + .reg_offset_group = {67, 66, 83, 82}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC6_IN/OUT_CNT", + .reg_offset_group = {69, 68, 85, 84}, + .group_size = 4 + }, { + .reg_name = "TX_PACKET_TC7_IN/OUT_CNT", + .reg_offset_group = {71, 70, 87, 86}, + .group_size = 4 + }, { + .reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT", + .reg_offset_group = {1, 2, 3, 4}, + .group_size = 4 + }, { + .reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT", + .reg_offset_group = {5, 6, 7, 8}, + .group_size = 4 + }, { + .reg_name = "ROC_RX_PACKET_IN_CNT", + .reg_offset_group = {21, 20}, + .group_size = 2 + }, { + .reg_name = "ROC_TX_PACKET_OUT_CNT", + .reg_offset_group = {23, 22}, + .group_size = 2 + } +}; + +static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = { + { + .reg_name = "RPU_FSM_DFX_ST0/ST1_TNL", + .has_suffix = true, + .reg_offset_group = {1, 2}, + .group_size = 2 + }, { + .reg_name = "RPU_RX_PKT_DROP_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {3}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = { + { + .reg_name = "FIFO_DFX_ST0_1_2_4", + .reg_offset_group = {1, 2, 3, 5}, + .group_size = 4 + } +}; + +static const struct hclge_mod_reg_info hclge_igu_egu_reg_info[] = { + { + .reg_name = "IGU_RX_ERR_PKT", + .reg_offset_group = {1}, + .group_size = 1 + }, { + .reg_name = "IGU_RX_OUT_ALL_PKT", + .reg_offset_group = {29, 28}, + .group_size = 2 + }, { + .reg_name = "EGU_TX_OUT_ALL_PKT", + .reg_offset_group = {39, 38}, + .group_size = 2 + }, { + .reg_name = "EGU_TX_ERR_PKT", + .reg_offset_group = {5}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = { + { + .reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {1}, + .group_size = 1 + }, { + .reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL", + .has_suffix = true, + .reg_offset_group = {12}, + 
.group_size = 1 + } +}; + +static const struct hclge_mod_reg_info hclge_gen_reg_info[] = { + { + .reg_name = "SSU_OVERSIZE_DROP_CNT", + .reg_offset_group = {12}, + .group_size = 1 + }, { + .reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM", + .reg_offset_group = {13}, + .group_size = 1 + }, { + .reg_name = "RX_PKT_IN/OUT_ERR_CNT", + .reg_offset_group = {15, 14, 19, 18}, + .group_size = 4 + }, { + .reg_name = "TX_PKT_IN/OUT_ERR_CNT", + .reg_offset_group = {17, 16, 21, 20}, + .group_size = 4 + }, { + .reg_name = "ETS_TC_READY", + .reg_offset_group = {22}, + .group_size = 1 + }, { + .reg_name = "MIB_TX/RX_BAD_PKTS", + .reg_offset_group = {19, 18, 29, 28}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_GOOD_PKTS", + .reg_offset_group = {21, 20, 31, 30}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_TOTAL_PKTS", + .reg_offset_group = {23, 22, 33, 32}, + .group_size = 4 + }, { + .reg_name = "MIB_TX/RX_PAUSE_PKTS", + .reg_offset_group = {25, 24, 35, 34}, + .group_size = 4 + }, { + .reg_name = "MIB_TX_ERR_ALL_PKTS", + .reg_offset_group = {27, 26}, + .group_size = 2 + }, { + .reg_name = "MIB_RX_FCS_ERR_PKTS", + .reg_offset_group = {37, 36}, + .group_size = 2 + }, { + .reg_name = "IGU_EGU_AUTO_GATE_EN", + .reg_offset_group = {42}, + .group_size = 1 + }, { + .reg_name = "IGU_EGU_INT_SRC", + .reg_offset_group = {43}, + .group_size = 1 + }, { + .reg_name = "EGU_READY_NUM_CFG", + .reg_offset_group = {44}, + .group_size = 1 + }, { + .reg_name = "IGU_EGU_TNL_DFX", + .reg_offset_group = {45}, + .group_size = 1 + }, { + .reg_name = "TX_TNL_NOTE_PKT", + .reg_offset_group = {46}, + .group_size = 1 + } +}; + +static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = { + { + .cmd = HCLGE_OPC_DFX_SSU_REG_0, + .result_regs = hclge_ssu_reg_0_info, + .bd_num = HCLGE_BD_NUM_SSU_REG_0, + .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info) + }, { + .cmd = HCLGE_OPC_DFX_SSU_REG_1, + .result_regs = hclge_ssu_reg_1_info, + .bd_num = HCLGE_BD_NUM_SSU_REG_1, + .result_regs_size = 
ARRAY_SIZE(hclge_ssu_reg_1_info) + }, { + .cmd = HCLGE_OPC_DFX_RPU_REG_0, + .result_regs = hclge_rpu_reg_0_info, + .bd_num = HCLGE_BD_NUM_RPU_REG_0, + .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info), + .need_para = true + }, { + .cmd = HCLGE_OPC_DFX_RPU_REG_1, + .result_regs = hclge_rpu_reg_1_info, + .bd_num = HCLGE_BD_NUM_RPU_REG_1, + .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info) + }, { + .cmd = HCLGE_OPC_DFX_IGU_EGU_REG, + .result_regs = hclge_igu_egu_reg_info, + .bd_num = HCLGE_BD_NUM_IGU_EGU_REG, + .result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info) + }, { + .cmd = HCLGE_OPC_DFX_GEN_REG, + .result_regs = hclge_gen_reg_info_tnl, + .bd_num = HCLGE_BD_NUM_GEN_REG, + .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl), + .need_para = true + }, { + .cmd = HCLGE_OPC_DFX_GEN_REG, + .result_regs = hclge_gen_reg_info, + .bd_num = HCLGE_BD_NUM_GEN_REG, + .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info) + } +}; + +static int +hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc, + const struct hclge_mod_reg_info *reg_info, int size) +{ + int i, j, pos, actual_len; + u8 offset, bd_idx, index; + char *buf; + + buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < size; i++) { + actual_len = strlen(reg_info[i].reg_name) + + HCLGE_MOD_REG_EXTRA_LEN + + HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size; + if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) { + dev_info(dev, "length of reg(%s) is invalid, len=%d\n", + reg_info[i].reg_name, actual_len); + continue; + } + + pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s", + reg_info[i].reg_name); + if (reg_info[i].has_suffix) + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u", + le32_to_cpu(desc->data[0])); + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, + ":"); + for (j = 0; j < reg_info[i].group_size; j++) { + offset = reg_info[i].reg_offset_group[j]; + index = offset % HCLGE_DESC_DATA_LEN; + bd_idx = 
offset / HCLGE_DESC_DATA_LEN; + pos += scnprintf(buf + pos, + HCLGE_MOD_REG_INFO_LEN_MAX - pos, + " %08x", + le32_to_cpu(desc[bd_idx].data[index])); + } + buf[pos] = '\0'; + dev_info(dev, "%s\n", buf); + } + + kfree(buf); + return 0; +} + +static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode, + struct hclge_dev *hdev) +{ + if (opcode == HCLGE_OPC_DFX_GEN_REG && + !hnae3_ae_dev_gen_reg_dfx_supported(hdev)) + return false; + return true; +} + +/* For each common msg, send cmdq to IMP and print result reg info. + * If there is a parameter, loop it and request. + */ +static void +hclge_query_reg_info(struct hclge_dev *hdev, + struct hclge_mod_reg_common_msg *msg, u32 loop_time, + u32 *loop_para) +{ + int desc_len, i, ret; + + desc_len = msg->bd_num * sizeof(struct hclge_desc); + msg->desc = kzalloc(desc_len, GFP_KERNEL); + if (!msg->desc) { + dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d", + -ENOMEM); + return; + } + + for (i = 0; i < loop_time; i++) { + ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para, + msg->bd_num, msg->cmd); + loop_para++; + if (ret) + continue; + ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc, + msg->result_regs, + msg->result_regs_size); + if (ret) + dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n", + ret); + } + + kfree(msg->desc); +} + +static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev) +{ + u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0}; + struct hclge_mod_reg_common_msg msg; + u8 i, j, num; + u32 loop_time; + + num = ARRAY_SIZE(hclge_ssu_reg_common_msg); + for (i = 0; i < num; i++) { + msg = hclge_ssu_reg_common_msg[i]; + if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev)) + continue; + loop_time = 1; + loop_para[0] = 0; + if (msg.need_para) { + loop_time = hdev->ae_dev->dev_specs.tnl_num; + for (j = 0; j < loop_time; j++) + loop_para[j] = j + 1; + } + hclge_query_reg_info(hdev, &msg, loop_time, loop_para); + } +} + static const struct 
hclge_hw_module_id hclge_hw_module_id_st[] = { { .module_id = MODULE_NONE, @@ -1210,7 +1630,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { .msg = "MODULE_GE" }, { .module_id = MODULE_IGU_EGU, - .msg = "MODULE_IGU_EGU" + .msg = "MODULE_IGU_EGU", + .query_reg_info = hclge_query_reg_info_of_ssu }, { .module_id = MODULE_LGE, .msg = "MODULE_LGE" @@ -1231,7 +1652,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { .msg = "MODULE_RTC" }, { .module_id = MODULE_SSU, - .msg = "MODULE_SSU" + .msg = "MODULE_SSU", + .query_reg_info = hclge_query_reg_info_of_ssu }, { .module_id = MODULE_TM, .msg = "MODULE_TM" @@ -1301,10 +1723,12 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = { .msg = "tqp_int_ecc_error" }, { .type_id = PF_ABNORMAL_INT_ERROR, - .msg = "pf_abnormal_int_error" + .msg = "pf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = MPF_ABNORMAL_INT_ERROR, - .msg = "mpf_abnormal_int_error" + .msg = "mpf_abnormal_int_error", + .cause_by_vf = true }, { .type_id = COMMON_ERROR, .msg = "common_error" @@ -2759,8 +3183,8 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev) hclge_handle_error_info_log(ae_dev); } -static void -hclge_handle_error_type_reg_log(struct device *dev, +static bool +hclge_handle_error_type_reg_log(struct hclge_dev *hdev, struct hclge_mod_err_info *mod_info, struct hclge_type_reg_err_info *type_reg_info) { @@ -2768,8 +3192,10 @@ hclge_handle_error_type_reg_log(struct device *dev, #define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7 u8 mod_id, total_module, type_id, total_type, i, is_ras; + struct device *dev = &hdev->pdev->dev; u8 index_module = MODULE_NONE; u8 index_type = NONE_ERROR; + bool cause_by_vf = false; mod_id = mod_info->mod_id; type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK; @@ -2788,6 +3214,7 @@ hclge_handle_error_type_reg_log(struct device *dev, for (i = 0; i < total_type; i++) { if (type_id == hclge_hw_type_id_st[i].type_id) { index_type = i; + cause_by_vf = 
hclge_hw_type_id_st[i].cause_by_vf; break; } } @@ -2805,6 +3232,11 @@ hclge_handle_error_type_reg_log(struct device *dev, dev_err(dev, "reg_value:\n"); for (i = 0; i < type_reg_info->reg_num; i++) dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]); + + if (hclge_hw_module_id_st[index_module].query_reg_info) + hclge_hw_module_id_st[index_module].query_reg_info(hdev); + + return cause_by_vf; } static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, @@ -2815,6 +3247,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, struct device *dev = &hdev->pdev->dev; struct hclge_mod_err_info *mod_info; struct hclge_sum_err_info *sum_info; + bool cause_by_vf = false; u8 mod_num, err_num, i; u32 offset = 0; @@ -2843,12 +3276,16 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev, type_reg_info = (struct hclge_type_reg_err_info *) &buf[offset++]; - hclge_handle_error_type_reg_log(dev, mod_info, - type_reg_info); + if (hclge_handle_error_type_reg_log(hdev, mod_info, + type_reg_info)) + cause_by_vf = true; offset += type_reg_info->reg_num; } } + + if (hnae3_ae_dev_vf_fault_supported(hdev->ae_dev) && cause_by_vf) + set_bit(HNAE3_VF_EXP_RESET, &ae_dev->hw_err_reset_req); } static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num) @@ -2940,3 +3377,98 @@ int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev) out: return ret; } + +static bool hclge_reset_vf_in_bitmap(struct hclge_dev *hdev, + unsigned long *bitmap) +{ + struct hclge_vport *vport; + bool exist_set = false; + int func_id; + int ret; + + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + if (func_id == PF_VPORT_ID) + return false; + + while (func_id != HCLGE_VPORT_NUM) { + vport = hclge_get_vf_vport(hdev, + func_id - HCLGE_VF_VPORT_START_NUM); + if (!vport) { + dev_err(&hdev->pdev->dev, "invalid func id(%d)\n", + func_id); + return false; + } + + dev_info(&hdev->pdev->dev, "do function %d recovery.", func_id); + + ret = 
hclge_reset_tqp(&vport->nic); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset tqp, ret = %d.", ret); + return false; + } + + ret = hclge_inform_vf_reset(vport, HNAE3_VF_FUNC_RESET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to reset func %d, ret = %d.", + func_id, ret); + return false; + } + + exist_set = true; + clear_bit(func_id, bitmap); + func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM); + } + + return exist_set; +} + +static void hclge_get_vf_fault_bitmap(struct hclge_desc *desc, + unsigned long *bitmap) +{ +#define HCLGE_FIR_FAULT_BYTES 24 +#define HCLGE_SEC_FAULT_BYTES 8 + + u8 *buff; + + BUILD_BUG_ON(HCLGE_FIR_FAULT_BYTES + HCLGE_SEC_FAULT_BYTES != + BITS_TO_BYTES(HCLGE_VPORT_NUM)); + + memcpy(bitmap, desc[0].data, HCLGE_FIR_FAULT_BYTES); + buff = (u8 *)bitmap + HCLGE_FIR_FAULT_BYTES; + memcpy(buff, desc[1].data, HCLGE_SEC_FAULT_BYTES); +} + +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev) +{ + unsigned long vf_fault_bitmap[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; + struct hclge_desc desc[2]; + bool cause_by_vf = false; + int ret; + + if (!test_and_clear_bit(HNAE3_VF_EXP_RESET, + &hdev->ae_dev->hw_err_reset_req) || + !hnae3_ae_dev_vf_fault_supported(hdev->ae_dev)) + return 0; + + hclge_comm_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + hclge_comm_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_GET_QUEUE_ERR_VF, + true); + + ret = hclge_comm_cmd_send(&hdev->hw.hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get vf bitmap, ret = %d.\n", ret); + return ret; + } + hclge_get_vf_fault_bitmap(desc, vf_fault_bitmap); + + cause_by_vf = hclge_reset_vf_in_bitmap(hdev, vf_fault_bitmap); + if (cause_by_vf) + hdev->ae_dev->hw_err_reset_req = 0; + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 
86be6fb329901755c0c409ffba3e9fdc2e023c33..45a783a506439a64572880a8dfcb35eabbfb1fb0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -5,6 +5,7 @@ #define __HCLGE_ERR_H #include "hclge_main.h" +#include "hclge_debugfs.h" #include "hnae3.h" #define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10 @@ -115,6 +116,18 @@ #define HCLGE_REG_NUM_MAX 256 #define HCLGE_DESC_NO_DATA_LEN 8 +#define HCLGE_BD_NUM_SSU_REG_0 10 +#define HCLGE_BD_NUM_SSU_REG_1 15 +#define HCLGE_BD_NUM_RPU_REG_0 1 +#define HCLGE_BD_NUM_RPU_REG_1 2 +#define HCLGE_BD_NUM_IGU_EGU_REG 9 +#define HCLGE_BD_NUM_GEN_REG 8 +#define HCLGE_MOD_REG_INFO_LEN_MAX 256 +#define HCLGE_MOD_REG_EXTRA_LEN 11 +#define HCLGE_MOD_REG_VALUE_LEN 9 +#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6 +#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8 + enum hclge_err_int_type { HCLGE_ERR_INT_MSIX = 0, HCLGE_ERR_INT_RAS_CE = 1, @@ -191,11 +204,13 @@ struct hclge_hw_error { struct hclge_hw_module_id { enum hclge_mod_name_list module_id; const char *msg; + void (*query_reg_info)(struct hclge_dev *hdev); }; struct hclge_hw_type_id { enum hclge_err_type_list type_id; const char *msg; + bool cause_by_vf; /* indicate the error may from vf exception */ }; struct hclge_sum_err_info { @@ -217,6 +232,28 @@ struct hclge_type_reg_err_info { u32 hclge_reg[HCLGE_REG_NUM_MAX]; }; +struct hclge_mod_reg_info { + const char *reg_name; + bool has_suffix; /* add suffix for register name */ + /* the positions of reg values in hclge_desc.data */ + u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE]; + u8 group_size; +}; + +/* This structure defines cmdq used to query the hardware module debug + * regisgers. 
+ */ +struct hclge_mod_reg_common_msg { + enum hclge_opcode_type cmd; + struct hclge_desc *desc; + u8 bd_num; /* the bd number of hclge_desc used */ + bool need_para; /* whether this cmdq needs to add para */ + + /* the regs need to print */ + const struct hclge_mod_reg_info *result_regs; + u16 result_regs_size; +}; + int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state); int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en); @@ -228,4 +265,5 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, unsigned long *reset_requests); int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev); int hclge_handle_mac_tnl(struct hclge_dev *hdev); +int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.c new file mode 100644 index 0000000000000000000000000000000000000000..a396e965bc5e409d0fac325c193de6be3ad5c422 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.c @@ -0,0 +1,1490 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2023 Hisilicon Limited. + +#include "hclge_main.h" +#include "hnae3.h" +#include "hnae3_ext.h" +#include "hclge_cmd.h" +#include "hclge_ext.h" +#include "hclge_tm.h" + +static nic_event_fn_t nic_event_call; + +/* We use a lock to ensure that the address of the nic_event_call function + * is valid when it is called. Avoid null pointer exceptions caused by + * external unregister during invoking. 
+ */ +static DEFINE_MUTEX(hclge_nic_event_lock); + +static int hclge_set_pfc_storm_para(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_pfc_storm_para_cmd *para_cmd; + struct hnae3_pfc_storm_para *para; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_pfc_storm_para)) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PAUSE_STORM_PARA, + false); + para = (struct hnae3_pfc_storm_para *)data; + para_cmd = (struct hclge_pfc_storm_para_cmd *)desc.data; + para_cmd->dir = cpu_to_le32(para->dir); + para_cmd->enable = cpu_to_le32(para->enable); + para_cmd->period_ms = cpu_to_le32(para->period_ms); + para_cmd->times = cpu_to_le32(para->times); + para_cmd->recovery_period_ms = cpu_to_le32(para->recovery_period_ms); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set pfc storm para, ret = %d\n", ret); + return ret; +} + +static int hclge_get_pfc_storm_para(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_pfc_storm_para_cmd *para_cmd; + struct hnae3_pfc_storm_para *para; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_pfc_storm_para)) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PAUSE_STORM_PARA, true); + para = (struct hnae3_pfc_storm_para *)data; + para_cmd = (struct hclge_pfc_storm_para_cmd *)desc.data; + para_cmd->dir = cpu_to_le32(para->dir); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pfc storm para, ret = %d\n", ret); + return ret; + } + + para->enable = le32_to_cpu(para_cmd->enable); + para->period_ms = le32_to_cpu(para_cmd->period_ms); + para->times = le32_to_cpu(para_cmd->times); + para->recovery_period_ms = le32_to_cpu(para_cmd->recovery_period_ms); + + return 0; +} + +static int hclge_notify_packet_para_cmd_send(struct hclge_dev *hdev, + struct hclge_notify_pkt_param_cmd *param_cmd) +{ +#define 
HCLGE_NOTIFY_PKT_DESC_NUM 4 + + struct hclge_desc desc[HCLGE_NOTIFY_PKT_DESC_NUM]; + u32 i, desc_data_len; + + desc_data_len = ARRAY_SIZE(desc[0].data); + for (i = 0; i < HCLGE_NOTIFY_PKT_DESC_NUM; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_SET_NOTIFY_PKT, + false); + if (i != HCLGE_NOTIFY_PKT_DESC_NUM - 1) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + for (i = 0; i < HCLGE_NOTIFY_PKT_DESC_NUM * desc_data_len; i++) + desc[i / desc_data_len].data[i % desc_data_len] = + *((__le32 *)param_cmd + i); + + return hclge_cmd_send(&hdev->hw, desc, HCLGE_NOTIFY_PKT_DESC_NUM); +} + +static int hclge_set_notify_packet_para(struct hclge_dev *hdev, + void *data, size_t length) +{ + struct hnae3_notify_pkt_param *param = (struct hnae3_notify_pkt_param *)data; + struct hclge_notify_pkt_param_cmd param_cmd; + u32 i, pkt_cfg = 0; + int ret; + + if (length != sizeof(struct hnae3_notify_pkt_param)) + return -EINVAL; + + if (!hnae3_ae_dev_notify_pkt_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + if (param->enable) + pkt_cfg = HCLGE_NOTIFY_PARA_CFG_PKT_EN; + hnae3_set_field(pkt_cfg, HCLGE_NOTIFY_PARA_CFG_PKT_NUM_M, + HCLGE_NOTIFY_PARA_CFG_PKT_NUM_S, param->num); + + param_cmd.cfg = cpu_to_le32(pkt_cfg); + param_cmd.ipg = cpu_to_le32(param->ipg); + for (i = 0; i < ARRAY_SIZE(param_cmd.data); i++) + param_cmd.data[i] = cpu_to_le32(*((u32 *)param->data + i)); + + hnae3_set_bit(param_cmd.vld_cfg, 0, 1); + hnae3_set_bit(param_cmd.vld_ipg, 0, 1); + hnae3_set_bit(param_cmd.vld_data, 0, 1); + + ret = hclge_notify_packet_para_cmd_send(hdev, ¶m_cmd); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set notify packet content, ret = %d\n", ret); + return ret; + } + + param->init = 1; + memcpy(&hdev->notify_param, param, sizeof(*param)); + return 0; +} + +static int hclge_set_notify_packet_start(struct hclge_dev *hdev, + void *data, size_t length) +{ + u32 pkt_cfg = HCLGE_NOTIFY_PARA_CFG_START_EN; + struct hclge_notify_pkt_param_cmd param_cmd; + int ret; + + 
if (!hnae3_ae_dev_notify_pkt_supported(hdev->ae_dev)) + return -EOPNOTSUPP; + + memset(¶m_cmd, 0, sizeof(param_cmd)); + param_cmd.cfg = cpu_to_le32(pkt_cfg); + hnae3_set_bit(param_cmd.vld_cfg, 0, 1); + + ret = hclge_notify_packet_para_cmd_send(hdev, ¶m_cmd); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to send notify packet, ret = %d\n", ret); + return ret; +} + +static int hclge_torus_cfg_switch(struct hclge_dev *hdev, bool is_rocee, + bool enabled) +{ + struct hclge_mac_vlan_switch_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SWITCH_PARAM, true); + req = (struct hclge_mac_vlan_switch_cmd *)desc.data; + req->roce_sel = is_rocee ? 1 : 0; + /* set 0 to let firmware choose current function */ + req->func_id = 0; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get switch param, ret = %d\n", ret); + return ret; + } + + hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_LPBK_B, 1); + hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_LCL_LPBK_B, 0); + hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ANTI_SPOOF_B, enabled); + if (!is_rocee) + hnae3_set_bit(req->switch_param, HCLGE_SWITCH_ALW_DST_OVRD_B, + enabled); + + hclge_comm_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set switch param, ret = %d\n", ret); + + return ret; +} + +static int hclge_torus_cfg_vlan_filter(struct hclge_dev *hdev, + bool enabled) +{ + struct hclge_vlan_filter_ctrl_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_VLAN_FILTER, true); + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; + req->vlan_type = HCLGE_FILTER_TYPE_PORT; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get torus vlan filter, ret = %d\n", ret); + return ret; + } + + hnae3_set_bit(req->vlan_fe, HCLGE_VLAN_FE_NIC_INGRESS, !enabled); 
+ hnae3_set_bit(req->vlan_fe, HCLGE_VLAN_FE_ROCEE_INGRESS, !enabled); + req->vlan_type = HCLGE_FILTER_TYPE_PORT; + + hclge_comm_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set torus vlan filter, ret = %d\n", ret); + + return ret; +} + +static int hclge_torus_cfg(struct hclge_dev *hdev, + struct hnae3_torus_param *param) +{ + struct hclge_torus_cfg_cmd *req; + struct hclge_desc desc; + u32 lan_fwd_tc_cfg = 0; + u32 lan_port_pair = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_1D_TORUS, true); + req = (struct hclge_torus_cfg_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get torus config, ret = %d\n", ret); + return ret; + } + + req->lan_port_pair = cpu_to_le32(param->mac_id & + HCLGE_TORUS_MAC_ID_MASK); + hnae3_set_bit(lan_port_pair, HCLGE_UC_LAN_PAIR_EN, 1); + hnae3_set_bit(lan_port_pair, HCLGE_MC_BC_LAN_PAIR_EN, 1); + hnae3_set_bit(lan_port_pair, HCLGE_LLDP_LAN_PAIR_EN, 1); + hnae3_set_bit(lan_port_pair, HCLGE_TC2VLANPRI_MAPPING_EN, 1); + hnae3_set_bit(lan_port_pair, HCLGE_TORUS_LPBK_DROP_EN, 1); + if (param->enable) + req->lan_port_pair |= cpu_to_le32(lan_port_pair); + + if (!param->is_node0) { + req->lan_fwd_tc_cfg &= cpu_to_le32(~HCLGE_TORUS_TC1_DROP_EN); + lan_fwd_tc_cfg &= ~HCLGE_TOURS_TCX_MAP_TCY_MASK; + lan_fwd_tc_cfg |= HCLGE_TOURS_TCX_MAP_TCY_INIT & + HCLGE_TOURS_TCX_MAP_TCY_MASK; + req->lan_fwd_tc_cfg |= cpu_to_le32(lan_fwd_tc_cfg); + } else { + req->lan_fwd_tc_cfg |= cpu_to_le32(HCLGE_TORUS_TC1_DROP_EN); + lan_fwd_tc_cfg &= ~HCLGE_TOURS_TCX_MAP_TCY_MASK; + lan_fwd_tc_cfg |= HCLGE_TOURS_TCX_MAP_TCY_NODE0_INIT & + HCLGE_TOURS_TCX_MAP_TCY_MASK; + req->lan_fwd_tc_cfg |= cpu_to_le32(lan_fwd_tc_cfg); + } + + req->torus_en = cpu_to_le32(param->enable); + hclge_comm_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "failed to 
set torus, ret = %d\n", + ret); + + return ret; +} + +static int hclge_set_torus_param(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hnae3_torus_param *param = (struct hnae3_torus_param *)data; + int ret; + + if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V4) + return -EOPNOTSUPP; + + if (length != sizeof(struct hnae3_torus_param)) + return -EINVAL; + + ret = hclge_torus_cfg_switch(hdev, false, !!param->enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to config nic switch param, ret = %d\n", ret); + return ret; + } + + ret = hclge_torus_cfg_switch(hdev, true, !!param->enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to config roce switch param, ret = %d\n", ret); + return ret; + } + + ret = hclge_torus_cfg_vlan_filter(hdev, !!param->enable); + if (ret) + return ret; + + ret = hclge_torus_cfg(hdev, param); + if (ret) + return ret; + + hdev->torus_param = *param; + return 0; +} + +static int hclge_get_torus_param(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hnae3_torus_param *param = (struct hnae3_torus_param *)data; + struct hclge_torus_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V4) + return -EOPNOTSUPP; + + if (length != sizeof(struct hnae3_torus_param)) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_1D_TORUS, true); + req = (struct hclge_torus_cfg_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get torus param, ret = %d\n", ret); + return ret; + } + + param->mac_id = + le32_to_cpu(req->lan_port_pair) & HCLGE_TORUS_MAC_ID_MASK; + param->enable = le32_to_cpu(req->torus_en); + + return 0; +} + +static int hclge_clean_stats64(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hnae3_knic_private_info *kinfo; + struct hclge_comm_tqp *tqp; + int i; + + kinfo = &hdev->vport[0].nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { + tqp 
= container_of(kinfo->tqp[i], struct hclge_comm_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(struct hclge_comm_tqp_stats)); + } + memset(&hdev->mac_stats, 0, sizeof(struct hclge_mac_stats)); + return 0; +} + +static int hclge_get_info_from_cmd(struct hclge_dev *hdev, + struct hclge_desc *desc, u32 num, int opcode) +{ + u32 i; + + for (i = 0; i < num; i++) { + hclge_cmd_setup_basic_desc(desc + i, opcode, true); + if (i != num - 1) + desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); + } + + return hclge_cmd_send(&hdev->hw, desc, num); +} + +static int hclge_get_extend_port_id_info(struct hclge_dev *hdev, + void *data, size_t length) +{ + struct hane3_port_ext_id_info *info; + struct hclge_id_info_cmd *info_cmd; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hane3_port_ext_id_info)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_CHIP_ID_GET); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get extend port id info, ret = %d\n", + ret); + return ret; + } + + info_cmd = (struct hclge_id_info_cmd *)desc.data; + info = (struct hane3_port_ext_id_info *)data; + info->chip_id = le32_to_cpu(info_cmd->chip_id); + info->mac_id = le32_to_cpu(info_cmd->mac_id); + info->io_die_id = le32_to_cpu(info_cmd->io_die_id); + return 0; +} + +static int hclge_get_extend_port_num_info(struct hclge_dev *hdev, + void *data, size_t length) +{ + struct hane3_port_ext_num_info *num_info; + struct hclge_num_info_cmd *resp; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hane3_port_ext_num_info)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_GET_CHIP_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get extend port number info, ret = %d\n", ret); + return ret; + } + + resp = (struct hclge_num_info_cmd *)(desc.data); + num_info = (struct hane3_port_ext_num_info *)data; + num_info->chip_num = le32_to_cpu(resp->chip_num); + num_info->io_die_num = le32_to_cpu(resp->io_die_num); 
+ return 0; +} + +static int hclge_get_port_num(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_port_num_info_cmd *resp; + struct hclge_desc desc; + int ret; + + if (length != sizeof(u32)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_GET_PORT_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get port number, ret = %d\n", ret); + return ret; + } + + resp = (struct hclge_port_num_info_cmd *)(desc.data); + *(u32 *)data = le32_to_cpu(resp->port_num); + return 0; +} + +static int hclge_get_sfp_present(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_sfp_present_cmd *resp; + struct hclge_desc desc; + int ret; + + if (length != sizeof(u32)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_SFP_GET_PRESENT); + if (ret) { + dev_err(&hdev->pdev->dev, "failed to get sfp present, ret = %d\n", ret); + return ret; + } + + resp = (struct hclge_sfp_present_cmd *)desc.data; + *(u32 *)data = le32_to_cpu(resp->sfp_present); + return 0; +} + +static int hclge_set_sfp_state(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_sfp_enable_cmd *req; + struct hclge_desc desc; + u32 state; + int ret; + + if (length != sizeof(u32)) + return -EINVAL; + + state = *(u32 *)data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_SET_STATUS, false); + req = (struct hclge_sfp_enable_cmd *)desc.data; + req->sfp_enable = cpu_to_le32(state); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set sfp state, ret = %d\n", ret); + + return ret; +} + +static int hclge_set_net_lane_status(struct hclge_dev *hdev, + u32 enable) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DISABLE_NET_LANE, false); + desc.data[0] = cpu_to_le32(enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set net lane status, ret = %d\n", ret); + + return ret; 
+} + +static int hclge_disable_net_lane(struct hclge_dev *hdev, void *data, + size_t length) +{ + return hclge_set_net_lane_status(hdev, 0); +} + +static int hclge_get_net_lane_status(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_desc desc; + int ret; + + if (length != sizeof(u32)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_DISABLE_NET_LANE); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get net lane status, ret = %d\n", ret); + return ret; + } + + *(u32 *)data = le32_to_cpu(desc.data[0]); + return 0; +} + +static int hclge_disable_nic_clock(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_NIC_CLOCK, false); + desc.data[0] = 0; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to disable nic clock, ret = %d\n", ret); + return ret; +} + +static int hclge_set_pause_trans_time(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + u16 pause_trans_time; + int ret; + + if (length != sizeof(u16)) + return -EINVAL; + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_CFG_MAC_PARA); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get pause cfg info, ret = %d\n", ret); + return ret; + } + + pause_trans_time = *(u16 *)data; + if (pause_trans_time == le16_to_cpu(pause_param->pause_trans_time)) + return 0; + + ret = hclge_pause_param_cfg(hdev, pause_param->mac_addr, + pause_param->pause_trans_gap, + pause_trans_time); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set pause trans time, ret = %d\n", ret); + return ret; + } + + hdev->tm_info.pause_time = pause_trans_time; + return 0; +} + +static int hclge_get_hilink_ref_los(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_port_fault_cmd 
*fault_cmd; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_port_fault)) + return -EINVAL; + + fault_cmd = (struct hclge_port_fault_cmd *)desc.data; + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_CFG_GET_HILINK_REF_LOS); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get hilink ref los, ret = %d\n", ret); + return ret; + } + + *(u32 *)data = le32_to_cpu(fault_cmd->fault_status); + return 0; +} + +static int hclge_get_port_fault_status(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_port_fault_cmd *fault_cmd; + struct hnae3_port_fault *para; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_port_fault)) + return -EINVAL; + + para = (struct hnae3_port_fault *)data; + fault_cmd = (struct hclge_port_fault_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_PORT_FAULT_STATUS, true); + fault_cmd->port_type = cpu_to_le32(para->fault_type); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get port fault status, type = %u, ret = %d\n", + para->fault_type, ret); + return ret; + } + + para->fault_status = le32_to_cpu(fault_cmd->fault_status); + + return 0; +} + +static int hclge_get_port_wire_type(struct hclge_dev *hdev, void *data, + size_t length) +{ + u8 module_type; + + if (length != sizeof(u32)) + return -EINVAL; + + hclge_get_media_type(&hdev->vport[0].nic, NULL, &module_type); + *(u32 *)data = module_type; + return 0; +} + +static void hclge_set_phy_state(struct hclge_dev *hdev, bool enable) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + if (enable && (phydev->state == PHY_READY || phydev->state == PHY_HALTED)) + phy_start(phydev); + else if (!enable && (phy_is_started(phydev) || phydev->state == PHY_DOWN || + phydev->state == PHY_ERROR)) + phy_stop(phydev); +} + +static int hclge_set_mac_state(struct hclge_dev *hdev, void *data, + size_t length) +{ + bool enable; + int ret; + 
+ if (length != sizeof(int)) + return -EINVAL; + + enable = !!*(int *)data; + ret = hclge_cfg_mac_mode(hdev, enable); + + if (!ret && !hclge_comm_dev_phy_imp_supported(hdev->ae_dev)) + hclge_set_phy_state(hdev, enable); + + return ret; +} + +static int hclge_set_led(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_lamp_signal_cmd *para_cmd; + struct hnae3_led_state_para *para; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_led_state_para)) + return -EINVAL; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_LED, false); + para = (struct hnae3_led_state_para *)data; + para_cmd = (struct hclge_lamp_signal_cmd *)desc.data; + para_cmd->type = cpu_to_le32(para->type); + para_cmd->status = cpu_to_le32(para->status); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "failed to set led, ret = %d\n", ret); + + return ret; +} + +static int hclge_get_led_signal(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hclge_lamp_signal_cmd *signal_cmd; + struct hnae3_lamp_signal *signal; + struct hclge_desc desc; + int ret; + + if (length != sizeof(struct hnae3_lamp_signal)) + return -EINVAL; + + ret = hclge_get_info_from_cmd(hdev, &desc, 1, HCLGE_OPC_SET_LED); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get led signal, ret = %d\n", ret); + return ret; + } + + signal = (struct hnae3_lamp_signal *)data; + signal_cmd = (struct hclge_lamp_signal_cmd *)desc.data; + signal->error = signal_cmd->error; + signal->locate = signal_cmd->locate; + signal->activity = signal_cmd->activity; + return 0; +} + +static int hclge_def_phy_opt(struct mii_bus *mdio_bus, u32 phy_addr, + u16 reg_addr, u16 *data, + enum hclge_phy_op_code opt_type) +{ + int ret; + + if (opt_type == PHY_OP_READ) { + ret = mdio_bus->read(mdio_bus, phy_addr, reg_addr); + if (ret >= 0) { + *data = (u16)ret; + ret = 0; + } + } else { + ret = mdio_bus->write(mdio_bus, phy_addr, reg_addr, *data); + } + return ret; +} + 
+static int hclge_phy_reg_opt(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + enum hclge_phy_op_code opt_type) +{ + struct mii_bus *mdio_bus = hdev->hw.mac.mdio_bus; + u32 phy_addr = hdev->hw.mac.phy_addr; + bool need_page_select = false; + u16 cur_page; + int ret; + + /* operate flow: + * 1 record current page addr + * 2 jump to operated page + * 3 operate register(read or write) + * 4 come back to the page recorded in the first step. + */ + mutex_lock(&mdio_bus->mdio_lock); + + /* check if page select is needed and record current page addr. + * no need to change page when read page 0 + */ + if (opt_type != PHY_OP_READ || para->page != 0) { + ret = mdio_bus->read(mdio_bus, phy_addr, + para->page_select_addr); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "failed to read current phy %u reg page\n", + phy_addr); + mutex_unlock(&mdio_bus->mdio_lock); + return ret; + } + cur_page = (u16)ret; + need_page_select = cur_page != para->page; + } + + /* jump to operated page */ + if (need_page_select) { + ret = mdio_bus->write(mdio_bus, phy_addr, + para->page_select_addr, para->page); + if (ret < 0) { + mutex_unlock(&mdio_bus->mdio_lock); + dev_err(&hdev->pdev->dev, + "failed to change phy %u page %u to page %u\n", + phy_addr, cur_page, para->page); + return ret; + } + } + + /* operate register(read or write) */ + ret = hclge_def_phy_opt(mdio_bus, phy_addr, para->reg_addr, &para->data, + opt_type); + if (ret < 0) + dev_err(&hdev->pdev->dev, + "failed to %s phy %u page %u reg %u, ret = %d\n", + opt_type == PHY_OP_READ ? "read" : "write", + phy_addr, para->page, para->reg_addr, ret); + + /* come back to the page recorded in the first step. 
*/ + if (need_page_select) { + ret = mdio_bus->write(mdio_bus, phy_addr, + para->page_select_addr, cur_page); + if (ret < 0) + dev_err(&hdev->pdev->dev, + "failed to restore phy %u reg page %u\n", + phy_addr, cur_page); + } + + mutex_unlock(&mdio_bus->mdio_lock); + + return ret; +} + +static int hclge_8521_phy_ext_opt(struct mii_bus *mdio_bus, u32 phy_addr, + u16 reg_addr, u16 *data, + enum hclge_phy_op_code opt_type) +{ +#define EXT_REG_ADDR 0x1e +#define EXT_DATA_ADDR 0x1f + int ret; + + ret = mdio_bus->write(mdio_bus, phy_addr, EXT_REG_ADDR, reg_addr); + if (ret < 0) + return ret; + + return hclge_def_phy_opt(mdio_bus, phy_addr, EXT_DATA_ADDR, data, + opt_type); +} + +static int hclge_8521_phy_mmd_opt(struct mii_bus *mdio_bus, u32 phy_addr, + u32 reg_addr, u16 *data, + enum hclge_phy_op_code opt_type) +{ +#define MMD_REG_ADDR 0xd +#define MMD_DATA_ADDR 0xe + u16 mmd_index; + u16 mmd_reg; + int ret; + + mmd_index = reg_addr >> 16U; + mmd_reg = reg_addr & 0xFFFF; + + ret = mdio_bus->write(mdio_bus, phy_addr, MMD_REG_ADDR, mmd_index); + if (ret < 0) + return ret; + ret = mdio_bus->write(mdio_bus, phy_addr, MMD_DATA_ADDR, mmd_reg); + if (ret < 0) + return ret; + ret = mdio_bus->write(mdio_bus, phy_addr, MMD_REG_ADDR, + mmd_index | 0x4000); + if (ret < 0) + return ret; + + return hclge_def_phy_opt(mdio_bus, phy_addr, MMD_DATA_ADDR, data, + opt_type); +} + +static void hclge_8521_phy_restores_to_utp_mii(struct hclge_dev *hdev, + struct mii_bus *mdio_bus, + u32 phy_addr) +{ + u16 phy_mii_region_val = 0x6; + u16 utp_region_val = 0x0; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &utp_region_val, PHY_OP_WRITE); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to choose phy space, ret = %d\n", ret); + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_LDS_MII_ADDR, + &phy_mii_region_val, PHY_OP_WRITE); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to choose phy MII, ret = %d\n", ret); +} + +static int 
hclge_8521_phy_utp_mii_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 phy_mii_region_val = 0x6; + u16 utp_region_val = 0x0; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &utp_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_LDS_MII_ADDR, + &phy_mii_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return hclge_def_phy_opt(mdio_bus, phy_addr, (u16)para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_utp_mmd_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 utp_region_val = 0x0; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &utp_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return hclge_8521_phy_mmd_opt(mdio_bus, phy_addr, para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_utp_lds_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 lds_mii_region_val = 0x4; + u16 utp_region_val = 0x0; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &utp_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_LDS_MII_ADDR, + &lds_mii_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return hclge_def_phy_opt(mdio_bus, phy_addr, (u16)para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_utp_ext_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 utp_region_val = 0x0; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &utp_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return 
hclge_8521_phy_ext_opt(mdio_bus, phy_addr, (u16)para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_sds_mii_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 sds_region_val = 0x2; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &sds_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return hclge_def_phy_opt(mdio_bus, phy_addr, (u16)para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_sds_ext_opt(struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u16 sds_region_val = 0x2; + int ret; + + ret = hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + HCLGE_8521_PHY_SMI_SDS_ADDR, + &sds_region_val, PHY_OP_WRITE); + if (ret) + return ret; + + return hclge_8521_phy_ext_opt(mdio_bus, phy_addr, (u16)para->reg_addr, + &para->data, opt_type); +} + +static int hclge_8521_phy_opt(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + enum hclge_phy_op_code opt_type) +{ + struct mii_bus *mdio_bus = hdev->hw.mac.mdio_bus; + u32 phy_addr = hdev->hw.mac.phy_addr; + int ret; + + mutex_lock(&mdio_bus->mdio_lock); + switch (para->page) { + case HCLGE_PHY_REGION_UTP_MII: + ret = hclge_8521_phy_utp_mii_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_UTP_MMD: + ret = hclge_8521_phy_utp_mmd_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_UTP_LDS: + ret = hclge_8521_phy_utp_lds_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_UTP_EXT: + ret = hclge_8521_phy_utp_ext_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_SDS_MII: + ret = hclge_8521_phy_sds_mii_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_SDS_EXT: + ret = hclge_8521_phy_sds_ext_opt(para, mdio_bus, + phy_addr, opt_type); + break; + case HCLGE_PHY_REGION_COM_REG: + ret = 
hclge_8521_phy_ext_opt(mdio_bus, phy_addr, + (u16)para->reg_addr, + &para->data, opt_type); + break; + default: + dev_err(&hdev->pdev->dev, "invalid reg region: %d\n", + para->page); + mutex_unlock(&mdio_bus->mdio_lock); + return -EINVAL; + } + + if (ret) + dev_err(&hdev->pdev->dev, + "phy operation failed %d, reg_region: %d, data: 0x%x\n", + ret, para->page, para->data); + + /* Set the region to UTP MII after operating the 8521 phy register */ + hclge_8521_phy_restores_to_utp_mii(hdev, mdio_bus, phy_addr); + mutex_unlock(&mdio_bus->mdio_lock); + return ret; +} + +static int hclge_check_phy_opt_param(struct hclge_dev *hdev, void *data, + size_t length) +{ + struct hnae3_phy_para *para = (struct hnae3_phy_para *)data; + struct hclge_mac *mac = &hdev->hw.mac; + + if (length != sizeof(*para)) + return -EINVAL; + + if (mac->media_type != HNAE3_MEDIA_TYPE_COPPER) { + dev_err(&hdev->pdev->dev, "this is not a copper port"); + return -EOPNOTSUPP; + } + + if (hnae3_dev_phy_imp_supported(hdev)) + return 0; + + if (!mac->phydev) { + dev_err(&hdev->pdev->dev, "this net device has no phy"); + return -EINVAL; + } + + if (!mac->mdio_bus) { + dev_err(&hdev->pdev->dev, "this net device has no mdio bus"); + return -EINVAL; + } + + return 0; +} + +static int hclge_8211_phy_indirect_opt(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + struct mii_bus *mdio_bus, u32 phy_addr, + enum hclge_phy_op_code opt_type) +{ + u32 indirect_reg_data; + int ret; + + /* select indirect page 0xa43 */ + ret = mdio_bus->write(mdio_bus, phy_addr, para->page_select_addr, + HCLGE_8211_PHY_INDIRECT_PAGE); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "failed to change phy %u indirect page 0xa43\n", + phy_addr); + return ret; + } + /* indirect access addr = page_no*16 + 2*(reg_no%16) */ + indirect_reg_data = (para->page << 4) + ((para->reg_addr % 16) << 1); + ret = mdio_bus->write(mdio_bus, phy_addr, HCLGE_8211_PHY_INDIRECT_REG, + indirect_reg_data); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "failed 
to write phy %u indirect reg\n", phy_addr); + return ret; + } + + ret = hclge_def_phy_opt(mdio_bus, phy_addr, + HCLGE_8211_PHY_INDIRECT_DATA, &para->data, + opt_type); + if (ret < 0) + dev_err(&hdev->pdev->dev, + "failed to %s phy %u indirect data, ret = %d\n", + opt_type == PHY_OP_READ ? "read" : "write", + phy_addr, ret); + + return ret; +} + +static int hclge_8211_phy_need_indirect_access(u16 page) +{ + if (page >= HCLGE_8211_PHY_INDIRECT_RANGE1_S && + page <= HCLGE_8211_PHY_INDIRECT_RANGE1_E) + return true; + else if (page >= HCLGE_8211_PHY_INDIRECT_RANGE2_S && + page <= HCLGE_8211_PHY_INDIRECT_RANGE2_E) + return true; + + return false; +} + +static int hclge_8211_phy_reg_opt(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + enum hclge_phy_op_code opt_type) +{ + struct mii_bus *mdio_bus = hdev->hw.mac.mdio_bus; + u32 phy_addr = hdev->hw.mac.phy_addr; + u16 save_page; + int ret; + + mutex_lock(&mdio_bus->mdio_lock); + ret = mdio_bus->read(mdio_bus, phy_addr, para->page_select_addr); + if (ret < 0) { + dev_err(&hdev->pdev->dev, + "failed to record phy %u reg page\n", phy_addr); + mutex_unlock(&mdio_bus->mdio_lock); + return ret; + } + save_page = ret; + ret = hclge_8211_phy_indirect_opt(hdev, para, mdio_bus, phy_addr, + opt_type); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to indirect access 8211 phy %u\n", phy_addr); + ret = mdio_bus->write(mdio_bus, phy_addr, para->page_select_addr, + save_page); + if (ret < 0) + dev_err(&hdev->pdev->dev, + "failed to restore phy %u reg page %u\n", + phy_addr, save_page); + mutex_unlock(&mdio_bus->mdio_lock); + + return ret; +} + +static int hclge_rw_8211_phy_reg(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + enum hclge_phy_op_code opt_type) +{ + if (hclge_8211_phy_need_indirect_access(para->page)) + return hclge_8211_phy_reg_opt(hdev, para, opt_type); + + return hclge_phy_reg_opt(hdev, para, opt_type); +} + +/* used when imp support phy driver */ +static int hclge_read_phy_reg_with_page(struct hclge_dev 
*hdev, u16 page, + u16 reg_addr, u16 *val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, true); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->type = HCLGE_PHY_RW_WITH_PAGE; + req->page = cpu_to_le16(page); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to read phy page %u reg %u, ret = %d\n", + page, reg_addr, ret); + return ret; + } + + *val = le16_to_cpu(req->reg_val); + return 0; +} + +/* used when imp support phy driver */ +static int hclge_write_phy_reg_with_page(struct hclge_dev *hdev, u16 page, + u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PHY_REG, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->type = HCLGE_PHY_RW_WITH_PAGE; + req->page = cpu_to_le16(page); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy page %u reg %u, ret = %d\n", + page, reg_addr, ret); + + return ret; +} + +static int hclge_rw_phy_reg_with_page(struct hclge_dev *hdev, + struct hnae3_phy_para *para, + enum hclge_phy_op_code opt_type) +{ + if (opt_type == PHY_OP_READ) + return hclge_read_phy_reg_with_page(hdev, para->page, + para->reg_addr, + &para->data); + + return hclge_write_phy_reg_with_page(hdev, para->page, para->reg_addr, + para->data); +} + +static int hclge_rw_phy_reg(struct hclge_dev *hdev, void *data, + size_t length, enum hclge_phy_op_code opt_type) +{ + struct hnae3_phy_para *para = (struct hnae3_phy_para *)data; + struct hclge_mac *mac = &hdev->hw.mac; + u32 phy_id; + int ret; + + ret = hclge_check_phy_opt_param(hdev, data, length); + if (ret < 0) + return ret; + + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_rw_phy_reg_with_page(hdev, 
para, opt_type); + + phy_id = mac->phydev->phy_id & HCLGE_PHY_ID_MASK; + switch (phy_id) { + case HCLGE_PHY_ID_FOR_RTL8211: + return hclge_rw_8211_phy_reg(hdev, para, opt_type); + case HCLGE_PHY_ID_FOR_YT8521: + return hclge_8521_phy_opt(hdev, para, opt_type); + case HCLGE_PHY_ID_FOR_MVL1512: + default: + return hclge_phy_reg_opt(hdev, para, opt_type); + } +} + +static int hclge_get_phy_reg(struct hclge_dev *hdev, void *data, size_t length) +{ + return hclge_rw_phy_reg(hdev, data, length, PHY_OP_READ); +} + +static int hclge_set_phy_reg(struct hclge_dev *hdev, void *data, size_t length) +{ + return hclge_rw_phy_reg(hdev, data, length, PHY_OP_WRITE); +} + +static void hclge_ext_resotre_config(struct hclge_dev *hdev) +{ + if (hdev->reset_type != HNAE3_IMP_RESET && + hdev->reset_type != HNAE3_GLOBAL_RESET) + return; + + if (hdev->notify_param.init) + hclge_set_notify_packet_para(hdev, &hdev->notify_param, + sizeof(hdev->notify_param)); + + hclge_set_torus_param(hdev, &hdev->torus_param, + sizeof(hdev->torus_param)); +} + +static int hclge_set_reset_task(struct hclge_dev *hdev, void *data, + size_t length) +{ + u32 *reset_level = (u32 *)data; + + if (length != sizeof(u32)) + return -EINVAL; + + dev_warn(&hdev->pdev->dev, "reset level is %u\n", *reset_level); + + /* request reset & schedule reset task */ + set_bit(*reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + return 0; +} + +int hclge_ext_call_event(struct hclge_dev *hdev, + enum hnae3_event_type_custom event_t) +{ + if (event_t >= HNAE3_INVALID_EVENT_CUSTOM) + return -EINVAL; + + mutex_lock(&hclge_nic_event_lock); + if (!nic_event_call) { + mutex_unlock(&hclge_nic_event_lock); + return -EOPNOTSUPP; + } + + nic_event_call(hdev->vport[0].nic.netdev, event_t); + mutex_unlock(&hclge_nic_event_lock); + return 0; +} + +int nic_register_event(nic_event_fn_t event_call) +{ + if (!event_call) { + pr_err("hns3: register event handle is null\n"); + return -EINVAL; + } + + 
mutex_lock(&hclge_nic_event_lock); + if (nic_event_call) { + mutex_unlock(&hclge_nic_event_lock); + pr_err("hns3: event already register\n"); + return -EBUSY; + } + + nic_event_call = event_call; + + mutex_unlock(&hclge_nic_event_lock); + pr_info("hns3: event register success\n"); + return 0; +} +EXPORT_SYMBOL(nic_register_event); + +int nic_unregister_event(void) +{ + mutex_lock(&hclge_nic_event_lock); + nic_event_call = NULL; + + mutex_unlock(&hclge_nic_event_lock); + pr_info("hns3: event unregister success\n"); + return 0; +} +EXPORT_SYMBOL(nic_unregister_event); + +static int hclge_nic_call_event(struct hclge_dev *hdev, void *data, + size_t length) +{ +#define ERROR_EVENT_TYPE_NUM 4 + + u32 event_type[ERROR_EVENT_TYPE_NUM] = { + HNAE3_PPU_POISON_CUSTOM, + HNAE3_IMP_RESET_CUSTOM, + HNAE3_IMP_RD_POISON_CUSTOM, + HNAE3_ROCEE_AXI_RESP_CUSTOM, + }; + u32 *index = (u32 *)data; + + if (length != sizeof(u32)) + return -EINVAL; + + if ((*index) >= ERROR_EVENT_TYPE_NUM) + return 0; + + return hclge_ext_call_event(hdev, event_type[*index]); +} + +static enum hnae3_event_type_custom +hclge_get_reset_fail_type(enum hnae3_reset_type reset_type) +{ + const struct hclge_reset_fail_type_map fail_type_map[] = { + {HNAE3_FUNC_RESET, HNAE3_FUNC_RESET_FAIL_CUSTOM}, + {HNAE3_GLOBAL_RESET, HNAE3_GLOBAL_RESET_FAIL_CUSTOM}, + {HNAE3_IMP_RESET, HNAE3_IMP_RESET_FAIL_CUSTOM}, + }; + u32 i; + + for (i = 0; i < ARRAY_SIZE(fail_type_map); i++) + if (fail_type_map[i].reset_type == reset_type) + return fail_type_map[i].custom_type; + + return HNAE3_INVALID_EVENT_CUSTOM; +} + +static void hclge_report_reset_fail_custom(struct hclge_dev *hdev) +{ +#define HCLGE_RESET_MAX_FAIL_CNT_CUSTOM 1 + + u32 max_fail_custom_cnt = HCLGE_RESET_MAX_FAIL_CNT; + + mutex_lock(&hclge_nic_event_lock); + if (nic_event_call) + max_fail_custom_cnt = HCLGE_RESET_MAX_FAIL_CNT_CUSTOM; + mutex_unlock(&hclge_nic_event_lock); + + if (hdev->rst_stats.reset_fail_cnt < max_fail_custom_cnt) + return; + + 
dev_err(&hdev->pdev->dev, "failed to report reset!\n"); + hclge_ext_call_event(hdev, hclge_get_reset_fail_type(hdev->reset_type)); +} + +void hclge_ext_reset_end(struct hclge_dev *hdev, bool done) +{ + if (!done) { + hclge_report_reset_fail_custom(hdev); + return; + } + + hclge_ext_resotre_config(hdev); + hclge_ext_call_event(hdev, HNAE3_RESET_DONE_CUSTOM); + dev_info(&hdev->pdev->dev, "report reset done!\n"); +} + +static const hclge_priv_ops_fn hclge_ext_func_arr[] = { + [HNAE3_EXT_OPC_RESET] = hclge_set_reset_task, + [HNAE3_EXT_OPC_EVENT_CALLBACK] = hclge_nic_call_event, + [HNAE3_EXT_OPC_GET_PFC_STORM_PARA] = hclge_get_pfc_storm_para, + [HNAE3_EXT_OPC_SET_PFC_STORM_PARA] = hclge_set_pfc_storm_para, + [HNAE3_EXT_OPC_SET_NOTIFY_PARAM] = hclge_set_notify_packet_para, + [HNAE3_EXT_OPC_SET_NOTIFY_START] = hclge_set_notify_packet_start, + [HNAE3_EXT_OPC_SET_TORUS_PARAM] = hclge_set_torus_param, + [HNAE3_EXT_OPC_GET_TORUS_PARAM] = hclge_get_torus_param, + [HNAE3_EXT_OPC_CLEAN_STATS64] = hclge_clean_stats64, + [HNAE3_EXT_OPC_GET_PORT_EXT_ID_INFO] = hclge_get_extend_port_id_info, + [HNAE3_EXT_OPC_GET_PORT_EXT_NUM_INFO] = hclge_get_extend_port_num_info, + [HNAE3_EXT_OPC_GET_PORT_NUM] = hclge_get_port_num, + [HNAE3_EXT_OPC_GET_PRESENT] = hclge_get_sfp_present, + [HNAE3_EXT_OPC_SET_SFP_STATE] = hclge_set_sfp_state, + [HNAE3_EXT_OPC_DISABLE_LANE] = hclge_disable_net_lane, + [HNAE3_EXT_OPC_GET_LANE_STATUS] = hclge_get_net_lane_status, + [HNAE3_EXT_OPC_DISABLE_CLOCK] = hclge_disable_nic_clock, + [HNAE3_EXT_OPC_SET_PFC_TIME] = hclge_set_pause_trans_time, + [HNAE3_EXT_OPC_GET_HILINK_REF_LOS] = hclge_get_hilink_ref_los, + [HNAE3_EXT_OPC_GET_PORT_FAULT_STATUS] = hclge_get_port_fault_status, + [HNAE3_EXT_OPC_GET_PORT_TYPE] = hclge_get_port_wire_type, + [HNAE3_EXT_OPC_SET_MAC_STATE] = hclge_set_mac_state, + [HNAE3_EXT_OPC_SET_LED] = hclge_set_led, + [HNAE3_EXT_OPC_GET_LED_SIGNAL] = hclge_get_led_signal, + [HNAE3_EXT_OPC_GET_PHY_REG] = hclge_get_phy_reg, + [HNAE3_EXT_OPC_SET_PHY_REG] 
= hclge_set_phy_reg, +}; + +int hclge_ext_ops_handle(struct hnae3_handle *handle, int opcode, + void *data, size_t length) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + int cmd_num = ARRAY_SIZE(hclge_ext_func_arr); + struct hclge_dev *hdev = vport->back; + hclge_priv_ops_fn ext_opcode_func; + + if (opcode >= cmd_num) { + dev_err(&hdev->pdev->dev, "invalid opcode %d\n", opcode); + return -EINVAL; + } + + ext_opcode_func = hclge_ext_func_arr[opcode]; + if (!ext_opcode_func) { + dev_err(&hdev->pdev->dev, "unsupported opcode %d\n", opcode); + return -EOPNOTSUPP; + } + + return ext_opcode_func(hdev, data, length); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.h new file mode 100644 index 0000000000000000000000000000000000000000..5aca6b097e42fc85cf4d8c0a490ff2d660405694 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ext.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016-2017 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_EXT_H +#define __HCLGE_EXT_H +#include + +#define HCLGE_PHY_ID_FOR_RTL8211 0x001cc910 +#define HCLGE_PHY_ID_FOR_MVL1512 0x01410dd0 +#define HCLGE_PHY_ID_FOR_YT8521 0x00000110 +#define HCLGE_PHY_ID_MASK 0xFFFFFFF0U + +enum hclge_phy_page_region { + HCLGE_PHY_REGION_UTP_MII, + HCLGE_PHY_REGION_UTP_MMD, + HCLGE_PHY_REGION_UTP_LDS, + HCLGE_PHY_REGION_UTP_EXT, + HCLGE_PHY_REGION_SDS_MII, + HCLGE_PHY_REGION_SDS_EXT, + HCLGE_PHY_REGION_COM_REG, + HCLGE_PHY_REGION_MAX +}; + +enum hclge_phy_op_code { + PHY_OP_READ, + PHY_OP_WRITE +}; + +#define HCLGE_8211_PHY_INDIRECT_PAGE 0xa43 +#define HCLGE_8211_PHY_INDIRECT_REG 0x1b +#define HCLGE_8211_PHY_INDIRECT_DATA 0x1c +#define HCLGE_8211_PHY_INDIRECT_RANGE1_S 0xDC0 +#define HCLGE_8211_PHY_INDIRECT_RANGE1_E 0xDCF +#define HCLGE_8211_PHY_INDIRECT_RANGE2_S 0xDE0 +#define HCLGE_8211_PHY_INDIRECT_RANGE2_E 0xDF0 + +#define HCLGE_8521_PHY_SMI_SDS_ADDR 0xA000 +#define HCLGE_8521_PHY_LDS_MII_ADDR 0x100 + +#define HCLGE_NOTIFY_PARA_CFG_PKT_EN BIT(0) +#define HCLGE_NOTIFY_PARA_CFG_START_EN BIT(1) +#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_M GENMASK(5, 2) +#define HCLGE_NOTIFY_PARA_CFG_PKT_NUM_S 2 + +#define HCLGE_TORUS_MAC_ID_MASK 0x3 +#define HCLGE_TOURS_TCX_MAP_TCY_INIT 0x1c6144 +#define HCLGE_TOURS_TCX_MAP_TCY_NODE0_INIT 0x1c6141 + +#define HCLGE_VLAN_FE_NIC_INGRESS 0 +#define HCLGE_VLAN_FE_ROCEE_INGRESS 2 + +#define HCLGE_TORUS_LPBK_DROP_EN 20 +#define HCLGE_TC2VLANPRI_MAPPING_EN 19 +#define HCLGE_LLDP_LAN_PAIR_EN 18 +#define HCLGE_MC_BC_LAN_PAIR_EN 17 +#define HCLGE_UC_LAN_PAIR_EN 16 + +#define HCLGE_TORUS_TC1_DROP_EN BIT(26) + +#define HCLGE_TOURS_TCX_MAP_TCY_MASK 0x1c71c7 + +struct hclge_id_info_cmd { + __le32 chip_id; + __le32 mac_id; + __le32 io_die_id; + u8 rsv[12]; +}; + +struct hclge_num_info_cmd { + __le32 chip_num; + __le32 io_die_num; + u8 rsv[16]; +}; + +struct hclge_port_num_info_cmd { + __le32 port_num; + u8 rsv[20]; +}; + +struct hclge_pfc_storm_para_cmd { + __le32 dir; + __le32 enable; + __le32 
period_ms; + __le32 times; + __le32 recovery_period_ms; + __le32 rsv; +}; + +struct hclge_notify_pkt_param_cmd { + __le32 cfg; + __le32 ipg; + __le32 data[16]; + u8 vld_cfg; + u8 vld_ipg; + u8 vld_data; + u8 rsv[21]; +}; + +struct hclge_torus_cfg_cmd { + u8 rsv[4]; + __le32 lan_port_pair; + __le32 lan_fwd_tc_cfg; + __le32 pause_time_out; + __le32 pause_time_out_en; + __le32 torus_en; +}; + +struct hclge_sfp_present_cmd { + __le32 sfp_present; + __le32 rsv[5]; +}; + +struct hclge_sfp_enable_cmd { + __le32 sfp_enable; + __le32 rsv[5]; +}; + +struct hclge_lamp_signal_cmd { + __le32 type; + __le32 status; + u8 error; + u8 locate; + u8 activity; + u8 rsv[13]; +}; + +struct hclge_reset_fail_type_map { + enum hnae3_reset_type reset_type; + enum hnae3_event_type_custom custom_type; +}; + +typedef int (*hclge_priv_ops_fn)(struct hclge_dev *hdev, void *data, + size_t length); + +/** + * nic_event_fn_t - nic event handler prototype + * @netdev: net device + * @hnae3_event_type_custom: nic device event type + */ +typedef void (*nic_event_fn_t) (struct net_device *netdev, + enum hnae3_event_type_custom); + +/** + * nic_register_event - register for nic event handling + * @event_call: nic event handler + * return 0 - success , negative - fail + */ +int nic_register_event(nic_event_fn_t event_call); + +/** + * nic_unregister_event - unregister for nic event handling + * return 0 - success , negative - fail + */ +int nic_unregister_event(void); + +int hclge_ext_call_event(struct hclge_dev *hdev, + enum hnae3_event_type_custom event_t); +void hclge_ext_reset_end(struct hclge_dev *hdev, bool done); + +int hclge_ext_ops_handle(struct hnae3_handle *handle, int opcode, + void *data, size_t length); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dfd0c5f4cb9f554e28e98dc9ac7e1997635d83be..6c4168f7fd35a19b816d2aea29da9ce88daa55f1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -15,8 +15,10 @@ #include #include #include +#include #include "hclge_cmd.h" #include "hclge_dcb.h" +#include "hclge_ext.h" #include "hclge_main.h" #include "hclge_mbx.h" #include "hclge_mdio.h" @@ -26,6 +28,7 @@ #include "hnae3.h" #include "hclge_devlink.h" #include "hclge_comm_cmd.h" +#include "hclge_trace.h" #define HCLGE_NAME "hclge" @@ -36,7 +39,6 @@ #define BUF_MAX_PERCENT 100 #define BUF_RESERVE_PERCENT 90 -#define HCLGE_RESET_MAX_FAIL_CNT 5 #define HCLGE_RESET_SYNC_TIME 100 #define HCLGE_PF_RESET_SYNC_TIME 20 #define HCLGE_PF_RESET_SYNC_CNT 1500 @@ -62,6 +64,7 @@ static void hclge_update_fec_stats(struct hclge_dev *hdev); static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, int wait_cnt); static int hclge_update_port_info(struct hclge_dev *hdev); +static void hclge_reset_end(struct hnae3_handle *handle, bool done); static struct hnae3_ae_algo ae_algo; @@ -333,7 +336,9 @@ static const struct key_info tuple_key_info[] = { { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 }, { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 }, { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 }, - { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 }, + { OUTER_TUN_VNI, 24, KEY_OPT_VNI, + offsetof(struct hclge_fd_rule, tuples.outer_tun_vni), + offsetof(struct hclge_fd_rule, tuples_mask.outer_tun_vni) }, { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 }, { INNER_DST_MAC, 48, KEY_OPT_MAC, offsetof(struct hclge_fd_rule, tuples.dst_mac), @@ -391,6 +396,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) return hclge_comm_cmd_send(&hw->hw, desc, num); } +static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + trace_hclge_pf_cmd_send(hw, desc, 0, num); + + if (!is_special) { + for (i = 1; i < num; i++) + trace_hclge_pf_cmd_send(hw, &desc[i], i, num); + } else { + for (i = 1; i < num; i++) + trace_hclge_pf_special_cmd_send(hw, (u32 
*)&desc[i], + i, num); + } +} + +static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) + return; + + trace_hclge_pf_cmd_get(hw, desc, 0, num); + + if (!is_special) { + for (i = 1; i < num; i++) + trace_hclge_pf_cmd_get(hw, &desc[i], i, num); + } else { + for (i = 1; i < num; i++) + trace_hclge_pf_special_cmd_get(hw, (u32 *)&desc[i], + i, num); + } +} + +static const struct hclge_comm_cmq_ops hclge_cmq_ops = { + .trace_cmd_send = hclge_trace_cmd_send, + .trace_cmd_get = hclge_trace_cmd_get, +}; + static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -645,8 +692,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK; } - count += 1; - handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + if (hdev->ae_dev->dev_specs.hilink_version != + HCLGE_HILINK_H60) { + count += 1; + handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; + } + count += 1; handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; count += 1; @@ -882,9 +933,9 @@ static const struct hclge_speed_bit_map speed_bit_map[] = { {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, - {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, - {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, - {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS}, }; static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) @@ -940,100 +991,106 @@ static void hclge_update_fec_support(struct hclge_mac *mac) mac->supported); } +static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, + 
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_EXT_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_EXT_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = { + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_EXT_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT}, +}; + +static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = { + 
{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, + {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, + {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, + {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT}, + {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_EXT_BIT, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, + {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT}, +}; + static void hclge_convert_setting_sr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) { + if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_lr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - 
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit( - ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) { + if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_cr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) { + if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_kr(u16 speed_ability, unsigned long *link_mode) { - if (speed_ability & HCLGE_SUPPORT_1G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - link_mode); - if 
(speed_ability & HCLGE_SUPPORT_10G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_25G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_40G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_100G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - link_mode); - if (speed_ability & HCLGE_SUPPORT_200G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - link_mode); + int i; + + for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) { + if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit) + linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode, + link_mode); + } } static void hclge_convert_setting_fec(struct hclge_mac *mac) @@ -1156,13 +1213,13 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) static u32 hclge_get_max_speed(u16 speed_ability) { - if (speed_ability & HCLGE_SUPPORT_200G_BIT) + if (speed_ability & HCLGE_SUPPORT_200G_BITS) return HCLGE_MAC_SPEED_200G; - if (speed_ability & HCLGE_SUPPORT_100G_BIT) + if (speed_ability & HCLGE_SUPPORT_100G_BITS) return HCLGE_MAC_SPEED_100G; - if (speed_ability & HCLGE_SUPPORT_50G_BIT) + if (speed_ability & HCLGE_SUPPORT_50G_BITS) return HCLGE_MAC_SPEED_50G; if (speed_ability & HCLGE_SUPPORT_40G_BIT) @@ -1352,6 +1409,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); ae_dev->dev_specs.tnl_num = req1->tnl_num; + ae_dev->dev_specs.hilink_version = req1->hilink_version; } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -2942,7 +3000,7 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev) } } 
-static void hclge_reset_task_schedule(struct hclge_dev *hdev) +void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && @@ -3431,7 +3489,7 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) { if (!pci_num_vf(hdev->pdev)) { dev_err(&hdev->pdev->dev, @@ -4145,7 +4203,7 @@ static void hclge_show_rst_info(struct hclge_dev *hdev) static bool hclge_reset_err_handle(struct hclge_dev *hdev) { -#define MAX_RESET_FAIL_CNT 5 + struct hnae3_handle *handle = &hdev->vport[0].nic; if (hdev->reset_pending) { dev_info(&hdev->pdev->dev, "Reset pending %lu\n", @@ -4157,7 +4215,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) "reset failed because new reset interrupt\n"); hclge_clear_reset_cause(hdev); return false; - } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { + } else if (hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT) { hdev->rst_stats.reset_fail_cnt++; set_bit(hdev->reset_type, &hdev->reset_pending); dev_info(&hdev->pdev->dev, @@ -4171,7 +4229,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) /* recover the handshake status when reset fail */ hclge_reset_handshake(hdev, true); - dev_err(&hdev->pdev->dev, "Reset fail!\n"); + hclge_reset_end(handle, false); hclge_show_rst_info(hdev); @@ -4288,6 +4346,7 @@ static int hclge_reset_prepare(struct hclge_dev *hdev) static int hclge_reset_rebuild(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; int ret; hdev->rst_stats.hw_reset_done_cnt++; @@ -4333,6 +4392,8 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev) hclge_update_reset_level(hdev); + hclge_reset_end(handle, true); + return 0; } @@ -4354,10 +4415,11 @@ static void hclge_reset(struct hclge_dev 
*hdev) hclge_reset_task_schedule(hdev); } -static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) +void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); struct hclge_dev *hdev = ae_dev->priv; + int ret; /* We might end up getting called broadly because of 2 below cases: * 1. Recoverable error was conveyed through APEI and only way to bring @@ -4391,9 +4453,12 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", hdev->reset_level); - /* request reset & schedule reset task */ - set_bit(hdev->reset_level, &hdev->reset_request); - hclge_reset_task_schedule(hdev); + ret = hclge_ext_call_event(hdev, (u32)hdev->reset_level); + if (ret) { + /* request reset & schedule reset task */ + set_bit(hdev->reset_level, &hdev->reset_request); + hclge_reset_task_schedule(hdev); + } if (hdev->reset_level < HNAE3_GLOBAL_RESET) hdev->reset_level++; @@ -4419,7 +4484,15 @@ static void hclge_reset_timer(struct timer_list *t) dev_info(&hdev->pdev->dev, "triggering reset in reset timer\n"); - hclge_reset_event(hdev->pdev, NULL); + hclge_reset_event(hdev->pdev, &hdev->vport[0].nic); +} + +static void hclge_reset_end(struct hnae3_handle *handle, bool done) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_ext_reset_end(hdev, done); } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -4459,8 +4532,8 @@ static void hclge_handle_err_reset_request(struct hclge_dev *hdev) hclge_set_def_reset_request(ae_dev, reset_type); } - if (hdev->default_reset_request && ae_dev->ops->reset_event) - ae_dev->ops->reset_event(hdev->pdev, NULL); + if (hdev->default_reset_request) + hclge_reset_event(hdev->pdev, &hdev->vport[0].nic); /* enable interrupt after error handling complete */ hclge_enable_vector(&hdev->misc_vector, true); @@ -4475,6 +4548,7 
@@ static void hclge_handle_err_recovery(struct hclge_dev *hdev) if (hclge_find_error_source(hdev)) { hclge_handle_error_info_log(ae_dev); hclge_handle_mac_tnl(hdev); + hclge_handle_vf_queue_err_ras(hdev); } hclge_handle_err_reset_request(hdev); @@ -4555,6 +4629,177 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev) } } +static int hclge_set_fd_qb_counter(struct hclge_dev *hdev, u8 vf_id) +{ + struct hclge_fd_qb_ad_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_QB_AD_OP, false); + req = (struct hclge_fd_qb_ad_cmd *)desc.data; + req->vf_id = vf_id; + hnae3_set_bit(req->ad_sel, HCLGE_FD_QB_AD_COUNTER_VLD_B, 1); + req->counter_id = vf_id % hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_warn(&hdev->pdev->dev, + "failed to set qb counter for vport %u, ret = %d.\n", + vf_id, ret); + return ret; +} + +static void hclge_init_fd_qb_counter(struct hclge_dev *hdev) +{ + int ret; + u16 i; + + if (!test_bit(HNAE3_DEV_SUPPORT_QB_B, hdev->ae_dev->caps)) + return; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_set_fd_qb_counter(hdev, i); + if (ret) + return; + } +} + +static int hclge_set_fd_qb(struct hclge_dev *hdev, u8 vf_id, bool enable) +{ + struct hclge_fd_qb_cfg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_QB_CTRL, false); + req = (struct hclge_fd_qb_cfg_cmd *)desc.data; + req->en = enable; + req->vf_id = vf_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to %s qb config for vport %u, ret = %d.\n", + enable ? 
"enable" : "disable", vf_id, ret); + return ret; +} + +static int hclge_sync_pf_qb_mode(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_handle *handle = &vport->nic; + bool request_enable = true; + int ret; + u16 i; + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state)) + return 0; + + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE || + hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || + !test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags)) + request_enable = false; + + if (request_enable == + test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state)) { + spin_unlock_bh(&hdev->fd_rule_lock); + return 0; + } + + if (request_enable) + hclge_clear_arfs_rules(hdev); + + ret = hclge_set_fd_qb(hdev, vport->vport_id, request_enable); + if (!ret) { + if (request_enable) { + set_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state); + hdev->fd_active_type = HCLGE_FD_QB_ACTIVE; + } else { + clear_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state); + hdev->fd_active_type = HCLGE_FD_RULE_NONE; + } + + for (i = 1; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + } + } else { + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + } + spin_unlock_bh(&hdev->fd_rule_lock); + + return ret; +} + +static int hclge_sync_vf_qb_mode(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + bool request_enable = false; + int ret; + + if (!test_and_clear_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state)) + return 0; + + if (vport->vf_info.trusted && vport->vf_info.request_qb_en && + test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state)) + request_enable = true; + + ret = hclge_set_fd_qb(hdev, vport->vport_id, request_enable); + if (ret) + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + else + vport->vf_info.qb_en = request_enable ? 
1 : 0; + + return ret; +} + +static int hclge_disable_fd_qb_mode(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vport *vport; + int ret; + u16 i; + + if (!test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps) || + !test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state)) + return 0; + + ret = hclge_set_fd_qb(hdev, 0, false); + if (ret) + return ret; + + clear_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state); + + for (i = 1; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + } + + return 0; +} + +static void hclge_sync_fd_qb_mode(struct hclge_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct hclge_vport *vport; + int ret; + u16 i; + + if (!test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps)) + return; + + ret = hclge_sync_pf_qb_mode(hdev); + if (ret) + return; + + for (i = 1; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + + ret = hclge_sync_vf_qb_mode(vport); + if (ret) + return; + } +} + static void hclge_periodic_service_task(struct hclge_dev *hdev) { unsigned long delta = round_jiffies_relative(HZ); @@ -4568,6 +4813,7 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev) hclge_update_link_status(hdev); hclge_sync_mac_table(hdev); hclge_sync_promisc_mode(hdev); + hclge_sync_fd_qb_mode(hdev); hclge_sync_fd_table(hdev); if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { @@ -5086,10 +5332,29 @@ static void hclge_request_update_promisc_mode(struct hnae3_handle *handle) set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); } +static bool hclge_query_fd_qb_state(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state); +} + +static void hclge_flush_qb_config(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); 
+} + static void hclge_sync_fd_state(struct hclge_dev *hdev) { - if (hlist_empty(&hdev->fd_rule_list)) + struct hclge_vport *vport = &hdev->vport[0]; + + if (hlist_empty(&hdev->fd_rule_list)) { hdev->fd_active_type = HCLGE_FD_RULE_NONE; + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + } } static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) @@ -5495,8 +5760,9 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) /* If use max 400bit key, we can support tuples for ether type */ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { - key_cfg->tuple_active |= - BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC); + key_cfg->tuple_active |= BIT(INNER_DST_MAC) | + BIT(INNER_SRC_MAC) | + BIT(OUTER_TUN_VNI); if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES; } @@ -5514,6 +5780,11 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) if (ret) return ret; + if (!hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) + hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1] = 1; + + hclge_init_fd_qb_counter(hdev); + return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); } @@ -5562,6 +5833,8 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, struct hclge_fd_ad_data *action) { +#define HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 128 + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclge_fd_ad_config_cmd *req; struct hclge_desc desc; @@ -5588,14 +5861,17 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, action->forward_to_direct_queue); - hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S, + hnae3_set_field(ad_data, HCLGE_FD_AD_QID_L_M, HCLGE_FD_AD_QID_L_S, action->queue_id); hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, 
action->use_counter); - hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M, - HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id); + hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_L_M, + HCLGE_FD_AD_COUNTER_NUM_L_S, action->counter_id); hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage); hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S, - action->counter_id); + action->next_input_key); + hnae3_set_bit(ad_data, HCLGE_FD_AD_QID_H_B, + action->queue_id >= HCLGE_FD_COUNTER_MAX_SIZE_DEV_V2 ? + 1 : 0); req->ad_data = cpu_to_le64(ad_data); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -5608,6 +5884,8 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, struct hclge_fd_rule *rule) { +#define HCLGE_VNI_LENGTH 3 + int offset, moffset, ip_offset; enum HCLGE_FD_KEY_OPT key_opt; u16 tmp_x_s, tmp_y_s; @@ -5660,6 +5938,14 @@ static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y, *(__le32 *)key_x = cpu_to_le32(tmp_x_l); *(__le32 *)key_y = cpu_to_le32(tmp_y_l); + return true; + case KEY_OPT_VNI: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + for (i = 0; i < HCLGE_VNI_LENGTH; i++) { + key_x[i] = (cpu_to_le32(tmp_x_l) >> (i * BITS_PER_BYTE)) & 0xFF; + key_y[i] = (cpu_to_le32(tmp_y_l) >> (i * BITS_PER_BYTE)) & 0xFF; + } return true; default: return false; @@ -5882,6 +6168,45 @@ static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec, return 0; } +static int hclge_fd_check_vxlan4_tuple(struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + struct ethtool_vxlan4_spec *spec = &fs->h_u.vxlan_ip4_spec; + struct ethtool_vxlan4_spec *mask = &fs->m_u.vxlan_ip4_spec; + + /* Vni is only 24 bits and must be greater than 0, and it can not be + * masked. 
+ */ + if (!spec->vni || be32_to_cpu(spec->vni) >= VXLAN_N_VID || + mask->vni != HCLGE_FD_VXLAN_VNI_UNMASK || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (is_zero_ether_addr(spec->src)) + *unused_tuple |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(spec->dst)) + *unused_tuple |= BIT(INNER_DST_MAC); + + if (!spec->eth_type) + *unused_tuple |= BIT(INNER_ETH_TYPE); + + if (!spec->ip4src) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (!spec->ip4dst) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->tos) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); + + return 0; +} + static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec, u32 *unused_tuple) { @@ -5937,6 +6262,45 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec, return 0; } +static int hclge_fd_check_vxlan6_tuple(struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple) +{ + struct ethtool_vxlan6_spec *spec = &fs->h_u.vxlan_ip6_spec; + struct ethtool_vxlan6_spec *mask = &fs->m_u.vxlan_ip6_spec; + + /* Vni is only 24 bits and must be greater than 0, and it can not be + * masked. 
+ */ + if (!spec->vni || be32_to_cpu(spec->vni) >= VXLAN_N_VID || + mask->vni != HCLGE_FD_VXLAN_VNI_UNMASK || !unused_tuple) + return -EINVAL; + + *unused_tuple |= BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT); + + if (is_zero_ether_addr(spec->src)) + *unused_tuple |= BIT(INNER_SRC_MAC); + + if (is_zero_ether_addr(spec->dst)) + *unused_tuple |= BIT(INNER_DST_MAC); + + if (!spec->eth_type) + *unused_tuple |= BIT(INNER_ETH_TYPE); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6src)) + *unused_tuple |= BIT(INNER_SRC_IP); + + if (ipv6_addr_any((struct in6_addr *)spec->ip6dst)) + *unused_tuple |= BIT(INNER_DST_IP); + + if (!spec->tclass) + *unused_tuple |= BIT(INNER_IP_TOS); + + if (!spec->l4_proto) + *unused_tuple |= BIT(INNER_IP_PROTO); + + return 0; +} + static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple) { if (!spec || !unused_tuple) @@ -6119,6 +6483,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec, unused_tuple); break; + case VXLAN_V4_FLOW: + ret = hclge_fd_check_vxlan4_tuple(fs, unused_tuple); + break; case SCTP_V6_FLOW: case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -6129,6 +6496,9 @@ static int hclge_fd_check_spec(struct hclge_dev *hdev, ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec, unused_tuple); break; + case VXLAN_V6_FLOW: + ret = hclge_fd_check_vxlan6_tuple(fs, unused_tuple); + break; case ETHER_FLOW: if (hdev->fd_cfg.fd_mode != HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { @@ -6209,6 +6579,37 @@ static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs, rule->tuples_mask.ether_proto = 0xFFFF; } +static void hclge_fd_get_vxlan4_tuple(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + struct ethtool_vxlan4_spec *h = &fs->h_u.vxlan_ip4_spec; + struct ethtool_vxlan4_spec *m = &fs->m_u.vxlan_ip4_spec; + + rule->tuples.outer_tun_vni = be32_to_cpu(h->vni); + rule->tuples_mask.outer_tun_vni = be32_to_cpu(m->vni); + + 
ether_addr_copy(rule->tuples.src_mac, h->src); + ether_addr_copy(rule->tuples_mask.src_mac, m->src); + + ether_addr_copy(rule->tuples.dst_mac, h->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, m->dst); + + rule->tuples.ether_proto = be16_to_cpu(h->eth_type); + rule->tuples_mask.ether_proto = be16_to_cpu(m->eth_type); + + rule->tuples.ip_tos = h->tos; + rule->tuples_mask.ip_tos = m->tos; + + rule->tuples.ip_proto = h->l4_proto; + rule->tuples_mask.ip_proto = m->l4_proto; + + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(h->ip4src); + rule->tuples_mask.src_ip[IPV4_INDEX] = be32_to_cpu(m->ip4src); + + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(h->ip4dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = be32_to_cpu(m->ip4dst); +} + static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule, u8 ip_proto) { @@ -6238,6 +6639,37 @@ static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs, rule->tuples_mask.ip_proto = 0xFF; } +static void hclge_fd_get_vxlan6_tuple(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + struct ethtool_vxlan6_spec *h = &fs->h_u.vxlan_ip6_spec; + struct ethtool_vxlan6_spec *m = &fs->m_u.vxlan_ip6_spec; + + rule->tuples.outer_tun_vni = be32_to_cpu(h->vni); + rule->tuples_mask.outer_tun_vni = be32_to_cpu(m->vni); + + ether_addr_copy(rule->tuples.src_mac, h->src); + ether_addr_copy(rule->tuples_mask.src_mac, m->src); + + ether_addr_copy(rule->tuples.dst_mac, h->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, m->dst); + + rule->tuples.ether_proto = be16_to_cpu(h->eth_type); + rule->tuples_mask.ether_proto = be16_to_cpu(m->eth_type); + + rule->tuples.ip_tos = h->tclass; + rule->tuples_mask.ip_tos = m->tclass; + + rule->tuples.ip_proto = h->l4_proto; + rule->tuples_mask.ip_proto = m->l4_proto; + + be32_to_cpu_array(rule->tuples.src_ip, h->ip6src, IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, m->ip6src, IPV6_SIZE); + + be32_to_cpu_array(rule->tuples.dst_ip, h->ip6dst, 
IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, m->ip6dst, IPV6_SIZE); +} + static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs, struct hclge_fd_rule *rule) { @@ -6316,6 +6748,9 @@ static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, case IP_USER_FLOW: hclge_fd_get_ip4_tuple(fs, rule); break; + case VXLAN_V4_FLOW: + hclge_fd_get_vxlan4_tuple(fs, rule); + break; case SCTP_V6_FLOW: hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP); break; @@ -6328,6 +6763,9 @@ static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs, case IPV6_USER_FLOW: hclge_fd_get_ip6_tuple(fs, rule); break; + case VXLAN_V6_FLOW: + hclge_fd_get_vxlan6_tuple(fs, rule); + break; case ETHER_FLOW: hclge_fd_get_ether_tuple(fs, rule); break; @@ -6366,6 +6804,10 @@ static int hclge_add_fd_entry_common(struct hclge_dev *hdev, { int ret; + ret = hclge_disable_fd_qb_mode(hdev); + if (ret) + return ret; + spin_lock_bh(&hdev->fd_rule_lock); if (hdev->fd_active_type != rule->rule_type && @@ -6674,6 +7116,48 @@ static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule, spec->ip_ver = ETH_RX_NFC_IP4; } +static void hclge_fd_get_vxlan4_info(struct hclge_fd_rule *rule, + struct ethtool_vxlan4_spec *spec, + struct ethtool_vxlan4_spec *spec_mask) +{ + spec->vni = cpu_to_be32(rule->tuples.outer_tun_vni); + spec_mask->vni = rule->unused_tuple & BIT(OUTER_TUN_VNI) ? 0 : + cpu_to_be32(rule->tuples_mask.outer_tun_vni); + + ether_addr_copy(spec->src, rule->tuples.src_mac); + ether_addr_copy(spec->dst, rule->tuples.dst_mac); + + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->src); + else + ether_addr_copy(spec_mask->src, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->dst); + else + ether_addr_copy(spec_mask->dst, rule->tuples_mask.dst_mac); + + spec->eth_type = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->eth_type = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 
0 : + cpu_to_be16(rule->tuples_mask.ether_proto); + + spec->tos = rule->tuples.ip_tos; + spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ? 0 : + rule->tuples_mask.ip_tos; + + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 0 : + rule->tuples_mask.ip_proto; + + spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]); + spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ? 0 : + cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]); + + spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]); + spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ? 0 : + cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]); +} + static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule, struct ethtool_tcpip6_spec *spec, struct ethtool_tcpip6_spec *spec_mask) @@ -6734,6 +7218,56 @@ static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule, 0 : rule->tuples_mask.ip_proto; } +static void hclge_fd_get_vxlan6_info(struct hclge_fd_rule *rule, + struct ethtool_vxlan6_spec *spec, + struct ethtool_vxlan6_spec *spec_mask) +{ + spec->vni = cpu_to_be32(rule->tuples.outer_tun_vni); + spec_mask->vni = rule->unused_tuple & BIT(OUTER_TUN_VNI) ? 0 : + cpu_to_be32(rule->tuples_mask.outer_tun_vni); + + ether_addr_copy(spec->src, rule->tuples.src_mac); + ether_addr_copy(spec->dst, rule->tuples.dst_mac); + + if (rule->unused_tuple & BIT(INNER_SRC_MAC)) + eth_zero_addr(spec_mask->src); + else + ether_addr_copy(spec_mask->src, rule->tuples_mask.src_mac); + + if (rule->unused_tuple & BIT(INNER_DST_MAC)) + eth_zero_addr(spec_mask->dst); + else + ether_addr_copy(spec_mask->dst, rule->tuples_mask.dst_mac); + + spec->eth_type = cpu_to_be16(rule->tuples.ether_proto); + spec_mask->eth_type = rule->unused_tuple & BIT(INNER_ETH_TYPE) ? 0 : + cpu_to_be16(rule->tuples_mask.ether_proto); + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ? 
0 : + rule->tuples_mask.ip_tos; + + spec->l4_proto = rule->tuples.ip_proto; + spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ? 0 : + rule->tuples_mask.ip_proto; + + cpu_to_be32_array(spec->ip6src, + rule->tuples.src_ip, IPV6_SIZE); + cpu_to_be32_array(spec->ip6dst, + rule->tuples.dst_ip, IPV6_SIZE); + if (rule->unused_tuple & BIT(INNER_SRC_IP)) + memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src)); + else + cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip, + IPV6_SIZE); + + if (rule->unused_tuple & BIT(INNER_DST_IP)) + memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst)); + else + cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip, + IPV6_SIZE); +} + static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule, struct ethhdr *spec, struct ethhdr *spec_mask) @@ -6860,6 +7394,10 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec, &fs->m_u.usr_ip4_spec); break; + case VXLAN_V4_FLOW: + hclge_fd_get_vxlan4_info(rule, &fs->h_u.vxlan_ip4_spec, + &fs->m_u.vxlan_ip4_spec); + break; case SCTP_V6_FLOW: case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -6870,6 +7408,10 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec, &fs->m_u.usr_ip6_spec); break; + case VXLAN_V6_FLOW: + hclge_fd_get_vxlan6_info(rule, &fs->h_u.vxlan_ip6_spec, + &fs->m_u.vxlan_ip6_spec); + break; /* The flow type of fd rule has been checked before adding in to rule * list. 
As other flow types have been handled, it must be ETHER_FLOW * for the default case @@ -7479,7 +8021,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) hclge_task_schedule(hdev, 0); } -static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +int hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { #define HCLGE_LINK_STATUS_WAIT_CNT 3 @@ -7508,14 +8050,17 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(&hdev->pdev->dev, - "mac enable fail, ret =%d.\n", ret); - return; + dev_err(&hdev->pdev->dev, "failed to %s mac, ret = %d.\n", + enable ? "enable" : "disable", ret); + + return ret; } if (!enable) hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, HCLGE_LINK_STATUS_WAIT_CNT); + + return 0; } static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, @@ -8008,6 +8553,7 @@ int hclge_vport_start(struct hclge_vport *vport) set_bit(HCLGE_VPORT_STATE_INITED, &vport->state); set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); vport->last_active_jiffies = jiffies; vport->need_notify = 0; @@ -9894,6 +10440,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) static int hclge_init_vlan_filter(struct hclge_dev *hdev) { struct hclge_vport *vport; + bool enable = true; int ret; int i; @@ -9913,8 +10460,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev) vport->cur_vlan_fltr_en = true; } + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && + !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) + enable = false; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, 0); + HCLGE_FILTER_FE_INGRESS, enable, 0); } static int hclge_init_vlan_type(struct hclge_dev *hdev) @@ -10186,6 +10737,7 @@ static void 
hclge_restore_hw_table(struct hclge_dev *hdev) hclge_restore_vport_port_base_vlan_config(hdev); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); + clear_bit(HCLGE_STATE_HW_QB_ENABLE, &hdev->state); hclge_restore_fd_entries(handle); } @@ -10827,6 +11379,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } +int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) +{ + struct hclge_comm_query_scc_cmd *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1); + resp = (struct hclge_comm_query_scc_cmd *)desc.data; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + *scc_version = le32_to_cpu(resp->scc_version); + + return 0; +} + static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -10985,8 +11555,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, *lane_num = hdev->hw.mac.lane_num; } -static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, - u8 *module_type) +void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -11622,6 +12192,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_devlink_uninit; /* Firmware command initialize */ + hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, true, hdev->reset_pending); if (ret) @@ -11930,6 +12501,8 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) return 0; vport->vf_info.trusted = new_trusted; + + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state); hclge_task_schedule(hdev, 0); @@ -12100,6 +12673,8 @@ static int 
hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_reset_tc_config(hdev); + ret = hclge_tm_init_hw(hdev, true); if (ret) { dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); @@ -12657,7 +13232,7 @@ static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, return 0; } -static const struct hnae3_ae_ops hclge_ops = { +struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, .reset_prepare = hclge_reset_prepare_general, @@ -12670,6 +13245,8 @@ static const struct hnae3_ae_ops hclge_ops = { .put_vector = hclge_put_vector, .set_promisc_mode = hclge_set_promisc_mode, .request_update_promisc_mode = hclge_request_update_promisc_mode, + .request_flush_qb_config = hclge_flush_qb_config, + .query_fd_qb_state = hclge_query_fd_qb_state, .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, @@ -12763,6 +13340,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_dscp_prio = hclge_get_dscp_prio, .get_wol = hclge_get_wol, .set_wol = hclge_set_wol, + .priv_ops = hclge_ext_ops_handle, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7bc2049b723daa387aba2a083dc526a2e68083ba..0240f026436baa9526411326028cb1f46c498075 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,7 +12,7 @@ #include "hclge_cmd.h" #include "hclge_ptp.h" -#include "hnae3.h" +#include "hnae3_ext.h" #include "hclge_comm_rss.h" #include "hclge_comm_tqp_stats.h" @@ -26,6 +26,8 @@ #define HCLGE_RD_FIRST_STATS_NUM 2 #define HCLGE_RD_OTHER_STATS_NUM 4 +#define HCLGE_RESET_MAX_FAIL_CNT 5 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -185,15 +187,25 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_1G_BIT BIT(0) #define HCLGE_SUPPORT_10G_BIT BIT(1) #define 
HCLGE_SUPPORT_25G_BIT BIT(2) -#define HCLGE_SUPPORT_50G_BIT BIT(3) -#define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_50G_R2_BIT BIT(3) +#define HCLGE_SUPPORT_100G_R4_BIT BIT(4) /* to be compatible with exsit board */ #define HCLGE_SUPPORT_40G_BIT BIT(5) #define HCLGE_SUPPORT_100M_BIT BIT(6) #define HCLGE_SUPPORT_10M_BIT BIT(7) -#define HCLGE_SUPPORT_200G_BIT BIT(8) +#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8) +#define HCLGE_SUPPORT_50G_R1_BIT BIT(9) +#define HCLGE_SUPPORT_100G_R2_BIT BIT(10) +#define HCLGE_SUPPORT_200G_R4_BIT BIT(11) + #define HCLGE_SUPPORT_GE \ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) +#define HCLGE_SUPPORT_50G_BITS \ + (HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT) +#define HCLGE_SUPPORT_100G_BITS \ + (HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT) +#define HCLGE_SUPPORT_200G_BITS \ + (HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -214,6 +226,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_FD_TBL_CHANGED, HCLGE_STATE_FD_CLEAR_ALL, HCLGE_STATE_FD_USER_DEF_CHANGED, + HCLGE_STATE_HW_QB_ENABLE, HCLGE_STATE_PTP_EN, HCLGE_STATE_PTP_TX_HANDLING, HCLGE_STATE_FEC_STATS_UPDATING, @@ -246,6 +259,12 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; +/* hilink version */ +enum hclge_hilink_version { + HCLGE_HILINK_H32 = 0, + HCLGE_HILINK_H60 = 1, +}; + #define QUERY_SFP_SPEED 0 #define QUERY_ACTIVE_SPEED 1 @@ -369,6 +388,7 @@ struct hclge_tm_info { enum hclge_fc_mode fc_mode; u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ u8 pfc_en; /* PFC enabled or not for user priority */ + u16 pause_time; }; /* max number of mac statistics on each version */ @@ -624,6 +644,7 @@ struct key_info { #define HCLGE_FD_USER_DEF_DATA GENMASK(15, 0) #define HCLGE_FD_USER_DEF_OFFSET GENMASK(15, 0) #define HCLGE_FD_USER_DEF_OFFSET_UNMASK GENMASK(15, 0) +#define HCLGE_FD_VXLAN_VNI_UNMASK GENMASK(31, 0) /* assigned by firmware, the real filter number for each pf 
may be less */ #define MAX_FD_FILTER_NUM 4096 @@ -639,6 +660,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE { HCLGE_FD_ARFS_ACTIVE, HCLGE_FD_EP_ACTIVE, HCLGE_FD_TC_FLOWER_ACTIVE, + HCLGE_FD_QB_ACTIVE, }; enum HCLGE_FD_PACKET_TYPE { @@ -717,6 +739,7 @@ struct hclge_fd_rule_tuples { u32 l4_user_def; u8 ip_tos; u8 ip_proto; + u32 outer_tun_vni; }; struct hclge_fd_rule { @@ -965,6 +988,8 @@ struct hclge_dev { struct hclge_ptp *ptp; struct devlink *devlink; struct hclge_comm_rss_cfg rss_cfg; + struct hnae3_notify_pkt_param notify_param; + struct hnae3_torus_param torus_param; }; /* VPort level vlan tag configuration for TX direction */ @@ -994,6 +1019,7 @@ struct hclge_rx_vtag_cfg { enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_ALIVE, HCLGE_VPORT_STATE_MAC_TBL_CHANGE, + HCLGE_VPORT_STATE_QB_CHANGE, HCLGE_VPORT_STATE_PROMISC_CHANGE, HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, HCLGE_VPORT_STATE_INITED, @@ -1024,6 +1050,8 @@ struct hclge_vf_info { u32 spoofchk; u32 max_tx_rate; u32 trusted; + u8 request_qb_en; + u8 qb_en; u8 request_uc_en; u8 request_mc_en; u8 request_bc_en; @@ -1076,6 +1104,11 @@ struct hclge_mac_speed_map { u32 speed_fw; /* speed defined in firmware */ }; +struct hclge_link_mode_bmap { + u16 support_bit; + enum ethtool_link_mode_bit_indices link_mode; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, @@ -1146,4 +1179,12 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len); int hclge_push_vf_link_status(struct hclge_vport *vport); int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en); int hclge_mac_update_stats(struct hclge_dev *hdev); +struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf); +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type); +int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version); +void hclge_reset_task_schedule(struct hclge_dev *hdev); +void 
hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle); +void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type); +int hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 04ff9bf121853ab7a0f876c55b49d1e6d9199035..700a074ef753b145450280ba2025453a75e8bafd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -124,7 +124,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } -static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) +int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type) { __le16 msg_data; u8 dest_vfid; @@ -811,7 +811,7 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev) ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET); dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n"); - ae_dev->ops->reset_event(hdev->pdev, NULL); + hclge_reset_event(hdev->pdev, &hdev->vport[0].nic); } static void hclge_handle_vf_tbl(struct hclge_vport *vport, @@ -831,6 +831,36 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport, } } +static void hclge_handle_vf_qb(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) +{ + struct hclge_dev *hdev = vport->back; + + if (mbx_req->msg.subcode == HCLGE_MBX_QB_CHECK_CAPS) { + struct hnae3_handle *handle = &hdev->vport[0].nic; + + resp_msg->data[0] = test_bit(HNAE3_PFLAG_FD_QB_ENABLE, + &handle->supported_pflags); + resp_msg->len = sizeof(u8); + } else if (mbx_req->msg.subcode == HCLGE_MBX_QB_ENABLE) { + vport->vf_info.request_qb_en = mbx_req->msg.data[0]; + set_bit(HCLGE_VPORT_STATE_QB_CHANGE, &vport->state); + } else if (mbx_req->msg.subcode == HCLGE_MBX_QB_GET_STATE) { + u16 msg_data = 
vport->vf_info.qb_en; + int ret; + + ret = hclge_send_mbx_msg(vport, (u8 *)&msg_data, + sizeof(msg_data), + HCLGE_MBX_PUSH_QB_STATE, + vport->vport_id); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to inform qb state to vport %u, ret = %d\n", + vport->vport_id, ret); + } +} + static int hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param) { @@ -1040,6 +1070,12 @@ static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param) return 0; } +static int hclge_mbx_handle_vf_qb_handler(struct hclge_mbx_ops_param *param) +{ + hclge_handle_vf_qb(param->vport, param->req, param->resp_msg); + return 0; +} + static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = { [HCLGE_MBX_RESET] = hclge_mbx_reset_handler, [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler, @@ -1064,6 +1100,7 @@ static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = { [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler, [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler, [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler, + [HCLGE_MBX_SET_QB] = hclge_mbx_handle_vf_qb_handler, [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler, [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler, [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler, @@ -1123,10 +1160,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev) req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); - if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) || + req->mbx_src_vfid > hdev->num_req_vfs)) { dev_warn(&hdev->pdev->dev, - "dropped invalid mailbox message, code = %u\n", - req->msg.code); + "dropped invalid mailbox message, code = %u, vfid = %u\n", + req->msg.code, req->mbx_src_vfid); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index c58c3122176289cb0991e45e823f74ad3bf83e3a..6387120bee04c884d0077e68eb3171c64a94809e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -185,8 +185,8 @@ int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, - u8 pause_trans_gap, u16 pause_trans_time) +int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) { struct hclge_cfg_pause_param_cmd *pause_param; struct hclge_desc desc; @@ -1498,7 +1498,7 @@ static int hclge_pause_param_setup_hw(struct hclge_dev *hdev) return hclge_pause_param_cfg(hdev, mac->mac_addr, HCLGE_DEFAULT_PAUSE_TRANS_GAP, - HCLGE_DEFAULT_PAUSE_TRANS_TIME); + hdev->tm_info.pause_time); } static int hclge_pfc_setup_hw(struct hclge_dev *hdev) @@ -1692,6 +1692,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev) /* fc_mode is HCLGE_FC_FULL on reset */ hdev->tm_info.fc_mode = HCLGE_FC_FULL; hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.pause_time = HCLGE_DEFAULT_PAUSE_TRANS_TIME; if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE && hdev->tm_info.num_pg != 1) @@ -2143,3 +2144,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable) return ret; } + +void hclge_reset_tc_config(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = &hdev->vport[0]; + struct hnae3_knic_private_info *kinfo; + + kinfo = &vport->nic.kinfo; + + if (!kinfo->tc_info.mqprio_destroy) + return; + + /* clear tc info, including mqprio_destroy and mqprio_active */ + memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info)); + hclge_tm_schd_info_update(hdev, 0); + hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg); +} diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 53eec6df5194682da957a0a2f57a161df87fc953..49cc441f999da556c7fd0cb2e555052fd9504735 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -244,6 +244,8 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, u8 pfc_bitmap); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); +int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); int hclge_mac_pause_setup_hw(struct hclge_dev *hdev); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); @@ -277,4 +279,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev, int hclge_up_to_tc_map(struct hclge_dev *hdev); int hclge_dscp_to_tc_map(struct hclge_dev *hdev); int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable); +void hclge_reset_tc_config(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index f3cd5a376eca90493395687fc40f9da27715a674..d8afc2a052dc69c6af9b71f644df9b27eda4f71a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -10,6 +10,7 @@ #include +#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32)) #define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32)) #define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32)) @@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send, ) ); +DECLARE_EVENT_CLASS(hclge_pf_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num), + + 
TP_STRUCT__entry(__field(u16, opcode) + __field(u16, flag) + __field(u16, retval) + __field(u16, rsv) + __field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, HCLGE_DESC_DATA_LEN)), + + TP_fast_assign(int i; + __entry->opcode = le16_to_cpu(desc->opcode); + __entry->flag = le16_to_cpu(desc->flag); + __entry->retval = le16_to_cpu(desc->retval); + __entry->rsv = le16_to_cpu(desc->rsv); + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < HCLGE_DESC_DATA_LEN; i++) + __entry->data[i] = le32_to_cpu(desc->data[i]);), + + TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s", + __get_str(pciname), __entry->opcode, + __entry->index, __entry->num, + __entry->flag, __entry->retval, __entry->rsv, + __print_array(__entry->data, + HCLGE_DESC_DATA_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) +); + +DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) +); + +DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + u32 *data, + int index, + int num), + TP_ARGS(hw, data, index, num), + + TP_STRUCT__entry(__field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, PF_DESC_LEN)), + + TP_fast_assign(int i; + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < PF_DESC_LEN; i++) + __entry->data[i] = le32_to_cpu(data[i]); + ), + + TP_printk("%s %d-%d data:%s", + __get_str(pciname), + __entry->index, __entry->num, + __print_array(__entry->data, + PF_DESC_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_pf_special_cmd_template, 
hclge_pf_special_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + u32 *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num)); + +DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get, + TP_PROTO(struct hclge_comm_hw *hw, + u32 *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num) +); + #endif /* _HCLGE_TRACE_H_ */ /* This must be outside ifdef _HCLGE_TRACE_H */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c index 1b535142c65a6ecfa1559863f2ad1e9a759bcc4b..d32e6f17966a95d8a9cd796d3746278e25f4c98d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -41,8 +41,9 @@ static int hclgevf_devlink_reload_down(struct devlink *devlink, struct pci_dev *pdev = hdev->pdev; int ret; - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { - dev_err(&pdev->dev, "reset is handling\n"); + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || + !test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling or driver removed\n"); return -EBUSY; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0aa9beefd1c7ee6c53d9f2623069bae00848e77f..9bf8bd19cf938a87115c70468006046ebf604fa8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -11,6 +11,7 @@ #include "hnae3.h" #include "hclgevf_devlink.h" #include "hclge_comm_rss.h" +#include "hclgevf_trace.h" #define HCLGEVF_NAME "hclgevf" @@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) return hclge_comm_cmd_send(&hw->hw, desc, num); } +static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + 
trace_hclge_vf_cmd_send(hw, desc, 0, num); + + if (is_special) + return; + + for (i = 1; i < num; i++) + trace_hclge_vf_cmd_send(hw, &desc[i], i, num); +} + +static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, + int num, bool is_special) +{ + int i; + + if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) + return; + + trace_hclge_vf_cmd_get(hw, desc, 0, num); + + if (is_special) + return; + + for (i = 1; i < num; i++) + trace_hclge_vf_cmd_get(hw, &desc[i], i, num); +} + +static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = { + .trace_cmd_send = hclgevf_trace_cmd_send, + .trace_cmd_get = hclgevf_trace_cmd_get, +}; + void hclgevf_arq_init(struct hclgevf_dev *hdev) { struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; @@ -354,6 +391,74 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_update_fd_qb_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->nic; + struct hclge_vf_to_pf_msg send_msg; + int ret; + + if (!hdev->qb_cfg.pf_support_qb || + !test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags)) + return; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB, + HCLGE_MBX_QB_GET_STATE); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "failed to get qb state, ret = %d", + ret); +} + +static void hclgevf_get_pf_qb_caps(struct hclgevf_dev *hdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg; + int ret; + + if (!test_bit(HNAE3_DEV_SUPPORT_QB_B, ae_dev->caps)) + return; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB, + HCLGE_MBX_QB_CHECK_CAPS); + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, + sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get qb caps from PF, ret = %d", ret); + return; + } + + hdev->qb_cfg.pf_support_qb = resp_msg > 0; +} + +static void hclgevf_set_fd_qb(struct hnae3_handle *handle) +{ 
+#define HCLGEVF_QB_MBX_STATE_OFFSET 0 + + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_vf_to_pf_msg send_msg; + u8 resp_msg; + int ret; + + hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_QB, + HCLGE_MBX_QB_ENABLE); + send_msg.data[HCLGEVF_QB_MBX_STATE_OFFSET] = + test_bit(HNAE3_PFLAG_FD_QB_ENABLE, &handle->priv_flags) ? 1 : 0; + ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, + sizeof(resp_msg)); + if (ret) + dev_err(&hdev->pdev->dev, "failed to set qb state, ret = %d", + ret); +} + +static bool hclgevf_query_fd_qb_state(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->qb_cfg.hw_qb_en; +} + static void hclgevf_request_link_info(struct hclgevf_dev *hdev) { struct hclge_vf_to_pf_msg send_msg; @@ -1901,6 +2006,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) hclgevf_sync_promisc_mode(hdev); + hclgevf_update_fd_qb_state(hdev); + hdev->last_serv_processed = jiffies; out: @@ -2796,6 +2903,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) } hclgevf_arq_init(hdev); + ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, false, hdev->reset_pending); @@ -2854,6 +2962,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_cmd_queue_init; hclgevf_arq_init(hdev); + + hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops); ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, false, hdev->reset_pending); @@ -2939,6 +3049,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + hclgevf_get_pf_qb_caps(hdev); + hclgevf_init_rxd_adv_layout(hdev); set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); @@ -3323,6 +3435,8 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_promisc_mode = hclgevf_set_promisc_mode, .request_update_promisc_mode = hclgevf_request_update_promisc_mode, .get_cmdq_stat = hclgevf_get_cmdq_stat, + .request_flush_qb_config = hclgevf_set_fd_qb, + 
.query_fd_qb_state = hclgevf_query_fd_qb_state, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a73f2bf3a56a6426704c64a20e74403c715ac09f..ad59f6a612540f35a0056ca5f4d44cff2fccb06d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -206,6 +206,11 @@ struct hclgevf_mac_table_cfg { struct list_head mc_mac_list; }; +struct hclgevf_qb_cfg { + bool pf_support_qb; + bool hw_qb_en; +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -274,6 +279,7 @@ struct hclgevf_dev { unsigned long serv_processed_cnt; unsigned long last_serv_processed; + struct hclgevf_qb_cfg qb_cfg; struct devlink *devlink; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 85c2a634c8f96a1d4d3356b0adf9c1f87f8ed9b8..6ccf23ce2744055449204936ac5607e1248bfcc6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -271,6 +271,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_MODE: case HCLGE_MBX_PUSH_VLAN_INFO: case HCLGE_MBX_PUSH_PROMISC_INFO: + case HCLGE_MBX_PUSH_QB_STATE: hclgevf_handle_mbx_msg(hdev, req); break; default: @@ -296,6 +297,19 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, "Promisc mode is closed by host for being untrusted.\n"); } +static void hclgevf_parse_qb_info(struct hclgevf_dev *hdev, u16 qb_state) +{ +#define HCLGEVF_HW_QB_ON 1 +#define HCLGEVF_HW_QB_OFF 0 + + if (qb_state > HCLGEVF_HW_QB_ON) { + dev_warn(&hdev->pdev->dev, "Invalid state, ignored.\n"); + return; + } + + hdev->qb_cfg.hw_qb_en = qb_state > HCLGEVF_HW_QB_OFF; +} + void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { struct hclge_mbx_port_base_vlan 
*vlan_info; @@ -374,6 +388,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_PUSH_PROMISC_INFO: hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1])); break; + case HCLGE_MBX_PUSH_QB_STATE: + hclgevf_parse_qb_info(hdev, msg_q[1]); + break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%u) message from arq\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index b259e95dd53c263819af84e280d1aa2133165c59..e2e3a2602b6adf169e48c34337d76b4bd568d546 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send, ) ); +DECLARE_EVENT_CLASS(hclge_vf_cmd_template, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + + TP_ARGS(hw, desc, index, num), + + TP_STRUCT__entry(__field(u16, opcode) + __field(u16, flag) + __field(u16, retval) + __field(u16, rsv) + __field(int, index) + __field(int, num) + __string(pciname, pci_name(hw->cmq.csq.pdev)) + __array(u32, data, HCLGE_DESC_DATA_LEN)), + + TP_fast_assign(int i; + __entry->opcode = le16_to_cpu(desc->opcode); + __entry->flag = le16_to_cpu(desc->flag); + __entry->retval = le16_to_cpu(desc->retval); + __entry->rsv = le16_to_cpu(desc->rsv); + __entry->index = index; + __entry->num = num; + __assign_str(pciname, pci_name(hw->cmq.csq.pdev)); + for (i = 0; i < HCLGE_DESC_DATA_LEN; i++) + __entry->data[i] = le32_to_cpu(desc->data[i]);), + + TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s", + __get_str(pciname), __entry->opcode, + __entry->index, __entry->num, + __entry->flag, __entry->retval, __entry->rsv, + __print_array(__entry->data, + HCLGE_DESC_DATA_LEN, sizeof(u32))) +); + +DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + 
TP_ARGS(hw, desc, index, num)); + +DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get, + TP_PROTO(struct hclge_comm_hw *hw, + struct hclge_desc *desc, + int index, + int num), + TP_ARGS(hw, desc, index, num)); + #endif /* _HCLGEVF_TRACE_H_ */ /* This must be outside ifdef _HCLGEVF_TRACE_H */ diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index ed9d97a032f1cc64faacc32fcc102603af66504e..14a7b49e47601f37aee4bccac42320d4336b0204 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -211,4 +211,15 @@ config PTP_DFL_TOD To compile this driver as a module, choose M here: the module will be called ptp_dfl_tod. +config PTP_HISI + tristate "HiSilicon PTP sync platform driver" + help + PTP sync driver work on multichip system, eliminates the bus latency + between multichip, and provide a higher precision clock source. But + the clock source of PTP sync device is from the RTC of HNS3 ethernet + device, so, if you want the PTP sync device works, you must enable + HNS3 driver also. + + If unsure, say N. + endmenu diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index dea0cebd2303ed52f5eaf9afd85cfe15b9853cc0..8d21ee20787d3fa1ea9f4629c5b6eac5c8b9a631 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -20,3 +20,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o obj-$(CONFIG_PTP_1588_CLOCK_OCP) += ptp_ocp.o obj-$(CONFIG_PTP_DFL_TOD) += ptp_dfl_tod.o +obj-$(CONFIG_PTP_HISI) += ptp_hisi.o diff --git a/drivers/ptp/ptp_hisi.c b/drivers/ptp/ptp_hisi.c new file mode 100644 index 0000000000000000000000000000000000000000..d42459992b4cd977ad78090f2b6bd72be4e9c1df --- /dev/null +++ b/drivers/ptp/ptp_hisi.c @@ -0,0 +1,1027 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2022 Hisilicon Limited. 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef PTP_CLOCK_NAME_LEN +#define PTP_CLOCK_NAME_LEN 32 +#endif + +#define HISI_PTP_VERSION "22.10.2" + +#define HISI_PTP_NAME "hisi_ptp" +#define HISI_PTP_INT_NAME_LEN 32 + +#define HISI_PTP_DBGFS_STS_LEN 2048 +#define HISI_PTP_DBGFS_REG_LEN 0x10000 + +#define HISI_RES_T_PERI_SC 0 +#define HISI_RES_N_NET_SC 0 +#define HISI_RES_N_IO_SC 1 + +#define HISI_PTP_INIT_DONE 0 + +/* peri subctrl reg offset */ +#define PERI_SC_PTP_RESET_REQ 0xE18 +#define PERI_SC_PTP_RESET_DREQ 0xE1C +#define PERI_SC_LOCAL_TIMER_COMP_HIGH_ADDR 0x5000 +#define PERI_SC_LOCAL_TIMER_COMP_LOW_ADDR 0x5004 +#define PERI_SC_BAUD_VALUE_ADDR 0x5008 +#define PERI_SC_LOCAL_CNT_EN_ADDR 0x500C +#define PERI_SC_SYNC_ERR_COMP_HIGH_ADDR 0x5010 +#define PERI_SC_SYNC_ERR_COMP_LOW_ADDR 0x5014 +#define PERI_SC_CRC_EN_ADDR 0x5018 +#define PERI_SC_ONE_CYCLE_NUM_ADDR 0x5020 +#define PERI_SC_SYNC_ERR_CLR_ADDR 0x5024 +#define PERI_SC_RX_SHIFT_EN_ADDR 0x5028 +#define PERI_SC_TIMEL_CY_NUM_ADDR 0x502C +#define PERI_SC_INT_PTP_SYNC_ERR_ADDR 0x5044 +#define PERI_SC_INT_PTP_SYNC_ERR_MASK_ADDR 0x5048 +#define PERI_SC_INT_ORIGIN 0x504C +#define PERI_SC_CRC_ERR_COUNT 0x5050 +#define PERI_SC_CRC_INT_CONTRL_ADDR 0x5054 +#define PERI_SC_CAPTURE_PTP_TIME_COMP_HIGH 0x5058 +#define PERI_SC_CAPTURE_PTP_TIME_COMP_LOW 0x505C +#define PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_HIGH 0x5060 +#define PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_LOW 0x5064 +#define PERI_SC_CAPTURE_VLD 0x5068 +#define PERI_SC_LOCAL_TIME_LOW_ADDR 0x5070 +#define PERI_SC_LOCAL_TIME_HIGH_ADDR 0x5074 + +/* net subctrl reg offset */ +#define NET_SC_PTP_BAUD_VALUE_ADDR 0x2008 +#define NET_SC_PTP_COUNTER_EN_ADDR 0x200C +#define NET_SC_PTP_NORMAL_MODE_EN 0x2010 +#define NET_SC_PTP_WIRE_DELAY_CAL_EN 0x2014 +#define NET_SC_SAMPLE_DELAY_CFG_ADDR 0x2018 +#define NET_SC_PTP_TX_DFXBUS0_ADDR 0x201C +#define NET_SC_PTP_TX_DFXBUS1_ADDR 0x2020 
+#define NET_SC_PTP_TX_DFXBUS2_ADDR 0x2024 +#define NET_SC_PTP_TX_DFXBUS3_ADDR 0x2028 + +/* io subctrl reg offset */ +#define IO_SC_PTP_BAUD_VALUE_ADDR 0x2008 +#define IO_SC_PTP_COUNTER_EN_ADDR 0x200C +#define IO_SC_PTP_NORMAL_MODE_EN 0x2010 +#define IO_SC_PTP_WIRE_DELAY_CAL_EN 0x2014 +#define IO_SC_SAMPLE_DELAY_CFG_ADDR 0x2018 +#define IO_SC_PTP_TX_DFXBUS0_ADDR 0x201C +#define IO_SC_PTP_TX_DFXBUS1_ADDR 0x2020 +#define IO_SC_PTP_TX_DFXBUS2_ADDR 0x2024 +#define IO_SC_PTP_TX_DFXBUS3_ADDR 0x2028 + +/* default values */ +#define HISI_DEF_BAUD 0x1388 +#define HISI_DEF_TIME_COMP 0xB2432 +#define HISI_DEF_ERR_COMP 0xFFFFFFFF +#define HISI_DEF_ONE_CYCLE_NUM 0x50 + +#define HISI_PTP_TX_IDLE_MASK GENMASK(26, 23) + +#define HISI_PTP_RX_CRC_INT_EN BIT(0) +#define HISI_PTP_RX_CRC_CLR BIT(1) +#define HISI_PTP_RX_CRC_CLR_AND_EN \ + (HISI_PTP_RX_CRC_INT_EN | HISI_PTP_RX_CRC_INT_EN) +#define HISI_PTP_RX_CRC_CLR_AND_DISABLE HISI_PTP_RX_CRC_CLR + +#define HISI_PTP_SUP_CHK_CNT 32 +/* suppress check window and suppress time, unit: ms */ +#define HISI_PTP_SUP_CHK_THR 10 +#define HISI_PTP_SUP_TIME 100 + +enum HISI_PTP_TX_MODE { + HISI_PTP_CAL_MODE, + HISI_PTP_NORMAL_MODE, +}; + +struct hisi_ptp_rx { + struct list_head node; + char name[HISI_PTP_INT_NAME_LEN]; + struct device *dev; + u64 time_comp; /* internal wire time compensation value */ + int irq; + void __iomem *base; +}; + +struct hisi_ptp_tx { + struct device *dev; + void __iomem *base; + void __iomem *io_sc_base; +}; + +struct hisi_ptp_pdev { + struct list_head ptp_rx_list; + struct hisi_ptp_tx *ptp_tx; + u32 tx_cnt; + u32 rx_total; + u32 rx_cnt; + unsigned long flag; + void __iomem *rx_base; /* peri subctl base of chip 0 */ + u32 irq_cnt; + unsigned long last_jiffies; /* record last irq jiffies */ + struct timer_list suppress_timer; + struct ptp_clock *clock; + struct ptp_clock_info info; + rwlock_t rw_lock; + struct dentry *dbgfs_root; +}; + +struct hisi_ptp_reg { + const char *name; + u32 offset; +}; + +static struct 
hisi_ptp_pdev g_ptpdev; + +static uint err_threshold = HISI_DEF_ERR_COMP; +module_param(err_threshold, uint, 0644); +MODULE_PARM_DESC(err_threshold, "PTP time sync error threshold"); + +static struct hisi_ptp_pdev *hisi_ptp_get_pdev(struct ptp_clock_info *info) +{ + struct hisi_ptp_pdev *ptp = + container_of(info, struct hisi_ptp_pdev, info); + return ptp; +} + +/* This function should call under rw_lock */ +static void hisi_ptp_disable(struct hisi_ptp_pdev *ptp) +{ + struct hisi_ptp_rx *rx; + void __iomem *base; + + /* disable tx */ + if (ptp->ptp_tx && ptp->ptp_tx->base) { + base = ptp->ptp_tx->base; + writel(0, base + NET_SC_PTP_COUNTER_EN_ADDR); + } + + /* disable all totem rx and interrupt */ + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(0, base + PERI_SC_RX_SHIFT_EN_ADDR); + writel(1, base + PERI_SC_INT_PTP_SYNC_ERR_MASK_ADDR); + writel(HISI_PTP_RX_CRC_CLR_AND_DISABLE, + base + PERI_SC_CRC_INT_CONTRL_ADDR); + } +} + +/* This function should call under rw_lock */ +static void hisi_ptp_unmask_irq(struct hisi_ptp_pdev *ptp) +{ + struct hisi_ptp_rx *rx; + void __iomem *base; + + /* clear CRC errors and unmask all totem interrupt */ + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(HISI_PTP_RX_CRC_INT_EN, + base + PERI_SC_CRC_INT_CONTRL_ADDR); + writel(0, base + PERI_SC_INT_PTP_SYNC_ERR_MASK_ADDR); + } +} + +/* This function should call under rw_lock */ +static void hisi_ptp_wait_and_enable(struct hisi_ptp_pdev *ptp) +{ +#define HISI_PTP_TX_IDLE_WAIT_CNT 20 + void __iomem *nimbus_base; + struct hisi_ptp_rx *rx; + void __iomem *base; + int delay_cnt = 0; + + if (!ptp->ptp_tx || !ptp->ptp_tx->base) + return; + + /* wait for tx idle */ + nimbus_base = ptp->ptp_tx->base; + while (delay_cnt++ < HISI_PTP_TX_IDLE_WAIT_CNT) { + u32 dfx_bus0 = readl(nimbus_base + NET_SC_PTP_TX_DFXBUS0_ADDR); + + /* wait bit26:23 to 0 */ + if ((dfx_bus0 & HISI_PTP_TX_IDLE_MASK) == 0) + break; + + udelay(1); + } + + /* enable 
all totem interrupt and rx */ + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(1, base + PERI_SC_SYNC_ERR_CLR_ADDR); + writel(0, base + PERI_SC_SYNC_ERR_CLR_ADDR); + writel(1, base + PERI_SC_INT_PTP_SYNC_ERR_ADDR); + writel(1, base + PERI_SC_RX_SHIFT_EN_ADDR); + } + + /* enable tx */ + writel(1, nimbus_base + NET_SC_PTP_COUNTER_EN_ADDR); + + hisi_ptp_unmask_irq(ptp); +} + +/* This function should call under rw_lock */ +static bool hisi_ptp_need_suppress(struct hisi_ptp_pdev *ptp) +{ + if (time_is_before_jiffies(ptp->last_jiffies + + msecs_to_jiffies(HISI_PTP_SUP_CHK_THR))) { + ptp->last_jiffies = jiffies; + ptp->irq_cnt = 0; + return false; + } + + if (ptp->irq_cnt++ < HISI_PTP_SUP_CHK_CNT) + return false; + + return true; +} + +static irqreturn_t hisi_ptp_irq_handle(int irq, void *data) +{ + struct hisi_ptp_pdev *ptp = (struct hisi_ptp_pdev *)data; + + dev_dbg(ptp->ptp_tx->dev, "ptp time sync error, irq:%d\n", irq); + + write_lock(&ptp->rw_lock); + + hisi_ptp_disable(ptp); + + if (hisi_ptp_need_suppress(ptp)) { + mod_timer(&ptp->suppress_timer, + jiffies + msecs_to_jiffies(HISI_PTP_SUP_TIME)); + write_unlock(&ptp->rw_lock); + return IRQ_HANDLED; + } + + hisi_ptp_wait_and_enable(ptp); + + write_unlock(&ptp->rw_lock); + + return IRQ_HANDLED; +} + +static int hisi_ptp_get_rx_resource(struct platform_device *pdev, + struct hisi_ptp_pdev *ptp) +{ + struct hisi_ptp_rx *rx; + struct resource *peri; + unsigned long flags; + u32 rx_total = 0; + bool is_base_rx; + int ret; + + ret = device_property_read_u32(&pdev->dev, "rx_num", &rx_total); + if (ret) { + dev_err(&pdev->dev, "failed to read rx total property\n"); + return ret; + } + + rx = devm_kzalloc(&pdev->dev, sizeof(struct hisi_ptp_rx), GFP_KERNEL); + if (!rx) + return -ENOMEM; + + peri = platform_get_resource(pdev, IORESOURCE_MEM, HISI_RES_T_PERI_SC); + if (!peri) { + dev_err(&pdev->dev, "failed to get rx peri resource\n"); + return -EINVAL; + } + + rx->base = devm_ioremap(&pdev->dev, 
peri->start, resource_size(peri)); + if (!rx->base) { + dev_err(&pdev->dev, "failed to remap rx peri resource\n"); + return -ENOMEM; + } + + rx->irq = platform_get_irq(pdev, 0); + if (rx->irq < 0) { + dev_err(&pdev->dev, "failed to get irq, ret = %d\n", rx->irq); + return rx->irq; + } + snprintf(rx->name, HISI_PTP_INT_NAME_LEN, "%s-%d", HISI_PTP_NAME, + rx->irq); + ret = devm_request_irq(&pdev->dev, rx->irq, hisi_ptp_irq_handle, 0, + rx->name, ptp); + if (ret) { + dev_err(&pdev->dev, "failed to request irq(%d), ret = %d\n", + rx->irq, ret); + return ret; + } + + is_base_rx = device_property_present(&pdev->dev, "base_rx"); + + rx->dev = &pdev->dev; + + write_lock_irqsave(&ptp->rw_lock, flags); + + if (is_base_rx) + ptp->rx_base = rx->base; + + ptp->rx_cnt++; + + /* use the first rx device to init the global rx_total */ + if (ptp->rx_total == 0) + ptp->rx_total = rx_total; + + if (ptp->rx_total != rx_total || ptp->rx_cnt > ptp->rx_total) { + write_unlock_irqrestore(&ptp->rw_lock, flags); + dev_err(&pdev->dev, + "failed to probe rx device, please check the asl file!\n"); + dev_err(&pdev->dev, + "rx_total:%u, current rx_total:%u, rx_cnt:%u\n", + ptp->rx_total, rx_total, ptp->rx_cnt); + + return -EINVAL; + } + + list_add_tail(&rx->node, &ptp->ptp_rx_list); + + write_unlock_irqrestore(&ptp->rw_lock, flags); + + return 0; +} + +static int hisi_ptp_get_tx_resource(struct platform_device *pdev, + struct hisi_ptp_pdev *ptp) +{ + struct hisi_ptp_tx *tx; + struct resource *mem; + unsigned long flags; + + write_lock_irqsave(&ptp->rw_lock, flags); + /* use have only one tx device */ + if (ptp->tx_cnt) { + write_unlock_irqrestore(&ptp->rw_lock, flags); + dev_err(&pdev->dev, + "failed to probe tx device, more than one tx device found, please check the asl file!\n"); + return -EINVAL; + } + write_unlock_irqrestore(&ptp->rw_lock, flags); + + tx = devm_kzalloc(&pdev->dev, sizeof(struct hisi_ptp_tx), GFP_KERNEL); + if (!tx) + return -ENOMEM; + + mem = platform_get_resource(pdev, 
IORESOURCE_MEM, HISI_RES_N_NET_SC); + if (!mem) { + dev_err(&pdev->dev, "failed to get tx net sc resource\n"); + return -EINVAL; + } + + tx->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (!tx->base) { + dev_err(&pdev->dev, "failed to remap tx net sc resource\n"); + return -ENOMEM; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, HISI_RES_N_IO_SC); + if (!mem) { + dev_err(&pdev->dev, "failed to get tx nimbus io sc resource\n"); + return -EINVAL; + } + + tx->io_sc_base = devm_ioremap(&pdev->dev, mem->start, + resource_size(mem)); + if (!tx->io_sc_base) { + dev_err(&pdev->dev, "failed to remap tx nimbus io resource\n"); + return -ENOMEM; + } + + tx->dev = &pdev->dev; + + write_lock_irqsave(&ptp->rw_lock, flags); + ptp->tx_cnt++; + ptp->ptp_tx = tx; + write_unlock_irqrestore(&ptp->rw_lock, flags); + + return 0; +} + +static void hisi_ptp_cal_time_start(struct hisi_ptp_pdev *ptp) +{ + void __iomem *io_sc_base; + struct hisi_ptp_rx *rx; + void __iomem *base; + + /* config all rx to enter calculation mode. */ + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(1, base + PERI_SC_PTP_RESET_REQ); + writel(1, base + PERI_SC_PTP_RESET_DREQ); + writel(0, base + PERI_SC_LOCAL_TIMER_COMP_HIGH_ADDR); + writel(0, base + PERI_SC_LOCAL_TIMER_COMP_LOW_ADDR); + writel(1, base + PERI_SC_CRC_EN_ADDR); + writel(0, base + PERI_SC_LOCAL_CNT_EN_ADDR); + writel(1, base + PERI_SC_RX_SHIFT_EN_ADDR); + } + + /* config tx to enter calculation mode. 
*/ + base = ptp->ptp_tx->base; + io_sc_base = ptp->ptp_tx->io_sc_base; + writel(HISI_PTP_CAL_MODE, io_sc_base + IO_SC_PTP_NORMAL_MODE_EN); + writel(HISI_PTP_CAL_MODE, base + NET_SC_PTP_NORMAL_MODE_EN); + + writel(HISI_DEF_BAUD, io_sc_base + IO_SC_PTP_BAUD_VALUE_ADDR); + writel(1, io_sc_base + IO_SC_PTP_COUNTER_EN_ADDR); + writel(0, io_sc_base + IO_SC_PTP_WIRE_DELAY_CAL_EN); + writel(1, io_sc_base + IO_SC_PTP_WIRE_DELAY_CAL_EN); +} + +static void hisi_ptp_cal_time_get(struct hisi_ptp_pdev *ptp) +{ +#define HISI_PTP_MAX_WAIT_CNT 60 + struct hisi_ptp_rx *rx; + void __iomem *base; + int cnt; + u32 rd_l; + u32 rd_h; + u32 td_l; + u32 td_h; + u64 rd; + u64 td; + + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + rx->time_comp = HISI_DEF_TIME_COMP; + + cnt = 0; + do { + if (readl(base + PERI_SC_CAPTURE_VLD) == 0) { + mdelay(1); + continue; + } + + rd_h = readl(base + + PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_HIGH); + rd_l = readl(base + + PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_LOW); + td_h = readl(base + PERI_SC_CAPTURE_PTP_TIME_COMP_HIGH); + td_l = readl(base + PERI_SC_CAPTURE_PTP_TIME_COMP_LOW); + + rd = (u64)rd_h << 32 | rd_l; + td = (u64)td_h << 32 | td_l; + + if (!rd || !td || rd < td) { + mdelay(1); + continue; + } + + rx->time_comp = rd - td; + break; + } while (cnt++ <= HISI_PTP_MAX_WAIT_CNT); + } +} + +static void hisi_ptp_cal_time_end(struct hisi_ptp_pdev *ptp) +{ + void __iomem *io_sc_base; + struct hisi_ptp_rx *rx; + void __iomem *base; + + /* config all rx to exit calculation mode. */ + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(0, base + PERI_SC_RX_SHIFT_EN_ADDR); + } + + /* config tx to exit calculation mode. 
*/ + base = ptp->ptp_tx->base; + io_sc_base = ptp->ptp_tx->io_sc_base; + + writel(0, io_sc_base + IO_SC_PTP_COUNTER_EN_ADDR); + writel(HISI_PTP_NORMAL_MODE, io_sc_base + IO_SC_PTP_NORMAL_MODE_EN); + writel(HISI_PTP_NORMAL_MODE, base + NET_SC_PTP_NORMAL_MODE_EN); +} + +/* This function should call under rw_lock */ +static void hisi_ptp_cal_time_comp(struct hisi_ptp_pdev *ptp) +{ + hisi_ptp_cal_time_start(ptp); + hisi_ptp_cal_time_get(ptp); + hisi_ptp_cal_time_end(ptp); +} + +/* This function should call under rw_lock */ +static void hisi_ptp_peri_rx_init(struct hisi_ptp_pdev *ptp) +{ + struct hisi_ptp_rx *rx; + void __iomem *base; + + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + base = rx->base; + writel(1, base + PERI_SC_CRC_EN_ADDR); + writel(upper_32_bits(rx->time_comp), + base + PERI_SC_LOCAL_TIMER_COMP_HIGH_ADDR); + writel(lower_32_bits(rx->time_comp), + base + PERI_SC_LOCAL_TIMER_COMP_LOW_ADDR); + writel(err_threshold, + base + PERI_SC_SYNC_ERR_COMP_LOW_ADDR); + writel(1, base + PERI_SC_CRC_INT_CONTRL_ADDR); + writel(0, base + PERI_SC_SYNC_ERR_CLR_ADDR); + writel(1, base + PERI_SC_LOCAL_CNT_EN_ADDR); + writel(1, base + PERI_SC_RX_SHIFT_EN_ADDR); + } +} + +/* This function should call under rw_lock */ +static void hisi_ptp_net_tx_init(struct hisi_ptp_pdev *ptp) +{ + void __iomem *base; + + base = ptp->ptp_tx->base; + writel(1, base + NET_SC_PTP_COUNTER_EN_ADDR); +} + +static int hisi_ptp_adjfine(struct ptp_clock_info *ptp_info, long delta) +{ + return -EOPNOTSUPP; +} + +static int hisi_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + return -EOPNOTSUPP; +} + +static int hisi_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + return -EOPNOTSUPP; +} + +static int hisi_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct hisi_ptp_pdev *ptp = hisi_ptp_get_pdev(ptp_info); + unsigned long flags; + u32 hi = UINT_MAX; + u32 lo = UINT_MAX; + u64 ns; + + 
read_lock_irqsave(&ptp->rw_lock, flags); + + if (ptp->rx_base) { + hi = readl(ptp->rx_base + PERI_SC_LOCAL_TIME_HIGH_ADDR); + lo = readl(ptp->rx_base + PERI_SC_LOCAL_TIME_LOW_ADDR); + } + + read_unlock_irqrestore(&ptp->rw_lock, flags); + + ns = (u64)hi * NSEC_PER_SEC + lo; + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int hisi_ptp_create_clock(struct hisi_ptp_pdev *ptp) +{ + dev_info(ptp->ptp_tx->dev, "register ptp clock\n"); + + snprintf(ptp->info.name, PTP_CLOCK_NAME_LEN, "%s", HISI_PTP_NAME); + ptp->info.owner = THIS_MODULE; + ptp->info.adjfine = hisi_ptp_adjfine; + ptp->info.adjtime = hisi_ptp_adjtime; + ptp->info.settime64 = hisi_ptp_settime; + ptp->info.gettimex64 = hisi_ptp_gettime; + ptp->clock = ptp_clock_register(&ptp->info, ptp->ptp_tx->dev); + if (IS_ERR(ptp->clock)) { + dev_err(ptp->ptp_tx->dev, + "failed to register ptp clock, ret = %ld\n", + PTR_ERR(ptp->clock)); + return PTR_ERR(ptp->clock); + } + + return 0; +} + +static void hisi_ptp_timer(struct timer_list *t) +{ + struct hisi_ptp_pdev *ptp = from_timer(ptp, t, suppress_timer); + unsigned long flags; + + write_lock_irqsave(&ptp->rw_lock, flags); + + dev_dbg(ptp->ptp_tx->dev, "ptp timer timeout handler.\n"); + + ptp->last_jiffies = jiffies; + ptp->irq_cnt = 0; + + hisi_ptp_wait_and_enable(ptp); + + write_unlock_irqrestore(&ptp->rw_lock, flags); +} + +static int hisi_ptp_probe(struct platform_device *pdev) +{ + struct hisi_ptp_pdev *ptp = &g_ptpdev; + unsigned long flags; + const char *type; + int ret; + + dev_info(&pdev->dev, "ptp probe start\n"); + + ret = device_property_read_string(&pdev->dev, "type", &type); + if (ret) { + dev_err(&pdev->dev, "failed to read device type, ret = %d\n", + ret); + return ret; + } + + if (!memcmp(type, "rx", strlen("rx"))) { + ret = hisi_ptp_get_rx_resource(pdev, ptp); + } else if (!memcmp(type, "tx", strlen("tx"))) { + ret = hisi_ptp_get_tx_resource(pdev, ptp); + } else { + dev_err(&pdev->dev, + "failed to probe unknown device, type: %s\n", + type); + 
ret = -EINVAL; + } + if (ret) + return ret; + + write_lock_irqsave(&ptp->rw_lock, flags); + + if (ptp->rx_total == 0 || ptp->rx_total != ptp->rx_cnt || + ptp->tx_cnt != 1) { + write_unlock_irqrestore(&ptp->rw_lock, flags); + dev_info(&pdev->dev, + "waiting for devices...rx total:%u, now:%u. tx total:1, now:%u\n", + ptp->rx_total, ptp->rx_cnt, ptp->tx_cnt); + return 0; + } + + if (!ptp->rx_base) { + write_unlock_irqrestore(&ptp->rw_lock, flags); + dev_err(&pdev->dev, + "failed to probe, no base rx device, please check the asl file!\n"); + return -EINVAL; + } + + hisi_ptp_disable(ptp); + hisi_ptp_cal_time_comp(ptp); + hisi_ptp_peri_rx_init(ptp); + hisi_ptp_net_tx_init(ptp); + hisi_ptp_unmask_irq(ptp); + + write_unlock_irqrestore(&ptp->rw_lock, flags); + + ret = hisi_ptp_create_clock(ptp); + if (ret) { + write_lock_irqsave(&ptp->rw_lock, flags); + hisi_ptp_disable(ptp); + write_unlock_irqrestore(&ptp->rw_lock, flags); + return ret; + } + + set_bit(HISI_PTP_INIT_DONE, &ptp->flag); + + dev_info(&pdev->dev, "ptp probe end\n"); + return 0; +} + +static int hisi_ptp_remove(struct platform_device *pdev) +{ + struct hisi_ptp_pdev *ptp = &g_ptpdev; + struct hisi_ptp_rx *rx; + unsigned long flags; + + if (test_and_clear_bit(HISI_PTP_INIT_DONE, &ptp->flag)) { + ptp_clock_unregister(ptp->clock); + ptp->clock = NULL; + + write_lock_irqsave(&ptp->rw_lock, flags); + hisi_ptp_disable(ptp); + write_unlock_irqrestore(&ptp->rw_lock, flags); + + dev_info(&pdev->dev, "unregister ptp clock\n"); + } + + write_lock_irqsave(&ptp->rw_lock, flags); + if (ptp->ptp_tx && ptp->ptp_tx->dev == &pdev->dev) { + ptp->tx_cnt--; + ptp->ptp_tx = NULL; + dev_info(&pdev->dev, "remove tx ptp device\n"); + } else { + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + if (rx->dev == &pdev->dev) { + ptp->rx_cnt--; + list_del(&rx->node); + dev_info(&pdev->dev, "remove rx ptp device\n"); + break; + } + } + } + write_unlock_irqrestore(&ptp->rw_lock, flags); + + return 0; +} + +static const struct acpi_device_id 
hisi_ptp_acpi_match[] = { + { "HISI0411", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, hisi_ptp_acpi_match); + +static struct platform_driver hisi_ptp_driver = { + .probe = hisi_ptp_probe, + .remove = hisi_ptp_remove, + .driver = { + .name = HISI_PTP_NAME, + .acpi_match_table = ACPI_PTR(hisi_ptp_acpi_match), + }, +}; + +static ssize_t hisi_ptp_dbg_read_state(struct file *filp, char __user *buf, + size_t cnt, loff_t *ppos) +{ + struct hisi_ptp_pdev *ptp = filp->private_data; + struct hisi_ptp_rx *rx; + unsigned long flags; + ssize_t size = 0; + char *read_buf; + int pos = 0; + int len; + + if (*ppos < 0) + return -EINVAL; + if (cnt <= 0) + return 0; + if (!access_ok(buf, cnt)) + return -EFAULT; + + read_buf = kvzalloc(HISI_PTP_DBGFS_STS_LEN, GFP_KERNEL); + if (!read_buf) + return -ENOMEM; + + len = HISI_PTP_DBGFS_STS_LEN; + + write_lock_irqsave(&ptp->rw_lock, flags); + pos += scnprintf(read_buf + pos, len - pos, "error threshold: %#x\n", + err_threshold); + pos += scnprintf(read_buf + pos, len - pos, "tx count: %u\n", + ptp->tx_cnt); + pos += scnprintf(read_buf + pos, len - pos, "rx total: %u\n", + ptp->rx_total); + pos += scnprintf(read_buf + pos, len - pos, "rx count: %u\n", + ptp->rx_cnt); + pos += scnprintf(read_buf + pos, len - pos, "irq count: %u\n", + ptp->irq_cnt); + pos += scnprintf(read_buf + pos, len - pos, "irq last jiffies: %lu\n", + ptp->last_jiffies); + + list_for_each_entry(rx, &ptp->ptp_rx_list, node) { + pos += scnprintf(read_buf + pos, len - pos, "name: %s\n", + rx->name); + pos += scnprintf(read_buf + pos, len - pos, "time comp: %#llx\n", + rx->time_comp); + pos += scnprintf(read_buf + pos, len - pos, "irq: %d\n", + rx->irq); + } + write_unlock_irqrestore(&ptp->rw_lock, flags); + + size = simple_read_from_buffer(buf, cnt, ppos, read_buf, + strlen(read_buf)); + + kvfree(read_buf); + + return size; +} + +static const struct hisi_ptp_reg hisi_ptp_tx_reg[] = { + {"NET_SC_PTP_BAUD_VALUE_ADDR ", + NET_SC_PTP_BAUD_VALUE_ADDR}, + 
{"NET_SC_PTP_COUNTER_EN_ADDR ", + NET_SC_PTP_COUNTER_EN_ADDR}, + {"NET_SC_PTP_NORMAL_MODE_EN ", + NET_SC_PTP_NORMAL_MODE_EN}, + {"NET_SC_PTP_WIRE_DELAY_CAL_EN", + NET_SC_PTP_WIRE_DELAY_CAL_EN}, + {"NET_SC_SAMPLE_DELAY_CFG_ADDR", + NET_SC_SAMPLE_DELAY_CFG_ADDR}, + {"NET_SC_PTP_TX_DFXBUS0_ADDR ", + NET_SC_PTP_TX_DFXBUS0_ADDR}, + {"NET_SC_PTP_TX_DFXBUS1_ADDR ", + NET_SC_PTP_TX_DFXBUS1_ADDR}, + {"NET_SC_PTP_TX_DFXBUS2_ADDR ", + NET_SC_PTP_TX_DFXBUS2_ADDR}, + {"NET_SC_PTP_TX_DFXBUS3_ADDR ", + NET_SC_PTP_TX_DFXBUS3_ADDR} +}; + +static const struct hisi_ptp_reg hisi_ptp_tx_io_reg[] = { + {"IO_SC_PTP_BAUD_VALUE_ADDR ", + IO_SC_PTP_BAUD_VALUE_ADDR}, + {"IO_SC_PTP_COUNTER_EN_ADDR ", + IO_SC_PTP_COUNTER_EN_ADDR}, + {"IO_SC_PTP_NORMAL_MODE_EN ", + IO_SC_PTP_NORMAL_MODE_EN}, + {"IO_SC_PTP_WIRE_DELAY_CAL_EN", + IO_SC_PTP_WIRE_DELAY_CAL_EN}, + {"IO_SC_SAMPLE_DELAY_CFG_ADDR", + IO_SC_SAMPLE_DELAY_CFG_ADDR}, + {"IO_SC_PTP_TX_DFXBUS0_ADDR ", + IO_SC_PTP_TX_DFXBUS0_ADDR}, + {"IO_SC_PTP_TX_DFXBUS1_ADDR ", + IO_SC_PTP_TX_DFXBUS1_ADDR}, + {"IO_SC_PTP_TX_DFXBUS2_ADDR ", + IO_SC_PTP_TX_DFXBUS2_ADDR}, + {"IO_SC_PTP_TX_DFXBUS3_ADDR ", + IO_SC_PTP_TX_DFXBUS3_ADDR} +}; + +static const struct hisi_ptp_reg hisi_ptp_rx_reg[] = { + {"PERI_SC_LOCAL_TIMER_COMP_HIGH_ADDR ", + PERI_SC_LOCAL_TIMER_COMP_HIGH_ADDR}, + {"PERI_SC_LOCAL_TIMER_COMP_LOW_ADDR ", + PERI_SC_LOCAL_TIMER_COMP_LOW_ADDR}, + {"PERI_SC_BAUD_VALUE_ADDR ", + PERI_SC_BAUD_VALUE_ADDR}, + {"PERI_SC_LOCAL_CNT_EN_ADDR ", + PERI_SC_LOCAL_CNT_EN_ADDR}, + {"PERI_SC_SYNC_ERR_COMP_HIGH_ADDR ", + PERI_SC_SYNC_ERR_COMP_HIGH_ADDR}, + {"PERI_SC_SYNC_ERR_COMP_LOW_ADDR ", + PERI_SC_SYNC_ERR_COMP_LOW_ADDR}, + {"PERI_SC_CRC_EN_ADDR ", + PERI_SC_CRC_EN_ADDR}, + {"PERI_SC_ONE_CYCLE_NUM_ADDR ", + PERI_SC_ONE_CYCLE_NUM_ADDR}, + {"PERI_SC_SYNC_ERR_CLR_ADDR ", + PERI_SC_SYNC_ERR_CLR_ADDR}, + {"PERI_SC_RX_SHIFT_EN_ADDR ", + PERI_SC_RX_SHIFT_EN_ADDR}, + {"PERI_SC_TIMEL_CY_NUM_ADDR ", + PERI_SC_TIMEL_CY_NUM_ADDR}, + {"PERI_SC_INT_PTP_SYNC_ERR_ADDR ", + 
PERI_SC_INT_PTP_SYNC_ERR_ADDR}, + {"PERI_SC_INT_PTP_SYNC_ERR_MASK_ADDR ", + PERI_SC_INT_PTP_SYNC_ERR_MASK_ADDR}, + {"PERI_SC_INT_ORIGIN ", + PERI_SC_INT_ORIGIN}, + {"PERI_SC_CRC_ERR_COUNT ", + PERI_SC_CRC_ERR_COUNT}, + {"PERI_SC_CRC_INT_CONTRL_ADDR ", + PERI_SC_CRC_INT_CONTRL_ADDR}, + {"PERI_SC_CAPTURE_PTP_TIME_COMP_HIGH ", + PERI_SC_CAPTURE_PTP_TIME_COMP_HIGH}, + {"PERI_SC_CAPTURE_PTP_TIME_COMP_LOW ", + PERI_SC_CAPTURE_PTP_TIME_COMP_LOW}, + {"PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_HIGH", + PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_HIGH}, + {"PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_LOW ", + PERI_SC_CAPTURE_SYSTEM_COUNTER_BIN_LOW}, + {"PERI_SC_CAPTURE_VLD ", + PERI_SC_CAPTURE_VLD}, + {"PERI_SC_LOCAL_TIME_LOW_ADDR ", + PERI_SC_LOCAL_TIME_LOW_ADDR}, + {"PERI_SC_LOCAL_TIME_HIGH_ADDR ", + PERI_SC_LOCAL_TIME_HIGH_ADDR} +}; + +static void hisi_ptp_dump_reg(void __iomem *base, + const struct hisi_ptp_reg *reg, int reg_len, + char *buf, int len, int *pos) +{ + int i; + + for (i = 0; i < reg_len; i++) + *pos += scnprintf(buf + *pos, len - *pos, "%s : 0x%08x\n", + reg[i].name, readl(base + reg[i].offset)); +} + +static ssize_t hisi_ptp_dbg_read_reg(struct file *filp, char __user *buf, + size_t cnt, loff_t *ppos) +{ + struct hisi_ptp_pdev *ptp = filp->private_data; + struct hisi_ptp_rx *rx; + unsigned long flags; + ssize_t size = 0; + char *read_buf; + int pos = 0; + int len; + + if (*ppos < 0) + return -EINVAL; + if (cnt <= 0) + return 0; + if (!access_ok(buf, cnt)) + return -EFAULT; + + read_buf = kvzalloc(HISI_PTP_DBGFS_REG_LEN, GFP_KERNEL); + if (!read_buf) + return -ENOMEM; + + len = HISI_PTP_DBGFS_REG_LEN; + + write_lock_irqsave(&ptp->rw_lock, flags); + if (ptp->ptp_tx && ptp->ptp_tx->base) + hisi_ptp_dump_reg(ptp->ptp_tx->base, hisi_ptp_tx_reg, + ARRAY_SIZE(hisi_ptp_tx_reg), + read_buf, len, &pos); + + if (ptp->ptp_tx && ptp->ptp_tx->io_sc_base) + hisi_ptp_dump_reg(ptp->ptp_tx->io_sc_base, hisi_ptp_tx_io_reg, + ARRAY_SIZE(hisi_ptp_tx_io_reg), + read_buf, len, &pos); + + 
list_for_each_entry(rx, &ptp->ptp_rx_list, node) + hisi_ptp_dump_reg(rx->base, hisi_ptp_rx_reg, + ARRAY_SIZE(hisi_ptp_rx_reg), + read_buf, len, &pos); + + write_unlock_irqrestore(&ptp->rw_lock, flags); + + size = simple_read_from_buffer(buf, cnt, ppos, read_buf, + strlen(read_buf)); + + kvfree(read_buf); + + return size; +} + +static const struct file_operations hisi_ptp_dbg_state_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hisi_ptp_dbg_read_state, +}; + +static const struct file_operations hisi_ptp_dbg_reg_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hisi_ptp_dbg_read_reg, +}; + +static void hisi_ptp_dbgfs_init(struct hisi_ptp_pdev *ptp) +{ + ptp->dbgfs_root = debugfs_create_dir(HISI_PTP_NAME, NULL); + debugfs_create_file("state", 0400, ptp->dbgfs_root, ptp, + &hisi_ptp_dbg_state_ops); + debugfs_create_file("reg", 0400, ptp->dbgfs_root, ptp, + &hisi_ptp_dbg_reg_ops); +} + +static void hisi_ptp_dbgfs_uninit(struct hisi_ptp_pdev *ptp) +{ + debugfs_remove_recursive(ptp->dbgfs_root); +} + +static int __init hisi_ptp_module_init(void) +{ + struct hisi_ptp_pdev *ptp = &g_ptpdev; + int ret; + + memset(ptp, 0, sizeof(struct hisi_ptp_pdev)); + rwlock_init(&ptp->rw_lock); + INIT_LIST_HEAD(&ptp->ptp_rx_list); + + timer_setup(&ptp->suppress_timer, hisi_ptp_timer, 0); + + ret = platform_driver_register(&hisi_ptp_driver); + if (ret) { + del_timer_sync(&ptp->suppress_timer); + pr_err("failed to register ptp platform driver, ret = %d\n", + ret); + return ret; + } + + hisi_ptp_dbgfs_init(ptp); + + pr_info("hisi ptp platform driver inited, version: %s\n", + HISI_PTP_VERSION); + + return 0; +} +module_init(hisi_ptp_module_init); + +static void __exit hisi_ptp_module_exit(void) +{ + struct hisi_ptp_pdev *ptp = &g_ptpdev; + + pr_info("hisi ptp platform driver exit\n"); + + hisi_ptp_dbgfs_uninit(ptp); + + platform_driver_unregister(&hisi_ptp_driver); + + if (ptp->suppress_timer.function) + del_timer_sync(&ptp->suppress_timer); + + memset(ptp, 0, 
sizeof(struct hisi_ptp_pdev)); +} +module_exit(hisi_ptp_module_exit); + +MODULE_DESCRIPTION("HiSilicon PTP driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(HISI_PTP_VERSION); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 9346cd44814d6aa0b69968baad4a261dfc6adbf9..e0b333d9e9991fb135dbbfce73632b6d6577fc39 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -284,6 +284,18 @@ enum { SFF8024_ID_QSFP_8438 = 0x0c, SFF8024_ID_QSFP_8436_8636 = 0x0d, SFF8024_ID_QSFP28_8636 = 0x11, + SFF8024_ID_CXP2 = 0x12, + SFF8024_ID_CDFP = 0x13, + SFF8024_ID_HD4X_FANOUT = 0x14, + SFF8024_ID_HD8X_FANOUT = 0x15, + SFF8024_ID_CDFP_S3 = 0x16, + SFF8024_ID_MICRO_QSFP = 0x17, + SFF8024_ID_QSFP_DD = 0x18, + SFF8024_ID_OSFP = 0x19, + SFF8024_ID_DSFP = 0x1B, + SFF8024_ID_QSFP_PLUS_CMIS = 0x1E, + SFF8024_ID_SFP_DD_CMIS = 0x1F, + SFF8024_ID_SFP_PLUS_CMIS = 0x20, SFF8024_ENCODING_UNSPEC = 0x00, SFF8024_ENCODING_8B10B = 0x01, diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index f7fba0dc87e545350f8199c84023207f58054930..410bb75552bac4d74656f80cf4a139f4b89d7509 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1017,6 +1017,28 @@ struct ethtool_usrip4_spec { __u8 proto; }; +/** + * struct ethtool_vxlan4_spec - general flow specification for VxLAN IPv4 + * @vni: VxLAN network identifier + * @dst: Inner destination eth addr + * @src: Inner source eth addr + * @eth_type: Inner ethernet type + * @tos: Inner type-of-service + * @l4_proto: Inner transport protocol number + * @ip4src: Inner source host + * @ip4dst: Inner destination host + */ +struct ethtool_vxlan4_spec { + __be32 vni; + __u8 dst[ETH_ALEN]; + __u8 src[ETH_ALEN]; + __be16 eth_type; + __u8 tos; + __u8 l4_proto; + __be32 ip4src; + __be32 ip4dst; +}; + /** * struct ethtool_tcpip6_spec - flow specification for TCP/IPv6 etc. 
* @ip6src: Source host @@ -1067,6 +1089,28 @@ struct ethtool_usrip6_spec { __u8 l4_proto; }; +/** + * struct ethtool_vxlan6_spec - general flow specification for VxLAN IPv6 + * @vni: VxLAN network identifier + * @dst: Inner destination eth addr + * @src: Inner source eth addr + * @eth_type: Inner ethernet type + * @tclass: Inner traffic Class + * @l4_proto: Inner transport protocol number + * @ip6src: Inner source host + * @ip6dst: Inner destination host + */ +struct ethtool_vxlan6_spec { + __be32 vni; + __u8 dst[ETH_ALEN]; + __u8 src[ETH_ALEN]; + __be16 eth_type; + __u8 tclass; + __u8 l4_proto; + __be32 ip6src[4]; + __be32 ip6dst[4]; +}; + union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec; struct ethtool_tcpip4_spec udp_ip4_spec; @@ -1074,12 +1118,14 @@ union ethtool_flow_union { struct ethtool_ah_espip4_spec ah_ip4_spec; struct ethtool_ah_espip4_spec esp_ip4_spec; struct ethtool_usrip4_spec usr_ip4_spec; + struct ethtool_vxlan4_spec vxlan_ip4_spec; struct ethtool_tcpip6_spec tcp_ip6_spec; struct ethtool_tcpip6_spec udp_ip6_spec; struct ethtool_tcpip6_spec sctp_ip6_spec; struct ethtool_ah_espip6_spec ah_ip6_spec; struct ethtool_ah_espip6_spec esp_ip6_spec; struct ethtool_usrip6_spec usr_ip6_spec; + struct ethtool_vxlan6_spec vxlan_ip6_spec; struct ethhdr ether_spec; __u8 hdata[52]; }; @@ -2011,6 +2057,8 @@ static inline int ethtool_validate_duplex(__u8 duplex) #define IPV4_FLOW 0x10 /* hash only */ #define IPV6_FLOW 0x11 /* hash only */ #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ +#define VXLAN_V4_FLOW 0x43 /* spec only (vxlan_ip4_spec) */ +#define VXLAN_V6_FLOW 0x44 /* spec only (vxlan_ip6_spec) */ /* Flag to enable additional fields in struct ethtool_rx_flow_spec */ #define FLOW_EXT 0x80000000 #define FLOW_MAC_EXT 0x40000000