Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c')
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c	1063
1 file changed, 869 insertions, 194 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..f7637c08bb3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,9 @@
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
 
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+#define HCLGE_BUF_SIZE_UNIT	256
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -48,6 +50,62 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
 
+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
+					 HCLGE_CMDQ_TX_ADDR_H_REG,
+					 HCLGE_CMDQ_TX_DEPTH_REG,
+					 HCLGE_CMDQ_TX_TAIL_REG,
+					 HCLGE_CMDQ_TX_HEAD_REG,
+					 HCLGE_CMDQ_RX_ADDR_L_REG,
+					 HCLGE_CMDQ_RX_ADDR_H_REG,
+					 HCLGE_CMDQ_RX_DEPTH_REG,
+					 HCLGE_CMDQ_RX_TAIL_REG,
+					 HCLGE_CMDQ_RX_HEAD_REG,
+					 HCLGE_VECTOR0_CMDQ_SRC_REG,
+					 HCLGE_CMDQ_INTR_STS_REG,
+					 HCLGE_CMDQ_INTR_EN_REG,
+					 HCLGE_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+					   HCLGE_VECTOR0_OTER_EN_REG,
+					   HCLGE_MISC_RESET_STS_REG,
+					   HCLGE_MISC_VECTOR_INT_STS,
+					   HCLGE_GLOBAL_RESET_REG,
+					   HCLGE_FUN_RST_ING,
+					   HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+					 HCLGE_RING_RX_ADDR_H_REG,
+					 HCLGE_RING_RX_BD_NUM_REG,
+					 HCLGE_RING_RX_BD_LENGTH_REG,
+					 HCLGE_RING_RX_MERGE_EN_REG,
+					 HCLGE_RING_RX_TAIL_REG,
+					 HCLGE_RING_RX_HEAD_REG,
+					 HCLGE_RING_RX_FBD_NUM_REG,
+					 HCLGE_RING_RX_OFFSET_REG,
+					 HCLGE_RING_RX_FBD_OFFSET_REG,
+					 HCLGE_RING_RX_STASH_REG,
+					 HCLGE_RING_RX_BD_ERR_REG,
+					 HCLGE_RING_TX_ADDR_L_REG,
+					 HCLGE_RING_TX_ADDR_H_REG,
+					 HCLGE_RING_TX_BD_NUM_REG,
+					 HCLGE_RING_TX_PRIORITY_REG,
+					 HCLGE_RING_TX_TC_REG,
+					 HCLGE_RING_TX_MERGE_EN_REG,
+					 HCLGE_RING_TX_TAIL_REG,
+					 HCLGE_RING_TX_HEAD_REG,
+					 HCLGE_RING_TX_FBD_NUM_REG,
+					 HCLGE_RING_TX_OFFSET_REG,
+					 HCLGE_RING_TX_EBD_NUM_REG,
+					 HCLGE_RING_TX_EBD_OFFSET_REG,
+					 HCLGE_RING_TX_BD_ERR_REG,
+					 HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+					     HCLGE_TQP_INTR_GL0_REG,
+					     HCLGE_TQP_INTR_GL1_REG,
+					     HCLGE_TQP_INTR_GL2_REG,
+					     HCLGE_TQP_INTR_RL_REG};
+
 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
 	"App Loopback test",
 	"Serdes serial Loopback test",
@@ -631,6 +689,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
+	if (req->tx_buf_size)
+		hdev->tx_buf_size =
+			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+	if (req->dv_buf_size)
+		hdev->dv_buf_size =
+			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->roce_base_msix_offset =
 			hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
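The hunk above falls back to driver defaults when firmware reports a zero tx/dv buffer size, then rounds the result up to the new 256-byte allocation unit. A minimal standalone sketch of that fallback-and-round-up behaviour (type names are simplified and the default value below is a placeholder, not the hardware's):

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE_UNIT	256U
#define DEFAULT_TX_BUF	0x4000U	/* illustrative default, not the real HCLGE_DEFAULT_TX_BUF */

/* same result as the kernel's roundup() macro for positive values */
static uint32_t roundup_u32(uint32_t x, uint32_t unit)
{
	return (x + unit - 1) / unit * unit;
}

static uint32_t resolve_buf_size(uint16_t reported, unsigned int shift)
{
	/* zero from firmware means "use the driver default" */
	uint32_t size = reported ? ((uint32_t)reported << shift)
				 : DEFAULT_TX_BUF;

	return roundup_u32(size, BUF_SIZE_UNIT);
}

int main(void)
{
	printf("%u\n", (unsigned)resolve_buf_size(0, 0));	/* default */
	printf("%u\n", (unsigned)resolve_buf_size(300, 0));	/* 512 */
	return 0;
}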
@@ -886,7 +960,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 		hdev->pfc_max = hdev->tc_max;
 	}
 
-	hdev->tm_info.num_tc = hdev->tc_max;
+	hdev->tm_info.num_tc = 1;
 
 	/* Currently does not support uncontiguous tc */
 	for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -921,6 +995,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+	struct hclge_cfg_gro_status_cmd *req;
+	struct hclge_desc desc;
+	int ret;
+
+	if (!hnae3_dev_gro_supported(hdev))
+		return 0;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+	req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		dev_err(&hdev->pdev->dev,
+			"GRO hardware config cmd failed, ret = %d\n", ret);
+
+	return ret;
+}
+
 static int hclge_alloc_tqps(struct hclge_dev *hdev)
 {
 	struct hclge_tqp *tqp;
@@ -1144,6 +1240,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 	for (i = 0; i < num_vport; i++) {
 		vport->back = hdev;
 		vport->vport_id = i;
+		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 
 		if (i == 0)
 			ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1289,40 +1386,51 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 {
 	u32 shared_buf_min, shared_buf_tc, shared_std;
 	int tc_num, pfc_enable_num;
-	u32 shared_buf;
+	u32 shared_buf, aligned_mps;
 	u32 rx_priv;
 	int i;
 
 	tc_num = hclge_get_tc_num(hdev);
 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
+	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
 
 	if (hnae3_dev_dcb_supported(hdev))
-		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
 	else
-		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+				+ hdev->dv_buf_size;
 
-	shared_buf_tc = pfc_enable_num * hdev->mps +
-			(tc_num - pfc_enable_num) * hdev->mps / 2 +
-			hdev->mps;
-	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
+	shared_buf_tc = pfc_enable_num * aligned_mps +
+			(tc_num - pfc_enable_num) * aligned_mps / 2 +
+			aligned_mps;
+	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+			     HCLGE_BUF_SIZE_UNIT);
 
 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
-	if (rx_all <= rx_priv + shared_std)
+	if (rx_all < rx_priv + shared_std)
 		return false;
 
-	shared_buf = rx_all - rx_priv;
+	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
 	buf_alloc->s_buf.buf_size = shared_buf;
-	buf_alloc->s_buf.self.high = shared_buf;
-	buf_alloc->s_buf.self.low = 2 * hdev->mps;
+	if (hnae3_dev_dcb_supported(hdev)) {
+		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+	} else {
+		buf_alloc->s_buf.self.high = aligned_mps +
+					     HCLGE_NON_DCB_ADDITIONAL_BUF;
+		buf_alloc->s_buf.self.low =
+			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+	}
 
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		if ((hdev->hw_tc_map & BIT(i)) &&
 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
-			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
-			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
+			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
 		} else {
 			buf_alloc->s_buf.tc_thrd[i].low = 0;
-			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
+			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
 		}
 	}
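The reworked hclge_is_rx_buf_ok() aligns the MPS to the buffer unit before computing the shared-buffer minimum, and the admission test tightens from "<=" to "<". A rough standalone model of the DCB-case arithmetic (field names and values are illustrative stand-ins, not the hardware's):

#include <stdbool.h>
#include <stdint.h>

#define BUF_UNIT 256U

static uint32_t roundup_u32(uint32_t x, uint32_t u)   { return (x + u - 1) / u * u; }
static uint32_t rounddown_u32(uint32_t x, uint32_t u) { return x / u * u; }

/* Returns false when the packet buffer cannot hold the private buffers
 * plus the required shared standard, mirroring the shape of the hunk above.
 */
static bool rx_shared_buf_fits(uint32_t rx_all, uint32_t rx_priv,
			       uint32_t mps, uint32_t dv_buf_size)
{
	uint32_t aligned_mps = roundup_u32(mps, BUF_UNIT);
	uint32_t shared_min = 2 * aligned_mps + dv_buf_size;
	uint32_t shared_std = roundup_u32(shared_min, BUF_UNIT);
	uint32_t shared_buf;

	if (rx_all < rx_priv + shared_std)
		return false;

	/* whatever remains is carved out on a BUF_UNIT boundary */
	shared_buf = rounddown_u32(rx_all - rx_priv, BUF_UNIT);
	return shared_buf >= shared_std;
}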
@@ -1340,11 +1448,11 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
 
-		if (total_size < HCLGE_DEFAULT_TX_BUF)
+		if (total_size < hdev->tx_buf_size)
 			return -ENOMEM;
 
 		if (hdev->hw_tc_map & BIT(i))
-			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+			priv->tx_buf_size = hdev->tx_buf_size;
 		else
 			priv->tx_buf_size = 0;
 
@@ -1362,7 +1470,6 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 				struct hclge_pkt_buf_alloc *buf_alloc)
 {
-#define HCLGE_BUF_SIZE_UNIT	128
 	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
 	int no_pfc_priv_num, pfc_priv_num;
 	struct hclge_priv_buf *priv;
@@ -1388,13 +1495,16 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 			priv->enable = 1;
 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
 				priv->wl.low = aligned_mps;
-				priv->wl.high = priv->wl.low + aligned_mps;
+				priv->wl.high =
+					roundup(priv->wl.low + aligned_mps,
+						HCLGE_BUF_SIZE_UNIT);
 				priv->buf_size = priv->wl.high +
-						HCLGE_DEFAULT_DV;
+						hdev->dv_buf_size;
 			} else {
 				priv->wl.low = 0;
 				priv->wl.high = 2 * aligned_mps;
-				priv->buf_size = priv->wl.high;
+				priv->buf_size = priv->wl.high +
+						hdev->dv_buf_size;
 			}
 		} else {
 			priv->enable = 0;
@@ -1424,13 +1534,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 		priv->enable = 1;
 
 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
-			priv->wl.low = 128;
+			priv->wl.low = 256;
 			priv->wl.high = priv->wl.low + aligned_mps;
-			priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
+			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
 		} else {
 			priv->wl.low = 0;
 			priv->wl.high = aligned_mps;
-			priv->buf_size = priv->wl.high;
+			priv->buf_size = priv->wl.high + hdev->dv_buf_size;
 		}
 	}
 
@@ -1873,37 +1983,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
 }
 
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
-					u8 *duplex)
-{
-	struct hclge_query_an_speed_dup_cmd *req;
-	struct hclge_desc desc;
-	int speed_tmp;
-	int ret;
-
-	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"mac speed/autoneg/duplex query cmd failed %d\n",
-			ret);
-		return ret;
-	}
-
-	*duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
-	speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
-				    HCLGE_QUERY_SPEED_S);
-
-	ret = hclge_parse_speed(speed_tmp, speed);
-	if (ret)
-		dev_err(&hdev->pdev->dev,
-			"could not parse speed(=%d), %d\n", speed_tmp, ret);
-
-	return ret;
-}
-
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 {
 	struct hclge_config_auto_neg_cmd *req;
@@ -1947,12 +2026,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
-	struct hnae3_handle *handle = &hdev->vport[0].nic;
-	struct net_device *netdev = handle->kinfo.netdev;
 	struct hclge_mac *mac = &hdev->hw.mac;
-	int mtu;
 	int ret;
 
+	hdev->support_sfp_query = true;
 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
 					 hdev->hw.mac.duplex);
@@ -1964,15 +2041,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
 	mac->link = 0;
 
-	if (netdev)
-		mtu = netdev->mtu;
-	else
-		mtu = ETH_DATA_LEN;
+	ret = hclge_set_mac_mtu(hdev, hdev->mps);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+		return ret;
+	}
 
-	ret = hclge_set_mtu(handle, mtu);
+	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"set mtu failed ret=%d\n", ret);
+			"allocate buffer fail, ret=%d\n", ret);
 
 	return ret;
 }
@@ -2061,34 +2139,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 	}
 }
 
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+	struct hclge_sfp_speed_cmd *resp = NULL;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
+	resp = (struct hclge_sfp_speed_cmd *)desc.data;
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret == -EOPNOTSUPP) {
+		dev_warn(&hdev->pdev->dev,
+			 "IMP does not support get SFP speed %d\n", ret);
+		return ret;
+	} else if (ret) {
+		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+		return ret;
+	}
+
+	*speed = resp->sfp_speed;
+
+	return 0;
+}
+
 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
 {
 	struct hclge_mac mac = hdev->hw.mac;
-	u8 duplex;
 	int speed;
 	int ret;
 
-	/* get the speed and duplex as autoneg'result from mac cmd when phy
+	/* get the speed from SFP cmd when phy
 	 * doesn't exist.
 	 */
-	if (mac.phydev || !mac.autoneg)
+	if (mac.phydev)
 		return 0;
 
-	ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"mac autoneg/speed/duplex query failed %d\n", ret);
-		return ret;
-	}
+	/* if IMP does not support get SFP/qSFP speed, return directly */
+	if (!hdev->support_sfp_query)
+		return 0;
 
-	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"mac speed/duplex config failed %d\n", ret);
+	ret = hclge_get_sfp_speed(hdev, &speed);
+	if (ret == -EOPNOTSUPP) {
+		hdev->support_sfp_query = false;
+		return ret;
+	} else if (ret) {
 		return ret;
 	}
 
-	return 0;
+	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+		return 0; /* do nothing if no SFP */
+
+	/* must config full duplex for SFP */
+	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
 }
 
 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
@@ -2129,12 +2231,13 @@ static void hclge_service_complete(struct hclge_dev *hdev)
 
 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 {
-	u32 rst_src_reg;
-	u32 cmdq_src_reg;
+	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
 
 	/* fetch the events from their corresponding regs */
 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+	msix_src_reg = hclge_read_dev(&hdev->hw,
+				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
 
 	/* Assumption: If by any chance reset and mailbox events are reported
 	 * together then we will only process reset event in this go and will
@@ -2144,7 +2247,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	 */
 
 	/* check for vector0 reset event sources */
+	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,17 +2264,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 	}
 
 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
-	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
-		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
-		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
-		return HCLGE_VECTOR0_EVENT_RST;
-	}
+	/* check for vector0 msix event source */
+	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+		return HCLGE_VECTOR0_EVENT_ERR;
 
 	/* check for vector0 mailbox(=CMDQ RX) event source */
 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
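After this change, hclge_check_event_cause() reports IMP reset first, then global, then core, with the new MSI-X error source checked before the mailbox source. A compact sketch of that priority decode (event codes and bit positions below are stand-ins for the driver's register definitions):

#include <stdint.h>

enum ev { EV_NONE, EV_RST, EV_ERR, EV_MBX };

#define RST_IMP_BIT	(1U << 1)	/* illustrative bit positions */
#define RST_GLOBAL_BIT	(1U << 2)
#define RST_CORE_BIT	(1U << 3)
#define MSIX_ERR_MASK	0xFFU
#define MBX_RX_BIT	(1U << 0)

static enum ev decode_vector0(uint32_t rst, uint32_t msix, uint32_t cmdq)
{
	/* reset sources win over everything else */
	if (rst & (RST_IMP_BIT | RST_GLOBAL_BIT | RST_CORE_BIT))
		return EV_RST;
	/* then a hardware error, handled as a deferred "unknown" reset */
	if (msix & MSIX_ERR_MASK)
		return EV_ERR;
	/* finally the mailbox (CMDQ RX) source */
	if (cmdq & MBX_RX_BIT)
		return EV_MBX;
	return EV_NONE;
}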
@@ -2214,6 +2325,19 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 
 	/* vector 0 interrupt is shared with reset and mailbox source events.*/
 	switch (event_cause) {
+	case HCLGE_VECTOR0_EVENT_ERR:
+		/* we do not know what type of reset is required now. This could
+		 * only be decided after we fetch the type of errors which
+		 * caused this event. Therefore, we will do below for now:
+		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
+		 *    have deferred the type of reset to be used.
+		 * 2. Schedule the reset service task.
+		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
+		 *    will fetch the correct type of reset. This would be done
+		 *    by first decoding the types of errors.
+		 */
+		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
+		/* fall through */
 	case HCLGE_VECTOR0_EVENT_RST:
 		hclge_reset_task_schedule(hdev);
 		break;
@@ -2308,21 +2432,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
 		int ret;
 
 		ret = client->ops->reset_notify(handle, type);
-		if (ret)
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"notify nic client failed %d(%d)\n", type, ret);
 			return ret;
+		}
 	}
 
 	return 0;
 }
 
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+				    enum hnae3_reset_notify_type type)
+{
+	struct hnae3_client *client = hdev->roce_client;
+	int ret = 0;
+	u16 i;
+
+	if (!client)
+		return 0;
+
+	if (!client->ops->reset_notify)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+		struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+		ret = client->ops->reset_notify(handle, type);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"notify roce client failed %d(%d)",
+				type, ret);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static int hclge_reset_wait(struct hclge_dev *hdev)
 {
 #define HCLGE_RESET_WATI_MS	100
-#define HCLGE_RESET_WAIT_CNT	5
+#define HCLGE_RESET_WAIT_CNT	200
 	u32 val, reg, reg_bit;
 	u32 cnt = 0;
 
 	switch (hdev->reset_type) {
+	case HNAE3_IMP_RESET:
+		reg = HCLGE_GLOBAL_RESET_REG;
+		reg_bit = HCLGE_IMP_RESET_BIT;
+		break;
 	case HNAE3_GLOBAL_RESET:
 		reg = HCLGE_GLOBAL_RESET_REG;
 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2494,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		reg = HCLGE_FUN_RST_ING;
 		reg_bit = HCLGE_FUN_RST_ING_B;
 		break;
+	case HNAE3_FLR_RESET:
+		break;
 	default:
 		dev_err(&hdev->pdev->dev,
 			"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2503,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		return -EINVAL;
 	}
 
+	if (hdev->reset_type == HNAE3_FLR_RESET) {
+		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+		       cnt++ < HCLGE_RESET_WAIT_CNT)
+			msleep(HCLGE_RESET_WATI_MS);
+
+		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+			dev_err(&hdev->pdev->dev,
+				"flr wait timeout: %d\n", cnt);
+			return -EBUSY;
+		}
+
+		return 0;
+	}
+
 	val = hclge_read_dev(&hdev->hw, reg);
 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
 		msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2533,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 	return 0;
 }
 
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+	struct hclge_vf_rst_cmd *req;
+	struct hclge_desc desc;
+
+	req = (struct hclge_vf_rst_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+	req->dest_vfid = func_id;
+
+	if (reset)
+		req->vf_rst = 0x1;
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+	int i;
+
+	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+		int ret;
+
+		/* Send cmd to set/clear VF's FUNC_RST_ING */
+		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"set vf(%d) rst failed %d!\n",
+				vport->vport_id, ret);
+			return ret;
+		}
+
+		if (!reset)
+			continue;
+
+		/* Inform VF to process the reset.
+		 * hclge_inform_reset_assert_to_vf may fail if VF
+		 * driver is not loaded.
+		 */
+		ret = hclge_inform_reset_assert_to_vf(vport);
+		if (ret)
+			dev_warn(&hdev->pdev->dev,
+				 "inform reset to vf(%d) failed %d!\n",
+				 vport->vport_id, ret);
+	}
+
+	return 0;
+}
+
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 {
 	struct hclge_desc desc;
@@ -2396,11 +2620,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
 		break;
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
-		hclge_func_reset_cmd(hdev, 0);
 		/* schedule again to check later */
 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
 		hclge_reset_task_schedule(hdev);
 		break;
+	case HNAE3_FLR_RESET:
+		dev_info(&pdev->dev, "FLR requested\n");
+		/* schedule again to check later */
+		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
+		break;
 	default:
 		dev_warn(&pdev->dev,
 			 "Unsupported reset type: %d\n", hdev->reset_type);
@@ -2413,21 +2642,46 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
 {
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
 
+	/* first, resolve any unknown reset type to the known type(s) */
+	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+		/* we will intentionally ignore any errors from this function
+		 * as we will end up in *some* reset request in any case
+		 */
+		hclge_handle_hw_msix_error(hdev, addr);
+		clear_bit(HNAE3_UNKNOWN_RESET, addr);
+		/* We deferred the clearing of the error event which caused
+		 * interrupt since it was not possible to do that in
+		 * interrupt context (and this is the reason we introduced
+		 * new UNKNOWN reset type). Now that the errors have been
+		 * handled and cleared in hardware, we can safely enable
+		 * interrupts. This is an exception to the norm.
+		 */
+		hclge_enable_vector(&hdev->misc_vector, true);
+	}
+
 	/* return the highest priority reset level amongst all */
-	if (test_bit(HNAE3_GLOBAL_RESET, addr))
+	if (test_bit(HNAE3_IMP_RESET, addr)) {
+		rst_level = HNAE3_IMP_RESET;
+		clear_bit(HNAE3_IMP_RESET, addr);
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
 		rst_level = HNAE3_GLOBAL_RESET;
-	else if (test_bit(HNAE3_CORE_RESET, addr))
+		clear_bit(HNAE3_GLOBAL_RESET, addr);
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
 		rst_level = HNAE3_CORE_RESET;
-	else if (test_bit(HNAE3_IMP_RESET, addr))
-		rst_level = HNAE3_IMP_RESET;
-	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		clear_bit(HNAE3_CORE_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
 		rst_level = HNAE3_FUNC_RESET;
-
-	/* now, clear all other resets */
-	clear_bit(HNAE3_GLOBAL_RESET, addr);
-	clear_bit(HNAE3_CORE_RESET, addr);
-	clear_bit(HNAE3_IMP_RESET, addr);
-	clear_bit(HNAE3_FUNC_RESET, addr);
+		clear_bit(HNAE3_FUNC_RESET, addr);
+	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
+		rst_level = HNAE3_FLR_RESET;
+		clear_bit(HNAE3_FLR_RESET, addr);
+	}
 
 	return rst_level;
 }
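The rewritten hclge_get_reset_level() both picks the highest-priority pending reset and clears only the levels that the chosen reset subsumes, instead of unconditionally clearing everything. A minimal sketch of that selection rule (enum names and the bool-array representation are simplifications of the driver's bitmap):

#include <stdbool.h>

/* severity order, highest first: IMP > global > core > func; FLR sits
 * outside this hardware hierarchy and is only picked when nothing else
 * is pending, mirroring the hunk above.
 */
enum rst { RST_NONE, RST_FLR, RST_FUNC, RST_CORE, RST_GLOBAL, RST_IMP };

/* 'pending' must have at least RST_IMP + 1 entries, indexed by enum rst */
static enum rst pick_reset_level(bool *pending)
{
	static const enum rst order[] = { RST_IMP, RST_GLOBAL, RST_CORE, RST_FUNC };
	unsigned int i, j, n = sizeof(order) / sizeof(order[0]);

	for (i = 0; i < n; i++) {
		if (pending[order[i]]) {
			/* a higher-level reset absorbs all lower ones */
			for (j = i; j < n; j++)
				pending[order[j]] = false;
			return order[i];
		}
	}
	if (pending[RST_FLR]) {
		pending[RST_FLR] = false;
		return RST_FLR;
	}
	return RST_NONE;
}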
@@ -2457,39 +2711,209 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 }
 
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* fall through */
+	case HNAE3_FLR_RESET:
+		ret = hclge_set_all_vf_rst(hdev, true);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+	u32 reg_val;
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* There is no mechanism for PF to know if VF has stopped IO
+		 * for now, just wait 100 ms for VF to stop IO
+		 */
+		msleep(100);
+		ret = hclge_func_reset_cmd(hdev, 0);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"asserting function reset fail %d!\n", ret);
+			return ret;
+		}
+
+		/* After performing pf reset, it is not necessary to do the
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hclge_cmd_init is called.
+		 */
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		break;
+	case HNAE3_FLR_RESET:
+		/* There is no mechanism for PF to know if VF has stopped IO
+		 * for now, just wait 100 ms for VF to stop IO
+		 */
+		msleep(100);
+		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+		break;
+	case HNAE3_IMP_RESET:
+		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+		break;
+	default:
+		break;
+	}
+
+	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+	return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+	if (hdev->reset_pending) {
+		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+			 hdev->reset_pending);
+		return true;
+	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+		   BIT(HCLGE_IMP_RESET_BIT))) {
+		dev_info(&hdev->pdev->dev,
+			 "reset failed because IMP Reset is pending\n");
+		hclge_clear_reset_cause(hdev);
+		return false;
+	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+		hdev->reset_fail_cnt++;
+		if (is_timeout) {
+			set_bit(hdev->reset_type, &hdev->reset_pending);
+			dev_info(&hdev->pdev->dev,
+				 "re-schedule to wait for hw reset done\n");
+			return true;
+		}
+
+		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+		hclge_clear_reset_cause(hdev);
+		mod_timer(&hdev->reset_timer,
+			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+		return false;
+	}
+
+	hclge_clear_reset_cause(hdev);
+	dev_err(&hdev->pdev->dev, "Reset fail!\n");
+	return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+	int ret = 0;
+
+	switch (hdev->reset_type) {
+	case HNAE3_FUNC_RESET:
+		/* fall through */
+	case HNAE3_FLR_RESET:
+		ret = hclge_set_all_vf_rst(hdev, false);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
 static void hclge_reset(struct hclge_dev *hdev)
 {
 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-	struct hnae3_handle *handle;
+	bool is_timeout = false;
+	int ret;
 
 	/* Initialize ae_dev reset status as well, in case enet layer wants to
 	 * know if device is undergoing reset
 	 */
 	ae_dev->reset_type = hdev->reset_type;
+	hdev->reset_count++;
 	/* perform reset of the stack & ae device for a client */
-	handle = &hdev->vport[0].nic;
+	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	ret = hclge_reset_prepare_down(hdev);
+	if (ret)
+		goto err_reset;
+
 	rtnl_lock();
-	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+	if (ret)
+		goto err_reset_lock;
 
-	if (!hclge_reset_wait(hdev)) {
-		rtnl_lock();
-		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-		hclge_reset_ae_dev(hdev->ae_dev);
-		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	rtnl_unlock();
 
-		hclge_clear_reset_cause(hdev);
-	} else {
-		rtnl_lock();
-		/* schedule again to check pending resets later */
-		set_bit(hdev->reset_type, &hdev->reset_pending);
-		hclge_reset_task_schedule(hdev);
+	ret = hclge_reset_prepare_wait(hdev);
+	if (ret)
+		goto err_reset;
+
+	if (hclge_reset_wait(hdev)) {
+		is_timeout = true;
+		goto err_reset;
 	}
 
-	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-	handle->last_reset_time = jiffies;
+	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	rtnl_lock();
+	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_reset_ae_dev(hdev->ae_dev);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
+	hclge_clear_reset_cause(hdev);
+
+	ret = hclge_reset_prepare_up(hdev);
+	if (ret)
+		goto err_reset_lock;
+
+	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset_lock;
+
 	rtnl_unlock();
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+	if (ret)
+		goto err_reset;
+
+	hdev->last_reset_time = jiffies;
+	hdev->reset_fail_cnt = 0;
 	ae_dev->reset_type = HNAE3_NONE_RESET;
+
+	return;
+
+err_reset_lock:
+	rtnl_unlock();
+err_reset:
+	if (hclge_reset_err_handle(hdev, is_timeout))
+		hclge_reset_task_schedule(hdev);
 }
 
 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
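The rewritten hclge_reset() replaces the nested if/else with a linear sequence of checked steps and two unwind labels, one for failures under the rtnl lock and one for failures outside it. The control-flow shape, reduced to a sketch (the step() calls are dummies standing in for the notify/prepare calls):

#include <stdbool.h>

static int step(int fail) { return fail; }	/* stand-in for each notify/prepare call */

static void reset_flow(bool *need_resched)
{
	int ret;

	ret = step(0);			/* e.g. notify roce client DOWN */
	if (ret)
		goto err;

	/* lock(); -- failures inside the critical section unwind via err_lock */
	ret = step(0);			/* e.g. notify nic client DOWN */
	if (ret)
		goto err_lock;
	/* unlock(); */

	ret = step(0);			/* wait for hw, re-init, notify UP */
	if (ret)
		goto err;

	*need_resched = false;		/* success: clear failure state */
	return;

err_lock:
	/* unlock(); */
err:
	*need_resched = true;		/* retry, upgrade the level, or give up */
}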
@@ -2515,20 +2939,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
 	if (!handle)
 		handle = &hdev->vport[0].nic;
 
-	if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
 		return;
-	else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
-		handle->reset_level = HNAE3_FUNC_RESET;
+	else if (hdev->default_reset_request)
+		hdev->reset_level =
+			hclge_get_reset_level(hdev,
+					      &hdev->default_reset_request);
+	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+		hdev->reset_level = HNAE3_FUNC_RESET;
 
 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
-		 handle->reset_level);
+		 hdev->reset_level);
 
 	/* request reset & schedule reset task */
-	set_bit(handle->reset_level, &hdev->reset_request);
+	set_bit(hdev->reset_level, &hdev->reset_request);
 	hclge_reset_task_schedule(hdev);
 
-	if (handle->reset_level < HNAE3_GLOBAL_RESET)
-		handle->reset_level++;
+	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+		hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+					enum hnae3_reset_type rst_type)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+	dev_info(&hdev->pdev->dev,
+		 "triggering global reset in reset timer\n");
+	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+	hclge_reset_event(hdev->pdev, NULL);
 }
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2988,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
 	 * b. else, we can come back later to check this status so re-sched
 	 *    now.
 	 */
+	hdev->last_reset_time = jiffies;
 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
 	if (hdev->reset_type != HNAE3_NONE_RESET)
 		hclge_reset(hdev);
@@ -2584,6 +3031,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
 
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+	int i;
+
+	/* start from vport 1; the PF is always alive */
+	for (i = 1; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+
+		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+		/* If vf is not alive, set to default value */
+		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+	}
+}
+
 static void hclge_service_task(struct work_struct *work)
 {
 	struct hclge_dev *hdev =
@@ -2596,6 +3060,7 @@ static void hclge_service_task(struct work_struct *work)
 
 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
+	hclge_update_vport_alive(hdev);
 	hclge_service_complete(hdev);
 }
 
@@ -4212,6 +4677,13 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
 		u16 tqps;
 
+		if (vf > hdev->num_req_vfs) {
+			dev_err(&hdev->pdev->dev,
+				"Error: vf id (%d) > max vf num (%d)\n",
+				vf, hdev->num_req_vfs);
+			return -EINVAL;
+		}
+
 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
 
@@ -4222,13 +4694,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
 			return -EINVAL;
 		}
 
-		if (vf > hdev->num_req_vfs) {
-			dev_err(&hdev->pdev->dev,
-				"Error: vf id (%d) > max vf num (%d)\n",
-				vf, hdev->num_req_vfs);
-			return -EINVAL;
-		}
-
 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
 		q_index = ring;
 	}
@@ -4336,8 +4801,16 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
 	struct hlist_node *node;
 	int ret;
 
+	/* Return ok here, because reset error handling will check this
+	 * return value. If error is returned here, the reset process will
+	 * fail.
+	 */
 	if (!hnae3_dev_fd_supported(hdev))
-		return -EOPNOTSUPP;
+		return 0;
+
+	/* if fd is disabled, should not restore it when reset */
+	if (!hdev->fd_cfg.fd_en)
+		return 0;
 
 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +5065,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
 	return 0;
 }
 
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hdev->reset_count;
+}
+
 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4801,19 +5299,28 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
 	}
 }
 
-static int hclge_ae_start(struct hnae3_handle *handle)
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	int i;
 
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, true);
+	if (enable) {
+		mod_timer(&hdev->service_timer, jiffies + HZ);
+	} else {
+		del_timer_sync(&hdev->service_timer);
+		cancel_work_sync(&hdev->service_task);
+		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+	}
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
 
 	/* mac enable */
 	hclge_cfg_mac_mode(hdev, true);
 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
-	mod_timer(&hdev->service_timer, jiffies + HZ);
 	hdev->hw.mac.link = 0;
 
 	/* reset tqp stats */
@@ -4832,17 +5339,17 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
 
-	del_timer_sync(&hdev->service_timer);
-	cancel_work_sync(&hdev->service_task);
-	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-
-	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+	/* If it is not PF reset, the firmware will disable the MAC,
+	 * so it only needs to stop the phy here.
	 */
+	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+	    hdev->reset_type != HNAE3_FUNC_RESET) {
 		hclge_mac_stop_phy(hdev);
 		return;
 	}
 
-	for (i = 0; i < vport->alloc_tqps; i++)
-		hclge_tqp_enable(hdev, i, 0, false);
+	for (i = 0; i < handle->kinfo.num_tqps; i++)
+		hclge_reset_tqp(handle, i);
 
 	/* Mac disable */
 	hclge_cfg_mac_mode(hdev, false);
@@ -4851,11 +5358,35 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 
 	/* reset tqp stats */
 	hclge_reset_tqp_stats(handle);
-	del_timer_sync(&hdev->service_timer);
-	cancel_work_sync(&hdev->service_task);
 	hclge_update_link_status(hdev);
 }
 
+int hclge_vport_start(struct hclge_vport *vport)
+{
+	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+	vport->last_active_jiffies = jiffies;
+	return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	hclge_vport_stop(vport);
+}
+
 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
 					 u16 cmdq_resp, u8 resp_code,
 					 enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6534,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 	return hclge_set_vlan_rx_offload_cfg(vport);
 }
 
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 {
 	struct hclge_config_max_frm_size_cmd *req;
 	struct hclge_desc desc;
-	int max_frm_size;
-	int ret;
-
-	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
-	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
-	    max_frm_size > HCLGE_MAC_MAX_FRAME)
-		return -EINVAL;
-
-	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
 
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
-	req->max_frm_size = cpu_to_le16(max_frm_size);
+	req->max_frm_size = cpu_to_le16(new_mps);
 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
 
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret)
-		dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
-	else
-		hdev->mps = max_frm_size;
-
-	return ret;
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
 	struct hclge_dev *hdev = vport->back;
-	int ret;
+	int i, max_frm_size, ret = 0;
+
+	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+	    max_frm_size > HCLGE_MAC_MAX_FRAME)
+		return -EINVAL;
+
+	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+	mutex_lock(&hdev->vport_lock);
+	/* VF's mps must fit within hdev->mps */
+	if (vport->vport_id && max_frm_size > hdev->mps) {
+		mutex_unlock(&hdev->vport_lock);
+		return -EINVAL;
+	} else if (vport->vport_id) {
+		vport->mps = max_frm_size;
+		mutex_unlock(&hdev->vport_lock);
+		return 0;
+	}
+
+	/* PF's mps must be greater than VF's mps */
+	for (i = 1; i < hdev->num_alloc_vport; i++)
+		if (max_frm_size < hdev->vport[i].mps) {
+			mutex_unlock(&hdev->vport_lock);
+			return -EINVAL;
+		}
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
-	ret = hclge_set_mac_mtu(hdev, new_mtu);
+	ret = hclge_set_mac_mtu(hdev, max_frm_size);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Change mtu fail, ret =%d\n", ret);
-		return ret;
+		goto out;
 	}
 
+	hdev->mps = max_frm_size;
+	vport->mps = max_frm_size;
+
 	ret = hclge_buffer_alloc(hdev);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"Allocate buffer fail, ret =%d\n", ret);
 
+out:
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+	mutex_unlock(&hdev->vport_lock);
 	return ret;
 }
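The new hclge_set_vport_mtu() reserves headroom for two VLAN tags (QinQ) instead of one when converting an MTU into the MAC's maximum frame size. A small standalone illustration of that conversion (the min/max bounds here are illustrative, not the driver's exact limits):

#include <stdio.h>

#define ETH_HLEN	14
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define MAC_MIN_FRAME	64	/* illustrative bounds */
#define MAC_MAX_FRAME	9728

/* MTU covers the L2 payload only; the MAC needs the full frame size,
 * including the Ethernet header, FCS and room for two VLAN tags.
 */
static int mtu_to_max_frm_size(int mtu)
{
	int frm = mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	if (frm < MAC_MIN_FRAME || frm > MAC_MAX_FRAME)
		return -1;	/* reject out-of-range MTUs */
	return frm;
}

int main(void)
{
	printf("%d\n", mtu_to_max_frm_size(1500));	/* prints 1526 */
	return 0;
}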
@@ -6098,8 +6651,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
-static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
-					  u16 queue_id)
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hnae3_queue *queue;
 	struct hclge_tqp *tqp;
@@ -6250,7 +6802,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
 	if (!phydev->link || !phydev->autoneg)
 		return 0;
 
-	local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
 
 	if (phydev->pause)
 		remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7164,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 
 	if (hdev->service_timer.function)
 		del_timer_sync(&hdev->service_timer);
+	if (hdev->reset_timer.function)
+		del_timer_sync(&hdev->reset_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
 	if (hdev->rst_service_task.func)
@@ -6620,6 +7174,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
 		cancel_work_sync(&hdev->mbx_service_task);
 }
 
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS	100
+#define HCLGE_FLR_WAIT_CNT	50
+	struct hclge_dev *hdev = ae_dev->priv;
+	int cnt = 0;
+
+	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+	hclge_reset_event(hdev->pdev, NULL);
+
+	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+	       cnt++ < HCLGE_FLR_WAIT_CNT)
+		msleep(HCLGE_FLR_WAIT_MS);
+
+	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+		dev_err(&hdev->pdev->dev,
+			"flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclge_dev *hdev = ae_dev->priv;
+
+	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7217,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_level = HNAE3_FUNC_RESET;
 	ae_dev->priv = hdev;
+	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+	mutex_init(&hdev->vport_lock);
 
 	ret = hclge_pci_init(hdev);
 	if (ret) {
@@ -6727,6 +7313,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_mdiobus_unreg;
 	}
 
+	ret = hclge_config_gro(hdev, true);
+	if (ret)
+		goto err_mdiobus_unreg;
+
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6762,13 +7352,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	ret = hclge_hw_error_set_state(hdev, true);
 	if (ret) {
 		dev_err(&pdev->dev,
-			"hw error interrupts enable failed, ret =%d\n", ret);
+			"fail(%d) to enable hw error interrupts\n", ret);
 		goto err_mdiobus_unreg;
 	}
 
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7370,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_enable_vector(&hdev->misc_vector, true);
 
 	hclge_state_init(hdev);
+	hdev->last_reset_time = jiffies;
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -6806,6 +7398,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
 }
 
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		hclge_vport_start(vport);
+		vport++;
+	}
+}
+
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct hclge_dev *hdev = ae_dev->priv;
@@ -6823,19 +7426,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	ret = hclge_get_cap(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
-			ret);
-		return ret;
-	}
-
-	ret = hclge_configure(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
-		return ret;
-	}
-
 	ret = hclge_map_tqp(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
@@ -6856,6 +7446,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = hclge_config_gro(hdev, true);
+	if (ret)
+		return ret;
+
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6881,11 +7475,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	/* Re-enable the TM hw error interrupts because
-	 * they get disabled on core/global reset.
+	/* Re-enable the hw error interrupts because
+	 * the interrupts get disabled on core/global reset.
 	 */
-	if (hclge_enable_tm_hw_error(hdev, true))
-		dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+	ret = hclge_hw_error_set_state(hdev, true);
+	if (ret) {
+		dev_err(&pdev->dev,
			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
+		return ret;
+	}
+
+	hclge_reset_vport_state(hdev);
 
 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);
@@ -6913,6 +7513,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_destroy_cmd_queue(&hdev->hw);
 	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
+	mutex_destroy(&hdev->vport_lock);
 	ae_dev->priv = NULL;
 }
 
@@ -7166,8 +7767,15 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
 	return 0;
 }
 
+#define MAX_SEPARATE_NUM	4
+#define SEPARATOR_VALUE		0xFFFFFFFF
+#define REG_NUM_PER_LINE	4
+#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
+
 static int hclge_get_regs_len(struct hnae3_handle *handle)
 {
+	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 	u32 regs_num_32_bit, regs_num_64_bit;
@@ -7180,15 +7788,25 @@ static int hclge_get_regs_len(struct hnae3_handle *handle)
 		return -EOPNOTSUPP;
 	}
 
-	return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
+		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
 }
 
 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
 {
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 	u32 regs_num_32_bit, regs_num_64_bit;
+	int i, j, reg_um, separator_num;
+	u32 *reg = data;
 	int ret;
 
 	*version = hdev->fw_version;
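hclge_get_regs_len() now adds the direct register groups on top of the firmware-queried 32/64-bit counts; each group is padded to whole 16-byte "lines", plus one extra line of separators. A standalone sketch of that length computation (function names here are mine, only the two macros mirror the patch):

#include <stddef.h>
#include <stdint.h>

#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(uint32_t))

/* Each group contributes sizeof(list) / REG_LEN_PER_LINE + 1 lines: the
 * extra line covers the 0xFFFFFFFF separator padding appended after the
 * group's real values.
 */
static size_t group_lines(size_t list_bytes)
{
	return list_bytes / REG_LEN_PER_LINE + 1;
}

static size_t direct_dump_len(size_t cmdq_bytes, size_t common_bytes,
			      size_t ring_bytes, size_t intr_bytes,
			      size_t num_tqps, size_t num_vectors)
{
	/* ring registers repeat per queue, interrupt registers per vector */
	return (group_lines(cmdq_bytes) + group_lines(common_bytes) +
		group_lines(ring_bytes) * num_tqps +
		group_lines(intr_bytes) * num_vectors) * REG_LEN_PER_LINE;
}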
@@ -7200,16 +7818,53 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
 		return;
 	}
 
-	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+	/* fetching per-PF registers values from PF PCIe register space */
+	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	for (i = 0; i < reg_um; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	for (i = 0; i < reg_um; i++)
+		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+	for (i = 0; i < separator_num; i++)
+		*reg++ = SEPARATOR_VALUE;
+
+	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	for (j = 0; j < kinfo->num_tqps; j++) {
+		for (i = 0; i < reg_um; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						ring_reg_addr_list[i] +
+						0x200 * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+
+	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+	for (j = 0; j < hdev->num_msi_used - 1; j++) {
+		for (i = 0; i < reg_um; i++)
+			*reg++ = hclge_read_dev(&hdev->hw,
+						tqp_intr_reg_addr_list[i] +
+						4 * j);
+		for (i = 0; i < separator_num; i++)
+			*reg++ = SEPARATOR_VALUE;
+	}
+
+	/* fetching PF common registers values from firmware */
+	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Get 32 bit register failed, ret = %d.\n", ret);
 		return;
 	}
 
-	data = (u32 *)data + regs_num_32_bit;
-	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
-				    data);
+	reg += regs_num_32_bit;
+	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"Get 64 bit register failed, ret = %d.\n", ret);
@@ -7272,9 +7927,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
 	}
 }
 
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return hclge_config_gro(hdev, enable);
+}
+
 static const struct hnae3_ae_ops hclge_ops = {
 	.init_ae_dev = hclge_init_ae_dev,
 	.uninit_ae_dev = hclge_uninit_ae_dev,
+	.flr_prepare = hclge_flr_prepare,
+	.flr_done = hclge_flr_done,
 	.init_client_instance = hclge_init_client_instance,
 	.uninit_client_instance = hclge_uninit_client_instance,
 	.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7950,8 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_loopback = hclge_set_loopback,
 	.start = hclge_ae_start,
 	.stop = hclge_ae_stop,
+	.client_start = hclge_client_start,
+	.client_stop = hclge_client_stop,
 	.get_status = hclge_get_status,
 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7988,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
 	.reset_event = hclge_reset_event,
+	.set_default_reset_request = hclge_set_def_reset_request,
 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
 	.set_channels = hclge_set_channels,
 	.get_channels = hclge_get_channels,
@@ -7336,7 +8004,14 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_fd_all_rules = hclge_get_all_rules,
 	.restore_fd_rules = hclge_restore_fd_entries,
 	.enable_fd = hclge_enable_fd,
-	.process_hw_error = hclge_process_ras_hw_error,
+	.dbg_run_cmd = hclge_dbg_run_cmd,
+	.handle_hw_ras_error = hclge_handle_hw_ras_error,
+	.get_hw_reset_stat = hclge_get_hw_reset_stat,
+	.ae_dev_resetting = hclge_ae_dev_resetting,
+	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+	.set_gro_en = hclge_gro_en,
+	.get_global_queue_id = hclge_covert_handle_qid_global,
+	.set_timer_task = hclge_set_timer_task,
 };
 
 static struct hnae3_ae_algo ae_algo = {
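For context on the dump layout produced by the rewritten hclge_get_regs(): every register group is followed by separator words (0xFFFFFFFF) that pad it to a whole 4-word line. A hypothetical consumer-side sketch of walking one padded group (read_group and its parameters are mine, not a driver API):

#include <stdint.h>
#include <stdio.h>

#define SEPARATOR_VALUE 0xFFFFFFFFU

/* Walk one padded group: 'count' real values followed by separator
 * padding up to the next 4-word line boundary. When count is a multiple
 * of 4, a full separator line follows, matching the driver's
 * MAX_SEPARATE_NUM - count % REG_NUM_PER_LINE padding rule.
 */
static const uint32_t *read_group(const uint32_t *p, unsigned int count,
				  const char *name)
{
	unsigned int pad = 4 - count % 4;
	unsigned int i;

	for (i = 0; i < count; i++)
		printf("%s[%u] = 0x%08x\n", name, i, p[i]);
	p += count;

	for (i = 0; i < pad; i++)
		if (*p++ != SEPARATOR_VALUE)
			fprintf(stderr, "unexpected non-separator word\n");
	return p;
}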