11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>

*** 55,74 ****
  static boolean_t ixgbe_rx_drain(ixgbe_t *);
  static int ixgbe_alloc_rings(ixgbe_t *);
  static void ixgbe_free_rings(ixgbe_t *);
  static int ixgbe_alloc_rx_data(ixgbe_t *);
  static void ixgbe_free_rx_data(ixgbe_t *);
! static void ixgbe_setup_rings(ixgbe_t *);
! static void ixgbe_setup_rx(ixgbe_t *);
  static void ixgbe_setup_tx(ixgbe_t *);
  static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
  static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
  static void ixgbe_setup_rss(ixgbe_t *);
  static void ixgbe_setup_vmdq(ixgbe_t *);
  static void ixgbe_setup_vmdq_rss(ixgbe_t *);
  static void ixgbe_setup_rss_table(ixgbe_t *);
  static void ixgbe_init_unicst(ixgbe_t *);
  static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
  static void ixgbe_setup_multicst(ixgbe_t *);
  static void ixgbe_get_hw_state(ixgbe_t *);
  static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
  static void ixgbe_get_conf(ixgbe_t *);
--- 55,75 ----
  static boolean_t ixgbe_rx_drain(ixgbe_t *);
  static int ixgbe_alloc_rings(ixgbe_t *);
  static void ixgbe_free_rings(ixgbe_t *);
  static int ixgbe_alloc_rx_data(ixgbe_t *);
  static void ixgbe_free_rx_data(ixgbe_t *);
! static int ixgbe_setup_rings(ixgbe_t *);
! static int ixgbe_setup_rx(ixgbe_t *);
  static void ixgbe_setup_tx(ixgbe_t *);
  static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
  static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
  static void ixgbe_setup_rss(ixgbe_t *);
  static void ixgbe_setup_vmdq(ixgbe_t *);
  static void ixgbe_setup_vmdq_rss(ixgbe_t *);
  static void ixgbe_setup_rss_table(ixgbe_t *);
  static void ixgbe_init_unicst(ixgbe_t *);
+ static int ixgbe_init_vlan(ixgbe_t *);
  static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
  static void ixgbe_setup_multicst(ixgbe_t *);
  static void ixgbe_get_hw_state(ixgbe_t *);
  static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
  static void ixgbe_get_conf(ixgbe_t *);

*** 111,120 ****
--- 112,123 ----
  static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
  static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
  static void ixgbe_get_driver_control(struct ixgbe_hw *);
  static int ixgbe_addmac(void *, const uint8_t *);
  static int ixgbe_remmac(void *, const uint8_t *);
+ static int ixgbe_addvlan(mac_group_driver_t, uint16_t);
+ static int ixgbe_remvlan(mac_group_driver_t, uint16_t);
  static void ixgbe_release_driver_control(struct ixgbe_hw *);
  
  static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
  static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
  static int ixgbe_resume(dev_info_t *);

*** 1157,1166 ****
--- 1160,1171 ----
  
  	for (i = 0; i < ixgbe->num_rx_groups; i++) {
  		rx_group = &ixgbe->rx_groups[i];
  		rx_group->index = i;
  		rx_group->ixgbe = ixgbe;
+ 		list_create(&rx_group->vlans, sizeof (ixgbe_vlan_t),
+ 		    offsetof(ixgbe_vlan_t, ixvl_link));
  	}
  
  	for (i = 0; i < ixgbe->num_tx_rings; i++) {
  		tx_ring = &ixgbe->tx_rings[i];
  		tx_ring->index = i;

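The list_create() call above hangs per-group VLAN state off a new ixgbe_vlan_t
type whose definition (in ixgbe_sw.h) is not part of this excerpt. A minimal
sketch, inferred from the fields the rest of this diff touches (ixvl_link,
ixvl_vid, ixvl_refs):

	typedef struct ixgbe_vlan {
		list_node_t	ixvl_link;	/* per-group list linkage */
		uint16_t	ixvl_vid;	/* VLAN ID */
		uint_t		ixvl_refs;	/* clients using this VID */
	} ixgbe_vlan_t;
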
*** 1907,1917 ****
  	}
  
  	/*
  	 * Setup the rx/tx rings
  	 */
! 	ixgbe_setup_rings(ixgbe);
  
  	/*
  	 * ixgbe_start() will be called when resetting, however if reset
  	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
  	 * before enabling the interrupts.
--- 1912,1923 ----
  	}
  
  	/*
  	 * Setup the rx/tx rings
  	 */
! 	if (ixgbe_setup_rings(ixgbe) != IXGBE_SUCCESS)
! 		goto start_failure;
  
  	/*
  	 * ixgbe_start() will be called when resetting, however if reset
  	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
  	 * before enabling the interrupts.

*** 2280,2289 ****
--- 2286,2305 ----
  		kmem_free(ixgbe->tx_rings,
  		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
  		ixgbe->tx_rings = NULL;
  	}
  
+ 	for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
+ 		ixgbe_vlan_t *vlp;
+ 		ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[i];
+ 
+ 		while ((vlp = list_remove_head(&rx_group->vlans)) != NULL)
+ 			kmem_free(vlp, sizeof (ixgbe_vlan_t));
+ 
+ 		list_destroy(&rx_group->vlans);
+ 	}
+ 
  	if (ixgbe->rx_groups != NULL) {
  		kmem_free(ixgbe->rx_groups,
  		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
  		ixgbe->rx_groups = NULL;
  	}

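The teardown above follows the usual list(9F) discipline: list_destroy()
expects an empty list, so entries are drained with list_remove_head() first.
Condensed to the bare pattern (my_ent_t is a stand-in type, not from this
change):

	while ((ent = list_remove_head(&lst)) != NULL)
		kmem_free(ent, sizeof (my_ent_t));
	list_destroy(&lst);	/* asserts if entries remain */
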
*** 2334,2356 ****
  }
  
  /*
   * ixgbe_setup_rings - Setup rx/tx rings.
   */
! static void
  ixgbe_setup_rings(ixgbe_t *ixgbe)
  {
  	/*
  	 * Setup the rx/tx rings, including the following:
  	 *
  	 * 1. Setup the descriptor ring and the control block buffers;
  	 * 2. Initialize necessary registers for receive/transmit;
  	 * 3. Initialize software pointers/parameters for receive/transmit;
  	 */
! 	ixgbe_setup_rx(ixgbe);
  
  	ixgbe_setup_tx(ixgbe);
  }
  
  static void
  ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
  {
--- 2350,2375 ----
  }
  
  /*
   * ixgbe_setup_rings - Setup rx/tx rings.
   */
! static int
  ixgbe_setup_rings(ixgbe_t *ixgbe)
  {
  	/*
  	 * Setup the rx/tx rings, including the following:
  	 *
  	 * 1. Setup the descriptor ring and the control block buffers;
  	 * 2. Initialize necessary registers for receive/transmit;
  	 * 3. Initialize software pointers/parameters for receive/transmit;
  	 */
! 	if (ixgbe_setup_rx(ixgbe) != IXGBE_SUCCESS)
! 		return (IXGBE_FAILURE);
  
  	ixgbe_setup_tx(ixgbe);
+ 
+ 	return (IXGBE_SUCCESS);
  }
  
  static void
  ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
  {

*** 2433,2443 ****
  	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  	reg_val |= IXGBE_SRRCTL_DROP_EN;
  	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
  }
  
! static void
  ixgbe_setup_rx(ixgbe_t *ixgbe)
  {
  	ixgbe_rx_ring_t *rx_ring;
  	struct ixgbe_hw *hw = &ixgbe->hw;
  	uint32_t reg_val;
--- 2452,2462 ----
  	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  	reg_val |= IXGBE_SRRCTL_DROP_EN;
  	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
  }
  
! static int
  ixgbe_setup_rx(ixgbe_t *ixgbe)
  {
  	ixgbe_rx_ring_t *rx_ring;
  	struct ixgbe_hw *hw = &ixgbe->hw;
  	uint32_t reg_val;

*** 2526,2535 ****
--- 2545,2563 ----
  	default:
  		break;
  	}
  
  	/*
+ 	 * Initialize VLAN SW and HW state if VLAN filtering is
+ 	 * enabled.
+ 	 */
+ 	if (ixgbe->vlft_enabled) {
+ 		if (ixgbe_init_vlan(ixgbe) != IXGBE_SUCCESS)
+ 			return (IXGBE_FAILURE);
+ 	}
+ 
+ 	/*
  	 * Enable the receive unit. This must be done after filter
  	 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
  	 * 82598 is the only adapter which defines this RXCTRL option.
  	 */
  	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);

*** 2616,2625 ****
--- 2644,2655 ----
  
  		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
  		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
  		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
  	}
+ 
+ 	return (IXGBE_SUCCESS);
  }
  
  static void
  ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
  {

*** 2817,2827 ****
   */
  static void
  ixgbe_setup_vmdq(ixgbe_t *ixgbe)
  {
  	struct ixgbe_hw *hw = &ixgbe->hw;
! 	uint32_t vmdctl, i, vtctl;
  
  	/*
  	 * Setup the VMDq Control register, enable VMDq based on
  	 * packet destination MAC address:
  	 */
--- 2847,2857 ----
   */
  static void
  ixgbe_setup_vmdq(ixgbe_t *ixgbe)
  {
  	struct ixgbe_hw *hw = &ixgbe->hw;
! 	uint32_t vmdctl, i, vtctl, vlnctl;
  
  	/*
  	 * Setup the VMDq Control register, enable VMDq based on
  	 * packet destination MAC address:
  	 */

*** 2853,2866 ****
  		}
  
  		/*
  		 * Enable Virtualization and Replication.
  		 */
! 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
  		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
  
  		/*
  		 * Enable receiving packets to all VFs
  		 */
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
  		break;
--- 2883,2906 ----
  		}
  
  		/*
  		 * Enable Virtualization and Replication.
  		 */
! 		vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
! 		ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
! 		vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
  		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
  
  		/*
+ 		 * Enable VLAN filtering and switching (VFTA and VLVF).
+ 		 */
+ 		vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ 		vlnctl |= IXGBE_VLNCTRL_VFE;
+ 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
+ 		ixgbe->vlft_enabled = B_TRUE;
+ 
+ 		/*
  		 * Enable receiving packets to all VFs
  		 */
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
  		break;

*** 2876,2886 ****
  static void
  ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
  {
  	struct ixgbe_hw *hw = &ixgbe->hw;
  	uint32_t i, mrqc;
! 	uint32_t vtctl, vmdctl;
  
  	/*
  	 * Initialize RETA/ERETA table
  	 */
  	ixgbe_setup_rss_table(ixgbe);
--- 2916,2926 ----
  static void
  ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
  {
  	struct ixgbe_hw *hw = &ixgbe->hw;
  	uint32_t i, mrqc;
! 	uint32_t vtctl, vmdctl, vlnctl;
  
  	/*
  	 * Initialize RETA/ERETA table
  	 */
  	ixgbe_setup_rss_table(ixgbe);

*** 2960,2973 ****
  	    hw->mac.type == ixgbe_mac_X550EM_x ||
  	    hw->mac.type == ixgbe_mac_X550EM_a) {
  		/*
  		 * Enable Virtualization and Replication.
  		 */
! 		vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
  		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
  
  		/*
  		 * Enable receiving packets to all VFs
  		 */
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
  	}
--- 3000,3023 ----
  	    hw->mac.type == ixgbe_mac_X550EM_x ||
  	    hw->mac.type == ixgbe_mac_X550EM_a) {
  		/*
  		 * Enable Virtualization and Replication.
  		 */
! 		vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
! 		ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
! 		vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
  		IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
  
  		/*
+ 		 * Enable VLAN filtering and switching (VFTA and VLVF).
+ 		 */
+ 		vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ 		vlnctl |= IXGBE_VLNCTRL_VFE;
+ 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
+ 		ixgbe->vlft_enabled = B_TRUE;
+ 
+ 		/*
  		 * Enable receiving packets to all VFs
  		 */
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
  		IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
  	}

*** 3134,3143 ****
--- 3185,3241 ----
  	return (-1);
  }
  
  /*
+  * Restore the HW state to match the SW state during restart.
+  */
+ static int
+ ixgbe_init_vlan(ixgbe_t *ixgbe)
+ {
+ 	/*
+ 	 * The device is starting for the first time; there is nothing
+ 	 * to do.
+ 	 */
+ 	if (!ixgbe->vlft_init) {
+ 		ixgbe->vlft_init = B_TRUE;
+ 		return (IXGBE_SUCCESS);
+ 	}
+ 
+ 	for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
+ 		int			ret;
+ 		boolean_t		vlvf_bypass;
+ 		ixgbe_rx_group_t	*rxg = &ixgbe->rx_groups[i];
+ 		struct ixgbe_hw		*hw = &ixgbe->hw;
+ 
+ 		if (rxg->aupe) {
+ 			uint32_t vml2flt;
+ 
+ 			vml2flt = IXGBE_READ_REG(hw, IXGBE_VMOLR(rxg->index));
+ 			vml2flt |= IXGBE_VMOLR_AUPE;
+ 			IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rxg->index), vml2flt);
+ 		}
+ 
+ 		vlvf_bypass = (rxg->index == ixgbe->rx_def_group);
+ 		for (ixgbe_vlan_t *vlp = list_head(&rxg->vlans); vlp != NULL;
+ 		    vlp = list_next(&rxg->vlans, vlp)) {
+ 			ret = ixgbe_set_vfta(hw, vlp->ixvl_vid, rxg->index,
+ 			    B_TRUE, vlvf_bypass);
+ 
+ 			if (ret != IXGBE_SUCCESS) {
+ 				ixgbe_error(ixgbe, "Failed to program VFTA"
+ 				    " for group %u, VID: %u, ret: %d.",
+ 				    rxg->index, vlp->ixvl_vid, ret);
+ 				return (IXGBE_FAILURE);
+ 			}
+ 		}
+ 	}
+ 
+ 	return (IXGBE_SUCCESS);
+ }
+ 
+ /*
   * ixgbe_multicst_add - Add a multicst address.
   */
  int
  ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
  {

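ixgbe_init_vlan() replays the soft state into the hardware tables through the
Intel shared code. The vlvf_bypass argument used here is the extension from
bug 11492 above; after that change the shared-code entry point has roughly
this shape (paraphrased for orientation, not quoted verbatim):

	s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
	    bool vlan_on, bool vlvf_bypass);

When vlvf_bypass is true, the shared code sets only the VFTA bit for the VID
and declines to allocate one of the 64 VLVF slots unless the VID already
owns one.
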
*** 6159,6168 ****
--- 6257,6267 ----
  
  void
  ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
      mac_group_info_t *infop, mac_group_handle_t gh)
  {
  	ixgbe_t *ixgbe = (ixgbe_t *)arg;
+ 	struct ixgbe_hw *hw = &ixgbe->hw;
  
  	switch (rtype) {
  	case MAC_RING_TYPE_RX: {
  		ixgbe_rx_group_t *rx_group;

*** 6172,6181 ****
--- 6271,6294 ----
  
  		infop->mgi_driver = (mac_group_driver_t)rx_group;
  		infop->mgi_start = NULL;
  		infop->mgi_stop = NULL;
  		infop->mgi_addmac = ixgbe_addmac;
  		infop->mgi_remmac = ixgbe_remmac;
+ 
+ 		if ((ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ ||
+ 		    ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) &&
+ 		    (hw->mac.type == ixgbe_mac_82599EB ||
+ 		    hw->mac.type == ixgbe_mac_X540 ||
+ 		    hw->mac.type == ixgbe_mac_X550 ||
+ 		    hw->mac.type == ixgbe_mac_X550EM_x)) {
+ 			infop->mgi_addvlan = ixgbe_addvlan;
+ 			infop->mgi_remvlan = ixgbe_remvlan;
+ 		} else {
+ 			infop->mgi_addvlan = NULL;
+ 			infop->mgi_remvlan = NULL;
+ 		}
+ 
  		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
  		break;
  	}
  	case MAC_RING_TYPE_TX:

*** 6270,6279 ****
--- 6383,6614 ----
  
  	mutex_exit(&ixgbe->gen_lock);
  
  	return (0);
  }
+ 
+ static ixgbe_vlan_t *
+ ixgbe_find_vlan(ixgbe_rx_group_t *rx_group, uint16_t vid)
+ {
+ 	for (ixgbe_vlan_t *vlp = list_head(&rx_group->vlans); vlp != NULL;
+ 	    vlp = list_next(&rx_group->vlans, vlp)) {
+ 		if (vlp->ixvl_vid == vid)
+ 			return (vlp);
+ 	}
+ 
+ 	return (NULL);
+ }
+ 
+ /*
+  * Attempt to use a VLAN HW filter for this group. If the group is
+  * interested in untagged packets then set AUPE only. If the group is
+  * the default then only set the VFTA. Leave the VLVF slots open for
+  * reserved groups to guarantee their use of HW filtering.
+  */
+ static int
+ ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid)
+ {
+ 	ixgbe_rx_group_t	*rx_group = (ixgbe_rx_group_t *)gdriver;
+ 	ixgbe_t			*ixgbe = rx_group->ixgbe;
+ 	struct ixgbe_hw		*hw = &ixgbe->hw;
+ 	ixgbe_vlan_t		*vlp;
+ 	int			ret;
+ 	boolean_t		is_def_grp;
+ 
+ 	mutex_enter(&ixgbe->gen_lock);
+ 
+ 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (ECANCELED);
+ 	}
+ 
+ 	/*
+ 	 * Let's be sure VLAN filtering is enabled.
+ 	 */
+ 	VERIFY3B(ixgbe->vlft_enabled, ==, B_TRUE);
+ 	is_def_grp = (rx_group->index == ixgbe->rx_def_group);
+ 
+ 	/*
+ 	 * VLAN filtering is enabled but we want to receive untagged
+ 	 * traffic on this group -- set the AUPE bit on the group and
+ 	 * leave the VLAN tables alone.
+ 	 */
+ 	if (vid == MAC_VLAN_UNTAGGED) {
+ 		/*
+ 		 * We never enable AUPE on the default group; it is
+ 		 * redundant. Untagged traffic which passes L2
+ 		 * filtering is delivered to the default group if no
+ 		 * other group is interested.
+ 		 */
+ 		if (!is_def_grp) {
+ 			uint32_t vml2flt;
+ 
+ 			vml2flt = IXGBE_READ_REG(hw,
+ 			    IXGBE_VMOLR(rx_group->index));
+ 			vml2flt |= IXGBE_VMOLR_AUPE;
+ 			IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rx_group->index),
+ 			    vml2flt);
+ 			rx_group->aupe = B_TRUE;
+ 		}
+ 
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (0);
+ 	}
+ 
+ 	vlp = ixgbe_find_vlan(rx_group, vid);
+ 	if (vlp != NULL) {
+ 		/* Only the default group supports multiple clients. */
+ 		VERIFY3B(is_def_grp, ==, B_TRUE);
+ 		vlp->ixvl_refs++;
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (0);
+ 	}
+ 
+ 	/*
+ 	 * The default group doesn't require a VLVF entry, only a VFTA
+ 	 * entry. All traffic passing L2 filtering (MPSAR + VFTA) is
+ 	 * delivered to the default group if no other group is
+ 	 * interested. The fourth argument, vlvf_bypass, tells the
+ 	 * ixgbe common code to avoid using a VLVF slot if one isn't
+ 	 * already allocated to this VLAN.
+ 	 *
+ 	 * This logic is meant to reserve VLVF slots for use by
+ 	 * reserved groups: guaranteeing their use of HW filtering.
+ 	 */
+ 	ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE, is_def_grp);
+ 
+ 	if (ret == IXGBE_SUCCESS) {
+ 		vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_SLEEP);
+ 		vlp->ixvl_vid = vid;
+ 		vlp->ixvl_refs = 1;
+ 		list_insert_tail(&rx_group->vlans, vlp);
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (0);
+ 	}
+ 
+ 	/*
+ 	 * We should actually never return ENOSPC because we've set
+ 	 * things up so that every reserved group is guaranteed to
+ 	 * have a VLVF slot.
+ 	 */
+ 	if (ret == IXGBE_ERR_PARAM)
+ 		ret = EINVAL;
+ 	else if (ret == IXGBE_ERR_NO_SPACE)
+ 		ret = ENOSPC;
+ 	else
+ 		ret = EIO;
+ 
+ 	mutex_exit(&ixgbe->gen_lock);
+ 	return (ret);
+ }
+ 
+ /*
+  * Attempt to remove the VLAN HW filter associated with this group. If
+  * we are removing a HW filter for the default group then we know only
+  * the VFTA was set (VLVF is reserved for non-default/reserved
+  * groups). If the group wishes to stop receiving untagged traffic
+  * then clear the AUPE but leave the VLAN filters alone.
+  */
+ static int
+ ixgbe_remvlan(mac_group_driver_t gdriver, uint16_t vid)
+ {
+ 	ixgbe_rx_group_t	*rx_group = (ixgbe_rx_group_t *)gdriver;
+ 	ixgbe_t			*ixgbe = rx_group->ixgbe;
+ 	struct ixgbe_hw		*hw = &ixgbe->hw;
+ 	int			ret;
+ 	ixgbe_vlan_t		*vlp;
+ 	boolean_t		is_def_grp;
+ 
+ 	mutex_enter(&ixgbe->gen_lock);
+ 
+ 	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (ECANCELED);
+ 	}
+ 
+ 	is_def_grp = (rx_group->index == ixgbe->rx_def_group);
+ 
+ 	/* See the AUPE comment in ixgbe_addvlan(). */
+ 	if (vid == MAC_VLAN_UNTAGGED) {
+ 		if (!is_def_grp) {
+ 			uint32_t vml2flt;
+ 
+ 			vml2flt = IXGBE_READ_REG(hw,
+ 			    IXGBE_VMOLR(rx_group->index));
+ 			vml2flt &= ~IXGBE_VMOLR_AUPE;
+ 			IXGBE_WRITE_REG(hw,
+ 			    IXGBE_VMOLR(rx_group->index), vml2flt);
+ 			rx_group->aupe = B_FALSE;
+ 		}
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (0);
+ 	}
+ 
+ 	vlp = ixgbe_find_vlan(rx_group, vid);
+ 	if (vlp == NULL) {
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (ENOENT);
+ 	}
+ 
+ 	/*
+ 	 * See the comment in ixgbe_addvlan() about is_def_grp and
+ 	 * vlvf_bypass.
+ 	 */
+ 	if (vlp->ixvl_refs == 1) {
+ 		ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_FALSE,
+ 		    is_def_grp);
+ 	} else {
+ 		/*
+ 		 * Only the default group can have multiple clients.
+ 		 * If there is more than one client, leave the
+ 		 * VFTA[vid] bit alone.
+ 		 */
+ 		VERIFY3B(is_def_grp, ==, B_TRUE);
+ 		VERIFY3U(vlp->ixvl_refs, >, 1);
+ 		vlp->ixvl_refs--;
+ 		mutex_exit(&ixgbe->gen_lock);
+ 		return (0);
+ 	}
+ 
+ 	if (ret != IXGBE_SUCCESS) {
+ 		mutex_exit(&ixgbe->gen_lock);
+ 
+ 		/* IXGBE_ERR_PARAM should be the only possible error here. */
+ 		if (ret == IXGBE_ERR_PARAM)
+ 			return (EINVAL);
+ 		else
+ 			return (EIO);
+ 	}
+ 
+ 	VERIFY3U(vlp->ixvl_refs, ==, 1);
+ 	vlp->ixvl_refs = 0;
+ 	list_remove(&rx_group->vlans, vlp);
+ 	kmem_free(vlp, sizeof (ixgbe_vlan_t));
+ 
+ 	/*
+ 	 * Calling ixgbe_set_vfta() on a non-default group may have
+ 	 * cleared the VFTA[vid] bit even though the default group
+ 	 * still has clients using the vid. This happens because the
+ 	 * ixgbe common code doesn't ref count the use of VLANs. Check
+ 	 * for any use of vid on the default group and make sure the
+ 	 * VFTA[vid] bit is set. This operation is idempotent: setting
+ 	 * VFTA[vid] to true if already true won't hurt anything.
+ 	 */
+ 	if (!is_def_grp) {
+ 		ixgbe_rx_group_t *defgrp;
+ 
+ 		defgrp = &ixgbe->rx_groups[ixgbe->rx_def_group];
+ 		vlp = ixgbe_find_vlan(defgrp, vid);
+ 		if (vlp != NULL) {
+ 			/* This shouldn't fail, but if it does return EIO. */
+ 			ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE,
+ 			    B_TRUE);
+ 			if (ret != IXGBE_SUCCESS) {
+ 				mutex_exit(&ixgbe->gen_lock);
+ 				return (EIO);
+ 			}
+ 		}
+ 	}
+ 
+ 	mutex_exit(&ixgbe->gen_lock);
+ 	return (0);
+ }
  
  /*
   * Add a mac address.
   */
  static int
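
For orientation on the two tables these functions juggle: the VFTA is a
4096-bit bitmap (128 32-bit registers, one bit per VID) consulted for L2
admission, while the 64 VLVF/VLVFB entries bind a VID to specific pools for
switching; that asymmetry is why the default group can live on the VFTA
alone. A sketch of the indexing arithmetic (illustrative helper only, not
part of this change; the shared code computes the equivalent inside
ixgbe_set_vfta()):

	/* Locate the VFTA register and bit position for a VID. */
	static void
	vfta_locate(uint16_t vid, uint32_t *regp, uint32_t *bitp)
	{
		*regp = (vid >> 5) & 0x7f;	/* one of 128 VFTA registers */
		*bitp = vid & 0x1f;		/* bit index within it */
	}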