Print this page
11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>

*** 18,28 **** * * CDDL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. ! * Copyright 2017, Joyent, Inc. */ #include <sys/types.h> #include <sys/callb.h> #include <sys/cpupart.h> --- 18,28 ---- * * CDDL HEADER END */ /* * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. ! * Copyright 2018 Joyent, Inc. */ #include <sys/types.h> #include <sys/callb.h> #include <sys/cpupart.h>
*** 1184,1194 **** if (mac_srs->srs_type & SRST_TX) { mac_srs->srs_tx_soft_rings = (mac_soft_ring_t **) kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_RINGS_PER_GROUP, KM_SLEEP); ! if (mcip->mci_state_flags & MCIS_IS_AGGR) { mac_srs_tx_t *tx = &mac_srs->srs_tx; tx->st_soft_rings = (mac_soft_ring_t **) kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_RINGS_PER_GROUP, KM_SLEEP); --- 1184,1194 ---- if (mac_srs->srs_type & SRST_TX) { mac_srs->srs_tx_soft_rings = (mac_soft_ring_t **) kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_RINGS_PER_GROUP, KM_SLEEP); ! if (mcip->mci_state_flags & MCIS_IS_AGGR_CLIENT) { mac_srs_tx_t *tx = &mac_srs->srs_tx; tx->st_soft_rings = (mac_soft_ring_t **) kmem_zalloc(sizeof (mac_soft_ring_t *) * MAX_RINGS_PER_GROUP, KM_SLEEP);
*** 1593,1609 **** mac_tx_srs_update_bwlimit(flent->fe_tx_srs, mrp); } /* * When the first sub-flow is added to a link, we disable polling on the ! * link and also modify the entry point to mac_rx_srs_subflow_process. * (polling is disabled because with the subflow added, accounting * for polling needs additional logic, it is assumed that when a subflow is * added, we can take some hit as a result of disabling polling rather than * adding more complexity - if this becomes a perf. issue we need to * re-evaluate this logic). When the last subflow is removed, we turn back ! * polling and also reset the entry point to mac_rx_srs_process. * * In the future if there are multiple SRS, we can simply * take one and give it to the flow rather than disabling polling and * resetting the entry point. */ --- 1593,1609 ---- mac_tx_srs_update_bwlimit(flent->fe_tx_srs, mrp); } /* * When the first sub-flow is added to a link, we disable polling on the ! * link and also modify the entry point to mac_rx_srs_subflow_process(). * (polling is disabled because with the subflow added, accounting * for polling needs additional logic, it is assumed that when a subflow is * added, we can take some hit as a result of disabling polling rather than * adding more complexity - if this becomes a perf. issue we need to * re-evaluate this logic). When the last subflow is removed, we turn back ! * polling and also reset the entry point to mac_rx_srs_process(). * * In the future if there are multiple SRS, we can simply * take one and give it to the flow rather than disabling polling and * resetting the entry point. */
*** 1644,1654 **** /* * Change the S/W classifier so that we can land in the * correct processing function with correct argument. * If all subflows have been removed we can revert to ! * mac_rx_srsprocess, else we need mac_rx_srs_subflow_process. */ mutex_enter(&flent->fe_lock); flent->fe_cb_fn = (flow_fn_t)rx_func; flent->fe_cb_arg1 = (void *)mip; flent->fe_cb_arg2 = flent->fe_rx_srs[0]; --- 1644,1654 ---- /* * Change the S/W classifier so that we can land in the * correct processing function with correct argument. * If all subflows have been removed we can revert to ! * mac_rx_srs_process(), else we need mac_rx_srs_subflow_process(). */ mutex_enter(&flent->fe_lock); flent->fe_cb_fn = (flow_fn_t)rx_func; flent->fe_cb_arg1 = (void *)mip; flent->fe_cb_arg2 = flent->fe_rx_srs[0];
*** 2183,2193 **** * stay in poll mode but don't poll the H/W for more packets. * 4) Anytime in polling mode, if we poll the H/W for packets and * find nothing plus we have an existing backlog * (sr_poll_pkt_cnt > 0), we stay in polling mode but don't poll * the H/W for packets anymore (let the polling thread go to sleep). ! * 5) Once the backlog is relived (packets are processed) we reenable * polling (by signalling the poll thread) only when the backlog * dips below sr_poll_thres. * 6) sr_hiwat is used exclusively when we are not polling capable * and is used to decide when to drop packets so the SRS queue * length doesn't grow infinitely. --- 2183,2193 ---- * stay in poll mode but don't poll the H/W for more packets. * 4) Anytime in polling mode, if we poll the H/W for packets and * find nothing plus we have an existing backlog * (sr_poll_pkt_cnt > 0), we stay in polling mode but don't poll * the H/W for packets anymore (let the polling thread go to sleep). ! * 5) Once the backlog is relieved (packets are processed) we reenable * polling (by signalling the poll thread) only when the backlog * dips below sr_poll_thres. * 6) sr_hiwat is used exclusively when we are not polling capable * and is used to decide when to drop packets so the SRS queue * length doesn't grow infinitely.
*** 2254,2265 **** mac_rx_srs_poll_ring, mac_srs, 0, &p0, TS_RUN, mac_srs->srs_pri); /* * Some drivers require serialization and don't send * packet chains in interrupt context. For such ! * drivers, we should always queue in soft ring ! * so that we get a chance to switch into a polling * mode under backlog. */ ring_info = mac_hwring_getinfo((mac_ring_handle_t)ring); if (ring_info & MAC_RING_RX_ENQUEUE) mac_srs->srs_state |= SRS_SOFTRING_QUEUE; --- 2254,2265 ---- mac_rx_srs_poll_ring, mac_srs, 0, &p0, TS_RUN, mac_srs->srs_pri); /* * Some drivers require serialization and don't send * packet chains in interrupt context. For such ! * drivers, we should always queue in the soft ring ! * so that we get a chance to switch into polling * mode under backlog. */ ring_info = mac_hwring_getinfo((mac_ring_handle_t)ring); if (ring_info & MAC_RING_RX_ENQUEUE) mac_srs->srs_state |= SRS_SOFTRING_QUEUE;
*** 2362,2374 **** mac_set_pool_effective(use_default, cpupart, mrp, emrp); pool_unlock(); } /* ! * Set up the RX SRSs. If the S/W SRS is not set, set it up, if there ! * is a group associated with this MAC client, set up SRSs for individual ! * h/w rings. */ void mac_rx_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent, uint32_t link_type) { --- 2362,2376 ---- mac_set_pool_effective(use_default, cpupart, mrp, emrp); pool_unlock(); } /* ! * Set up the Rx SRSes. If there is no group associated with the ! * client, then only setup SW classification. If the client has ! * exclusive (MAC_GROUP_STATE_RESERVED) use of the group, then create an ! * SRS for each HW ring. If the client is sharing a group, then make ! * sure to teardown the HW SRSes. */ void mac_rx_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent, uint32_t link_type) {
*** 2375,2391 **** mac_impl_t *mip = mcip->mci_mip; mac_soft_ring_set_t *mac_srs; mac_ring_t *ring; uint32_t fanout_type; mac_group_t *rx_group = flent->fe_rx_ring_group; fanout_type = mac_find_fanout(flent, link_type); ! /* Create the SRS for S/W classification if none exists */ if (flent->fe_rx_srs[0] == NULL) { ASSERT(flent->fe_rx_srs_cnt == 0); - /* Setup the Rx SRS */ mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type, mac_rx_deliver, mcip, NULL, NULL); mutex_enter(&flent->fe_lock); flent->fe_cb_fn = (flow_fn_t)mac_srs->srs_rx.sr_lower_proc; flent->fe_cb_arg1 = (void *)mip; --- 2377,2394 ---- mac_impl_t *mip = mcip->mci_mip; mac_soft_ring_set_t *mac_srs; mac_ring_t *ring; uint32_t fanout_type; mac_group_t *rx_group = flent->fe_rx_ring_group; + boolean_t no_unicast; fanout_type = mac_find_fanout(flent, link_type); + no_unicast = (mcip->mci_state_flags & MCIS_NO_UNICAST_ADDR) != 0; ! /* Create the SRS for SW classification if none exists */ if (flent->fe_rx_srs[0] == NULL) { ASSERT(flent->fe_rx_srs_cnt == 0); mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type, mac_rx_deliver, mcip, NULL, NULL); mutex_enter(&flent->fe_lock); flent->fe_cb_fn = (flow_fn_t)mac_srs->srs_rx.sr_lower_proc; flent->fe_cb_arg1 = (void *)mip;
*** 2393,2434 **** mutex_exit(&flent->fe_lock); } if (rx_group == NULL) return; /* ! * fanout for default SRS is done when default SRS are created ! * above. As each ring is added to the group, we setup the ! * SRS and fanout to it. */ switch (rx_group->mrg_state) { case MAC_GROUP_STATE_RESERVED: for (ring = rx_group->mrg_rings; ring != NULL; ring = ring->mr_next) { switch (ring->mr_state) { case MR_INUSE: case MR_FREE: if (ring->mr_srs != NULL) break; if (ring->mr_state != MR_INUSE) (void) mac_start_ring(ring); /* ! * Since the group is exclusively ours create ! * an SRS for this ring to allow the ! * individual SRS to dynamically poll the ! * ring. Do this only if the client is not ! * a VLAN MAC client, since for VLAN we do ! * s/w classification for the VID check, and ! * if it has a unicast address. */ ! if ((mcip->mci_state_flags & ! MCIS_NO_UNICAST_ADDR) || ! i_mac_flow_vid(mcip->mci_flent) != ! VLAN_ID_NONE) { break; ! } mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type, mac_rx_deliver, mcip, NULL, ring); break; default: --- 2396,2442 ---- mutex_exit(&flent->fe_lock); } if (rx_group == NULL) return; + /* ! * If the group is marked RESERVED then setup an SRS and ! * fanout for each HW ring. */ switch (rx_group->mrg_state) { case MAC_GROUP_STATE_RESERVED: for (ring = rx_group->mrg_rings; ring != NULL; ring = ring->mr_next) { + uint16_t vid = i_mac_flow_vid(mcip->mci_flent); + switch (ring->mr_state) { case MR_INUSE: case MR_FREE: if (ring->mr_srs != NULL) break; if (ring->mr_state != MR_INUSE) (void) mac_start_ring(ring); /* ! * If a client requires SW VLAN ! * filtering or has no unicast address ! * then we don't create any HW ring ! * SRSes. */ ! if ((!MAC_GROUP_HW_VLAN(rx_group) && ! vid != VLAN_ID_NONE) || no_unicast) break; ! ! /* ! * When a client has exclusive use of ! * a group, and that group's traffic ! * is fully HW classified, we create ! * an SRS for each HW ring in order to ! * make use of dynamic polling of said ! * HW rings. ! */ mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type, mac_rx_deliver, mcip, NULL, ring); break; default:
*** 2440,2457 **** } } break; case MAC_GROUP_STATE_SHARED: /* ! * Set all rings of this group to software classified. ! * ! * If the group is current RESERVED, the existing mac ! * client (the only client on this group) is using ! * this group exclusively. In that case we need to ! * disable polling on the rings of the group (if it ! * was enabled), and free the SRS associated with the ! * rings. */ mac_rx_switch_grp_to_sw(rx_group); break; default: ASSERT(B_FALSE); --- 2448,2460 ---- } } break; case MAC_GROUP_STATE_SHARED: /* ! * When a group is shared by multiple clients, we must ! * use SW classification to ensure packets are ! * delivered to the correct client. */ mac_rx_switch_grp_to_sw(rx_group); break; default: ASSERT(B_FALSE);
*** 2500,2513 **** } mac_tx_srs_setup(mcip, flent); } /* ! * Remove all the RX SRSs. If we want to remove only the SRSs associated ! * with h/w rings, leave the S/W SRS alone. This is used when we want to ! * move the MAC client from one group to another, so we need to teardown ! * on the h/w SRSs. */ void mac_rx_srs_group_teardown(flow_entry_t *flent, boolean_t hwonly) { mac_soft_ring_set_t *mac_srs; --- 2503,2517 ---- } mac_tx_srs_setup(mcip, flent); } /* ! * Teardown all the Rx SRSes. If hwonly is set, then only teardown ! * the Rx HW SRSes and leave the SW SRS alone. The hwonly flag is set ! * when we wish to move a MAC client from one group to another. In ! * that case, we need to release the current HW SRSes but keep the SW ! * SRS for continued traffic classification. */ void mac_rx_srs_group_teardown(flow_entry_t *flent, boolean_t hwonly) { mac_soft_ring_set_t *mac_srs;
*** 2521,2532 **** mac_rx_srs_quiesce(mac_srs, SRS_CONDEMNED); mac_srs_free(mac_srs); flent->fe_rx_srs[i] = NULL; flent->fe_rx_srs_cnt--; } ! ASSERT(!hwonly || flent->fe_rx_srs_cnt == 1); ! ASSERT(hwonly || flent->fe_rx_srs_cnt == 0); } /* * Remove the TX SRS. */ --- 2525,2544 ---- mac_rx_srs_quiesce(mac_srs, SRS_CONDEMNED); mac_srs_free(mac_srs); flent->fe_rx_srs[i] = NULL; flent->fe_rx_srs_cnt--; } ! ! /* ! * If we are only tearing down the HW SRSes then there must be ! * one SRS left for SW classification. Otherwise we are tearing ! * down both HW and SW and there should be no SRSes left. ! */ ! if (hwonly) ! VERIFY3S(flent->fe_rx_srs_cnt, ==, 1); ! else ! VERIFY3S(flent->fe_rx_srs_cnt, ==, 0); } /* * Remove the TX SRS. */
*** 2824,2833 **** --- 2836,2846 ---- * For such clients (identified by the MCIS_NO_UNICAST_ADDR flag) we * always give the default group and use software classification (i.e. * even if this is the only client in the default group, we will * leave group as shared). */ + int mac_datapath_setup(mac_client_impl_t *mcip, flow_entry_t *flent, uint32_t link_type) { mac_impl_t *mip = mcip->mci_mip;
*** 2834,2843 **** --- 2847,2857 ---- mac_group_t *rgroup = NULL; mac_group_t *tgroup = NULL; mac_group_t *default_rgroup; mac_group_t *default_tgroup; int err; + uint16_t vid; uint8_t *mac_addr; mac_group_state_t next_state; mac_client_impl_t *group_only_mcip; mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip); mac_resource_props_t *emrp = MCIP_EFFECTIVE_PROPS(mcip);
*** 2846,2855 **** --- 2860,2870 ---- boolean_t use_default = B_FALSE; cpupart_t *cpupart; boolean_t no_unicast; boolean_t isprimary = flent->fe_type & FLOW_PRIMARY_MAC; mac_client_impl_t *reloc_pmcip = NULL; + boolean_t use_hw; ASSERT(MAC_PERIM_HELD((mac_handle_t)mip)); switch (link_type) { case SRST_FLOW:
*** 2877,2902 **** txhw = (mrp->mrp_mask & MRP_TX_RINGS) && (mrp->mrp_ntxrings > 0 || (mrp->mrp_mask & MRP_TXRINGS_UNSPEC)); /* ! * By default we have given the primary all the rings ! * i.e. the default group. Let's see if the primary ! * needs to be relocated so that the addition of this ! * client doesn't impact the primary's performance, ! * i.e. if the primary is in the default group and ! * we add this client, the primary will lose polling. ! * We do this only for NICs supporting dynamic ring ! * grouping and only when this is the first client ! * after the primary (i.e. nactiveclients is 2) */ if (!isprimary && mip->mi_nactiveclients == 2 && (group_only_mcip = mac_primary_client_handle(mip)) != NULL && mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) { reloc_pmcip = mac_check_primary_relocation( group_only_mcip, rxhw); } /* * Check to see if we can get an exclusive group for * this mac address or if there already exists a * group that has this mac address (case of VLANs). * If no groups are available, use the default group. --- 2892,2922 ---- txhw = (mrp->mrp_mask & MRP_TX_RINGS) && (mrp->mrp_ntxrings > 0 || (mrp->mrp_mask & MRP_TXRINGS_UNSPEC)); /* ! * All the rings initially belong to the default group ! * under dynamic grouping. The primary client uses the ! * default group when it is the only client. The ! * default group is also used as the destination for ! * all multicast and broadcast traffic of all clients. ! * Therefore, the primary client loses its ability to ! * poll the softrings on addition of a second client. ! * To avoid a performance penalty, MAC will move the ! * primary client to a dedicated group when it can. ! * ! * When using static grouping, the primary client ! * begins life on a non-default group. There is ! * no moving needed upon addition of a second client. ! */ if (!isprimary && mip->mi_nactiveclients == 2 && (group_only_mcip = mac_primary_client_handle(mip)) != NULL && mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) { reloc_pmcip = mac_check_primary_relocation( group_only_mcip, rxhw); } + /* * Check to see if we can get an exclusive group for * this mac address or if there already exists a * group that has this mac address (case of VLANs). * If no groups are available, use the default group.
*** 2906,2916 **** --- 2926,2956 ---- err = ENOSPC; goto setup_failed; } else if (rgroup == NULL) { rgroup = default_rgroup; } + /* + * If we are adding a second client to a + * non-default group then we need to move the + * existing client to the default group and + * add the new client to the default group as + * well. + */ + if (rgroup != default_rgroup && + rgroup->mrg_state == MAC_GROUP_STATE_RESERVED) { + group_only_mcip = MAC_GROUP_ONLY_CLIENT(rgroup); + err = mac_rx_switch_group(group_only_mcip, rgroup, + default_rgroup); + + if (err != 0) + goto setup_failed; + + rgroup = default_rgroup; + } + + /* * Check to see if we can get an exclusive group for * this mac client. If no groups are available, use * the default group. */ tgroup = mac_reserve_tx_group(mcip, B_FALSE);
*** 2937,2954 **** MAC_GROUP_TYPE_DYNAMIC) { MAC_RX_RING_RESERVED(mip, rgroup->mrg_cur_count); } } flent->fe_rx_ring_group = rgroup; /* ! * Add the client to the group. This could cause ! * either this group to move to the shared state or ! * cause the default group to move to the shared state. ! * The actions on this group are done here, while the ! * actions on the default group are postponed to ! * the end of this function. */ mac_group_add_client(rgroup, mcip); next_state = mac_group_next_state(rgroup, &group_only_mcip, default_rgroup, B_TRUE); mac_set_group_state(rgroup, next_state); --- 2977,2997 ---- MAC_GROUP_TYPE_DYNAMIC) { MAC_RX_RING_RESERVED(mip, rgroup->mrg_cur_count); } } + flent->fe_rx_ring_group = rgroup; /* ! * Add the client to the group and update the ! * group's state. If rgroup != default_group ! * then the rgroup should only ever have one ! * client and be in the RESERVED state. But no ! * matter what, the default_rgroup will enter ! * the SHARED state since it has to receive ! * all broadcast and multicast traffic. This ! * case is handled later in the function. */ mac_group_add_client(rgroup, mcip); next_state = mac_group_next_state(rgroup, &group_only_mcip, default_rgroup, B_TRUE); mac_set_group_state(rgroup, next_state);
*** 2969,3000 **** mac_group_add_client(tgroup, mcip); next_state = mac_group_next_state(tgroup, &group_only_mcip, default_tgroup, B_FALSE); tgroup->mrg_state = next_state; } - /* - * Setup the Rx and Tx SRSes. If we got a pristine group - * exclusively above, mac_srs_group_setup would simply create - * the required SRSes. If we ended up sharing a previously - * reserved group, mac_srs_group_setup would also dismantle the - * SRSes of the previously exclusive group - */ - mac_srs_group_setup(mcip, flent, link_type); /* We are setting up minimal datapath only */ ! if (no_unicast) break; ! /* Program the S/W Classifer */ if ((err = mac_flow_add(mip->mi_flow_tab, flent)) != 0) goto setup_failed; ! /* Program the H/W Classifier */ ! if ((err = mac_add_macaddr(mip, rgroup, mac_addr, ! (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0)) != 0) goto setup_failed; mcip->mci_unicast = mac_find_macaddr(mip, mac_addr); ! ASSERT(mcip->mci_unicast != NULL); /* (Re)init the v6 token & local addr used by link protection */ mac_protect_update_mac_token(mcip); break; default: --- 3012,3052 ---- mac_group_add_client(tgroup, mcip); next_state = mac_group_next_state(tgroup, &group_only_mcip, default_tgroup, B_FALSE); tgroup->mrg_state = next_state; } /* We are setting up minimal datapath only */ ! if (no_unicast) { ! mac_srs_group_setup(mcip, flent, link_type); break; ! } ! ! /* Program software classification. */ if ((err = mac_flow_add(mip->mi_flow_tab, flent)) != 0) goto setup_failed; ! /* Program hardware classification. */ ! vid = i_mac_flow_vid(flent); ! use_hw = (mcip->mci_state_flags & MCIS_UNICAST_HW) != 0; ! err = mac_add_macaddr_vlan(mip, rgroup, mac_addr, vid, use_hw); ! ! if (err != 0) goto setup_failed; + mcip->mci_unicast = mac_find_macaddr(mip, mac_addr); ! VERIFY3P(mcip->mci_unicast, !=, NULL); ! ! /* ! * Setup the Rx and Tx SRSes. If the client has a ! * reserved group, then mac_srs_group_setup() creates ! * the required SRSes for the HW rings. If we have a ! * shared group, mac_srs_group_setup() dismantles the ! * HW SRSes of the previously exclusive group. ! */ ! mac_srs_group_setup(mcip, flent, link_type); ! /* (Re)init the v6 token & local addr used by link protection */ mac_protect_update_mac_token(mcip); break; default:
*** 3034,3054 **** pool_unlock(); } ASSERT(default_rgroup->mrg_state == MAC_GROUP_STATE_SHARED); } /* ! * If we get an exclusive group for a VLAN MAC client we ! * need to take the s/w path to make the additional check for ! * the vid. Disable polling and set it to s/w classification. ! * Similarly for clients that don't have a unicast address. */ if (rgroup->mrg_state == MAC_GROUP_STATE_RESERVED && ! (i_mac_flow_vid(flent) != VLAN_ID_NONE || no_unicast)) { mac_rx_switch_grp_to_sw(rgroup); } } mac_set_rings_effective(mcip); return (0); setup_failed: /* Switch the primary back to default group */ --- 3086,3112 ---- pool_unlock(); } ASSERT(default_rgroup->mrg_state == MAC_GROUP_STATE_SHARED); } + /* ! * A VLAN MAC client on a reserved group still ! * requires SW classification if the MAC doesn't ! * provide VLAN HW filtering. ! * ! * Clients with no unicast address also require SW ! * classification. */ if (rgroup->mrg_state == MAC_GROUP_STATE_RESERVED && ! ((!MAC_GROUP_HW_VLAN(rgroup) && vid != VLAN_ID_NONE) || ! no_unicast)) { mac_rx_switch_grp_to_sw(rgroup); } + } + mac_set_rings_effective(mcip); return (0); setup_failed: /* Switch the primary back to default group */
*** 3070,3079 **** --- 3128,3138 ---- flow_entry_t *group_only_flent; mac_group_t *default_group; boolean_t check_default_group = B_FALSE; mac_group_state_t next_state; mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip); + uint16_t vid; ASSERT(MAC_PERIM_HELD((mac_handle_t)mip)); switch (link_type) { case SRST_FLOW:
*** 3082,3101 **** return; case SRST_LINK: /* Stop sending packets */ mac_tx_client_block(mcip); ! /* Stop the packets coming from the H/W */ if (mcip->mci_unicast != NULL) { int err; ! err = mac_remove_macaddr(mcip->mci_unicast); if (err != 0) { ! cmn_err(CE_WARN, "%s: failed to remove a MAC" ! " address because of error 0x%x", mip->mi_name, err); } mcip->mci_unicast = NULL; } /* Stop the packets coming from the S/W classifier */ mac_flow_remove(mip->mi_flow_tab, flent, B_FALSE); --- 3141,3168 ---- return; case SRST_LINK: /* Stop sending packets */ mac_tx_client_block(mcip); + group = flent->fe_rx_ring_group; + vid = i_mac_flow_vid(flent); ! /* ! * Stop the packet flow from the hardware by disabling ! * any hardware filters assigned to this client. ! */ if (mcip->mci_unicast != NULL) { int err; ! ! err = mac_remove_macaddr_vlan(mcip->mci_unicast, vid); ! if (err != 0) { ! cmn_err(CE_WARN, "%s: failed to remove a MAC HW" ! " filters because of error 0x%x", mip->mi_name, err); } + mcip->mci_unicast = NULL; } /* Stop the packets coming from the S/W classifier */ mac_flow_remove(mip->mi_flow_tab, flent, B_FALSE);
*** 3112,3132 **** * Release our hold on the group as well. We need * to check if the shared group has only one client * left who can use it exclusively. Also, if we * were the last client, release the group. */ - group = flent->fe_rx_ring_group; default_group = MAC_DEFAULT_RX_GROUP(mip); if (group != NULL) { mac_group_remove_client(group, mcip); next_state = mac_group_next_state(group, &grp_only_mcip, default_group, B_TRUE); if (next_state == MAC_GROUP_STATE_RESERVED) { /* * Only one client left on this RX group. */ ! ASSERT(grp_only_mcip != NULL); mac_set_group_state(group, MAC_GROUP_STATE_RESERVED); group_only_flent = grp_only_mcip->mci_flent; /* --- 3179,3199 ---- * Release our hold on the group as well. We need * to check if the shared group has only one client * left who can use it exclusively. Also, if we * were the last client, release the group. */ default_group = MAC_DEFAULT_RX_GROUP(mip); if (group != NULL) { mac_group_remove_client(group, mcip); next_state = mac_group_next_state(group, &grp_only_mcip, default_group, B_TRUE); + if (next_state == MAC_GROUP_STATE_RESERVED) { /* * Only one client left on this RX group. */ ! VERIFY3P(grp_only_mcip, !=, NULL); mac_set_group_state(group, MAC_GROUP_STATE_RESERVED); group_only_flent = grp_only_mcip->mci_flent; /*
*** 3147,3157 **** * This is a non-default group being freed up. * We need to reevaluate the default group * to see if the primary client can get * exclusive access to the default group. */ ! ASSERT(group != MAC_DEFAULT_RX_GROUP(mip)); if (mrp->mrp_mask & MRP_RX_RINGS) { MAC_RX_GRP_RELEASED(mip); if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) { MAC_RX_RING_RELEASED(mip, --- 3214,3224 ---- * This is a non-default group being freed up. * We need to reevaluate the default group * to see if the primary client can get * exclusive access to the default group. */ ! VERIFY3P(group, !=, MAC_DEFAULT_RX_GROUP(mip)); if (mrp->mrp_mask & MRP_RX_RINGS) { MAC_RX_GRP_RELEASED(mip); if (mip->mi_rx_group_type == MAC_GROUP_TYPE_DYNAMIC) { MAC_RX_RING_RELEASED(mip,
*** 3161,3171 **** mac_release_rx_group(mcip, group); mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED); check_default_group = B_TRUE; } else { ! ASSERT(next_state == MAC_GROUP_STATE_SHARED); mac_set_group_state(group, MAC_GROUP_STATE_SHARED); mac_rx_group_unmark(group, MR_CONDEMNED); } flent->fe_rx_ring_group = NULL; --- 3228,3239 ---- mac_release_rx_group(mcip, group); mac_set_group_state(group, MAC_GROUP_STATE_REGISTERED); check_default_group = B_TRUE; } else { ! VERIFY3S(next_state, ==, ! MAC_GROUP_STATE_SHARED); mac_set_group_state(group, MAC_GROUP_STATE_SHARED); mac_rx_group_unmark(group, MR_CONDEMNED); } flent->fe_rx_ring_group = NULL;
*** 3250,3265 **** * mip. If so set the group state to reserved, and set up the SRSes * over the default group. */ if (check_default_group) { default_group = MAC_DEFAULT_RX_GROUP(mip); ! ASSERT(default_group->mrg_state == MAC_GROUP_STATE_SHARED); next_state = mac_group_next_state(default_group, &grp_only_mcip, default_group, B_TRUE); if (next_state == MAC_GROUP_STATE_RESERVED) { ! ASSERT(grp_only_mcip != NULL && ! mip->mi_nactiveclients == 1); mac_set_group_state(default_group, MAC_GROUP_STATE_RESERVED); mac_rx_srs_group_setup(grp_only_mcip, grp_only_mcip->mci_flent, SRST_LINK); mac_fanout_setup(grp_only_mcip, --- 3318,3333 ---- * mip. If so set the group state to reserved, and set up the SRSes * over the default group. */ if (check_default_group) { default_group = MAC_DEFAULT_RX_GROUP(mip); ! VERIFY3S(default_group->mrg_state, ==, MAC_GROUP_STATE_SHARED); next_state = mac_group_next_state(default_group, &grp_only_mcip, default_group, B_TRUE); if (next_state == MAC_GROUP_STATE_RESERVED) { ! VERIFY3P(grp_only_mcip, !=, NULL); ! VERIFY3U(mip->mi_nactiveclients, ==, 1); mac_set_group_state(default_group, MAC_GROUP_STATE_RESERVED); mac_rx_srs_group_setup(grp_only_mcip, grp_only_mcip->mci_flent, SRST_LINK); mac_fanout_setup(grp_only_mcip,
*** 3779,3789 **** /* * In the case of aggr, the soft ring associated with a Tx ring * is also stored in st_soft_rings[] array. That entry should * be removed. */ ! if (mcip->mci_state_flags & MCIS_IS_AGGR) { mac_srs_tx_t *tx = &mac_srs->srs_tx; ASSERT(tx->st_soft_rings[tx_ring->mr_index] == remove_sring); tx->st_soft_rings[tx_ring->mr_index] = NULL; } --- 3847,3857 ---- /* * In the case of aggr, the soft ring associated with a Tx ring * is also stored in st_soft_rings[] array. That entry should * be removed. */ ! if (mcip->mci_state_flags & MCIS_IS_AGGR_CLIENT) { mac_srs_tx_t *tx = &mac_srs->srs_tx; ASSERT(tx->st_soft_rings[tx_ring->mr_index] == remove_sring); tx->st_soft_rings[tx_ring->mr_index] = NULL; }
*** 3808,3818 **** mac_ring_t *ring; mac_srs_tx_t *tx = &tx_srs->srs_tx; boolean_t is_aggr; uint_t ring_info = 0; ! is_aggr = (mcip->mci_state_flags & MCIS_IS_AGGR) != 0; grp = flent->fe_tx_ring_group; if (grp == NULL) { ring = (mac_ring_t *)mip->mi_default_tx_ring; goto no_group; } --- 3876,3886 ---- mac_ring_t *ring; mac_srs_tx_t *tx = &tx_srs->srs_tx; boolean_t is_aggr; uint_t ring_info = 0; ! is_aggr = (mcip->mci_state_flags & MCIS_IS_AGGR_CLIENT) != 0; grp = flent->fe_tx_ring_group; if (grp == NULL) { ring = (mac_ring_t *)mip->mi_default_tx_ring; goto no_group; }