1958 if (mac_rx_srs->srs_type & SRST_FANOUT_PROTO) {
1959 mutex_enter(&cpu_lock);
1960 cpuid = mac_next_bind_cpu(cpupart);
1961 /* Create the protocol softrings */
1962 mac_srs_create_proto_softrings(0, soft_ring_flag,
1963 mac_rx_srs->srs_pri, mcip, mac_rx_srs, cpuid,
1964 rx_func, x_arg1, x_arg2, B_FALSE);
1965 mutex_exit(&cpu_lock);
1966 } else {
1967 /*
1968			 * This is the case where there is no fanout, which is
1969			 * true for subflows.
1970 */
1971 mac_rx_srs->srs_type |= SRST_NO_SOFT_RINGS;
1972 }
1973 mac_srs_update_fanout_list(mac_rx_srs);
1974 mac_srs_client_poll_enable(mcip, mac_rx_srs);
1975 }
1976
1977 /*
1978 * Calls mac_srs_fanout_init() or modify() depending upon whether
1979 * the SRS is getting initialized or re-initialized.
1980 */
1981 void
1982 mac_fanout_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
1983 mac_resource_props_t *mrp, mac_direct_rx_t rx_func, void *x_arg1,
1984 mac_resource_handle_t x_arg2, cpupart_t *cpupart)
1985 {
1986 mac_soft_ring_set_t *mac_rx_srs, *mac_tx_srs;
1987 int i, rx_srs_cnt;
1988
1989 ASSERT(MAC_PERIM_HELD((mac_handle_t)mcip->mci_mip));
1990
1991 /*
1992 * Aggr ports do not have SRSes. This function should never be
1993 * called on an aggr port.
1994 */
1995 ASSERT3U((mcip->mci_state_flags & MCIS_IS_AGGR_PORT), ==, 0);
1996 mac_rx_srs = flent->fe_rx_srs[0];
1997
1998 /*
1999	 * Set up the fanout on the Tx side only once, with the
2000	 * first Rx SRS. The CPU binding, fanout, and bandwidth
2001	 * criteria are common to both Rx and Tx, so
2002	 * initializing them alongside avoids redundant code.
2003 */
2004 mac_tx_srs = flent->fe_tx_srs;
2005 rx_srs_cnt = flent->fe_rx_srs_cnt;
2006
2007 /* No fanout for subflows */
2008 if (flent->fe_type & FLOW_USER) {
2009 mac_srs_fanout_init(mcip, mrp, rx_func,
2010 x_arg1, x_arg2, mac_rx_srs, mac_tx_srs,
2011 cpupart);
2012 return;
2013 }
2014
2015 if (mrp->mrp_mask & MRP_CPUS_USERSPEC)
2016 mac_flow_user_cpu_init(flent, mrp);
2017 else
2033 x_arg1, x_arg2, mac_rx_srs, mac_tx_srs,
2034 cpupart);
2035 break;
2036 case SRS_FANOUT_INIT:
2037 break;
2038 case SRS_FANOUT_REINIT:
2039 mac_rx_srs_quiesce(mac_rx_srs, SRS_QUIESCE);
2040 mac_srs_fanout_modify(mcip, rx_func, x_arg1,
2041 x_arg2, mac_rx_srs, mac_tx_srs);
2042 mac_rx_srs_restart(mac_rx_srs);
2043 break;
2044 default:
2045 VERIFY(mac_rx_srs->srs_fanout_state <=
2046 SRS_FANOUT_REINIT);
2047 break;
2048 }
2049 }
2050 }
2051
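/*
 * Illustrative sketch, not part of the original file: the
 * SRS_FANOUT_REINIT case of mac_fanout_setup() pulled out on its own
 * to highlight the quiesce -> modify -> restart ordering. The helper
 * name is invented; every function it calls appears above. Assumes
 * the MAC perimeter is held, as mac_fanout_setup() asserts.
 */
static void
mac_fanout_reinit_sketch(mac_client_impl_t *mcip, flow_entry_t *flent,
    mac_direct_rx_t rx_func, void *x_arg1, mac_resource_handle_t x_arg2)
{
        mac_soft_ring_set_t *rx_srs = flent->fe_rx_srs[0];
        mac_soft_ring_set_t *tx_srs = flent->fe_tx_srs;

        /* Stop SRS processing before changing the fanout. */
        mac_rx_srs_quiesce(rx_srs, SRS_QUIESCE);
        /* Redistribute the soft rings and CPU bindings. */
        mac_srs_fanout_modify(mcip, rx_func, x_arg1, x_arg2, rx_srs, tx_srs);
        /* Resume packet processing on the re-fanned-out SRS. */
        mac_rx_srs_restart(rx_srs);
}
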
2052 /*
2053 * Create a mac_soft_ring_set_t (SRS). If srs_type is SRST_TX, an SRS
2054 * for the Tx side is created; otherwise an SRS for Rx side processing
2055 * is created.
2056 *
2057 * Details on Rx SRS:
2058 * Create an SRS and also add the necessary soft rings for TCP and
2059 * non-TCP based on the fanout type and count specified.
2060 *
2061 * mac_soft_ring_fanout, mac_srs_fanout_modify (?),
2062 * mac_soft_ring_stop_workers, mac_soft_ring_set_destroy, etc. need
2063 * to be heavily modified.
2064 *
2065 * mi_soft_ring_list_size, mi_soft_ring_size, etc. need to disappear.
2066 */
2067 mac_soft_ring_set_t *
2068 mac_srs_create(mac_client_impl_t *mcip, flow_entry_t *flent, uint32_t srs_type,
2069 mac_direct_rx_t rx_func, void *x_arg1, mac_resource_handle_t x_arg2,
2070 mac_ring_t *ring)
2071 {
2072 mac_soft_ring_set_t *mac_srs;
2334 ring->mr_classify_type = MAC_SW_CLASSIFIER;
2335 }
2336 }
2337
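/*
 * Illustrative sketch, not part of the original file: how the SW
 * classification SRS for a flow is typically obtained, mirroring the
 * mac_srs_create() call made in mac_rx_srs_group_setup() below. The
 * helper name is invented; fanout_type and link_type are assumed to
 * come from mac_find_fanout() and the caller respectively.
 */
static mac_soft_ring_set_t *
mac_srs_create_sw_sketch(mac_client_impl_t *mcip, flow_entry_t *flent,
    uint32_t fanout_type, uint32_t link_type)
{
        /*
         * No x_arg2 and no HW ring are passed: this SRS backs the SW
         * classifier rather than a specific hardware ring.
         */
        return (mac_srs_create(mcip, flent, fanout_type | link_type,
            mac_rx_deliver, mcip, NULL, NULL));
}
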
2338 /*
2339 * Create the Rx SRS for the S/W classifier and for each ring in the
2340 * group (if it is an exclusive group). Also create the Tx SRS.
2341 */
2342 void
2343 mac_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
2344 uint32_t link_type)
2345 {
2346 cpupart_t *cpupart;
2347 mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
2348 mac_resource_props_t *emrp = MCIP_EFFECTIVE_PROPS(mcip);
2349 boolean_t use_default = B_FALSE;
2350
2351 mac_rx_srs_group_setup(mcip, flent, link_type);
2352 mac_tx_srs_group_setup(mcip, flent, link_type);
2353
2354 /* Aggr ports don't have SRSes; thus there is no soft ring fanout. */
2355 if ((mcip->mci_state_flags & MCIS_IS_AGGR_PORT) != 0)
2356 return;
2357
2358 pool_lock();
2359 cpupart = mac_pset_find(mrp, &use_default);
2360 mac_fanout_setup(mcip, flent, MCIP_RESOURCE_PROPS(mcip),
2361 mac_rx_deliver, mcip, NULL, cpupart);
2362 mac_set_pool_effective(use_default, cpupart, mrp, emrp);
2363 pool_unlock();
2364 }
2365
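/*
 * Illustrative sketch, not part of the original file: the cpu
 * partition selection pattern used above and again in
 * mac_fanout_recompute(). Both call sites hold the pool lock across
 * mac_pset_find(), the fanout setup, and the publication of the
 * effective pool properties. The helper name is invented.
 */
static void
mac_fanout_with_pool_sketch(mac_client_impl_t *mcip, flow_entry_t *flent)
{
        mac_resource_props_t *mrp = MCIP_RESOURCE_PROPS(mcip);
        mac_resource_props_t *emrp = MCIP_EFFECTIVE_PROPS(mcip);
        boolean_t use_default = B_FALSE;
        cpupart_t *cpupart;

        pool_lock();
        cpupart = mac_pset_find(mrp, &use_default);
        mac_fanout_setup(mcip, flent, mrp, mac_rx_deliver, mcip, NULL,
            cpupart);
        mac_set_pool_effective(use_default, cpupart, mrp, emrp);
        pool_unlock();
}
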
2366 /*
2367 * Set up the Rx SRSes. If there is no group associated with the
2368 * client, then only set up SW classification. If the client has
2369 * exclusive (MAC_GROUP_STATE_RESERVED) use of the group, then create an
2370 * SRS for each HW ring. If the client is sharing a group, then make
2371 * sure to tear down the HW SRSes.
2372 */
2373 void
2374 mac_rx_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
2375 uint32_t link_type)
2376 {
2377 mac_impl_t *mip = mcip->mci_mip;
2378 mac_soft_ring_set_t *mac_srs;
2379 mac_ring_t *ring;
2380 uint32_t fanout_type;
2381 mac_group_t *rx_group = flent->fe_rx_ring_group;
2382 boolean_t no_unicast;
2383
2384 /*
2385	 * If this is an aggr port, then don't set up Rx SRS and Rx
2386 * soft rings as they won't be used. However, we still need to
2387 * start the rings to receive data on them.
2388 */
2389 if (mcip->mci_state_flags & MCIS_IS_AGGR_PORT) {
2390 if (rx_group == NULL)
2391 return;
2392
2393 for (ring = rx_group->mrg_rings; ring != NULL;
2394 ring = ring->mr_next) {
2395 if (ring->mr_state != MR_INUSE)
2396 (void) mac_start_ring(ring);
2397 }
2398
2399 return;
2400 }
2401
2402 /*
2403 * Aggr ports should never have SRSes.
2404 */
2405 ASSERT3U((mcip->mci_state_flags & MCIS_IS_AGGR_PORT), ==, 0);
2406
2407 fanout_type = mac_find_fanout(flent, link_type);
2408 no_unicast = (mcip->mci_state_flags & MCIS_NO_UNICAST_ADDR) != 0;
2409
2410 /* Create the SRS for SW classification if none exists */
2411 if (flent->fe_rx_srs[0] == NULL) {
2412 ASSERT(flent->fe_rx_srs_cnt == 0);
2413 mac_srs = mac_srs_create(mcip, flent, fanout_type | link_type,
2414 mac_rx_deliver, mcip, NULL, NULL);
2415 mutex_enter(&flent->fe_lock);
2416 flent->fe_cb_fn = (flow_fn_t)mac_srs->srs_rx.sr_lower_proc;
2417 flent->fe_cb_arg1 = (void *)mip;
2418 flent->fe_cb_arg2 = (void *)mac_srs;
2419 mutex_exit(&flent->fe_lock);
2420 }
2421
2422 if (rx_group == NULL)
2423 return;
2424
2425 /*
2426	 * If the group is marked RESERVED then set up an SRS and
2475 /*
2476 * When a group is shared by multiple clients, we must
2477		 * use SW classification to ensure packets are
2478 * delivered to the correct client.
2479 */
2480 mac_rx_switch_grp_to_sw(rx_group);
2481 break;
2482 default:
2483 ASSERT(B_FALSE);
2484 break;
2485 }
2486 }
2487
2488 /*
2489 * Set up the TX SRS.
2490 */
2491 void
2492 mac_tx_srs_group_setup(mac_client_impl_t *mcip, flow_entry_t *flent,
2493 uint32_t link_type)
2494 {
2495 /*
2496	 * If this is an exclusive client (e.g., an aggr port), then
2497	 * don't set up Tx SRS and Tx soft rings as they won't be used.
2498 * However, we still need to start the rings to send data
2499 * across them.
2500 */
2501 if (mcip->mci_state_flags & MCIS_EXCLUSIVE) {
2502 mac_ring_t *ring;
2503 mac_group_t *grp;
2504
2505 grp = (mac_group_t *)flent->fe_tx_ring_group;
2506
2507 if (grp == NULL)
2508 return;
2509
2510 for (ring = grp->mrg_rings; ring != NULL;
2511 ring = ring->mr_next) {
2512 if (ring->mr_state != MR_INUSE)
2513 (void) mac_start_ring(ring);
2514 }
2515
2516 return;
2517 }
2518
2519 /*
2520 * Aggr ports should never have SRSes.
2521 */
2522 ASSERT3U((mcip->mci_state_flags & MCIS_IS_AGGR_PORT), ==, 0);
2523
2524 if (flent->fe_tx_srs == NULL) {
2525 (void) mac_srs_create(mcip, flent, SRST_TX | link_type,
2526 NULL, mcip, NULL, NULL);
2527 }
2528
2529 mac_tx_srs_setup(mcip, flent);
2530 }
2531
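/*
 * Illustrative sketch, not part of the original file: the two ways the
 * teardown function documented below is typically invoked, assuming
 * its declaration from the MAC headers is in scope. With hwonly set,
 * the SW classification SRS (fe_rx_srs[0]) survives so the client can
 * keep receiving while it is moved to another group; otherwise
 * everything is torn down. The helper and its argument are invented
 * for illustration only.
 */
static void
mac_rx_srs_teardown_sketch(flow_entry_t *flent, boolean_t moving_groups)
{
        if (moving_groups) {
                /* Release the HW SRSes only; keep the SW SRS. */
                mac_rx_srs_group_teardown(flent, B_TRUE);
        } else {
                /* Full teardown, including the SW SRS. */
                mac_rx_srs_group_teardown(flent, B_FALSE);
        }
}
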
2532 /*
2533 * Tear down all the Rx SRSes. If hwonly is set, tear down only the
2534 * Rx HW SRSes and leave the SW SRS alone. The hwonly flag is set
2535 * when we wish to move a MAC client from one group to another. In
2536 * that case, we need to release the current HW SRSes but keep the SW
2537 * SRS for continued traffic classification.
2538 */
2539 void
2540 mac_rx_srs_group_teardown(flow_entry_t *flent, boolean_t hwonly)
2541 {
2542 mac_soft_ring_set_t *mac_srs;
2543 int i;
2544 int count = flent->fe_rx_srs_cnt;
2545
2546 for (i = 0; i < count; i++) {
2547 if (i == 0 && hwonly)
2548 continue;
3176 * any hardware filters assigned to this client.
3177 */
3178 if (mcip->mci_unicast != NULL) {
3179 int err;
3180
3181 err = mac_remove_macaddr_vlan(mcip->mci_unicast, vid);
3182
3183 if (err != 0) {
3184			cmn_err(CE_WARN, "%s: failed to remove MAC HW"
3185			    " filters because of error 0x%x",
3186 mip->mi_name, err);
3187 }
3188
3189 mcip->mci_unicast = NULL;
3190 }
3191
3192 /* Stop the packets coming from the S/W classifier */
3193 mac_flow_remove(mip->mi_flow_tab, flent, B_FALSE);
3194 mac_flow_wait(flent, FLOW_DRIVER_UPCALL);
3195
3196 /* Quiesce and destroy all the SRSes. */
3197 mac_rx_srs_group_teardown(flent, B_FALSE);
3198 mac_tx_srs_group_teardown(mcip, flent, SRST_LINK);
3199
3200 ASSERT3P(mcip->mci_flent, ==, flent);
3201 ASSERT3P(flent->fe_next, ==, NULL);
3202
3203 /*
3204 * Release our hold on the group as well. We need
3205 * to check if the shared group has only one client
3206 * left who can use it exclusively. Also, if we
3207 * were the last client, release the group.
3208 */
3209 default_group = MAC_DEFAULT_RX_GROUP(mip);
3210 if (group != NULL) {
3211 mac_group_remove_client(group, mcip);
3212 next_state = mac_group_next_state(group,
3213 &grp_only_mcip, default_group, B_TRUE);
3214
3215 if (next_state == MAC_GROUP_STATE_RESERVED) {
3216 /*
3217 * Only one client left on this RX group.
3218 */
3219 VERIFY3P(grp_only_mcip, !=, NULL);
3220 mac_set_group_state(group,
3221 MAC_GROUP_STATE_RESERVED);
4030 flent->fe_rx_srs_cnt - 1, maxcpus);
4031 /*
4032 * If soft_ring_count returned by
4033 * mac_compute_soft_ring_count() is 0, bump it
4034		 * up by 1 because we always have at least one
4035 * TCP, UDP, and OTH soft ring associated with
4036 * an SRS.
4037 */
4038 soft_ring_count = (soft_ring_count == 0) ?
4039 1 : soft_ring_count;
4040 rx_srs = flent->fe_rx_srs[0];
4041 srs_cpu = &rx_srs->srs_cpu;
4042 if (soft_ring_count != srs_cpu->mc_rx_fanout_cnt) {
4043 mac_fanout_setup(mcip, flent, mcip_mrp,
4044 mac_rx_deliver, mcip, NULL, cpupart);
4045 }
4046 }
4047 }
4048
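/*
 * Illustrative sketch, not part of the original file: the clamp
 * applied just above (after mac_compute_soft_ring_count()), expressed
 * as a tiny helper. An SRS always carries at least one TCP, one UDP,
 * and one OTH soft ring, so a computed fanout of 0 is promoted to 1.
 * The helper name and its uint_t type are assumptions.
 */
static uint_t
mac_soft_ring_count_clamp_sketch(uint_t computed)
{
        return (computed == 0 ? 1 : computed);
}
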
4049 /*
4050 * Walk through the list of MAC clients for the MAC.
4051 * For each active MAC client, recompute the number of soft rings it
4052 * uses, but only if the current speed differs from the speed that
4053 * was previously used for the soft ring computation.
4054 * If the cable is disconnected while the NIC is started, we get a
4055 * notification with the speed set to 0. We do not recompute in that case.
4056 */
4057 void
4058 mac_fanout_recompute(mac_impl_t *mip)
4059 {
4060 mac_client_impl_t *mcip;
4061 cpupart_t *cpupart;
4062 boolean_t use_default;
4063 mac_resource_props_t *mrp, *emrp;
4064
4065 i_mac_perim_enter(mip);
4066 if ((mip->mi_state_flags & MIS_IS_VNIC) != 0 ||
4067 mip->mi_linkstate != LINK_STATE_UP) {
4068 i_mac_perim_exit(mip);
4069 return;
4070 }
4071
4072 for (mcip = mip->mi_clients_list; mcip != NULL;
4073 mcip = mcip->mci_client_next) {
4074 /* Aggr port clients don't have SRSes. */
4075 if ((mcip->mci_state_flags & MCIS_IS_AGGR_PORT) != 0)
4076 continue;
4077
4078 if ((mcip->mci_state_flags & MCIS_SHARE_BOUND) != 0 ||
4079 !MCIP_DATAPATH_SETUP(mcip))
4080 continue;
4081 mrp = MCIP_RESOURCE_PROPS(mcip);
4082 emrp = MCIP_EFFECTIVE_PROPS(mcip);
4083 use_default = B_FALSE;
4084 pool_lock();
4085 cpupart = mac_pset_find(mrp, &use_default);
4086 mac_fanout_recompute_client(mcip, cpupart);
4087 mac_set_pool_effective(use_default, cpupart, mrp, emrp);
4088 pool_unlock();
4089 }
4090
4091 i_mac_perim_exit(mip);
4092 }
4093
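/*
 * Illustrative sketch, not part of the original file: toggling the
 * per-MAC polling state with the function documented below, assuming
 * its declaration is visible here. The helper name is invented, and
 * "mh" is assumed to be a valid mac_handle_t obtained elsewhere.
 */
static void
mac_polling_pause_sketch(mac_handle_t mh)
{
        /* Force interrupt-driven receive by disabling polling. */
        mac_poll_state_change(mh, B_FALSE);

        /* ... interrupt-only operation ... */

        /* Restore the default: polling enabled. */
        mac_poll_state_change(mh, B_TRUE);
}
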
4094 /*
4095 * Given a MAC, change the polling state for all its MAC clients. 'enable' is
4096 * B_TRUE to enable polling or B_FALSE to disable. Polling is enabled by
4097 * default.
4098 */
4099 void
4100 mac_poll_state_change(mac_handle_t mh, boolean_t enable)
4101 {
4102 mac_impl_t *mip = (mac_impl_t *)mh;
4103 mac_client_impl_t *mcip;
4104
4105 i_mac_perim_enter(mip);
4106 if (enable)
4107 mip->mi_state_flags &= ~MIS_POLL_DISABLE;
4108 else
4109 mip->mi_state_flags |= MIS_POLL_DISABLE;
4110 for (mcip = mip->mi_clients_list; mcip != NULL;