Print this page
11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/io/mac/mac_sched.c
          +++ new/usr/src/uts/common/io/mac/mac_sched.c
↓ open down ↓ 13 lines elided ↑ open up ↑
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24      - * Copyright 2017 Joyent, Inc.
       24 + * Copyright 2018 Joyent, Inc.
  25   25   * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  26   26   */
  27   27  
  28   28  /*
  29   29   * MAC data path
  30   30   *
  31   31   * The MAC data path is concerned with the flow of traffic from mac clients --
  32   32   * DLS, IP, etc. -- to various GLDv3 device drivers -- e1000g, vnic, aggr,
  33   33   * ixgbe, etc. -- and from the GLDv3 device drivers back to clients.
  34   34   *
↓ open down ↓ 258 lines elided ↑ open up ↑
 293  293   * MAC makes the determination as to which of these modes a given soft ring set
 294  294   * obtains based on parameters such as whether or not it's the primary mac
 295  295   * client, whether it's on a 10 GbE or faster device, user controlled dladm(1M)
 296  296   * properties, and the nature of the hardware and the resources that it has.
 297  297   *
 298  298   * When there is no fanout, MAC does not create any soft rings for a device and
 299  299   * the device has frames delivered directly to the MAC client.
 300  300   *
 301  301   * Otherwise, all fanout is performed by software. MAC divides incoming frames
 302  302   * into one of three buckets -- IPv4 TCP traffic, IPv4 UDP traffic, and
 303      - * everything else. Note, VLAN tagged traffic is considered other, regardless of
 304      - * the interior EtherType. Regardless of the type of fanout, these three
 305      - * categories or buckets are always used.
      303 + * everything else. Regardless of the type of fanout, these three categories
      304 + * or buckets are always used.
 306  305   *
 307  306   * The difference between protocol level fanout and full software ring protocol
 308  307   * fanout is the number of software rings that end up getting created. The
 309  308   * system always uses the same number of software rings per protocol bucket. So
 310  309   * in the first case when we're just doing protocol level fanout, we just create
 311  310   * one software ring each for IPv4 TCP traffic, IPv4 UDP traffic, and everything
 312  311   * else.
 313  312   *
 314  313   * In the case where we do full software ring protocol fanout, we generally use
 315  314   * mac_compute_soft_ring_count() to determine the number of rings. There are
↓ open down ↓ 1152 lines elided ↑ open up ↑
1468 1467          OTH,
1469 1468          UNDEF
1470 1469  };
1471 1470  
1472 1471  /*
1473 1472   * Pair of local and remote ports in the transport header
1474 1473   */
1475 1474  #define PORTS_SIZE 4
1476 1475  
1477 1476  /*
1478      - * mac_rx_srs_proto_fanout
1479      - *
1480      - * This routine delivers packets destined to an SRS into one of the
     1477 + * This routine delivers packets destined for an SRS into one of the
1481 1478   * protocol soft rings.
1482 1479   *
1483      - * Given a chain of packets we need to split it up into multiple sub chains
1484      - * destined into TCP, UDP or OTH soft ring. Instead of entering
1485      - * the soft ring one packet at a time, we want to enter it in the form of a
1486      - * chain otherwise we get this start/stop behaviour where the worker thread
1487      - * goes to sleep and then next packets comes in forcing it to wake up etc.
     1480 + * Given a chain of packets we need to split it up into multiple sub
     1481 + * chains: TCP, UDP or OTH soft ring. Instead of entering the soft
     1482 + * ring one packet at a time, we want to enter it in the form of a
     1483 + * chain otherwise we get this start/stop behaviour where the worker
     1484 + * thread goes to sleep and then next packet comes in forcing it to
     1485 + * wake up.
1488 1486   */
1489 1487  static void
1490 1488  mac_rx_srs_proto_fanout(mac_soft_ring_set_t *mac_srs, mblk_t *head)
1491 1489  {
1492 1490          struct ether_header             *ehp;
1493 1491          struct ether_vlan_header        *evhp;
1494 1492          uint32_t                        sap;
1495 1493          ipha_t                          *ipha;
1496 1494          uint8_t                         *dstaddr;
1497 1495          size_t                          hdrsize;
↓ open down ↓ 18 lines elided ↑ open up ↑
1516 1514           * If we don't have a Rx ring, S/W classification would have done
1517 1515           * its job and its a packet meant for us. If we were polling on
1518 1516           * the default ring (i.e. there was a ring assigned to this SRS),
1519 1517           * then we need to make sure that the mac address really belongs
1520 1518           * to us.
1521 1519           */
1522 1520          hw_classified = mac_srs->srs_ring != NULL &&
1523 1521              mac_srs->srs_ring->mr_classify_type == MAC_HW_CLASSIFIER;
1524 1522  
1525 1523          /*
1526      -         * Special clients (eg. VLAN, non ether, etc) need DLS
1527      -         * processing in the Rx path. SRST_DLS_BYPASS will be clear for
1528      -         * such SRSs. Another way of disabling bypass is to set the
      1524 +         * Some clients, such as non-Ethernet, need DLS processing in
     1525 +         * the Rx path. Such clients clear the SRST_DLS_BYPASS flag.
     1526 +         * DLS bypass may also be disabled via the
1529 1527           * MCIS_RX_BYPASS_DISABLE flag.
1530 1528           */
1531 1529          dls_bypass = ((mac_srs->srs_type & SRST_DLS_BYPASS) != 0) &&
1532 1530              ((mcip->mci_state_flags & MCIS_RX_BYPASS_DISABLE) == 0);
1533 1531  
1534 1532          bzero(headmp, MAX_SR_TYPES * sizeof (mblk_t *));
1535 1533          bzero(tailmp, MAX_SR_TYPES * sizeof (mblk_t *));
1536 1534          bzero(cnt, MAX_SR_TYPES * sizeof (int));
1537 1535          bzero(sz, MAX_SR_TYPES * sizeof (size_t));
1538 1536  
1539 1537          /*
1540      -         * We got a chain from SRS that we need to send to the soft rings.
1541      -         * Since squeues for TCP & IPv4 sap poll their soft rings (for
1542      -         * performance reasons), we need to separate out v4_tcp, v4_udp
1543      -         * and the rest goes in other.
     1538 +         * We have a chain from SRS that we need to split across the
     1539 +         * soft rings. The squeues for the TCP and IPv4 SAPs use their
     1540 +         * own soft rings to allow polling from the squeue. The rest of
     1541 +         * the packets are delivered on the OTH soft ring which cannot
     1542 +         * be polled.
1544 1543           */
1545 1544          while (head != NULL) {
1546 1545                  mp = head;
1547 1546                  head = head->b_next;
1548 1547                  mp->b_next = NULL;
1549 1548  
1550 1549                  type = OTH;
1551 1550                  sz1 = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
1552 1551  
1553 1552                  if (is_ether) {
↓ open down ↓ 7 lines elided ↑ open up ↑
1561 1560                          }
1562 1561                          ehp = (struct ether_header *)mp->b_rptr;
1563 1562  
1564 1563                          /*
1565 1564                           * Determine if this is a VLAN or non-VLAN packet.
1566 1565                           */
1567 1566                          if ((sap = ntohs(ehp->ether_type)) == VLAN_TPID) {
1568 1567                                  evhp = (struct ether_vlan_header *)mp->b_rptr;
1569 1568                                  sap = ntohs(evhp->ether_type);
1570 1569                                  hdrsize = sizeof (struct ether_vlan_header);
     1570 +
1571 1571                                  /*
1572      -                                 * Check if the VID of the packet, if any,
1573      -                                 * belongs to this client.
     1572 +                                 * Check if the VID of the packet, if
     1573 +                                 * any, belongs to this client.
     1574 +                                 * Technically, if this packet came up
     1575 +                                 * via a HW classified ring then we
     1576 +                                 * don't need to perform this check.
     1577 +                                 * Perhaps a future optimization.
1574 1578                                   */
1575 1579                                  if (!mac_client_check_flow_vid(mcip,
1576 1580                                      VLAN_ID(ntohs(evhp->ether_tci)))) {
1577 1581                                          mac_rx_drop_pkt(mac_srs, mp);
1578 1582                                          continue;
1579 1583                                  }
1580 1584                          } else {
1581 1585                                  hdrsize = sizeof (struct ether_header);
1582 1586                          }
1583 1587                          is_unicast =
↓ open down ↓ 44 lines elided ↑ open up ↑
1628 1632                   *
1629 1633                   * In the normal case the packet will have at least the L2
1630 1634                   * header and the IP + Transport header in the same mblk.
1631 1635                   * This is usually the case when the NIC driver sends up
1632 1636                   * the packet. This is also true when the stack generates
1633 1637                   * a packet that is looped back and when the stack uses the
1634 1638                   * fastpath mechanism. The normal case is optimized for
1635 1639                   * performance and may bypass DLS. All other cases go through
1636 1640                   * the 'OTH' type path without DLS bypass.
1637 1641                   */
1638      -
1639 1642                  ipha = (ipha_t *)(mp->b_rptr + hdrsize);
1640 1643                  if ((type != OTH) && MBLK_RX_FANOUT_SLOWPATH(mp, ipha))
1641 1644                          type = OTH;
1642 1645  
1643 1646                  if (type == OTH) {
1644 1647                          FANOUT_ENQUEUE_MP(headmp[type], tailmp[type],
1645 1648                              cnt[type], bw_ctl, sz[type], sz1, mp);
1646 1649                          continue;
1647 1650                  }
1648 1651  
1649 1652                  ASSERT(type == UNDEF);
     1653 +
1650 1654                  /*
1651      -                 * We look for at least 4 bytes past the IP header to get
1652      -                 * the port information. If we get an IP fragment, we don't
1653      -                 * have the port information, and we use just the protocol
1654      -                 * information.
     1655 +                 * Determine the type from the IP protocol value. If
     1656 +                 * classified as TCP or UDP, then update the read
     1657 +                 * pointer to the beginning of the IP header.
     1658 +                 * Otherwise leave the message as is for further
     1659 +                 * processing by DLS.
1655 1660                   */
1656 1661                  switch (ipha->ipha_protocol) {
1657 1662                  case IPPROTO_TCP:
1658 1663                          type = V4_TCP;
1659 1664                          mp->b_rptr += hdrsize;
1660 1665                          break;
1661 1666                  case IPPROTO_UDP:
1662 1667                          type = V4_UDP;
1663 1668                          mp->b_rptr += hdrsize;
1664 1669                          break;
↓ open down ↓ 23 lines elided ↑ open up ↑
1688 1693                          }
1689 1694                          mac_rx_soft_ring_process(mcip, softring,
1690 1695                              headmp[type], tailmp[type], cnt[type], sz[type]);
1691 1696                  }
1692 1697          }
1693 1698  }
1694 1699  
1695 1700  int     fanout_unaligned = 0;
1696 1701  
1697 1702  /*
1698      - * mac_rx_srs_long_fanout
1699      - *
1700      - * The fanout routine for VLANs, and for anything else that isn't performing
1701      - * explicit dls bypass.  Returns -1 on an error (drop the packet due to a
1702      - * malformed packet), 0 on success, with values written in *indx and *type.
     1703 + * The fanout routine for any clients with DLS bypass disabled or for
     1704 + * traffic classified as "other". Returns -1 on an error (drop the
     1705 + * packet due to a malformed packet), 0 on success, with values
     1706 + * written in *indx and *type.
1703 1707   */
1704 1708  static int
1705 1709  mac_rx_srs_long_fanout(mac_soft_ring_set_t *mac_srs, mblk_t *mp,
1706 1710      uint32_t sap, size_t hdrsize, enum pkt_type *type, uint_t *indx)
1707 1711  {
1708 1712          ip6_t           *ip6h;
1709 1713          ipha_t          *ipha;
1710 1714          uint8_t         *whereptr;
1711 1715          uint_t          hash;
1712 1716          uint16_t        remlen;
↓ open down ↓ 145 lines elided ↑ open up ↑
1858 1862          return (0);
1859 1863  
1860 1864  src_dst_based_fanout:
1861 1865          hash = HASH_ADDR(src_val, dst_val, (uint32_t)0);
1862 1866          *indx = COMPUTE_INDEX(hash, mac_srs->srs_oth_ring_count);
1863 1867          *type = OTH;
1864 1868          return (0);
1865 1869  }
1866 1870  
1867 1871  /*
1868      - * mac_rx_srs_fanout
1869      - *
1870      - * This routine delivers packets destined to an SRS into a soft ring member
     1872 + * This routine delivers packets destined for an SRS into a soft ring member
1871 1873   * of the set.
1872 1874   *
1873      - * Given a chain of packets we need to split it up into multiple sub chains
1874      - * destined for one of the TCP, UDP or OTH soft rings. Instead of entering
1875      - * the soft ring one packet at a time, we want to enter it in the form of a
1876      - * chain otherwise we get this start/stop behaviour where the worker thread
1877      - * goes to sleep and then next packets comes in forcing it to wake up etc.
     1875 + * Given a chain of packets we need to split it up into multiple sub
     1876 + * chains: TCP, UDP or OTH soft ring. Instead of entering the soft
     1877 + * ring one packet at a time, we want to enter it in the form of a
     1878 + * chain otherwise we get this start/stop behaviour where the worker
     1879 + * thread goes to sleep and then next packet comes in forcing it to
     1880 + * wake up.
1878 1881   *
1879 1882   * Note:
1880 1883   * Since we know what is the maximum fanout possible, we create a 2D array
1881 1884   * of 'softring types * MAX_SR_FANOUT' for the head, tail, cnt and sz
1882 1885   * variables so that we can enter the softrings with chain. We need the
1883 1886   * MAX_SR_FANOUT so we can allocate the arrays on the stack (a kmem_alloc
1884 1887   * for each packet would be expensive). If we ever want to have the
1885 1888   * ability to have unlimited fanout, we should probably declare a head,
1886 1889   * tail, cnt, sz with each soft ring (a data struct which contains a softring
1887 1890   * along with these members) and create an array of this uber struct so we
↓ open down ↓ 40 lines elided ↑ open up ↑
1928 1931           * If we don't have a Rx ring, S/W classification would have done
1929 1932           * its job and its a packet meant for us. If we were polling on
1930 1933           * the default ring (i.e. there was a ring assigned to this SRS),
1931 1934           * then we need to make sure that the mac address really belongs
1932 1935           * to us.
1933 1936           */
1934 1937          hw_classified = mac_srs->srs_ring != NULL &&
1935 1938              mac_srs->srs_ring->mr_classify_type == MAC_HW_CLASSIFIER;
1936 1939  
1937 1940          /*
1938      -         * Special clients (eg. VLAN, non ether, etc) need DLS
1939      -         * processing in the Rx path. SRST_DLS_BYPASS will be clear for
1940      -         * such SRSs. Another way of disabling bypass is to set the
1941      -         * MCIS_RX_BYPASS_DISABLE flag.
      1941 +         * Some clients, such as non-Ethernet, need DLS processing in
     1942 +         * the Rx path. Such clients clear the SRST_DLS_BYPASS flag.
     1943 +         * DLS bypass may also be disabled via the
     1944 +         * MCIS_RX_BYPASS_DISABLE flag, but this is only consumed by
     1945 +         * sun4v vsw currently.
1942 1946           */
1943 1947          dls_bypass = ((mac_srs->srs_type & SRST_DLS_BYPASS) != 0) &&
1944 1948              ((mcip->mci_state_flags & MCIS_RX_BYPASS_DISABLE) == 0);
1945 1949  
1946 1950          /*
1947 1951           * Since the softrings are never destroyed and we always
1948 1952           * create equal number of softrings for TCP, UDP and rest,
1949 1953           * its OK to check one of them for count and use it without
1950 1954           * any lock. In future, if soft rings get destroyed because
1951 1955           * of reduction in fanout, we will need to ensure that happens
↓ open down ↓ 1 lines elided ↑ open up ↑
1953 1957           */
1954 1958          fanout_cnt = mac_srs->srs_tcp_ring_count;
1955 1959  
1956 1960          bzero(headmp, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (mblk_t *));
1957 1961          bzero(tailmp, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (mblk_t *));
1958 1962          bzero(cnt, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (int));
1959 1963          bzero(sz, MAX_SR_TYPES * MAX_SR_FANOUT * sizeof (size_t));
1960 1964  
1961 1965          /*
1962 1966           * We got a chain from SRS that we need to send to the soft rings.
1963      -         * Since squeues for TCP & IPv4 sap poll their soft rings (for
     1967 +         * Since squeues for TCP & IPv4 SAP poll their soft rings (for
1964 1968           * performance reasons), we need to separate out v4_tcp, v4_udp
1965 1969           * and the rest goes in other.
1966 1970           */
1967 1971          while (head != NULL) {
1968 1972                  mp = head;
1969 1973                  head = head->b_next;
1970 1974                  mp->b_next = NULL;
1971 1975  
1972 1976                  type = OTH;
1973 1977                  sz1 = (mp->b_cont == NULL) ? MBLKL(mp) : msgdsize(mp);
↓ open down ↓ 9 lines elided ↑ open up ↑
1983 1987                          }
1984 1988                          ehp = (struct ether_header *)mp->b_rptr;
1985 1989  
1986 1990                          /*
1987 1991                           * Determine if this is a VLAN or non-VLAN packet.
1988 1992                           */
1989 1993                          if ((sap = ntohs(ehp->ether_type)) == VLAN_TPID) {
1990 1994                                  evhp = (struct ether_vlan_header *)mp->b_rptr;
1991 1995                                  sap = ntohs(evhp->ether_type);
1992 1996                                  hdrsize = sizeof (struct ether_vlan_header);
     1997 +
1993 1998                                  /*
1994      -                                 * Check if the VID of the packet, if any,
1995      -                                 * belongs to this client.
     1999 +                                 * Check if the VID of the packet, if
     2000 +                                 * any, belongs to this client.
     2001 +                                 * Technically, if this packet came up
     2002 +                                 * via a HW classified ring then we
     2003 +                                 * don't need to perform this check.
     2004 +                                 * Perhaps a future optimization.
1996 2005                                   */
1997 2006                                  if (!mac_client_check_flow_vid(mcip,
1998 2007                                      VLAN_ID(ntohs(evhp->ether_tci)))) {
1999 2008                                          mac_rx_drop_pkt(mac_srs, mp);
2000 2009                                          continue;
2001 2010                                  }
2002 2011                          } else {
2003 2012                                  hdrsize = sizeof (struct ether_header);
2004 2013                          }
2005 2014                          is_unicast =
↓ open down ↓ 19 lines elided ↑ open up ↑
2025 2034                                  mac_rx_drop_pkt(mac_srs, mp);
2026 2035                                  continue;
2027 2036                          }
2028 2037  
2029 2038                          FANOUT_ENQUEUE_MP(headmp[type][indx],
2030 2039                              tailmp[type][indx], cnt[type][indx], bw_ctl,
2031 2040                              sz[type][indx], sz1, mp);
2032 2041                          continue;
2033 2042                  }
2034 2043  
2035      -
2036 2044                  /*
2037 2045                   * If we are using the default Rx ring where H/W or S/W
2038 2046                   * classification has not happened, we need to verify if
2039 2047                   * this unicast packet really belongs to us.
2040 2048                   */
2041 2049                  if (sap == ETHERTYPE_IP) {
2042 2050                          /*
2043 2051                           * If we are H/W classified, but we have promisc
2044 2052                           * on, then we need to check for the unicast address.
2045 2053                           */
↓ open down ↓ 568 lines elided ↑ open up ↑
2614 2622          mac_srs->srs_count = 0;
2615 2623  
2616 2624          ASSERT(head != NULL);
2617 2625          ASSERT(tail != NULL);
2618 2626  
2619 2627          if ((tid = mac_srs->srs_tid) != NULL)
2620 2628                  mac_srs->srs_tid = NULL;
2621 2629  
2622 2630          mac_srs->srs_state |= (SRS_PROC|proc_type);
2623 2631  
2624      -
2625 2632          /*
2626 2633           * mcip is NULL for broadcast and multicast flows. The promisc
2627 2634           * callbacks for broadcast and multicast packets are delivered from
2628 2635           * mac_rx() and we don't need to worry about that case in this path
2629 2636           */
2630 2637          if (mcip != NULL) {
2631 2638                  if (mcip->mci_promisc_list != NULL) {
2632 2639                          mutex_exit(&mac_srs->srs_lock);
2633 2640                          mac_promisc_client_dispatch(mcip, head);
2634 2641                          mutex_enter(&mac_srs->srs_lock);
2635 2642                  }
2636 2643                  if (MAC_PROTECT_ENABLED(mcip, MPT_IPNOSPOOF)) {
2637 2644                          mutex_exit(&mac_srs->srs_lock);
2638 2645                          mac_protect_intercept_dynamic(mcip, head);
2639 2646                          mutex_enter(&mac_srs->srs_lock);
2640 2647                  }
2641 2648          }
2642 2649  
2643 2650          /*
2644      -         * Check if SRS itself is doing the processing
2645      -         * This direct path does not apply when subflows are present. In this
2646      -         * case, packets need to be dispatched to a soft ring according to the
2647      -         * flow's bandwidth and other resources contraints.
     2651 +         * Check if SRS itself is doing the processing. This direct
     2652 +         * path applies only when subflows are present.
2648 2653           */
2649 2654          if (mac_srs->srs_type & SRST_NO_SOFT_RINGS) {
2650 2655                  mac_direct_rx_t         proc;
2651 2656                  void                    *arg1;
2652 2657                  mac_resource_handle_t   arg2;
2653 2658  
2654 2659                  /*
2655 2660                   * This is the case when a Rx is directly
2656 2661                   * assigned and we have a fully classified
2657 2662                   * protocol chain. We can deal with it in
↓ open down ↓ 1991 lines elided ↑ open up ↑
4649 4654              !(mcip->mci_state_flags & MCIS_STRIP_DISABLE)) {
4650 4655                  /*
4651 4656                   * If the client has exactly one VID associated with it
4652 4657                   * and striping of VLAN header is not disabled,
4653 4658                   * remove the VLAN tag from the packet before
4654 4659                   * passing it on to the client's receive callback.
4655 4660                   * Note that this needs to be done after we dispatch
4656 4661                   * the packet to the promiscuous listeners of the
4657 4662                   * client, since they expect to see the whole
4658 4663                   * frame including the VLAN headers.
     4664 +                 *
      4665 +                 * The MCIS_STRIP_DISABLE flag is only set when sun4v
     4666 +                 * vsw is in play.
4659 4667                   */
4660 4668                  mp_chain = mac_strip_vlan_tag_chain(mp_chain);
4661 4669          }
4662 4670  
4663 4671          mcip->mci_rx_fn(mcip->mci_rx_arg, mrh, mp_chain, B_FALSE);
4664 4672  }
4665 4673  
4666 4674  /*
4667      - * mac_rx_soft_ring_process
     4675 + * Process a chain for a given soft ring. If the number of packets
     4676 + * queued in the SRS and its associated soft rings (including this
     4677 + * one) is very small (tracked by srs_poll_pkt_cnt) then allow the
     4678 + * entering thread (interrupt or poll thread) to process the chain
     4679 + * inline. This is meant to reduce latency under low load.
4668 4680   *
4669      - * process a chain for a given soft ring. The number of packets queued
4670      - * in the SRS and its associated soft rings (including this one) is
4671      - * very small (tracked by srs_poll_pkt_cnt), then allow the entering
4672      - * thread (interrupt or poll thread) to do inline processing. This
4673      - * helps keep the latency down under low load.
4674      - *
4675 4681   * The proc and arg for each mblk is already stored in the mblk in
4676 4682   * appropriate places.
4677 4683   */
4678 4684  /* ARGSUSED */
4679 4685  void
4680 4686  mac_rx_soft_ring_process(mac_client_impl_t *mcip, mac_soft_ring_t *ringp,
4681 4687      mblk_t *mp_chain, mblk_t *tail, int cnt, size_t sz)
4682 4688  {
4683 4689          mac_direct_rx_t         proc;
4684 4690          void                    *arg1;
↓ open down ↓ 37 lines elided ↑ open up ↑
4722 4728                          /*
4723 4729                           * We are the chain of 1 packet so
4724 4730                           * go through this fast path.
4725 4731                           */
4726 4732                          ASSERT(mp_chain->b_next == NULL);
4727 4733  
4728 4734                          (*proc)(arg1, arg2, mp_chain, NULL);
4729 4735  
4730 4736                          ASSERT(MUTEX_NOT_HELD(&ringp->s_ring_lock));
4731 4737                          /*
4732      -                         * If we have a soft ring set which is doing
4733      -                         * bandwidth control, we need to decrement
4734      -                         * srs_size and count so it the SRS can have a
4735      -                         * accurate idea of what is the real data
4736      -                         * queued between SRS and its soft rings. We
4737      -                         * decrement the counters only when the packet
4738      -                         * gets processed by both SRS and the soft ring.
     4738 +                         * If we have an SRS performing bandwidth
     4739 +                         * control then we need to decrement the size
     4740 +                         * and count so the SRS has an accurate count
     4741 +                         * of the data queued between the SRS and its
     4742 +                         * soft rings. We decrement the counters only
     4743 +                         * when the packet is processed by both the
     4744 +                         * SRS and the soft ring.
4739 4745                           */
4740 4746                          mutex_enter(&mac_srs->srs_lock);
4741 4747                          MAC_UPDATE_SRS_COUNT_LOCKED(mac_srs, cnt);
4742 4748                          MAC_UPDATE_SRS_SIZE_LOCKED(mac_srs, sz);
4743 4749                          mutex_exit(&mac_srs->srs_lock);
4744 4750  
4745 4751                          mutex_enter(&ringp->s_ring_lock);
4746 4752                          ringp->s_ring_run = NULL;
4747 4753                          ringp->s_ring_state &= ~S_RING_PROC;
4748 4754                          if (ringp->s_ring_state & S_RING_CLIENT_WAIT)
4749 4755                                  cv_signal(&ringp->s_ring_client_cv);
4750 4756  
4751 4757                          if ((ringp->s_ring_first == NULL) ||
4752 4758                              (ringp->s_ring_state & S_RING_BLANK)) {
4753 4759                                  /*
4754      -                                 * We processed inline our packet and
4755      -                                 * nothing new has arrived or our
     4760 +                                 * We processed a single packet inline
     4761 +                                 * and nothing new has arrived or our
4756 4762                                   * receiver doesn't want to receive
4757 4763                                   * any packets. We are done.
4758 4764                                   */
4759 4765                                  mutex_exit(&ringp->s_ring_lock);
4760 4766                                  return;
4761 4767                          }
4762 4768                  } else {
4763 4769                          SOFT_RING_ENQUEUE_CHAIN(ringp,
4764 4770                              mp_chain, tail, cnt, sz);
4765 4771                  }
↓ open down ↓ 229 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX