11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>

          --- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
          +++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
↓ open down ↓ 49 lines elided ↑ open up ↑
  50   50  static int ixgbe_chip_start(ixgbe_t *);
  51   51  static void ixgbe_chip_stop(ixgbe_t *);
  52   52  static int ixgbe_reset(ixgbe_t *);
  53   53  static void ixgbe_tx_clean(ixgbe_t *);
  54   54  static boolean_t ixgbe_tx_drain(ixgbe_t *);
  55   55  static boolean_t ixgbe_rx_drain(ixgbe_t *);
  56   56  static int ixgbe_alloc_rings(ixgbe_t *);
  57   57  static void ixgbe_free_rings(ixgbe_t *);
  58   58  static int ixgbe_alloc_rx_data(ixgbe_t *);
  59   59  static void ixgbe_free_rx_data(ixgbe_t *);
  60      -static void ixgbe_setup_rings(ixgbe_t *);
  61      -static void ixgbe_setup_rx(ixgbe_t *);
       60 +static int ixgbe_setup_rings(ixgbe_t *);
       61 +static int ixgbe_setup_rx(ixgbe_t *);
  62   62  static void ixgbe_setup_tx(ixgbe_t *);
  63   63  static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
  64   64  static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
  65   65  static void ixgbe_setup_rss(ixgbe_t *);
  66   66  static void ixgbe_setup_vmdq(ixgbe_t *);
  67   67  static void ixgbe_setup_vmdq_rss(ixgbe_t *);
  68   68  static void ixgbe_setup_rss_table(ixgbe_t *);
  69   69  static void ixgbe_init_unicst(ixgbe_t *);
       70 +static int ixgbe_init_vlan(ixgbe_t *);
  70   71  static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
  71   72  static void ixgbe_setup_multicst(ixgbe_t *);
  72   73  static void ixgbe_get_hw_state(ixgbe_t *);
  73   74  static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
  74   75  static void ixgbe_get_conf(ixgbe_t *);
  75   76  static void ixgbe_init_params(ixgbe_t *);
  76   77  static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
  77   78  static void ixgbe_driver_link_check(ixgbe_t *);
  78   79  static void ixgbe_sfp_check(void *);
  79   80  static void ixgbe_overtemp_check(void *);
↓ open down ↓ 26 lines elided ↑ open up ↑
 106  107  static int ixgbe_disable_intrs(ixgbe_t *);
 107  108  static uint_t ixgbe_intr_legacy(void *, void *);
 108  109  static uint_t ixgbe_intr_msi(void *, void *);
 109  110  static uint_t ixgbe_intr_msix(void *, void *);
 110  111  static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
 111  112  static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
 112  113  static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
 113  114  static void ixgbe_get_driver_control(struct ixgbe_hw *);
 114  115  static int ixgbe_addmac(void *, const uint8_t *);
 115  116  static int ixgbe_remmac(void *, const uint8_t *);
      117 +static int ixgbe_addvlan(mac_group_driver_t, uint16_t);
      118 +static int ixgbe_remvlan(mac_group_driver_t, uint16_t);
 116  119  static void ixgbe_release_driver_control(struct ixgbe_hw *);
 117  120  
 118  121  static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
 119  122  static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
 120  123  static int ixgbe_resume(dev_info_t *);
 121  124  static int ixgbe_suspend(dev_info_t *);
 122  125  static int ixgbe_quiesce(dev_info_t *);
 123  126  static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
 124  127  static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
 125  128  static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
↓ open down ↓ 1026 lines elided ↑ open up ↑
1152 1155                  rx_ring->index = i;
1153 1156                  rx_ring->ixgbe = ixgbe;
1154 1157                  rx_ring->group_index = i / ring_per_group;
1155 1158                  rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1156 1159          }
1157 1160  
1158 1161          for (i = 0; i < ixgbe->num_rx_groups; i++) {
1159 1162                  rx_group = &ixgbe->rx_groups[i];
1160 1163                  rx_group->index = i;
1161 1164                  rx_group->ixgbe = ixgbe;
     1165 +                list_create(&rx_group->vlans, sizeof (ixgbe_vlan_t),
     1166 +                    offsetof(ixgbe_vlan_t, ixvl_link));
1162 1167          }
1163 1168  
1164 1169          for (i = 0; i < ixgbe->num_tx_rings; i++) {
1165 1170                  tx_ring = &ixgbe->tx_rings[i];
1166 1171                  tx_ring->index = i;
1167 1172                  tx_ring->ixgbe = ixgbe;
1168 1173                  if (ixgbe->tx_head_wb_enable)
1169 1174                          tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1170 1175                  else
1171 1176                          tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
↓ open down ↓ 730 lines elided ↑ open up ↑
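
The per-group vlans list created above tracks every VLAN filter a group has claimed so that ixgbe_init_vlan() can replay the hardware state after a reset. The supporting declarations live in ixgbe_sw.h (changed elsewhere in this webrev); a minimal sketch of the entry type, inferred from how this file uses the fields:

typedef struct ixgbe_vlan {
	list_node_t	ixvl_link;	/* linkage on rx_group->vlans */
	uint16_t	ixvl_vid;	/* VLAN ID, 1 - 4094 */
	uint_t		ixvl_refs;	/* clients referencing this VID */
} ixgbe_vlan_t;

The other soft-state fields this file relies on (rx_group->aupe, ixgbe->vlft_enabled, ixgbe->vlft_init and ixgbe->rx_def_group) are likewise declared in ixgbe_sw.h.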
1902 1907                  ixgbe_get_hw_state(ixgbe);
1903 1908          }
1904 1909  
1905 1910          if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1906 1911                  goto start_failure;
1907 1912          }
1908 1913  
1909 1914          /*
1910 1915           * Setup the rx/tx rings
1911 1916           */
1912      -        ixgbe_setup_rings(ixgbe);
     1917 +        if (ixgbe_setup_rings(ixgbe) != IXGBE_SUCCESS)
     1918 +                goto start_failure;
1913 1919  
1914 1920          /*
1915 1921           * ixgbe_start() will be called when resetting, however if reset
1916 1922           * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1917 1923           * before enabling the interrupts.
1918 1924           */
1919 1925          atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1920 1926              | IXGBE_STALL| IXGBE_OVERTEMP));
1921 1927  
1922 1928          /*
↓ open down ↓ 352 lines elided ↑ open up ↑
2275 2281                      sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2276 2282                  ixgbe->rx_rings = NULL;
2277 2283          }
2278 2284  
2279 2285          if (ixgbe->tx_rings != NULL) {
2280 2286                  kmem_free(ixgbe->tx_rings,
2281 2287                      sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2282 2288                  ixgbe->tx_rings = NULL;
2283 2289          }
2284 2290  
     2291 +        for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
     2292 +                ixgbe_vlan_t *vlp;
     2293 +                ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[i];
     2294 +
     2295 +                while ((vlp = list_remove_head(&rx_group->vlans)) != NULL)
     2296 +                        kmem_free(vlp, sizeof (ixgbe_vlan_t));
     2297 +
     2298 +                list_destroy(&rx_group->vlans);
     2299 +        }
     2300 +
2285 2301          if (ixgbe->rx_groups != NULL) {
2286 2302                  kmem_free(ixgbe->rx_groups,
2287 2303                      sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2288 2304                  ixgbe->rx_groups = NULL;
2289 2305          }
2290 2306  }
2291 2307  
2292 2308  static int
2293 2309  ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2294 2310  {
↓ open down ↓ 34 lines elided ↑ open up ↑
2329 2345                          }
2330 2346                  }
2331 2347  
2332 2348                  mutex_exit(&ixgbe->rx_pending_lock);
2333 2349          }
2334 2350  }
2335 2351  
2336 2352  /*
2337 2353   * ixgbe_setup_rings - Setup rx/tx rings.
2338 2354   */
2339      -static void
     2355 +static int
2340 2356  ixgbe_setup_rings(ixgbe_t *ixgbe)
2341 2357  {
2342 2358          /*
2343 2359           * Setup the rx/tx rings, including the following:
2344 2360           *
2345 2361           * 1. Setup the descriptor ring and the control block buffers;
2346 2362           * 2. Initialize necessary registers for receive/transmit;
2347 2363           * 3. Initialize software pointers/parameters for receive/transmit;
2348 2364           */
2349      -        ixgbe_setup_rx(ixgbe);
     2365 +        if (ixgbe_setup_rx(ixgbe) != IXGBE_SUCCESS)
     2366 +                return (IXGBE_FAILURE);
2350 2367  
2351 2368          ixgbe_setup_tx(ixgbe);
     2369 +
     2370 +        return (IXGBE_SUCCESS);
2352 2371  }
2353 2372  
2354 2373  static void
2355 2374  ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2356 2375  {
2357 2376          ixgbe_t *ixgbe = rx_ring->ixgbe;
2358 2377          ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2359 2378          struct ixgbe_hw *hw = &ixgbe->hw;
2360 2379          rx_control_block_t *rcb;
2361 2380          union ixgbe_adv_rx_desc *rbd;
↓ open down ↓ 66 lines elided ↑ open up ↑
2428 2447          /*
2429 2448           * Setup the Split and Replication Receive Control Register.
2430 2449           * Set the rx buffer size and the advanced descriptor type.
2431 2450           */
2432 2451          reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2433 2452              IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2434 2453          reg_val |= IXGBE_SRRCTL_DROP_EN;
2435 2454          IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2436 2455  }
2437 2456  
2438      -static void
     2457 +static int
2439 2458  ixgbe_setup_rx(ixgbe_t *ixgbe)
2440 2459  {
2441 2460          ixgbe_rx_ring_t *rx_ring;
2442 2461          struct ixgbe_hw *hw = &ixgbe->hw;
2443 2462          uint32_t reg_val;
2444 2463          uint32_t i;
2445 2464          uint32_t psrtype_rss_bit;
2446 2465  
2447 2466          /*
2448 2467           * Ensure that Rx is disabled while setting up
↓ open down ↓ 72 lines elided ↑ open up ↑
2521 2540                   * VMDq and RSS are needed.
2522 2541                   */
2523 2542                  ixgbe_setup_vmdq_rss(ixgbe);
2524 2543                  break;
2525 2544  
2526 2545          default:
2527 2546                  break;
2528 2547          }
2529 2548  
2530 2549          /*
     2550 +         * Initialize VLAN SW and HW state if VLAN filtering is
     2551 +         * enabled.
     2552 +         */
     2553 +        if (ixgbe->vlft_enabled) {
     2554 +                if (ixgbe_init_vlan(ixgbe) != IXGBE_SUCCESS)
     2555 +                        return (IXGBE_FAILURE);
     2556 +        }
     2557 +
     2558 +        /*
2531 2559           * Enable the receive unit.  This must be done after filter
2532 2560           * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2533 2561           * 82598 is the only adapter which defines this RXCTRL option.
2534 2562           */
2535 2563          reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2536 2564          if (hw->mac.type == ixgbe_mac_82598EB)
2537 2565                  reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2538 2566          reg_val |= IXGBE_RXCTRL_RXEN;
2539 2567          (void) ixgbe_enable_rx_dma(hw, reg_val);
2540 2568  
↓ open down ↓ 70 lines elided ↑ open up ↑
2611 2639                  reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2612 2640                  IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2613 2641  
2614 2642                  reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2615 2643                  reg_val |= IXGBE_RDRXCTL_RSCACKC;
2616 2644                  reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2617 2645                  reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2618 2646  
2619 2647                  IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2620 2648          }
     2649 +
     2650 +        return (IXGBE_SUCCESS);
2621 2651  }
2622 2652  
2623 2653  static void
2624 2654  ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2625 2655  {
2626 2656          ixgbe_t *ixgbe = tx_ring->ixgbe;
2627 2657          struct ixgbe_hw *hw = &ixgbe->hw;
2628 2658          uint32_t size;
2629 2659          uint32_t buf_low;
2630 2660          uint32_t buf_high;
↓ open down ↓ 181 lines elided ↑ open up ↑
2812 2842          IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2813 2843  }
2814 2844  
2815 2845  /*
2816 2846   * ixgbe_setup_vmdq - Setup MAC classification feature
2817 2847   */
2818 2848  static void
2819 2849  ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2820 2850  {
2821 2851          struct ixgbe_hw *hw = &ixgbe->hw;
2822      -        uint32_t vmdctl, i, vtctl;
     2852 +        uint32_t vmdctl, i, vtctl, vlnctl;
2823 2853  
2824 2854          /*
2825 2855           * Setup the VMDq Control register, enable VMDq based on
2826 2856           * packet destination MAC address:
2827 2857           */
2828 2858          switch (hw->mac.type) {
2829 2859          case ixgbe_mac_82598EB:
2830 2860                  /*
2831 2861                   * VMDq Enable = 1;
2832 2862                   * VMDq Filter = 0; MAC filtering
↓ open down ↓ 15 lines elided ↑ open up ↑
2848 2878                  IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2849 2879  
2850 2880                  for (i = 0; i < hw->mac.num_rar_entries; i++) {
2851 2881                          IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2852 2882                          IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2853 2883                  }
2854 2884  
2855 2885                  /*
2856 2886                   * Enable Virtualization and Replication.
2857 2887                   */
2858      -                vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
     2888 +                vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
     2889 +                ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
     2890 +                vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2859 2891                  IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2860 2892  
2861 2893                  /*
     2894 +                 * Enable VLAN filtering and switching (VFTA and VLVF).
     2895 +                 */
     2896 +                vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
     2897 +                vlnctl |= IXGBE_VLNCTRL_VFE;
     2898 +                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
     2899 +                ixgbe->vlft_enabled = B_TRUE;
     2900 +
     2901 +                /*
2862 2902                   * Enable receiving packets to all VFs
2863 2903                   */
2864 2904                  IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2865 2905                  IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2866 2906                  break;
2867 2907  
2868 2908          default:
2869 2909                  break;
2870 2910          }
2871 2911  }
2872 2912  
2873 2913  /*
2874 2914   * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2875 2915   */
2876 2916  static void
2877 2917  ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2878 2918  {
2879 2919          struct ixgbe_hw *hw = &ixgbe->hw;
2880 2920          uint32_t i, mrqc;
2881      -        uint32_t vtctl, vmdctl;
     2921 +        uint32_t vtctl, vmdctl, vlnctl;
2882 2922  
2883 2923          /*
2884 2924           * Initialize RETA/ERETA table
2885 2925           */
2886 2926          ixgbe_setup_rss_table(ixgbe);
2887 2927  
2888 2928          /*
2889 2929           * Enable and setup RSS and VMDq
2890 2930           */
2891 2931          switch (hw->mac.type) {
↓ open down ↓ 63 lines elided ↑ open up ↑
2955 2995          }
2956 2996  
2957 2997          if (hw->mac.type == ixgbe_mac_82599EB ||
2958 2998              hw->mac.type == ixgbe_mac_X540 ||
2959 2999              hw->mac.type == ixgbe_mac_X550 ||
2960 3000              hw->mac.type == ixgbe_mac_X550EM_x ||
2961 3001              hw->mac.type == ixgbe_mac_X550EM_a) {
2962 3002                  /*
2963 3003                   * Enable Virtualization and Replication.
2964 3004                   */
     3005 +                vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
     3006 +                ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
     3007 +                vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2965      -                vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2966 3009                  IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2967 3010  
2968 3011                  /*
     3012 +                 * Enable VLAN filtering and switching (VFTA and VLVF).
     3013 +                 */
     3014 +                vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
     3015 +                vlnctl |= IXGBE_VLNCTRL_VFE;
     3016 +                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
     3017 +                ixgbe->vlft_enabled = B_TRUE;
     3018 +
     3019 +                /*
2969 3020                   * Enable receiving packets to all VFs
2970 3021                   */
2971 3022                  IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2972 3023                  IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2973 3024          }
2974 3025  }
2975 3026  
2976 3027  /*
2977 3028   * ixgbe_setup_rss_table - Setup RSS table
2978 3029   */
↓ open down ↓ 150 lines elided ↑ open up ↑
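
For context on the registers enabled above: setting VLNCTRL.VFE turns on VLAN filtering, the VFTA is a 4096-bit membership bitmap spread across 128 32-bit registers, and the VLVF is a small shared array of entries (64 on these MACs) that bind an individual VID to specific VMDq pools. A rough sketch of the VID-to-VFTA mapping, using a hypothetical helper name; the real work, including VLVF slot management, is done by the shared code's ixgbe_set_vfta():

/*
 * Illustrative only: locate the VFTA register and bit for a VID.
 */
static void
example_vfta_locate(uint16_t vid, uint32_t *regidx, uint32_t *bitmask)
{
	*regidx = (vid >> 5) & 0x7F;	/* one of 128 IXGBE_VFTA registers */
	*bitmask = 1U << (vid & 0x1F);	/* bit within that register */
}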
3129 3180          for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3130 3181                  if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3131 3182                      mac_addr, ETHERADDRL) == 0)
3132 3183                          return (slot);
3133 3184          }
3134 3185  
3135 3186          return (-1);
3136 3187  }
3137 3188  
3138 3189  /*
     3190 + * Restore the HW state to match the SW state during restart.
     3191 + */
     3192 +static int
     3193 +ixgbe_init_vlan(ixgbe_t *ixgbe)
     3194 +{
     3195 +        /*
     3196 +         * The device is starting for the first time; there is nothing
     3197 +         * to do.
     3198 +         */
     3199 +        if (!ixgbe->vlft_init) {
     3200 +                ixgbe->vlft_init = B_TRUE;
     3201 +                return (IXGBE_SUCCESS);
     3202 +        }
     3203 +
     3204 +        for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
     3205 +                int                     ret;
     3206 +                boolean_t               vlvf_bypass;
     3207 +                ixgbe_rx_group_t        *rxg = &ixgbe->rx_groups[i];
     3208 +                struct ixgbe_hw         *hw = &ixgbe->hw;
     3209 +
     3210 +                if (rxg->aupe) {
     3211 +                        uint32_t vml2flt;
     3212 +
     3213 +                        vml2flt = IXGBE_READ_REG(hw, IXGBE_VMOLR(rxg->index));
     3214 +                        vml2flt |= IXGBE_VMOLR_AUPE;
     3215 +                        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rxg->index), vml2flt);
     3216 +                }
     3217 +
     3218 +                vlvf_bypass = (rxg->index == ixgbe->rx_def_group);
     3219 +                for (ixgbe_vlan_t *vlp = list_head(&rxg->vlans); vlp != NULL;
     3220 +                    vlp = list_next(&rxg->vlans, vlp)) {
     3221 +                        ret = ixgbe_set_vfta(hw, vlp->ixvl_vid, rxg->index,
     3222 +                            B_TRUE, vlvf_bypass);
     3223 +
     3224 +                        if (ret != IXGBE_SUCCESS) {
     3225 +                                ixgbe_error(ixgbe, "Failed to program VFTA"
     3226 +                                    " for group %u, VID: %u, ret: %d.",
     3227 +                                    rxg->index, vlp->ixvl_vid, ret);
     3228 +                                return (IXGBE_FAILURE);
     3229 +                        }
     3230 +                }
     3231 +        }
     3232 +
     3233 +        return (IXGBE_SUCCESS);
     3234 +}
     3235 +
     3236 +/*
3139 3237   * ixgbe_multicst_add - Add a multicst address.
3140 3238   */
3141 3239  int
3142 3240  ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3143 3241  {
3144 3242          ASSERT(mutex_owned(&ixgbe->gen_lock));
3145 3243  
3146 3244          if ((multiaddr[0] & 01) == 0) {
3147 3245                  return (EINVAL);
3148 3246          }
↓ open down ↓ 3005 lines elided ↑ open up ↑
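
ixgbe_init_vlan() above, like ixgbe_addvlan() and ixgbe_remvlan() further down, leans on the vlvf_bypass argument that bug 11492 adds to the shared code. A sketch of the contract as this file uses it (the authoritative prototype lives in the ixgbe common-code headers):

/*
 * vid         - VLAN ID whose VFTA bit is set or cleared
 * vind        - VMDq pool (Rx group) index used for the VLVF entry
 * vlan_on     - B_TRUE installs the filter, B_FALSE removes it
 * vlvf_bypass - when B_TRUE, touch only the VFTA bitmap and do not
 *               consume one of the scarce, shared VLVF slots
 */
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vid, u32 vind,
    bool vlan_on, bool vlvf_bypass);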
6154 6252  }
6155 6253  
6156 6254  /*
6157 6255   * Callback function for MAC layer to register all groups.
6158 6256   */
6159 6257  void
6160 6258  ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6161 6259      mac_group_info_t *infop, mac_group_handle_t gh)
6162 6260  {
6163 6261          ixgbe_t *ixgbe = (ixgbe_t *)arg;
     6262 +        struct ixgbe_hw *hw = &ixgbe->hw;
6164 6263  
6165 6264          switch (rtype) {
6166 6265          case MAC_RING_TYPE_RX: {
6167 6266                  ixgbe_rx_group_t *rx_group;
6168 6267  
6169 6268                  rx_group = &ixgbe->rx_groups[index];
6170 6269                  rx_group->group_handle = gh;
6171 6270  
6172 6271                  infop->mgi_driver = (mac_group_driver_t)rx_group;
6173 6272                  infop->mgi_start = NULL;
6174 6273                  infop->mgi_stop = NULL;
6175 6274                  infop->mgi_addmac = ixgbe_addmac;
6176 6275                  infop->mgi_remmac = ixgbe_remmac;
     6276 +
     6277 +                if ((ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ ||
     6278 +                    ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) &&
     6279 +                    (hw->mac.type == ixgbe_mac_82599EB ||
     6280 +                    hw->mac.type == ixgbe_mac_X540 ||
     6281 +                    hw->mac.type == ixgbe_mac_X550 ||
     6282 +                    hw->mac.type == ixgbe_mac_X550EM_x)) {
     6283 +                        infop->mgi_addvlan = ixgbe_addvlan;
     6284 +                        infop->mgi_remvlan = ixgbe_remvlan;
     6285 +                } else {
     6286 +                        infop->mgi_addvlan = NULL;
     6287 +                        infop->mgi_remvlan = NULL;
     6288 +                }
     6289 +
6177 6290                  infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6178 6291  
6179 6292                  break;
6180 6293          }
6181 6294          case MAC_RING_TYPE_TX:
6182 6295                  break;
6183 6296          default:
6184 6297                  break;
6185 6298          }
6186 6299  }
↓ open down ↓ 78 lines elided ↑ open up ↑
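
The mgi_addvlan and mgi_remvlan slots populated above are the new per-group VLAN entry points in mac_group_info_t, added to the MAC provider interface elsewhere in this change set. A sketch of the callback shape MAC expects, matching ixgbe_addvlan()/ixgbe_remvlan() below (authoritative definitions are in mac_provider.h):

/*
 * MAC invokes mgi_addvlan when a client bound to this group (for
 * example a VLAN VNIC) wants tagged traffic steered to the group in
 * hardware, and passes MAC_VLAN_UNTAGGED when the group should also
 * receive untagged frames.  A driver that cannot filter VLANs in
 * hardware leaves both pointers NULL and MAC falls back to software
 * classification.
 */
typedef int	(*mac_group_addvlan_t)(mac_group_driver_t, uint16_t);
typedef int	(*mac_group_remvlan_t)(mac_group_driver_t, uint16_t);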
6265 6378           * vector allocation register (IVAR).
6266 6379           */
6267 6380          ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6268 6381  
6269 6382          BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6270 6383  
6271 6384          mutex_exit(&ixgbe->gen_lock);
6272 6385  
6273 6386          return (0);
6274 6387  }
     6388 +
     6389 +static ixgbe_vlan_t *
     6390 +ixgbe_find_vlan(ixgbe_rx_group_t *rx_group, uint16_t vid)
     6391 +{
     6392 +        for (ixgbe_vlan_t *vlp = list_head(&rx_group->vlans); vlp != NULL;
     6393 +            vlp = list_next(&rx_group->vlans, vlp)) {
     6394 +                if (vlp->ixvl_vid == vid)
     6395 +                        return (vlp);
     6396 +        }
     6397 +
     6398 +        return (NULL);
     6399 +}
     6400 +
     6401 +/*
     6402 + * Attempt to use a VLAN HW filter for this group. If the group is
     6403 + * interested in untagged packets then set AUPE only. If the group is
     6404 + * the default then only set the VFTA. Leave the VLVF slots open for
     6405 + * reserved groups to guarantee their use of HW filtering.
     6406 + */
     6407 +static int
     6408 +ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid)
     6409 +{
     6410 +        ixgbe_rx_group_t        *rx_group = (ixgbe_rx_group_t *)gdriver;
     6411 +        ixgbe_t                 *ixgbe = rx_group->ixgbe;
     6412 +        struct ixgbe_hw         *hw = &ixgbe->hw;
     6413 +        ixgbe_vlan_t            *vlp;
     6414 +        int                     ret;
     6415 +        boolean_t               is_def_grp;
     6416 +
     6417 +        mutex_enter(&ixgbe->gen_lock);
     6418 +
     6419 +        if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
     6420 +                mutex_exit(&ixgbe->gen_lock);
     6421 +                return (ECANCELED);
     6422 +        }
     6423 +
     6424 +        /*
     6425 +         * Let's be sure VLAN filtering is enabled.
     6426 +         */
     6427 +        VERIFY3B(ixgbe->vlft_enabled, ==, B_TRUE);
     6428 +        is_def_grp = (rx_group->index == ixgbe->rx_def_group);
     6429 +
     6430 +        /*
     6431 +         * VLAN filtering is enabled but we want to receive untagged
     6432 +         * traffic on this group -- set the AUPE bit on the group and
     6433 +         * leave the VLAN tables alone.
     6434 +         */
     6435 +        if (vid == MAC_VLAN_UNTAGGED) {
     6436 +                /*
     6437 +                 * We never enable AUPE on the default group; it is
     6438 +                 * redundant. Untagged traffic which passes L2
     6439 +                 * filtering is delivered to the default group if no
     6440 +                 * other group is interested.
     6441 +                 */
     6442 +                if (!is_def_grp) {
     6443 +                        uint32_t vml2flt;
     6444 +
     6445 +                        vml2flt = IXGBE_READ_REG(hw,
     6446 +                            IXGBE_VMOLR(rx_group->index));
     6447 +                        vml2flt |= IXGBE_VMOLR_AUPE;
     6448 +                        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rx_group->index),
     6449 +                            vml2flt);
     6450 +                        rx_group->aupe = B_TRUE;
     6451 +                }
     6452 +
     6453 +                mutex_exit(&ixgbe->gen_lock);
     6454 +                return (0);
     6455 +        }
     6456 +
     6457 +        vlp = ixgbe_find_vlan(rx_group, vid);
     6458 +        if (vlp != NULL) {
     6459 +                /* Only the default group supports multiple clients. */
     6460 +                VERIFY3B(is_def_grp, ==, B_TRUE);
     6461 +                vlp->ixvl_refs++;
     6462 +                mutex_exit(&ixgbe->gen_lock);
     6463 +                return (0);
     6464 +        }
     6465 +
     6466 +        /*
     6467 +         * The default group doesn't require a VLVF entry, only a VFTA
     6468 +         * entry. All traffic passing L2 filtering (MPSAR + VFTA) is
     6469 +         * delivered to the default group if no other group is
     6470 +         * interested. The fourth argument, vlvf_bypass, tells the
     6471 +         * ixgbe common code to avoid using a VLVF slot if one isn't
     6472 +         * already allocated to this VLAN.
     6473 +         *
     6474 +         * This logic is meant to reserve VLVF slots for use by
     6475 +         * reserved groups: guaranteeing their use of HW filtering.
     6476 +         */
     6477 +        ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE, is_def_grp);
     6478 +
     6479 +        if (ret == IXGBE_SUCCESS) {
     6480 +                vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_SLEEP);
     6481 +                vlp->ixvl_vid = vid;
     6482 +                vlp->ixvl_refs = 1;
     6483 +                list_insert_tail(&rx_group->vlans, vlp);
     6484 +                mutex_exit(&ixgbe->gen_lock);
     6485 +                return (0);
     6486 +        }
     6487 +
     6488 +        /*
     6489 +         * We should actually never return ENOSPC because we've set
     6490 +         * things up so that every reserved group is guaranteed to
     6491 +         * have a VLVF slot.
     6492 +         */
     6493 +        if (ret == IXGBE_ERR_PARAM)
     6494 +                ret = EINVAL;
     6495 +        else if (ret == IXGBE_ERR_NO_SPACE)
     6496 +                ret = ENOSPC;
     6497 +        else
     6498 +                ret = EIO;
     6499 +
     6500 +        mutex_exit(&ixgbe->gen_lock);
     6501 +        return (ret);
     6502 +}
     6503 +
     6504 +/*
     6505 + * Attempt to remove the VLAN HW filter associated with this group. If
     6506 + * we are removing a HW filter for the default group then we know only
     6507 + * the VFTA was set (VLVF is reserved for non-default/reserved
     6508 + * groups). If the group wishes to stop receiving untagged traffic
     6509 + * then clear the AUPE but leave the VLAN filters alone.
     6510 + */
     6511 +static int
     6512 +ixgbe_remvlan(mac_group_driver_t gdriver, uint16_t vid)
     6513 +{
     6514 +        ixgbe_rx_group_t        *rx_group = (ixgbe_rx_group_t *)gdriver;
     6515 +        ixgbe_t                 *ixgbe = rx_group->ixgbe;
     6516 +        struct ixgbe_hw         *hw = &ixgbe->hw;
     6517 +        int                     ret;
     6518 +        ixgbe_vlan_t            *vlp;
     6519 +        boolean_t               is_def_grp;
     6520 +
     6521 +        mutex_enter(&ixgbe->gen_lock);
     6522 +
     6523 +        if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
     6524 +                mutex_exit(&ixgbe->gen_lock);
     6525 +                return (ECANCELED);
     6526 +        }
     6527 +
     6528 +        is_def_grp = (rx_group->index == ixgbe->rx_def_group);
     6529 +
     6530 +        /* See the AUPE comment in ixgbe_addvlan(). */
     6531 +        if (vid == MAC_VLAN_UNTAGGED) {
     6532 +                if (!is_def_grp) {
     6533 +                        uint32_t vml2flt;
     6534 +
     6535 +                        vml2flt = IXGBE_READ_REG(hw,
     6536 +                            IXGBE_VMOLR(rx_group->index));
     6537 +                        vml2flt &= ~IXGBE_VMOLR_AUPE;
     6538 +                        IXGBE_WRITE_REG(hw,
     6539 +                            IXGBE_VMOLR(rx_group->index), vml2flt);
     6540 +                        rx_group->aupe = B_FALSE;
     6541 +                }
     6542 +                mutex_exit(&ixgbe->gen_lock);
     6543 +                return (0);
     6544 +        }
     6545 +
     6546 +        vlp = ixgbe_find_vlan(rx_group, vid);
     6547 +        if (vlp == NULL) {
     6548 +                mutex_exit(&ixgbe->gen_lock);
     6549 +                return (ENOENT);
     6550 +        }
     6549 +
     6550 +        /*
     6551 +         * See the comment in ixgbe_addvlan() about is_def_grp and
     6552 +         * vlvf_bypass.
     6553 +         */
     6554 +        if (vlp->ixvl_refs == 1) {
     6555 +                ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_FALSE,
     6556 +                    is_def_grp);
     6557 +        } else {
     6558 +                /*
     6559 +                 * Only the default group can have multiple clients.
     6560 +                 * If there is more than one client, leave the
     6561 +                 * VFTA[vid] bit alone.
     6562 +                 */
     6563 +                VERIFY3B(is_def_grp, ==, B_TRUE);
     6564 +                VERIFY3U(vlp->ixvl_refs, >, 1);
     6565 +                vlp->ixvl_refs--;
     6566 +                mutex_exit(&ixgbe->gen_lock);
     6567 +                return (0);
     6568 +        }
     6569 +
     6570 +        if (ret != IXGBE_SUCCESS) {
     6571 +                mutex_exit(&ixgbe->gen_lock);
     6572 +                /* IXGBE_ERR_PARAM should be the only possible error here. */
     6573 +                if (ret == IXGBE_ERR_PARAM)
     6574 +                        return (EINVAL);
     6575 +                else
     6576 +                        return (EIO);
     6577 +        }
     6578 +
     6579 +        VERIFY3U(vlp->ixvl_refs, ==, 1);
     6580 +        vlp->ixvl_refs = 0;
     6581 +        list_remove(&rx_group->vlans, vlp);
     6582 +        kmem_free(vlp, sizeof (ixgbe_vlan_t));
     6583 +
     6584 +        /*
     6585 +         * Calling ixgbe_set_vfta() on a non-default group may have
     6586 +         * cleared the VFTA[vid] bit even though the default group
     6587 +         * still has clients using the vid. This happens because the
     6588 +         * ixgbe common code doesn't ref count the use of VLANs. Check
     6589 +         * for any use of vid on the default group and make sure the
     6590 +         * VFTA[vid] bit is set. This operation is idempotent: setting
     6591 +         * VFTA[vid] to true if already true won't hurt anything.
     6592 +         */
     6593 +        if (!is_def_grp) {
     6594 +                ixgbe_rx_group_t *defgrp;
     6595 +
     6596 +                defgrp = &ixgbe->rx_groups[ixgbe->rx_def_group];
     6597 +                vlp = ixgbe_find_vlan(defgrp, vid);
     6598 +                if (vlp != NULL) {
     6599 +                        /* This shouldn't fail, but if it does return EIO. */
     6600 +                        ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE,
     6601 +                            B_TRUE);
     6602 +                        if (ret != IXGBE_SUCCESS) {
     6603 +                                mutex_exit(&ixgbe->gen_lock);
     6604 +                                return (EIO);
     6605 +                        }
     6604 +                }
     6605 +        }
     6606 +
     6607 +        mutex_exit(&ixgbe->gen_lock);
     6608 +        return (0);
     6609 +}
6275 6610  
6276 6611  /*
6277 6612   * Add a mac address.
6278 6613   */
6279 6614  static int
6280 6615  ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6281 6616  {
6282 6617          ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6283 6618          ixgbe_t *ixgbe = rx_group->ixgbe;
6284 6619          struct ixgbe_hw *hw = &ixgbe->hw;
↓ open down ↓ 89 lines elided ↑ open up ↑