11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>


  40  * Local function prototypes
  41  */
  42 static int ixgbe_register_mac(ixgbe_t *);
  43 static int ixgbe_identify_hardware(ixgbe_t *);
  44 static int ixgbe_regs_map(ixgbe_t *);
  45 static void ixgbe_init_properties(ixgbe_t *);
  46 static int ixgbe_init_driver_settings(ixgbe_t *);
  47 static void ixgbe_init_locks(ixgbe_t *);
  48 static void ixgbe_destroy_locks(ixgbe_t *);
  49 static int ixgbe_init(ixgbe_t *);
  50 static int ixgbe_chip_start(ixgbe_t *);
  51 static void ixgbe_chip_stop(ixgbe_t *);
  52 static int ixgbe_reset(ixgbe_t *);
  53 static void ixgbe_tx_clean(ixgbe_t *);
  54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
  55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
  56 static int ixgbe_alloc_rings(ixgbe_t *);
  57 static void ixgbe_free_rings(ixgbe_t *);
  58 static int ixgbe_alloc_rx_data(ixgbe_t *);
  59 static void ixgbe_free_rx_data(ixgbe_t *);
  60 static void ixgbe_setup_rings(ixgbe_t *);
  61 static void ixgbe_setup_rx(ixgbe_t *);
  62 static void ixgbe_setup_tx(ixgbe_t *);
  63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
  64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
  65 static void ixgbe_setup_rss(ixgbe_t *);
  66 static void ixgbe_setup_vmdq(ixgbe_t *);
  67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
  68 static void ixgbe_setup_rss_table(ixgbe_t *);
  69 static void ixgbe_init_unicst(ixgbe_t *);
  70 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
  71 static void ixgbe_setup_multicst(ixgbe_t *);
  72 static void ixgbe_get_hw_state(ixgbe_t *);
  73 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
  74 static void ixgbe_get_conf(ixgbe_t *);
  75 static void ixgbe_init_params(ixgbe_t *);
  76 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
  77 static void ixgbe_driver_link_check(ixgbe_t *);
  78 static void ixgbe_sfp_check(void *);
  79 static void ixgbe_overtemp_check(void *);
  80 static void ixgbe_phy_check(void *);
  81 static void ixgbe_link_timer(void *);
  82 static void ixgbe_local_timer(void *);
  83 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
  84 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
  85 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
  86 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
  87 static boolean_t is_valid_mac_addr(uint8_t *);
  88 static boolean_t ixgbe_stall_check(ixgbe_t *);
  89 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);


  96 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
  97 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
  98 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
  99 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
 100 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
 101 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
 102 static void ixgbe_setup_adapter_vector(ixgbe_t *);
 103 static void ixgbe_rem_intr_handlers(ixgbe_t *);
 104 static void ixgbe_rem_intrs(ixgbe_t *);
 105 static int ixgbe_enable_intrs(ixgbe_t *);
 106 static int ixgbe_disable_intrs(ixgbe_t *);
 107 static uint_t ixgbe_intr_legacy(void *, void *);
 108 static uint_t ixgbe_intr_msi(void *, void *);
 109 static uint_t ixgbe_intr_msix(void *, void *);
 110 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
 111 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
 112 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
 113 static void ixgbe_get_driver_control(struct ixgbe_hw *);
 114 static int ixgbe_addmac(void *, const uint8_t *);
 115 static int ixgbe_remmac(void *, const uint8_t *);
 116 static void ixgbe_release_driver_control(struct ixgbe_hw *);
 117 
 118 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
 119 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
 120 static int ixgbe_resume(dev_info_t *);
 121 static int ixgbe_suspend(dev_info_t *);
 122 static int ixgbe_quiesce(dev_info_t *);
 123 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
 124 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
 125 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
 126 static int ixgbe_intr_cb_register(ixgbe_t *);
 127 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
 128 
 129 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
 130     const void *impl_data);
 131 static void ixgbe_fm_init(ixgbe_t *);
 132 static void ixgbe_fm_fini(ixgbe_t *);
 133 
 134 char *ixgbe_priv_props[] = {
 135         "_tx_copy_thresh",


1142         tx_size = ixgbe->max_frame_size;
1143         ixgbe->tx_buf_size = ((tx_size >> 10) +
1144             ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
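
A quick worked check of the rounding above (illustration only): the
expression rounds max_frame_size up to the next 1 KB multiple, so a
1518-byte frame yields a 2 KB tx buffer:

        /* (1518 >> 10) = 1; (1518 & 1023) = 494 > 0, so (1 + 1) << 10 */
        tx_buf_size = 2048;
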
1145 
1146         /*
1147          * Initialize rx/tx rings/groups parameters
1148          */
1149         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1150         for (i = 0; i < ixgbe->num_rx_rings; i++) {
1151                 rx_ring = &ixgbe->rx_rings[i];
1152                 rx_ring->index = i;
1153                 rx_ring->ixgbe = ixgbe;
1154                 rx_ring->group_index = i / ring_per_group;
1155                 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1156         }
1157 
1158         for (i = 0; i < ixgbe->num_rx_groups; i++) {
1159                 rx_group = &ixgbe->rx_groups[i];
1160                 rx_group->index = i;
1161                 rx_group->ixgbe = ixgbe;
1162         }
1163 
1164         for (i = 0; i < ixgbe->num_tx_rings; i++) {
1165                 tx_ring = &ixgbe->tx_rings[i];
1166                 tx_ring->index = i;
1167                 tx_ring->ixgbe = ixgbe;
1168                 if (ixgbe->tx_head_wb_enable)
1169                         tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1170                 else
1171                         tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1172 
1173                 tx_ring->ring_size = ixgbe->tx_ring_size;
1174                 tx_ring->free_list_size = ixgbe->tx_ring_size +
1175                     (ixgbe->tx_ring_size >> 1);
1176         }
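
A quick check of the free-list sizing above (illustration only): the
free list is 1.5x the descriptor ring.

        /* e.g. a 1024-descriptor ring: 1024 + (1024 >> 1) = 1536 entries */
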
1177 
1178         /*
1179          * Initialize values of interrupt throttling rate
1180          */
1181         for (i = 1; i < MAX_INTR_VECTOR; i++)


1892          * autonegotiation will allow (usually 10Gb, infrastructure allowing)
1893          * so we never bothered with explicitly setting the link to 10Gb as it
1894          * would already be at that state on driver attach. With X550, we must
1895          * trigger a re-negotiation of the link in order to switch from a LPLU
1896          * 1Gb link to 10Gb (cable and link partner permitting.)
1897          */
1898         if (hw->mac.type == ixgbe_mac_X550 ||
1899             hw->mac.type == ixgbe_mac_X550EM_a ||
1900             hw->mac.type == ixgbe_mac_X550EM_x) {
1901                 (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
1902                 ixgbe_get_hw_state(ixgbe);
1903         }
1904 
1905         if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1906                 goto start_failure;
1907         }
1908 
1909         /*
1910          * Setup the rx/tx rings
1911          */
1912         ixgbe_setup_rings(ixgbe);
1913 
1914         /*
1915          * ixgbe_start() will be called when resetting, however if reset
1916          * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1917          * before enabling the interrupts.
1918          */
1919         atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1920             | IXGBE_STALL | IXGBE_OVERTEMP));
1921 
1922         /*
1923          * Enable adapter interrupts
1924          * The interrupts must be enabled after the driver state is START
1925          */
1926         ixgbe_enable_adapter_interrupts(ixgbe);
1927 
1928         for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1929                 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1930         for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1931                 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1932 


2265 }
2266 
2267 /*
2268  * ixgbe_free_rings - Free the memory space of rx/tx rings.
2269  */
2270 static void
2271 ixgbe_free_rings(ixgbe_t *ixgbe)
2272 {
2273         if (ixgbe->rx_rings != NULL) {
2274                 kmem_free(ixgbe->rx_rings,
2275                     sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2276                 ixgbe->rx_rings = NULL;
2277         }
2278 
2279         if (ixgbe->tx_rings != NULL) {
2280                 kmem_free(ixgbe->tx_rings,
2281                     sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2282                 ixgbe->tx_rings = NULL;
2283         }
2284 
2285         if (ixgbe->rx_groups != NULL) {
2286                 kmem_free(ixgbe->rx_groups,
2287                     sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2288                 ixgbe->rx_groups = NULL;
2289         }
2290 }
2291 
2292 static int
2293 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2294 {
2295         ixgbe_rx_ring_t *rx_ring;
2296         int i;
2297 
2298         for (i = 0; i < ixgbe->num_rx_rings; i++) {
2299                 rx_ring = &ixgbe->rx_rings[i];
2300                 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2301                         goto alloc_rx_rings_failure;
2302         }
2303         return (IXGBE_SUCCESS);
2304 


2319 
2320                 mutex_enter(&ixgbe->rx_pending_lock);
2321                 rx_data = rx_ring->rx_data;
2322 
2323                 if (rx_data != NULL) {
2324                         rx_data->flag |= IXGBE_RX_STOPPED;
2325 
2326                         if (rx_data->rcb_pending == 0) {
2327                                 ixgbe_free_rx_ring_data(rx_data);
2328                                 rx_ring->rx_data = NULL;
2329                         }
2330                 }
2331 
2332                 mutex_exit(&ixgbe->rx_pending_lock);
2333         }
2334 }
2335 
2336 /*
2337  * ixgbe_setup_rings - Setup rx/tx rings.
2338  */
2339 static void
2340 ixgbe_setup_rings(ixgbe_t *ixgbe)
2341 {
2342         /*
2343          * Setup the rx/tx rings, including the following:
2344          *
2345          * 1. Setup the descriptor ring and the control block buffers;
2346          * 2. Initialize necessary registers for receive/transmit;
2347          * 3. Initialize software pointers/parameters for receive/transmit;
2348          */
2349         ixgbe_setup_rx(ixgbe);
2350 
2351         ixgbe_setup_tx(ixgbe);
2352 }
2353 
2354 static void
2355 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2356 {
2357         ixgbe_t *ixgbe = rx_ring->ixgbe;
2358         ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2359         struct ixgbe_hw *hw = &ixgbe->hw;
2360         rx_control_block_t *rcb;
2361         union ixgbe_adv_rx_desc *rbd;
2362         uint32_t size;
2363         uint32_t buf_low;
2364         uint32_t buf_high;
2365         uint32_t reg_val;
2366         int i;
2367 
2368         ASSERT(mutex_owned(&rx_ring->rx_lock));
2369         ASSERT(mutex_owned(&ixgbe->gen_lock));
2370 
2371         for (i = 0; i < ixgbe->rx_ring_size; i++) {


2418         if (hw->mac.type == ixgbe_mac_82599EB ||
2419             hw->mac.type == ixgbe_mac_X540 ||
2420             hw->mac.type == ixgbe_mac_X550 ||
2421             hw->mac.type == ixgbe_mac_X550EM_x ||
2422             hw->mac.type == ixgbe_mac_X550EM_a) {
2423                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2424                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2425                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2426         }
2427 
2428         /*
2429          * Setup the Split and Replication Receive Control Register.
2430          * Set the rx buffer size and the advanced descriptor type.
2431          */
2432         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2433             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2434         reg_val |= IXGBE_SRRCTL_DROP_EN;
2435         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2436 }
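
A minimal sketch of the SRRCTL sizing just programmed (assuming
IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, i.e. the field counts 1 KB units, as
in the Intel common code):

        /* a 2 KB rx buffer programs a BSIZEPKT field of 2048 >> 10 = 2 */
        reg_val = (2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
            IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF | IXGBE_SRRCTL_DROP_EN;
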
2437 
2438 static void
2439 ixgbe_setup_rx(ixgbe_t *ixgbe)
2440 {
2441         ixgbe_rx_ring_t *rx_ring;
2442         struct ixgbe_hw *hw = &ixgbe->hw;
2443         uint32_t reg_val;
2444         uint32_t i;
2445         uint32_t psrtype_rss_bit;
2446 
2447         /*
2448          * Ensure that Rx is disabled while setting up
2449          * the Rx unit and Rx descriptor ring(s)
2450          */
2451         ixgbe_disable_rx(hw);
2452 
2453         /* PSRTYPE must be configured for 82599 */
2454         if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2455             ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2456                 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2457                     IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2458                 reg_val |= IXGBE_PSRTYPE_L2HDR;


2511                 /*
2512                  * Multiple groups, each group has one ring,
2513                  * only VMDq is needed.
2514                  */
2515                 ixgbe_setup_vmdq(ixgbe);
2516                 break;
2517 
2518         case IXGBE_CLASSIFY_VMDQ_RSS:
2519                 /*
2520                  * Multiple groups and multiple rings, both
2521                  * VMDq and RSS are needed.
2522                  */
2523                 ixgbe_setup_vmdq_rss(ixgbe);
2524                 break;
2525 
2526         default:
2527                 break;
2528         }
2529 
2530         /*

2531          * Enable the receive unit.  This must be done after filter
2532          * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2533          * 82598 is the only adapter which defines this RXCTRL option.
2534          */
2535         reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2536         if (hw->mac.type == ixgbe_mac_82598EB)
2537                 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2538         reg_val |= IXGBE_RXCTRL_RXEN;
2539         (void) ixgbe_enable_rx_dma(hw, reg_val);
2540 
2541         /*
2542          * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2543          */
2544         for (i = 0; i < ixgbe->num_rx_rings; i++) {
2545                 rx_ring = &ixgbe->rx_rings[i];
2546                 ixgbe_setup_rx_ring(rx_ring);
2547         }
2548 
2549         /*
2550          * The 82598 controller gives us the RNBC (Receive No Buffer


2601                         reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2602                         reg_val |= IXGBE_RSCCTL_RSCEN;
2603                         if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2604                                 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2605                         else
2606                                 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2607                         IXGBE_WRITE_REG(hw,  IXGBE_RSCCTL(i), reg_val);
2608                 }
2609 
2610                 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2611                 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2612                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2613 
2614                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2615                 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2616                 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2617                 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2618 
2619                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2620         }
2621 }
2622 
2623 static void
2624 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2625 {
2626         ixgbe_t *ixgbe = tx_ring->ixgbe;
2627         struct ixgbe_hw *hw = &ixgbe->hw;
2628         uint32_t size;
2629         uint32_t buf_low;
2630         uint32_t buf_high;
2631         uint32_t reg_val;
2632 
2633         ASSERT(mutex_owned(&tx_ring->tx_lock));
2634         ASSERT(mutex_owned(&ixgbe->gen_lock));
2635 
2636         /*
2637          * Initialize the length register
2638          */
2639         size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2640         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);


2802         mrqc = IXGBE_MRQC_RSSEN |
2803             IXGBE_MRQC_RSS_FIELD_IPV4 |
2804             IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2805             IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2806             IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2807             IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2808             IXGBE_MRQC_RSS_FIELD_IPV6 |
2809             IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2810             IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2811             IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2812         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2813 }
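
For context, a conceptual sketch (not driver code) of how the MRQC hash
fields above feed ring selection: the hardware indexes the redirection
table with the low bits of the computed RSS hash.

        /* conceptual; the RETA holds 128 entries on 8259x/X540 */
        ring = reta[rss_hash & (table_size - 1)];
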
2814 
2815 /*
2816  * ixgbe_setup_vmdq - Setup MAC classification feature
2817  */
2818 static void
2819 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2820 {
2821         struct ixgbe_hw *hw = &ixgbe->hw;
2822         uint32_t vmdctl, i, vtctl;
2823 
2824         /*
2825          * Setup the VMDq Control register, enable VMDq based on
2826          * packet destination MAC address:
2827          */
2828         switch (hw->mac.type) {
2829         case ixgbe_mac_82598EB:
2830                 /*
2831                  * VMDq Enable = 1;
2832                  * VMDq Filter = 0; MAC filtering
2833                  * Default VMDq output index = 0;
2834                  */
2835                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2836                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2837                 break;
2838 
2839         case ixgbe_mac_82599EB:
2840         case ixgbe_mac_X540:
2841         case ixgbe_mac_X550:
2842         case ixgbe_mac_X550EM_x:
2843         case ixgbe_mac_X550EM_a:
2844                 /*
2845                  * Enable VMDq-only.
2846                  */
2847                 vmdctl = IXGBE_MRQC_VMDQEN;
2848                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2849 
2850                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2851                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2852                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2853                 }
2854 
2855                 /*
2856                  * Enable Virtualization and Replication.
2857                  */
2858                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2859                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2860 
2861                 /*
2862                  * Enable receiving packets to all VFs
2863                  */
2864                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2865                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2866                 break;
2867 
2868         default:
2869                 break;
2870         }
2871 }
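
The MPSAR loop above clears each RAR entry's pool-association bitmap,
which holds one bit per VMDq pool split across the LO/HI registers. A
sketch of the association being reset ('slot' and 'pool' are
illustrative names):

        if (pool < 32)
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(slot),
                    IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(slot)) | (1U << pool));
        else
                IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(slot),
                    IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(slot)) |
                    (1U << (pool - 32)));
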
2872 
2873 /*
2874  * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2875  */
2876 static void
2877 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2878 {
2879         struct ixgbe_hw *hw = &ixgbe->hw;
2880         uint32_t i, mrqc;
2881         uint32_t vtctl, vmdctl;
2882 
2883         /*
2884          * Initialize RETA/ERETA table
2885          */
2886         ixgbe_setup_rss_table(ixgbe);
2887 
2888         /*
2889          * Enable and setup RSS and VMDq
2890          */
2891         switch (hw->mac.type) {
2892         case ixgbe_mac_82598EB:
2893                 /*
2894                  * Enable RSS & Setup RSS Hash functions
2895                  */
2896                 mrqc = IXGBE_MRQC_RSSEN |
2897                     IXGBE_MRQC_RSS_FIELD_IPV4 |
2898                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2899                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2900                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2901                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |


2945 
2946                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2947                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2948                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2949                 }
2950                 break;
2951 
2952         default:
2953                 break;
2954 
2955         }
2956 
2957         if (hw->mac.type == ixgbe_mac_82599EB ||
2958             hw->mac.type == ixgbe_mac_X540 ||
2959             hw->mac.type == ixgbe_mac_X550 ||
2960             hw->mac.type == ixgbe_mac_X550EM_x ||
2961             hw->mac.type == ixgbe_mac_X550EM_a) {
2962                 /*
2963                  * Enable Virtualization and Replication.
2964                  */
2965                 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2966                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2967 
2968                 /*
2969                  * Enable receiving packets to all VFs
2970                  */
2971                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2972                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2973         }
2974 }
2975 
2976 /*
2977  * ixgbe_setup_rss_table - Setup RSS table
2978  */
2979 static void
2980 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2981 {
2982         struct ixgbe_hw *hw = &ixgbe->hw;
2983         uint32_t i, j;
2984         uint32_t random;
2985         uint32_t reta;
2986         uint32_t ring_per_group;
2987         uint32_t ring;
2988         uint32_t table_size;


3119 /*
3120  * ixgbe_unicst_find - Find the slot for the specified unicast address
3121  */
3122 int
3123 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3124 {
3125         int slot;
3126 
3127         ASSERT(mutex_owned(&ixgbe->gen_lock));
3128 
3129         for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3130                 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3131                     mac_addr, ETHERADDRL) == 0)
3132                         return (slot);
3133         }
3134 
3135         return (-1);
3136 }
3137 
3138 /*
3139  * ixgbe_multicst_add - Add a multicast address.
3140  */
3141 int
3142 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3143 {
3144         ASSERT(mutex_owned(&ixgbe->gen_lock));
3145 
3146         if ((multiaddr[0] & 01) == 0) {
3147                 return (EINVAL);
3148         }
3149 
3150         if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3151                 return (ENOENT);
3152         }
3153 
3154         bcopy(multiaddr,
3155             &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3156         ixgbe->mcast_count++;
3157 
3158         /*


6144                 if (ixgbe->intr_type &
6145                     (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6146                         mintr->mi_ddi_handle =
6147                             ixgbe->htable[tx_ring->intr_vector];
6148                 }
6149                 break;
6150         }
6151         default:
6152                 break;
6153         }
6154 }
6155 
6156 /*
6157  * Callback function for MAC layer to register all groups.
6158  */
6159 void
6160 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6161     mac_group_info_t *infop, mac_group_handle_t gh)
6162 {
6163         ixgbe_t *ixgbe = (ixgbe_t *)arg;
6164 
6165         switch (rtype) {
6166         case MAC_RING_TYPE_RX: {
6167                 ixgbe_rx_group_t *rx_group;
6168 
6169                 rx_group = &ixgbe->rx_groups[index];
6170                 rx_group->group_handle = gh;
6171 
6172                 infop->mgi_driver = (mac_group_driver_t)rx_group;
6173                 infop->mgi_start = NULL;
6174                 infop->mgi_stop = NULL;
6175                 infop->mgi_addmac = ixgbe_addmac;
6176                 infop->mgi_remmac = ixgbe_remmac;
6177                 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6178 
6179                 break;
6180         }
6181         case MAC_RING_TYPE_TX:
6182                 break;
6183         default:
6184                 break;
6185         }
6186 }
6187 
6188 /*
6189  * Enable interrupt on the specified rx ring.
6190  */
6191 int
6192 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6193 {
6194         ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6195         ixgbe_t *ixgbe = rx_ring->ixgbe;
6196         int r_idx = rx_ring->index;


6255                  * stack via interrupts before ixgbe_rx_ring_intr_enable()
6256                  * is called again. This is acceptable since interrupt
6257                  * adjustment is infrequent, and the stack will be
6258                  * able to handle these packets.
6259                  */
6260                 return (0);
6261         }
6262 
6263         /*
6264          * Disable the interrupt by clearing the VAL bit of the given
6265          * interrupt vector allocation register (IVAR).
6266          */
6267         ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6268 
6269         BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6270 
6271         mutex_exit(&ixgbe->gen_lock);
6272 
6273         return (0);
6274 }
6275 
6276 /*
6277  * Add a mac address.
6278  */
6279 static int
6280 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6281 {
6282         ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6283         ixgbe_t *ixgbe = rx_group->ixgbe;
6284         struct ixgbe_hw *hw = &ixgbe->hw;
6285         int slot, i;
6286 
6287         mutex_enter(&ixgbe->gen_lock);
6288 
6289         if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6290                 mutex_exit(&ixgbe->gen_lock);
6291                 return (ECANCELED);
6292         }
6293 
6294         if (ixgbe->unicst_avail == 0) {


  40  * Local function prototypes
  41  */
  42 static int ixgbe_register_mac(ixgbe_t *);
  43 static int ixgbe_identify_hardware(ixgbe_t *);
  44 static int ixgbe_regs_map(ixgbe_t *);
  45 static void ixgbe_init_properties(ixgbe_t *);
  46 static int ixgbe_init_driver_settings(ixgbe_t *);
  47 static void ixgbe_init_locks(ixgbe_t *);
  48 static void ixgbe_destroy_locks(ixgbe_t *);
  49 static int ixgbe_init(ixgbe_t *);
  50 static int ixgbe_chip_start(ixgbe_t *);
  51 static void ixgbe_chip_stop(ixgbe_t *);
  52 static int ixgbe_reset(ixgbe_t *);
  53 static void ixgbe_tx_clean(ixgbe_t *);
  54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
  55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
  56 static int ixgbe_alloc_rings(ixgbe_t *);
  57 static void ixgbe_free_rings(ixgbe_t *);
  58 static int ixgbe_alloc_rx_data(ixgbe_t *);
  59 static void ixgbe_free_rx_data(ixgbe_t *);
  60 static int ixgbe_setup_rings(ixgbe_t *);
  61 static int ixgbe_setup_rx(ixgbe_t *);
  62 static void ixgbe_setup_tx(ixgbe_t *);
  63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
  64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
  65 static void ixgbe_setup_rss(ixgbe_t *);
  66 static void ixgbe_setup_vmdq(ixgbe_t *);
  67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
  68 static void ixgbe_setup_rss_table(ixgbe_t *);
  69 static void ixgbe_init_unicst(ixgbe_t *);
  70 static int ixgbe_init_vlan(ixgbe_t *);
  71 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
  72 static void ixgbe_setup_multicst(ixgbe_t *);
  73 static void ixgbe_get_hw_state(ixgbe_t *);
  74 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
  75 static void ixgbe_get_conf(ixgbe_t *);
  76 static void ixgbe_init_params(ixgbe_t *);
  77 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
  78 static void ixgbe_driver_link_check(ixgbe_t *);
  79 static void ixgbe_sfp_check(void *);
  80 static void ixgbe_overtemp_check(void *);
  81 static void ixgbe_phy_check(void *);
  82 static void ixgbe_link_timer(void *);
  83 static void ixgbe_local_timer(void *);
  84 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
  85 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
  86 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
  87 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
  88 static boolean_t is_valid_mac_addr(uint8_t *);
  89 static boolean_t ixgbe_stall_check(ixgbe_t *);
  90 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);


  97 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
  98 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
  99 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
 100 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
 101 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
 102 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
 103 static void ixgbe_setup_adapter_vector(ixgbe_t *);
 104 static void ixgbe_rem_intr_handlers(ixgbe_t *);
 105 static void ixgbe_rem_intrs(ixgbe_t *);
 106 static int ixgbe_enable_intrs(ixgbe_t *);
 107 static int ixgbe_disable_intrs(ixgbe_t *);
 108 static uint_t ixgbe_intr_legacy(void *, void *);
 109 static uint_t ixgbe_intr_msi(void *, void *);
 110 static uint_t ixgbe_intr_msix(void *, void *);
 111 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
 112 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
 113 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
 114 static void ixgbe_get_driver_control(struct ixgbe_hw *);
 115 static int ixgbe_addmac(void *, const uint8_t *);
 116 static int ixgbe_remmac(void *, const uint8_t *);
 117 static int ixgbe_addvlan(mac_group_driver_t, uint16_t);
 118 static int ixgbe_remvlan(mac_group_driver_t, uint16_t);
 119 static void ixgbe_release_driver_control(struct ixgbe_hw *);
 120 
 121 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
 122 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
 123 static int ixgbe_resume(dev_info_t *);
 124 static int ixgbe_suspend(dev_info_t *);
 125 static int ixgbe_quiesce(dev_info_t *);
 126 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
 127 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
 128 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
 129 static int ixgbe_intr_cb_register(ixgbe_t *);
 130 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
 131 
 132 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
 133     const void *impl_data);
 134 static void ixgbe_fm_init(ixgbe_t *);
 135 static void ixgbe_fm_fini(ixgbe_t *);
 136 
 137 char *ixgbe_priv_props[] = {
 138         "_tx_copy_thresh",


1145         tx_size = ixgbe->max_frame_size;
1146         ixgbe->tx_buf_size = ((tx_size >> 10) +
1147             ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1148 
1149         /*
1150          * Initialize rx/tx rings/groups parameters
1151          */
1152         ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1153         for (i = 0; i < ixgbe->num_rx_rings; i++) {
1154                 rx_ring = &ixgbe->rx_rings[i];
1155                 rx_ring->index = i;
1156                 rx_ring->ixgbe = ixgbe;
1157                 rx_ring->group_index = i / ring_per_group;
1158                 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1159         }
1160 
1161         for (i = 0; i < ixgbe->num_rx_groups; i++) {
1162                 rx_group = &ixgbe->rx_groups[i];
1163                 rx_group->index = i;
1164                 rx_group->ixgbe = ixgbe;
1165                 list_create(&rx_group->vlans, sizeof (ixgbe_vlan_t),
1166                     offsetof(ixgbe_vlan_t, ixvl_link));
1167         }
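
Given the offsetof() above and the ixvl_vid/ixvl_refs fields used later
in this change, the list element is presumably shaped like this (sketch,
not the authoritative definition):

        typedef struct ixgbe_vlan {
                list_node_t     ixvl_link;      /* rx_group->vlans linkage */
                uint16_t        ixvl_vid;       /* VLAN ID */
                uint_t          ixvl_refs;      /* clients using this VID */
        } ixgbe_vlan_t;
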
1168 
1169         for (i = 0; i < ixgbe->num_tx_rings; i++) {
1170                 tx_ring = &ixgbe->tx_rings[i];
1171                 tx_ring->index = i;
1172                 tx_ring->ixgbe = ixgbe;
1173                 if (ixgbe->tx_head_wb_enable)
1174                         tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1175                 else
1176                         tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1177 
1178                 tx_ring->ring_size = ixgbe->tx_ring_size;
1179                 tx_ring->free_list_size = ixgbe->tx_ring_size +
1180                     (ixgbe->tx_ring_size >> 1);
1181         }
1182 
1183         /*
1184          * Initialize values of interrupt throttling rate
1185          */
1186         for (i = 1; i < MAX_INTR_VECTOR; i++)


1897          * autonegotiation will allow (usually 10Gb, infrastructure allowing)
1898          * so we never bothered with explicitly setting the link to 10Gb as it
1899          * would already be at that state on driver attach. With X550, we must
1900          * trigger a re-negotiation of the link in order to switch from a LPLU
1901          * 1Gb link to 10Gb (cable and link partner permitting.)
1902          */
1903         if (hw->mac.type == ixgbe_mac_X550 ||
1904             hw->mac.type == ixgbe_mac_X550EM_a ||
1905             hw->mac.type == ixgbe_mac_X550EM_x) {
1906                 (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
1907                 ixgbe_get_hw_state(ixgbe);
1908         }
1909 
1910         if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1911                 goto start_failure;
1912         }
1913 
1914         /*
1915          * Setup the rx/tx rings
1916          */
1917         if (ixgbe_setup_rings(ixgbe) != IXGBE_SUCCESS)
1918                 goto start_failure;
1919 
1920         /*
1921          * ixgbe_start() will be called when resetting, however if reset
1922          * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1923          * before enabling the interrupts.
1924          */
1925         atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1926             | IXGBE_STALL | IXGBE_OVERTEMP));
1927 
1928         /*
1929          * Enable adapter interrupts
1930          * The interrupts must be enabled after the driver state is START
1931          */
1932         ixgbe_enable_adapter_interrupts(ixgbe);
1933 
1934         for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1935                 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1936         for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1937                 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1938 


2271 }
2272 
2273 /*
2274  * ixgbe_free_rings - Free the memory space of rx/tx rings.
2275  */
2276 static void
2277 ixgbe_free_rings(ixgbe_t *ixgbe)
2278 {
2279         if (ixgbe->rx_rings != NULL) {
2280                 kmem_free(ixgbe->rx_rings,
2281                     sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2282                 ixgbe->rx_rings = NULL;
2283         }
2284 
2285         if (ixgbe->tx_rings != NULL) {
2286                 kmem_free(ixgbe->tx_rings,
2287                     sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2288                 ixgbe->tx_rings = NULL;
2289         }
2290 
2291         for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
2292                 ixgbe_vlan_t *vlp;
2293                 ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[i];
2294 
2295                 while ((vlp = list_remove_head(&rx_group->vlans)) != NULL)
2296                         kmem_free(vlp, sizeof (ixgbe_vlan_t));
2297 
2298                 list_destroy(&rx_group->vlans);
2299         }
2300 
2301         if (ixgbe->rx_groups != NULL) {
2302                 kmem_free(ixgbe->rx_groups,
2303                     sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2304                 ixgbe->rx_groups = NULL;
2305         }
2306 }
2307 
2308 static int
2309 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2310 {
2311         ixgbe_rx_ring_t *rx_ring;
2312         int i;
2313 
2314         for (i = 0; i < ixgbe->num_rx_rings; i++) {
2315                 rx_ring = &ixgbe->rx_rings[i];
2316                 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2317                         goto alloc_rx_rings_failure;
2318         }
2319         return (IXGBE_SUCCESS);
2320 


2335 
2336                 mutex_enter(&ixgbe->rx_pending_lock);
2337                 rx_data = rx_ring->rx_data;
2338 
2339                 if (rx_data != NULL) {
2340                         rx_data->flag |= IXGBE_RX_STOPPED;
2341 
2342                         if (rx_data->rcb_pending == 0) {
2343                                 ixgbe_free_rx_ring_data(rx_data);
2344                                 rx_ring->rx_data = NULL;
2345                         }
2346                 }
2347 
2348                 mutex_exit(&ixgbe->rx_pending_lock);
2349         }
2350 }
2351 
2352 /*
2353  * ixgbe_setup_rings - Setup rx/tx rings.
2354  */
2355 static int
2356 ixgbe_setup_rings(ixgbe_t *ixgbe)
2357 {
2358         /*
2359          * Setup the rx/tx rings, including the following:
2360          *
2361          * 1. Setup the descriptor ring and the control block buffers;
2362          * 2. Initialize necessary registers for receive/transmit;
2363          * 3. Initialize software pointers/parameters for receive/transmit;
2364          */
2365         if (ixgbe_setup_rx(ixgbe) != IXGBE_SUCCESS)
2366                 return (IXGBE_FAILURE);
2367 
2368         ixgbe_setup_tx(ixgbe);
2369 
2370         return (IXGBE_SUCCESS);
2371 }
2372 
2373 static void
2374 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2375 {
2376         ixgbe_t *ixgbe = rx_ring->ixgbe;
2377         ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2378         struct ixgbe_hw *hw = &ixgbe->hw;
2379         rx_control_block_t *rcb;
2380         union ixgbe_adv_rx_desc *rbd;
2381         uint32_t size;
2382         uint32_t buf_low;
2383         uint32_t buf_high;
2384         uint32_t reg_val;
2385         int i;
2386 
2387         ASSERT(mutex_owned(&rx_ring->rx_lock));
2388         ASSERT(mutex_owned(&ixgbe->gen_lock));
2389 
2390         for (i = 0; i < ixgbe->rx_ring_size; i++) {


2437         if (hw->mac.type == ixgbe_mac_82599EB ||
2438             hw->mac.type == ixgbe_mac_X540 ||
2439             hw->mac.type == ixgbe_mac_X550 ||
2440             hw->mac.type == ixgbe_mac_X550EM_x ||
2441             hw->mac.type == ixgbe_mac_X550EM_a) {
2442                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2443                 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2444                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2445         }
2446 
2447         /*
2448          * Setup the Split and Replication Receive Control Register.
2449          * Set the rx buffer size and the advanced descriptor type.
2450          */
2451         reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2452             IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2453         reg_val |= IXGBE_SRRCTL_DROP_EN;
2454         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2455 }
2456 
2457 static int
2458 ixgbe_setup_rx(ixgbe_t *ixgbe)
2459 {
2460         ixgbe_rx_ring_t *rx_ring;
2461         struct ixgbe_hw *hw = &ixgbe->hw;
2462         uint32_t reg_val;
2463         uint32_t i;
2464         uint32_t psrtype_rss_bit;
2465 
2466         /*
2467          * Ensure that Rx is disabled while setting up
2468          * the Rx unit and Rx descriptor ring(s)
2469          */
2470         ixgbe_disable_rx(hw);
2471 
2472         /* PSRTYPE must be configured for 82599 */
2473         if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2474             ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2475                 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2476                     IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2477                 reg_val |= IXGBE_PSRTYPE_L2HDR;


2530                 /*
2531                  * Multiple groups, each group has one ring,
2532                  * only VMDq is needed.
2533                  */
2534                 ixgbe_setup_vmdq(ixgbe);
2535                 break;
2536 
2537         case IXGBE_CLASSIFY_VMDQ_RSS:
2538                 /*
2539                  * Multiple groups and multiple rings, both
2540                  * VMDq and RSS are needed.
2541                  */
2542                 ixgbe_setup_vmdq_rss(ixgbe);
2543                 break;
2544 
2545         default:
2546                 break;
2547         }
2548 
2549         /*
2550          * Initialize VLAN SW and HW state if VLAN filtering is
2551          * enabled.
2552          */
2553         if (ixgbe->vlft_enabled) {
2554                 if (ixgbe_init_vlan(ixgbe) != IXGBE_SUCCESS)
2555                         return (IXGBE_FAILURE);
2556         }
2557 
2558         /*
2559          * Enable the receive unit.  This must be done after filter
2560          * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2561          * 82598 is the only adapter which defines this RXCTRL option.
2562          */
2563         reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2564         if (hw->mac.type == ixgbe_mac_82598EB)
2565                 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2566         reg_val |= IXGBE_RXCTRL_RXEN;
2567         (void) ixgbe_enable_rx_dma(hw, reg_val);
2568 
2569         /*
2570          * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2571          */
2572         for (i = 0; i < ixgbe->num_rx_rings; i++) {
2573                 rx_ring = &ixgbe->rx_rings[i];
2574                 ixgbe_setup_rx_ring(rx_ring);
2575         }
2576 
2577         /*
2578          * The 82598 controller gives us the RNBC (Receive No Buffer


2629                         reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2630                         reg_val |= IXGBE_RSCCTL_RSCEN;
2631                         if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2632                                 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2633                         else
2634                                 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2635                         IXGBE_WRITE_REG(hw,  IXGBE_RSCCTL(i), reg_val);
2636                 }
2637 
2638                 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2639                 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2640                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2641 
2642                 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2643                 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2644                 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2645                 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2646 
2647                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2648         }
2649 
2650         return (IXGBE_SUCCESS);
2651 }
2652 
2653 static void
2654 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2655 {
2656         ixgbe_t *ixgbe = tx_ring->ixgbe;
2657         struct ixgbe_hw *hw = &ixgbe->hw;
2658         uint32_t size;
2659         uint32_t buf_low;
2660         uint32_t buf_high;
2661         uint32_t reg_val;
2662 
2663         ASSERT(mutex_owned(&tx_ring->tx_lock));
2664         ASSERT(mutex_owned(&ixgbe->gen_lock));
2665 
2666         /*
2667          * Initialize the length register
2668          */
2669         size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2670         IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);


2832         mrqc = IXGBE_MRQC_RSSEN |
2833             IXGBE_MRQC_RSS_FIELD_IPV4 |
2834             IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2835             IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2836             IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2837             IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2838             IXGBE_MRQC_RSS_FIELD_IPV6 |
2839             IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2840             IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2841             IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2842         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2843 }
2844 
2845 /*
2846  * ixgbe_setup_vmdq - Setup MAC classification feature
2847  */
2848 static void
2849 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2850 {
2851         struct ixgbe_hw *hw = &ixgbe->hw;
2852         uint32_t vmdctl, i, vtctl, vlnctl;
2853 
2854         /*
2855          * Setup the VMDq Control register, enable VMDq based on
2856          * packet destination MAC address:
2857          */
2858         switch (hw->mac.type) {
2859         case ixgbe_mac_82598EB:
2860                 /*
2861                  * VMDq Enable = 1;
2862                  * VMDq Filter = 0; MAC filtering
2863                  * Default VMDq output index = 0;
2864                  */
2865                 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2866                 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2867                 break;
2868 
2869         case ixgbe_mac_82599EB:
2870         case ixgbe_mac_X540:
2871         case ixgbe_mac_X550:
2872         case ixgbe_mac_X550EM_x:
2873         case ixgbe_mac_X550EM_a:
2874                 /*
2875                  * Enable VMDq-only.
2876                  */
2877                 vmdctl = IXGBE_MRQC_VMDQEN;
2878                 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2879 
2880                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2881                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2882                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2883                 }
2884 
2885                 /*
2886                  * Enable Virtualization and Replication.
2887                  */
2888                 vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2889                 ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
2890                 vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2891                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2892 
2893                 /*
2894                  * Enable VLAN filtering and switching (VFTA and VLVF).
2895                  */
2896                 vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2897                 vlnctl |= IXGBE_VLNCTRL_VFE;
2898                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
2899                 ixgbe->vlft_enabled = B_TRUE;
2900 
2901                 /*
2902                  * Enable receiving packets to all VFs
2903                  */
2904                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2905                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2906                 break;
2907 
2908         default:
2909                 break;
2910         }
2911 }
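
With VLNCTRL.VFE set above, the MAC accepts a tagged frame only if its
VID hits in the 4096-bit VFTA. A conceptual sketch of that lookup (one
bit per VID across 128 32-bit words; not driver code):

        hit = (IXGBE_READ_REG(hw, IXGBE_VFTA(vid >> 5)) &
            (1U << (vid & 0x1f))) != 0;
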
2912 
2913 /*
2914  * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2915  */
2916 static void
2917 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2918 {
2919         struct ixgbe_hw *hw = &ixgbe->hw;
2920         uint32_t i, mrqc;
2921         uint32_t vtctl, vmdctl, vlnctl;
2922 
2923         /*
2924          * Initialize RETA/ERETA table
2925          */
2926         ixgbe_setup_rss_table(ixgbe);
2927 
2928         /*
2929          * Enable and setup RSS and VMDq
2930          */
2931         switch (hw->mac.type) {
2932         case ixgbe_mac_82598EB:
2933                 /*
2934                  * Enable RSS & Setup RSS Hash functions
2935                  */
2936                 mrqc = IXGBE_MRQC_RSSEN |
2937                     IXGBE_MRQC_RSS_FIELD_IPV4 |
2938                     IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2939                     IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2940                     IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2941                     IXGBE_MRQC_RSS_FIELD_IPV6_EX |


2985 
2986                 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2987                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2988                         IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2989                 }
2990                 break;
2991 
2992         default:
2993                 break;
2994 
2995         }
2996 
2997         if (hw->mac.type == ixgbe_mac_82599EB ||
2998             hw->mac.type == ixgbe_mac_X540 ||
2999             hw->mac.type == ixgbe_mac_X550 ||
3000             hw->mac.type == ixgbe_mac_X550EM_x ||
3001             hw->mac.type == ixgbe_mac_X550EM_a) {
3002                 /*
3003                  * Enable Virtualization and Replication.
3004                  */
3005                 vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3006                 ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
3007                 vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3009                 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
3010 
3011                 /*
3012                  * Enable VLAN filtering and switching (VFTA and VLVF).
3013                  */
3014                 vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3015                 vlnctl |= IXGBE_VLNCTRL_VFE;
3016                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
3017                 ixgbe->vlft_enabled = B_TRUE;
3018 
3019                 /*
3020                  * Enable receiving packets to all VFs
3021                  */
3022                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
3023                 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
3024         }
3025 }
3026 
3027 /*
3028  * ixgbe_setup_rss_table - Setup RSS table
3029  */
3030 static void
3031 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
3032 {
3033         struct ixgbe_hw *hw = &ixgbe->hw;
3034         uint32_t i, j;
3035         uint32_t random;
3036         uint32_t reta;
3037         uint32_t ring_per_group;
3038         uint32_t ring;
3039         uint32_t table_size;


3170 /*
3171  * ixgbe_unicst_find - Find the slot for the specified unicast address
3172  */
3173 int
3174 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3175 {
3176         int slot;
3177 
3178         ASSERT(mutex_owned(&ixgbe->gen_lock));
3179 
3180         for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3181                 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3182                     mac_addr, ETHERADDRL) == 0)
3183                         return (slot);
3184         }
3185 
3186         return (-1);
3187 }
3188 
3189 /*
3190  * Restore the HW state to match the SW state during restart.
3191  */
3192 static int
3193 ixgbe_init_vlan(ixgbe_t *ixgbe)
3194 {
3195         /*
3196          * The device is starting for the first time; there is nothing
3197          * to do.
3198          */
3199         if (!ixgbe->vlft_init) {
3200                 ixgbe->vlft_init = B_TRUE;
3201                 return (IXGBE_SUCCESS);
3202         }
3203 
3204         for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
3205                 int                     ret;
3206                 boolean_t               vlvf_bypass;
3207                 ixgbe_rx_group_t        *rxg = &ixgbe->rx_groups[i];
3208                 struct ixgbe_hw         *hw = &ixgbe->hw;
3209 
3210                 if (rxg->aupe) {
3211                         uint32_t vml2flt;
3212 
3213                         vml2flt = IXGBE_READ_REG(hw, IXGBE_VMOLR(rxg->index));
3214                         vml2flt |= IXGBE_VMOLR_AUPE;
3215                         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rxg->index), vml2flt);
3216                 }
3217 
3218                 vlvf_bypass = (rxg->index == ixgbe->rx_def_group);
3219                 for (ixgbe_vlan_t *vlp = list_head(&rxg->vlans); vlp != NULL;
3220                     vlp = list_next(&rxg->vlans, vlp)) {
3221                         ret = ixgbe_set_vfta(hw, vlp->ixvl_vid, rxg->index,
3222                             B_TRUE, vlvf_bypass);
3223 
3224                         if (ret != IXGBE_SUCCESS) {
3225                                 ixgbe_error(ixgbe, "Failed to program VFTA"
3226                                     " for group %u, VID: %u, ret: %d.",
3227                                     rxg->index, vlp->ixvl_vid, ret);
3228                                 return (IXGBE_FAILURE);
3229                         }
3230                 }
3231         }
3232 
3233         return (IXGBE_SUCCESS);
3234 }
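
Note the vlvf_bypass argument: ixgbe_set_vfta() always sets the VFTA
bit, and the flag controls whether the common code also claims one of
the 64 VLVF/VLVFB slots pairing a VID with a pool bitmap. A sketch of
the call pattern used here and in ixgbe_addvlan() below:

        /* reserved group: claim a VLVF slot so HW switching is guaranteed */
        ret = ixgbe_set_vfta(hw, vid, rxg->index, B_TRUE, B_FALSE);
        /* default group: VFTA bit only, leaving VLVF slots for the rest */
        ret = ixgbe_set_vfta(hw, vid, rxg->index, B_TRUE, B_TRUE);
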
3235 
3236 /*
3237  * ixgbe_multicst_add - Add a multicast address.
3238  */
3239 int
3240 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3241 {
3242         ASSERT(mutex_owned(&ixgbe->gen_lock));
3243 
3244         if ((multiaddr[0] & 01) == 0) {
3245                 return (EINVAL);
3246         }
3247 
3248         if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3249                 return (ENOENT);
3250         }
3251 
3252         bcopy(multiaddr,
3253             &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3254         ixgbe->mcast_count++;
3255 
3256         /*


6242                 if (ixgbe->intr_type &
6243                     (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6244                         mintr->mi_ddi_handle =
6245                             ixgbe->htable[tx_ring->intr_vector];
6246                 }
6247                 break;
6248         }
6249         default:
6250                 break;
6251         }
6252 }
6253 
6254 /*
6255  * Callback function for MAC layer to register all groups.
6256  */
6257 void
6258 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6259     mac_group_info_t *infop, mac_group_handle_t gh)
6260 {
6261         ixgbe_t *ixgbe = (ixgbe_t *)arg;
6262         struct ixgbe_hw *hw = &ixgbe->hw;
6263 
6264         switch (rtype) {
6265         case MAC_RING_TYPE_RX: {
6266                 ixgbe_rx_group_t *rx_group;
6267 
6268                 rx_group = &ixgbe->rx_groups[index];
6269                 rx_group->group_handle = gh;
6270 
6271                 infop->mgi_driver = (mac_group_driver_t)rx_group;
6272                 infop->mgi_start = NULL;
6273                 infop->mgi_stop = NULL;
6274                 infop->mgi_addmac = ixgbe_addmac;
6275                 infop->mgi_remmac = ixgbe_remmac;
6276 
6277                 if ((ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ ||
6278                     ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) &&
6279                     (hw->mac.type == ixgbe_mac_82599EB ||
6280                     hw->mac.type == ixgbe_mac_X540 ||
6281                     hw->mac.type == ixgbe_mac_X550 ||
6282                     hw->mac.type == ixgbe_mac_X550EM_x)) {
6283                         infop->mgi_addvlan = ixgbe_addvlan;
6284                         infop->mgi_remvlan = ixgbe_remvlan;
6285                 } else {
6286                         infop->mgi_addvlan = NULL;
6287                         infop->mgi_remvlan = NULL;
6288                 }
6289 
6290                 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6291 
6292                 break;
6293         }
6294         case MAC_RING_TYPE_TX:
6295                 break;
6296         default:
6297                 break;
6298         }
6299 }
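
The mgi_addvlan/mgi_remvlan entry points registered above are how the
MAC layer pushes VLAN filters down to a group. A sketch of the
driver-side shape, using the prototypes from earlier in the file (group
0 and VID 100 are illustrative):

        mac_group_driver_t gd = (mac_group_driver_t)&ixgbe->rx_groups[0];
        int err = ixgbe_addvlan(gd, 100);       /* accept VID 100 */
        if (err == 0)
                err = ixgbe_remvlan(gd, 100);   /* and release it */
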
6300 
6301 /*
6302  * Enable interrupt on the specified rx ring.
6303  */
6304 int
6305 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6306 {
6307         ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6308         ixgbe_t *ixgbe = rx_ring->ixgbe;
6309         int r_idx = rx_ring->index;


6368                  * stack via interrupts before ixgbe_rx_ring_intr_enable()
6369                  * is called again. This is acceptable since interrupt
6370                  * adjustment is infrequent, and the stack will be
6371                  * able to handle these packets.
6372                  */
6373                 return (0);
6374         }
6375 
6376         /*
6377          * Disable the interrupt by clearing the VAL bit of the given
6378          * interrupt vector allocation register (IVAR).
6379          */
6380         ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6381 
6382         BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6383 
6384         mutex_exit(&ixgbe->gen_lock);
6385 
6386         return (0);
6387 }
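
For reference, a sketch of what ixgbe_disable_ivar() does on
82599-class MACs: each 32-bit IVAR register holds four one-byte entries
(two queues x {rx, tx}), and bit 7 of each byte is the VAL bit.

        /* clear the VAL bit of the rx entry for hardware queue q (sketch) */
        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(q >> 1));
        ivar &= ~(IXGBE_IVAR_ALLOC_VAL << ((q & 1) * 16));
        IXGBE_WRITE_REG(hw, IXGBE_IVAR(q >> 1), ivar);
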
6388 
6389 static ixgbe_vlan_t *
6390 ixgbe_find_vlan(ixgbe_rx_group_t *rx_group, uint16_t vid)
6391 {
6392         for (ixgbe_vlan_t *vlp = list_head(&rx_group->vlans); vlp != NULL;
6393             vlp = list_next(&rx_group->vlans, vlp)) {
6394                 if (vlp->ixvl_vid == vid)
6395                         return (vlp);
6396         }
6397 
6398         return (NULL);
6399 }
6400 
6401 /*
6402  * Attempt to use a VLAN HW filter for this group. If the group is
6403  * interested in untagged packets then set AUPE only. If the group is
6404  * the default then only set the VFTA. Leave the VLVF slots open for
6405  * reserved groups to guarantee their use of HW filtering.
6406  */
6407 static int
6408 ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid)
6409 {
6410         ixgbe_rx_group_t        *rx_group = (ixgbe_rx_group_t *)gdriver;
6411         ixgbe_t                 *ixgbe = rx_group->ixgbe;
6412         struct ixgbe_hw         *hw = &ixgbe->hw;
6413         ixgbe_vlan_t            *vlp;
6414         int                     ret;
6415         boolean_t               is_def_grp;
6416 
6417         mutex_enter(&ixgbe->gen_lock);
6418 
6419         if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6420                 mutex_exit(&ixgbe->gen_lock);
6421                 return (ECANCELED);
6422         }
6423 
6424         /*
6425          * Let's be sure VLAN filtering is enabled.
6426          */
6427         VERIFY3B(ixgbe->vlft_enabled, ==, B_TRUE);
6428         is_def_grp = (rx_group->index == ixgbe->rx_def_group);
6429 
6430         /*
6431          * VLAN filtering is enabled but we want to receive untagged
6432          * traffic on this group -- set the AUPE bit on the group and
6433          * leave the VLAN tables alone.
6434          */
6435         if (vid == MAC_VLAN_UNTAGGED) {
6436                 /*
6437                  * We never enable AUPE on the default group; it is
6438                  * redundant. Untagged traffic which passes L2
6439                  * filtering is delivered to the default group if no
6440                  * other group is interested.
6441                  */
6442                 if (!is_def_grp) {
6443                         uint32_t vml2flt;
6444 
6445                         vml2flt = IXGBE_READ_REG(hw,
6446                             IXGBE_VMOLR(rx_group->index));
6447                         vml2flt |= IXGBE_VMOLR_AUPE;
6448                         IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rx_group->index),
6449                             vml2flt);
6450                         rx_group->aupe = B_TRUE;
6451                 }
6452 
6453                 mutex_exit(&ixgbe->gen_lock);
6454                 return (0);
6455         }
6456 
6457         vlp = ixgbe_find_vlan(rx_group, vid);
6458         if (vlp != NULL) {
6459                 /* Only the default group supports multiple clients. */
6460                 VERIFY3B(is_def_grp, ==, B_TRUE);
6461                 vlp->ixvl_refs++;
6462                 mutex_exit(&ixgbe->gen_lock);
6463                 return (0);
6464         }
6465 
6466         /*
6467          * The default group doesn't require a VLVF entry, only a VFTA
6468          * entry. All traffic passing L2 filtering (MPSAR + VFTA) is
6469          * delivered to the default group if no other group is
6470          * interested. The fourth argument, vlvf_bypass, tells the
6471          * ixgbe common code to avoid using a VLVF slot if one isn't
6472          * already allocated to this VLAN.
6473          *
6474          * This logic is meant to reserve VLVF slots for use by
6475          * reserved groups: guaranteeing their use of HW filtering.
6476          */
6477         ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE, is_def_grp);
6478 
6479         if (ret == IXGBE_SUCCESS) {
6480                 vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_SLEEP);
6481                 vlp->ixvl_vid = vid;
6482                 vlp->ixvl_refs = 1;
6483                 list_insert_tail(&rx_group->vlans, vlp);
6484                 mutex_exit(&ixgbe->gen_lock);
6485                 return (0);
6486         }
6487 
6488         /*
6489          * We should actually never return ENOSPC because we've set
6490          * things up so that every reserved group is guaranteed to
6491          * have a VLVF slot.
6492          */
6493         if (ret == IXGBE_ERR_PARAM)
6494                 ret = EINVAL;
6495         else if (ret == IXGBE_ERR_NO_SPACE)
6496                 ret = ENOSPC;
6497         else
6498                 ret = EIO;
6499 
6500         mutex_exit(&ixgbe->gen_lock);
6501         return (ret);
6502 }
6503 
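/*
 * Editor's illustrative sketch, not driver code: the VFTA is a 4096-bit
 * table spread across 128 32-bit registers, one bit per VLAN ID. The
 * register/bit pair for a given vid is derived as below; this is, in
 * essence, what the common-code ixgbe_set_vfta() sets or clears (its VLVF
 * pool bookkeeping is omitted here, and the function name is invented).
 */
static void
example_vfta_set(struct ixgbe_hw *hw, uint16_t vid)
{
	uint32_t reg = vid >> 5;		/* 32 VLAN IDs per register */
	uint32_t bit = 1U << (vid & 0x1f);
	uint32_t vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(reg));

	IXGBE_WRITE_REG(hw, IXGBE_VFTA(reg), vfta | bit);
}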
6504 /*
6505  * Attempt to remove the VLAN HW filter associated with this group. If
6506  * we are removing a HW filter for the default group then we know only
6507  * the VFTA was set (VLVF is reserved for non-default/reserved
6508  * groups). If the group wishes to stop receiving untagged traffic
6509  * then clear the AUPE but leave the VLAN filters alone.
6510  */
6511 static int
6512 ixgbe_remvlan(mac_group_driver_t gdriver, uint16_t vid)
6513 {
6514         ixgbe_rx_group_t        *rx_group = (ixgbe_rx_group_t *)gdriver;
6515         ixgbe_t                 *ixgbe = rx_group->ixgbe;
6516         struct ixgbe_hw         *hw = &ixgbe->hw;
6517         int                     ret;
6518         ixgbe_vlan_t            *vlp;
6519         boolean_t               is_def_grp;
6520 
6521         mutex_enter(&ixgbe->gen_lock);
6522 
6523         if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6524                 mutex_exit(&ixgbe->gen_lock);
6525                 return (ECANCELED);
6526         }
6527 
6528         is_def_grp = (rx_group->index == ixgbe->rx_def_group);
6529 
6530         /* See the AUPE comment in ixgbe_addvlan(). */
6531         if (vid == MAC_VLAN_UNTAGGED) {
6532                 if (!is_def_grp) {
6533                         uint32_t vml2flt;
6534 
6535                         vml2flt = IXGBE_READ_REG(hw,
6536                             IXGBE_VMOLR(rx_group->index));
6537                         vml2flt &= ~IXGBE_VMOLR_AUPE;
6538                         IXGBE_WRITE_REG(hw,
6539                             IXGBE_VMOLR(rx_group->index), vml2flt);
6540                         rx_group->aupe = B_FALSE;
6541                 }
6542                 mutex_exit(&ixgbe->gen_lock);
6543                 return (0);
6544         }
6545 
6546         vlp = ixgbe_find_vlan(rx_group, vid);
6547         if (vlp == NULL) {
                     mutex_exit(&ixgbe->gen_lock);
6548                 return (ENOENT);
             }
6549 
6550         /*
6551          * See the comment in ixgbe_addvlan() about is_def_grp and
6552          * vlvf_bypass.
6553          */
6554         if (vlp->ixvl_refs == 1) {
6555                 ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_FALSE,
6556                     is_def_grp);
6557         } else {
6558                 /*
6559                  * Only the default group can have multiple clients.
6560                  * If there is more than one client, leave the
6561                  * VFTA[vid] bit alone.
6562                  */
6563                 VERIFY3B(is_def_grp, ==, B_TRUE);
6564                 VERIFY3U(vlp->ixvl_refs, >, 1);
6565                 vlp->ixvl_refs--;
6566                 mutex_exit(&ixgbe->gen_lock);
6567                 return (0);
6568         }
6569 
6570         if (ret != IXGBE_SUCCESS) {
6571                 mutex_exit(&ixgbe->gen_lock);
6572                 /* IXGBE_ERR_PARAM should be the only possible error here. */
6573                 if (ret == IXGBE_ERR_PARAM)
6574                         return (EINVAL);
6575                 else
6576                         return (EIO);
6577         }
6578 
6579         VERIFY3U(vlp->ixvl_refs, ==, 1);
6580         vlp->ixvl_refs = 0;
6581         list_remove(&rx_group->vlans, vlp);
6582         kmem_free(vlp, sizeof (ixgbe_vlan_t));
6583 
6584         /*
6585          * Calling ixgbe_set_vfta() on a non-default group may have
6586          * cleared the VFTA[vid] bit even though the default group
6587          * still has clients using the vid. This happens because the
6588          * ixgbe common code doesn't ref count the use of VLANs. Check
6589          * for any use of vid on the default group and make sure the
6590          * VFTA[vid] bit is set. This operation is idempotent: setting
6591          * VFTA[vid] to true if already true won't hurt anything.
6592          */
6593         if (!is_def_grp) {
6594                 ixgbe_rx_group_t *defgrp;
6595 
6596                 defgrp = &ixgbe->rx_groups[ixgbe->rx_def_group];
6597                 vlp = ixgbe_find_vlan(defgrp, vid);
6598                 if (vlp != NULL) {
6599                         /* This shouldn't fail, but if it does return EIO. */
6600                         ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE,
6601                             B_TRUE);
6602                         if (ret != IXGBE_SUCCESS) {
                                        mutex_exit(&ixgbe->gen_lock);
6603                                 return (EIO);
                                }
6604                 }
6605         }
6606 
6607         mutex_exit(&ixgbe->gen_lock);
6608         return (0);
6609 }
6610 
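/*
 * Editor's note (illustrative, not driver code): the intended ref-count
 * behavior of the addvlan/remvlan pair above, assuming two clients share
 * a VLAN on the default group:
 *
 *	ixgbe_addvlan(defgrp, 5);	refs = 1, VFTA[5] set
 *	ixgbe_addvlan(defgrp, 5);	refs = 2, HW untouched
 *	ixgbe_remvlan(defgrp, 5);	refs = 1, HW untouched
 *	ixgbe_remvlan(defgrp, 5);	refs = 0, VFTA[5] cleared
 *
 * Non-default groups never share a VLAN, so their entries go straight
 * from refs == 1 to removal.
 */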
6611 /*
6612  * Add a mac address.
6613  */
6614 static int
6615 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6616 {
6617         ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6618         ixgbe_t *ixgbe = rx_group->ixgbe;
6619         struct ixgbe_hw *hw = &ixgbe->hw;
6620         int slot, i;
6621 
6622         mutex_enter(&ixgbe->gen_lock);
6623 
6624         if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6625                 mutex_exit(&ixgbe->gen_lock);
6626                 return (ECANCELED);
6627         }
6628 
6629         if (ixgbe->unicst_avail == 0) {