8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2017, Joyent, Inc.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
33 */
34
35 #include "ixgbe_sw.h"
36
37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38
39 /*
40 * Local function prototypes
41 */
42 static int ixgbe_register_mac(ixgbe_t *);
43 static int ixgbe_identify_hardware(ixgbe_t *);
44 static int ixgbe_regs_map(ixgbe_t *);
45 static void ixgbe_init_properties(ixgbe_t *);
46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 static void ixgbe_init_locks(ixgbe_t *);
48 static void ixgbe_destroy_locks(ixgbe_t *);
2424 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2425 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2426 }
2427
2428 /*
2429 * Setup the Split and Replication Receive Control Register.
2430 * Set the rx buffer size and the advanced descriptor type.
2431 */
2432 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2433 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2434 reg_val |= IXGBE_SRRCTL_DROP_EN;
2435 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2436 }
2437
2438 static void
2439 ixgbe_setup_rx(ixgbe_t *ixgbe)
2440 {
2441 ixgbe_rx_ring_t *rx_ring;
2442 struct ixgbe_hw *hw = &ixgbe->hw;
2443 uint32_t reg_val;
2444 uint32_t ring_mapping;
2445 uint32_t i, index;
2446 uint32_t psrtype_rss_bit;
2447
2448 /*
2449 * Ensure that Rx is disabled while setting up
2450 * the Rx unit and Rx descriptor ring(s)
2451 */
2452 ixgbe_disable_rx(hw);
2453
2454 /* PSRTYPE must be configured for 82599 */
2455 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2456 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2457 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2458 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2459 reg_val |= IXGBE_PSRTYPE_L2HDR;
2460 reg_val |= 0x80000000;
2461 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2462 } else {
2463 if (ixgbe->num_rx_groups > 32) {
2464 psrtype_rss_bit = 0x20000000;
2465 } else {
2531 /*
2532 * Enable the receive unit. This must be done after filter
2533 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2534 * 82598 is the only adapter which defines this RXCTRL option.
2535 */
2536 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2537 if (hw->mac.type == ixgbe_mac_82598EB)
2538 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2539 reg_val |= IXGBE_RXCTRL_RXEN;
2540 (void) ixgbe_enable_rx_dma(hw, reg_val);
2541
2542 /*
2543 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2544 */
2545 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2546 rx_ring = &ixgbe->rx_rings[i];
2547 ixgbe_setup_rx_ring(rx_ring);
2548 }
2549
2550 /*
2551 * Setup the per-ring statistics mapping.
2552 */
2553 ring_mapping = 0;
2554 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2555 index = ixgbe->rx_rings[i].hw_index;
2556 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2557 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2558 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2559 }
2560
2561 /*
2562 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2563 * by four bytes if the packet has a VLAN field, so includes MTU,
2564 * ethernet header and frame check sequence.
2565 * Register is MAXFRS in 82599.
2566 */
2567 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2568 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2569 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2570 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2571 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2572
2573 /*
2574 * Setup Jumbo Frame enable bit
2575 */
2576 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2577 if (ixgbe->default_mtu > ETHERMTU)
2578 reg_val |= IXGBE_HLREG0_JUMBOEN;
2693 tx_ring->tbd_free = tx_ring->ring_size;
2694
2695 if (ixgbe->tx_ring_init == B_TRUE) {
2696 tx_ring->tcb_head = 0;
2697 tx_ring->tcb_tail = 0;
2698 tx_ring->tcb_free = tx_ring->free_list_size;
2699 }
2700
2701 /*
2702 * Initialize the s/w context structure
2703 */
2704 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2705 }
2706
2707 static void
2708 ixgbe_setup_tx(ixgbe_t *ixgbe)
2709 {
2710 struct ixgbe_hw *hw = &ixgbe->hw;
2711 ixgbe_tx_ring_t *tx_ring;
2712 uint32_t reg_val;
2713 uint32_t ring_mapping;
2714 int i;
2715
2716 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2717 tx_ring = &ixgbe->tx_rings[i];
2718 ixgbe_setup_tx_ring(tx_ring);
2719 }
2720
2721 /*
2722 * Setup the per-ring statistics mapping.
2723 */
2724 ring_mapping = 0;
2725 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2726 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2727 if ((i & 0x3) == 0x3) {
2728 switch (hw->mac.type) {
2729 case ixgbe_mac_82598EB:
2730 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2731 ring_mapping);
2732 break;
2733
2734 case ixgbe_mac_82599EB:
2735 case ixgbe_mac_X540:
2736 case ixgbe_mac_X550:
2737 case ixgbe_mac_X550EM_x:
2738 case ixgbe_mac_X550EM_a:
2739 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2740 ring_mapping);
2741 break;
2742
2743 default:
2744 break;
2745 }
2746
2747 ring_mapping = 0;
2748 }
2749 }
2750 if (i & 0x3) {
2751 switch (hw->mac.type) {
2752 case ixgbe_mac_82598EB:
2753 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2754 break;
2755
2756 case ixgbe_mac_82599EB:
2757 case ixgbe_mac_X540:
2758 case ixgbe_mac_X550:
2759 case ixgbe_mac_X550EM_x:
2760 case ixgbe_mac_X550EM_a:
2761 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2762 break;
2763
2764 default:
2765 break;
2766 }
2767 }
2768
2769 /*
2770 * Enable CRC appending and TX padding (for short tx frames)
2771 */
2772 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2773 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2774 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2775
2776 /*
2777 * enable DMA for 82599, X540 and X550 parts
2778 */
2779 if (hw->mac.type == ixgbe_mac_82599EB ||
2780 hw->mac.type == ixgbe_mac_X540 ||
2781 hw->mac.type == ixgbe_mac_X550 ||
2782 hw->mac.type == ixgbe_mac_X550EM_x ||
2783 hw->mac.type == ixgbe_mac_X550EM_a) {
2784 /* DMATXCTL.TE must be set after all Tx config is complete */
2785 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2786 reg_val |= IXGBE_DMATXCTL_TE;
|
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright 2019 Joyent, Inc.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
33 */
34
35 #include "ixgbe_sw.h"
36
37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38
39 /*
40 * Local function prototypes
41 */
42 static int ixgbe_register_mac(ixgbe_t *);
43 static int ixgbe_identify_hardware(ixgbe_t *);
44 static int ixgbe_regs_map(ixgbe_t *);
45 static void ixgbe_init_properties(ixgbe_t *);
46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 static void ixgbe_init_locks(ixgbe_t *);
48 static void ixgbe_destroy_locks(ixgbe_t *);
2424 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2425 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2426 }
2427
2428 /*
2429 * Setup the Split and Replication Receive Control Register.
2430 * Set the rx buffer size and the advanced descriptor type.
2431 */
2432 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2433 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2434 reg_val |= IXGBE_SRRCTL_DROP_EN;
2435 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2436 }
2437
2438 static void
2439 ixgbe_setup_rx(ixgbe_t *ixgbe)
2440 {
2441 ixgbe_rx_ring_t *rx_ring;
2442 struct ixgbe_hw *hw = &ixgbe->hw;
2443 uint32_t reg_val;
2444 uint32_t i;
2445 uint32_t psrtype_rss_bit;
2446
2447 /*
2448 * Ensure that Rx is disabled while setting up
2449 * the Rx unit and Rx descriptor ring(s)
2450 */
2451 ixgbe_disable_rx(hw);
2452
2453 /* PSRTYPE must be configured for 82599 */
2454 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2455 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2456 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2457 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2458 reg_val |= IXGBE_PSRTYPE_L2HDR;
2459 reg_val |= 0x80000000;
2460 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2461 } else {
2462 if (ixgbe->num_rx_groups > 32) {
2463 psrtype_rss_bit = 0x20000000;
2464 } else {
2530 /*
2531 * Enable the receive unit. This must be done after filter
2532 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2533 * 82598 is the only adapter which defines this RXCTRL option.
2534 */
2535 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2536 if (hw->mac.type == ixgbe_mac_82598EB)
2537 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2538 reg_val |= IXGBE_RXCTRL_RXEN;
2539 (void) ixgbe_enable_rx_dma(hw, reg_val);
2540
2541 /*
2542 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2543 */
2544 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2545 rx_ring = &ixgbe->rx_rings[i];
2546 ixgbe_setup_rx_ring(rx_ring);
2547 }
2548
2549 /*
2550 * The 82598 controller gives us the RNBC (Receive No Buffer
2551 * Count) register to determine the number of frames dropped
2552 * due to no available descriptors on the destination queue.
2553 * However, this register was removed starting with 82599 and
2554 * it was replaced with the RQSMR/QPRDC registers. The nice
2555 * thing about the new registers is that they allow you to map
2556 * groups of queues to specific stat registers. The bad thing
2557 * is there are only 16 slots in the stat registers, so this
2558 * won't work when we have 32 Rx groups. Instead, we map all
2559 * queues to the zero slot of the stat registers, giving us a
2560 * global counter at QPRDC[0] (with the equivalent semantics
2561 * of RNBC). Perhaps future controllers will have more slots
2562 * and we can implement per-group counters.
2563 */
2564 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2565 uint32_t index = ixgbe->rx_rings[i].hw_index;
2566 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), 0);
2567 }
2568
2569 /*
2570 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2571 * by four bytes if the packet has a VLAN field, so includes MTU,
2572 * ethernet header and frame check sequence.
2573 * Register is MAXFRS in 82599.
2574 */
2575 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2576 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2577 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2578 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2579 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2580
2581 /*
2582 * Setup Jumbo Frame enable bit
2583 */
2584 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2585 if (ixgbe->default_mtu > ETHERMTU)
2586 reg_val |= IXGBE_HLREG0_JUMBOEN;
2701 tx_ring->tbd_free = tx_ring->ring_size;
2702
2703 if (ixgbe->tx_ring_init == B_TRUE) {
2704 tx_ring->tcb_head = 0;
2705 tx_ring->tcb_tail = 0;
2706 tx_ring->tcb_free = tx_ring->free_list_size;
2707 }
2708
2709 /*
2710 * Initialize the s/w context structure
2711 */
2712 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2713 }
2714
2715 static void
2716 ixgbe_setup_tx(ixgbe_t *ixgbe)
2717 {
2718 struct ixgbe_hw *hw = &ixgbe->hw;
2719 ixgbe_tx_ring_t *tx_ring;
2720 uint32_t reg_val;
2721 int i;
2722
2723 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2724 tx_ring = &ixgbe->tx_rings[i];
2725 ixgbe_setup_tx_ring(tx_ring);
2726 }
2727
2728 /*
2729 * Setup the per-ring statistics mapping. We map all Tx queues
2730 * to slot 0 to stay consistent with Rx.
2731 */
2732 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2733 switch (hw->mac.type) {
2734 case ixgbe_mac_82598EB:
2735 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 0);
2736 break;
2737
2738 default:
2739 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 0);
2740 break;
2741 }
2742 }
2743
2744 /*
2745 * Enable CRC appending and TX padding (for short tx frames)
2746 */
2747 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2748 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2749 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2750
2751 /*
2752 * enable DMA for 82599, X540 and X550 parts
2753 */
2754 if (hw->mac.type == ixgbe_mac_82599EB ||
2755 hw->mac.type == ixgbe_mac_X540 ||
2756 hw->mac.type == ixgbe_mac_X550 ||
2757 hw->mac.type == ixgbe_mac_X550EM_x ||
2758 hw->mac.type == ixgbe_mac_X550EM_a) {
2759 /* DMATXCTL.TE must be set after all Tx config is complete */
2760 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2761 reg_val |= IXGBE_DMATXCTL_TE;
|