11490 SRS ring polling disabled for VLANs
11491 Want DLS bypass for VLAN traffic
11492 add VLVF bypass to ixgbe core
2869 duplicate packets with vnics over aggrs
11489 DLS stat delete and aggr kstat can deadlock
Portions contributed by: Theo Schlossnagle <jesus@omniti.com>
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Dan McDonald <danmcd@joyent.com>
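
Reviewer's note on the overall shape of this change: the hunks below teach ixgbe to expose per-rx-group hardware VLAN filtering to the MAC layer, which is what allows DLS to bypass software VLAN classification (11491) and keeps SRS ring polling enabled for VLAN clients (11490). Concretely, ixgbe_setup_rings() and ixgbe_setup_rx() now return int (presumably so that VLAN filter programming failures can propagate out of chip setup), an ixgbe_init_vlan() helper is declared, each rx group gains a list of VLANs, and new ixgbe_addvlan()/ixgbe_remvlan() group-driver entry points appear. A minimal sketch of how such entry points are typically advertised through the MAC_CAPAB_RINGS group callback follows; the ixgbe_fill_group() shape and the mgi_addvlan/mgi_remvlan wiring are inferred from the declarations in this diff, not shown in these hunks:

/*
 * Hedged sketch: advertising the new VLAN hooks in the group info
 * that ixgbe returns from its MAC_CAPAB_RINGS group callback.
 */
static void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[index];

	ASSERT3S(rtype, ==, MAC_RING_TYPE_RX);
	rx_group->group_handle = gh;

	infop->mgi_driver = (mac_group_driver_t)rx_group;
	infop->mgi_start = NULL;
	infop->mgi_stop = NULL;
	infop->mgi_addmac = ixgbe_addmac;
	infop->mgi_remmac = ixgbe_remmac;
	/* New with this change: hardware VLAN filter entry points. */
	infop->mgi_addvlan = ixgbe_addvlan;
	infop->mgi_remvlan = ixgbe_remvlan;
	infop->mgi_count = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
}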
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright 2019 Joyent, Inc.
29 29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
33 33 */
34 34
35 35 #include "ixgbe_sw.h"
36 36
37 37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38 38
39 39 /*
40 40  * Local function prototypes
41 41 */
42 42 static int ixgbe_register_mac(ixgbe_t *);
43 43 static int ixgbe_identify_hardware(ixgbe_t *);
44 44 static int ixgbe_regs_map(ixgbe_t *);
45 45 static void ixgbe_init_properties(ixgbe_t *);
46 46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 47 static void ixgbe_init_locks(ixgbe_t *);
48 48 static void ixgbe_destroy_locks(ixgbe_t *);
49 49 static int ixgbe_init(ixgbe_t *);
50 50 static int ixgbe_chip_start(ixgbe_t *);
51 51 static void ixgbe_chip_stop(ixgbe_t *);
52 52 static int ixgbe_reset(ixgbe_t *);
53 53 static void ixgbe_tx_clean(ixgbe_t *);
54 54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 56 static int ixgbe_alloc_rings(ixgbe_t *);
57 57 static void ixgbe_free_rings(ixgbe_t *);
58 58 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 59 static void ixgbe_free_rx_data(ixgbe_t *);
60 -static void ixgbe_setup_rings(ixgbe_t *);
61 -static void ixgbe_setup_rx(ixgbe_t *);
60 +static int ixgbe_setup_rings(ixgbe_t *);
61 +static int ixgbe_setup_rx(ixgbe_t *);
62 62 static void ixgbe_setup_tx(ixgbe_t *);
63 63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 65 static void ixgbe_setup_rss(ixgbe_t *);
66 66 static void ixgbe_setup_vmdq(ixgbe_t *);
67 67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 68 static void ixgbe_setup_rss_table(ixgbe_t *);
69 69 static void ixgbe_init_unicst(ixgbe_t *);
70 +static int ixgbe_init_vlan(ixgbe_t *);
70 71 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
71 72 static void ixgbe_setup_multicst(ixgbe_t *);
72 73 static void ixgbe_get_hw_state(ixgbe_t *);
73 74 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
74 75 static void ixgbe_get_conf(ixgbe_t *);
75 76 static void ixgbe_init_params(ixgbe_t *);
76 77 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
77 78 static void ixgbe_driver_link_check(ixgbe_t *);
78 79 static void ixgbe_sfp_check(void *);
79 80 static void ixgbe_overtemp_check(void *);
80 81 static void ixgbe_phy_check(void *);
81 82 static void ixgbe_link_timer(void *);
82 83 static void ixgbe_local_timer(void *);
83 84 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
84 85 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
85 86 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
86 87 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
87 88 static boolean_t is_valid_mac_addr(uint8_t *);
88 89 static boolean_t ixgbe_stall_check(ixgbe_t *);
89 90 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
90 91 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
91 92 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
92 93 static int ixgbe_alloc_intrs(ixgbe_t *);
93 94 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
94 95 static int ixgbe_add_intr_handlers(ixgbe_t *);
95 96 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
96 97 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
97 98 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
98 99 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
99 100 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
100 101 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
101 102 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
102 103 static void ixgbe_setup_adapter_vector(ixgbe_t *);
103 104 static void ixgbe_rem_intr_handlers(ixgbe_t *);
104 105 static void ixgbe_rem_intrs(ixgbe_t *);
105 106 static int ixgbe_enable_intrs(ixgbe_t *);
106 107 static int ixgbe_disable_intrs(ixgbe_t *);
107 108 static uint_t ixgbe_intr_legacy(void *, void *);
108 109 static uint_t ixgbe_intr_msi(void *, void *);
109 110 static uint_t ixgbe_intr_msix(void *, void *);
110 111 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
111 112 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
112 113 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
113 114 static void ixgbe_get_driver_control(struct ixgbe_hw *);
114 115 static int ixgbe_addmac(void *, const uint8_t *);
115 116 static int ixgbe_remmac(void *, const uint8_t *);
117 +static int ixgbe_addvlan(mac_group_driver_t, uint16_t);
118 +static int ixgbe_remvlan(mac_group_driver_t, uint16_t);
116 119 static void ixgbe_release_driver_control(struct ixgbe_hw *);
117 120
118 121 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
119 122 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
120 123 static int ixgbe_resume(dev_info_t *);
121 124 static int ixgbe_suspend(dev_info_t *);
122 125 static int ixgbe_quiesce(dev_info_t *);
123 126 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
124 127 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
125 128 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
126 129 static int ixgbe_intr_cb_register(ixgbe_t *);
127 130 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
128 131
129 132 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
130 133 const void *impl_data);
131 134 static void ixgbe_fm_init(ixgbe_t *);
132 135 static void ixgbe_fm_fini(ixgbe_t *);
133 136
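The two VLAN entry points are only declared in the hunk above; their bodies land further down the file, outside this excerpt. As a hedged sketch of the bookkeeping shape such a group-driver hook takes (the ixgbe_vlan_t field names beyond ixvl_link, the ENOMEM path, and the elided VFTA/VLVF register programming are assumptions, not code from this change):

/*
 * Sketch only: reference-count a VID on the group's VLAN list. The
 * real entry point also programs the hardware filter (VFTA/VLVF)
 * for the group's pool, and can therefore fail.
 */
static int
ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid)
{
	ixgbe_rx_group_t *grp = (ixgbe_rx_group_t *)gdriver;
	ixgbe_t *ixgbe = grp->ixgbe;
	ixgbe_vlan_t *vlp;

	mutex_enter(&ixgbe->gen_lock);

	for (vlp = list_head(&grp->vlans); vlp != NULL;
	    vlp = list_next(&grp->vlans, vlp)) {
		if (vlp->ixvl_vid == vid) {
			vlp->ixvl_refs++;	/* VID already filtered */
			mutex_exit(&ixgbe->gen_lock);
			return (0);
		}
	}

	if ((vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_NOSLEEP)) == NULL) {
		mutex_exit(&ixgbe->gen_lock);
		return (ENOMEM);
	}
	vlp->ixvl_vid = vid;
	vlp->ixvl_refs = 1;
	list_insert_tail(&grp->vlans, vlp);

	/* ... program VFTA/VLVF for vid and this group's pool here ... */

	mutex_exit(&ixgbe->gen_lock);
	return (0);
}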
134 137 char *ixgbe_priv_props[] = {
135 138 "_tx_copy_thresh",
136 139 "_tx_recycle_thresh",
137 140 "_tx_overload_thresh",
138 141 "_tx_resched_thresh",
139 142 "_rx_copy_thresh",
140 143 "_rx_limit_per_intr",
141 144 "_intr_throttling",
142 145 "_adv_pause_cap",
143 146 "_adv_asym_pause_cap",
144 147 NULL
145 148 };
146 149
147 150 #define IXGBE_MAX_PRIV_PROPS \
148 151 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
149 152
150 153 static struct cb_ops ixgbe_cb_ops = {
151 154 nulldev, /* cb_open */
152 155 nulldev, /* cb_close */
153 156 nodev, /* cb_strategy */
154 157 nodev, /* cb_print */
155 158 nodev, /* cb_dump */
156 159 nodev, /* cb_read */
157 160 nodev, /* cb_write */
158 161 nodev, /* cb_ioctl */
159 162 nodev, /* cb_devmap */
160 163 nodev, /* cb_mmap */
161 164 nodev, /* cb_segmap */
162 165 nochpoll, /* cb_chpoll */
163 166 ddi_prop_op, /* cb_prop_op */
164 167 NULL, /* cb_stream */
165 168 D_MP | D_HOTPLUG, /* cb_flag */
166 169 CB_REV, /* cb_rev */
167 170 nodev, /* cb_aread */
168 171 nodev /* cb_awrite */
169 172 };
170 173
171 174 static struct dev_ops ixgbe_dev_ops = {
172 175 DEVO_REV, /* devo_rev */
173 176 0, /* devo_refcnt */
174 177 NULL, /* devo_getinfo */
175 178 nulldev, /* devo_identify */
176 179 nulldev, /* devo_probe */
177 180 ixgbe_attach, /* devo_attach */
178 181 ixgbe_detach, /* devo_detach */
179 182 nodev, /* devo_reset */
180 183 &ixgbe_cb_ops, /* devo_cb_ops */
181 184 NULL, /* devo_bus_ops */
182 185 ddi_power, /* devo_power */
183 186 ixgbe_quiesce, /* devo_quiesce */
184 187 };
185 188
186 189 static struct modldrv ixgbe_modldrv = {
187 190 &mod_driverops, /* Type of module. This one is a driver */
188 191 	ixgbe_ident, /* Description string */
189 192 &ixgbe_dev_ops /* driver ops */
190 193 };
191 194
192 195 static struct modlinkage ixgbe_modlinkage = {
193 196 MODREV_1, &ixgbe_modldrv, NULL
194 197 };
195 198
196 199 /*
197 200 * Access attributes for register mapping
198 201 */
199 202 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
200 203 DDI_DEVICE_ATTR_V1,
201 204 DDI_STRUCTURE_LE_ACC,
202 205 DDI_STRICTORDER_ACC,
203 206 DDI_FLAGERR_ACC
204 207 };
205 208
206 209 /*
207 210 * Loopback property
208 211 */
209 212 static lb_property_t lb_normal = {
210 213 normal, "normal", IXGBE_LB_NONE
211 214 };
212 215
213 216 static lb_property_t lb_mac = {
214 217 internal, "MAC", IXGBE_LB_INTERNAL_MAC
215 218 };
216 219
217 220 static lb_property_t lb_external = {
218 221 external, "External", IXGBE_LB_EXTERNAL
219 222 };
220 223
221 224 #define IXGBE_M_CALLBACK_FLAGS \
222 225 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
223 226
224 227 static mac_callbacks_t ixgbe_m_callbacks = {
225 228 IXGBE_M_CALLBACK_FLAGS,
226 229 ixgbe_m_stat,
227 230 ixgbe_m_start,
228 231 ixgbe_m_stop,
229 232 ixgbe_m_promisc,
230 233 ixgbe_m_multicst,
231 234 NULL,
232 235 NULL,
233 236 NULL,
234 237 ixgbe_m_ioctl,
235 238 ixgbe_m_getcapab,
236 239 NULL,
237 240 NULL,
238 241 ixgbe_m_setprop,
239 242 ixgbe_m_getprop,
240 243 ixgbe_m_propinfo
241 244 };
242 245
243 246 /*
244 247 * Initialize capabilities of each supported adapter type
245 248 */
246 249 static adapter_info_t ixgbe_82598eb_cap = {
247 250 64, /* maximum number of rx queues */
248 251 1, /* minimum number of rx queues */
249 252 64, /* default number of rx queues */
250 253 16, /* maximum number of rx groups */
251 254 1, /* minimum number of rx groups */
252 255 1, /* default number of rx groups */
253 256 32, /* maximum number of tx queues */
254 257 1, /* minimum number of tx queues */
255 258 8, /* default number of tx queues */
256 259 16366, /* maximum MTU size */
257 260 0xFFFF, /* maximum interrupt throttle rate */
258 261 0, /* minimum interrupt throttle rate */
259 262 200, /* default interrupt throttle rate */
260 263 18, /* maximum total msix vectors */
261 264 16, /* maximum number of ring vectors */
262 265 2, /* maximum number of other vectors */
263 266 IXGBE_EICR_LSC, /* "other" interrupt types handled */
264 267 0, /* "other" interrupt types enable mask */
265 268 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
266 269 | IXGBE_FLAG_RSS_CAPABLE
267 270 | IXGBE_FLAG_VMDQ_CAPABLE)
268 271 };
269 272
270 273 static adapter_info_t ixgbe_82599eb_cap = {
271 274 128, /* maximum number of rx queues */
272 275 1, /* minimum number of rx queues */
273 276 128, /* default number of rx queues */
274 277 64, /* maximum number of rx groups */
275 278 1, /* minimum number of rx groups */
276 279 1, /* default number of rx groups */
277 280 128, /* maximum number of tx queues */
278 281 1, /* minimum number of tx queues */
279 282 8, /* default number of tx queues */
280 283 15500, /* maximum MTU size */
281 284 0xFF8, /* maximum interrupt throttle rate */
282 285 0, /* minimum interrupt throttle rate */
283 286 200, /* default interrupt throttle rate */
284 287 64, /* maximum total msix vectors */
285 288 16, /* maximum number of ring vectors */
286 289 2, /* maximum number of other vectors */
287 290 (IXGBE_EICR_LSC
288 291 | IXGBE_EICR_GPI_SDP1
289 292 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
290 293
291 294 (IXGBE_SDP1_GPIEN
292 295 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
293 296
294 297 (IXGBE_FLAG_DCA_CAPABLE
295 298 | IXGBE_FLAG_RSS_CAPABLE
296 299 | IXGBE_FLAG_VMDQ_CAPABLE
297 300 | IXGBE_FLAG_RSC_CAPABLE
298 301 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
299 302 };
300 303
301 304 static adapter_info_t ixgbe_X540_cap = {
302 305 128, /* maximum number of rx queues */
303 306 1, /* minimum number of rx queues */
304 307 128, /* default number of rx queues */
305 308 64, /* maximum number of rx groups */
306 309 1, /* minimum number of rx groups */
307 310 1, /* default number of rx groups */
308 311 128, /* maximum number of tx queues */
309 312 1, /* minimum number of tx queues */
310 313 8, /* default number of tx queues */
311 314 15500, /* maximum MTU size */
312 315 0xFF8, /* maximum interrupt throttle rate */
313 316 0, /* minimum interrupt throttle rate */
314 317 200, /* default interrupt throttle rate */
315 318 64, /* maximum total msix vectors */
316 319 16, /* maximum number of ring vectors */
317 320 2, /* maximum number of other vectors */
318 321 (IXGBE_EICR_LSC
319 322 | IXGBE_EICR_GPI_SDP1_X540
320 323 | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */
321 324
322 325 (IXGBE_SDP1_GPIEN_X540
323 326 | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */
324 327
325 328 (IXGBE_FLAG_DCA_CAPABLE
326 329 | IXGBE_FLAG_RSS_CAPABLE
327 330 | IXGBE_FLAG_VMDQ_CAPABLE
328 331 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
329 332 };
330 333
331 334 static adapter_info_t ixgbe_X550_cap = {
332 335 128, /* maximum number of rx queues */
333 336 1, /* minimum number of rx queues */
334 337 128, /* default number of rx queues */
335 338 64, /* maximum number of rx groups */
336 339 1, /* minimum number of rx groups */
337 340 1, /* default number of rx groups */
338 341 128, /* maximum number of tx queues */
339 342 1, /* minimum number of tx queues */
340 343 8, /* default number of tx queues */
341 344 15500, /* maximum MTU size */
342 345 0xFF8, /* maximum interrupt throttle rate */
343 346 0, /* minimum interrupt throttle rate */
344 347 0x200, /* default interrupt throttle rate */
345 348 64, /* maximum total msix vectors */
346 349 16, /* maximum number of ring vectors */
347 350 2, /* maximum number of other vectors */
348 351 IXGBE_EICR_LSC, /* "other" interrupt types handled */
349 352 0, /* "other" interrupt types enable mask */
350 353 (IXGBE_FLAG_RSS_CAPABLE
351 354 | IXGBE_FLAG_VMDQ_CAPABLE
352 355 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
353 356 };
354 357
355 358 /*
356 359 * Module Initialization Functions.
357 360 */
358 361
359 362 int
360 363 _init(void)
361 364 {
362 365 int status;
363 366
364 367 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
365 368
366 369 status = mod_install(&ixgbe_modlinkage);
367 370
368 371 if (status != DDI_SUCCESS) {
369 372 mac_fini_ops(&ixgbe_dev_ops);
370 373 }
371 374
372 375 return (status);
373 376 }
374 377
375 378 int
376 379 _fini(void)
377 380 {
378 381 int status;
379 382
380 383 status = mod_remove(&ixgbe_modlinkage);
381 384
382 385 if (status == DDI_SUCCESS) {
383 386 mac_fini_ops(&ixgbe_dev_ops);
384 387 }
385 388
386 389 return (status);
387 390 }
388 391
389 392 int
390 393 _info(struct modinfo *modinfop)
391 394 {
392 395 int status;
393 396
394 397 status = mod_info(&ixgbe_modlinkage, modinfop);
395 398
396 399 return (status);
397 400 }
398 401
399 402 /*
400 403 * ixgbe_attach - Driver attach.
401 404 *
402 405 * This function is the device specific initialization entry
403 406 * point. This entry point is required and must be written.
404 407 * The DDI_ATTACH command must be provided in the attach entry
405 408 * point. When attach() is called with cmd set to DDI_ATTACH,
406 409 * all normal kernel services (such as kmem_alloc(9F)) are
407 410 * available for use by the driver.
408 411 *
409 412 * The attach() function will be called once for each instance
410 413 * of the device on the system with cmd set to DDI_ATTACH.
411 414 * Until attach() succeeds, the only driver entry points which
412 415 * may be called are open(9E) and getinfo(9E).
413 416 */
414 417 static int
415 418 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
416 419 {
417 420 ixgbe_t *ixgbe;
418 421 struct ixgbe_osdep *osdep;
419 422 struct ixgbe_hw *hw;
420 423 int instance;
421 424 char taskqname[32];
422 425
423 426 /*
424 427 * Check the command and perform corresponding operations
425 428 */
426 429 switch (cmd) {
427 430 default:
428 431 return (DDI_FAILURE);
429 432
430 433 case DDI_RESUME:
431 434 return (ixgbe_resume(devinfo));
432 435
433 436 case DDI_ATTACH:
434 437 break;
435 438 }
436 439
437 440 /* Get the device instance */
438 441 instance = ddi_get_instance(devinfo);
439 442
440 443 /* Allocate memory for the instance data structure */
441 444 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
442 445
443 446 ixgbe->dip = devinfo;
444 447 ixgbe->instance = instance;
445 448
446 449 hw = &ixgbe->hw;
447 450 osdep = &ixgbe->osdep;
448 451 hw->back = osdep;
449 452 osdep->ixgbe = ixgbe;
450 453
451 454 /* Attach the instance pointer to the dev_info data structure */
452 455 ddi_set_driver_private(devinfo, ixgbe);
453 456
454 457 /*
455 458 * Initialize for FMA support
456 459 */
457 460 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
458 461 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
459 462 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
460 463 ixgbe_fm_init(ixgbe);
461 464 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
462 465
463 466 /*
464 467 * Map PCI config space registers
465 468 */
466 469 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
467 470 ixgbe_error(ixgbe, "Failed to map PCI configurations");
468 471 goto attach_fail;
469 472 }
470 473 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
471 474
472 475 /*
473 476 * Identify the chipset family
474 477 */
475 478 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
476 479 ixgbe_error(ixgbe, "Failed to identify hardware");
477 480 goto attach_fail;
478 481 }
479 482
480 483 /*
481 484 * Map device registers
482 485 */
483 486 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
484 487 ixgbe_error(ixgbe, "Failed to map device registers");
485 488 goto attach_fail;
486 489 }
487 490 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
488 491
489 492 /*
490 493 * Initialize driver parameters
491 494 */
492 495 ixgbe_init_properties(ixgbe);
493 496 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
494 497
495 498 /*
496 499 * Register interrupt callback
497 500 */
498 501 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
499 502 ixgbe_error(ixgbe, "Failed to register interrupt callback");
500 503 goto attach_fail;
501 504 }
502 505
503 506 /*
504 507 * Allocate interrupts
505 508 */
506 509 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
507 510 ixgbe_error(ixgbe, "Failed to allocate interrupts");
508 511 goto attach_fail;
509 512 }
510 513 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
511 514
512 515 /*
513 516 * Allocate rx/tx rings based on the ring numbers.
514 517 * The actual numbers of rx/tx rings are decided by the number of
515 518 * allocated interrupt vectors, so we should allocate the rings after
516 519 * interrupts are allocated.
517 520 */
518 521 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
519 522 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
520 523 goto attach_fail;
521 524 }
522 525 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
523 526
524 527 /*
525 528 * Map rings to interrupt vectors
526 529 */
527 530 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
528 531 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
529 532 goto attach_fail;
530 533 }
531 534
532 535 /*
533 536 * Add interrupt handlers
534 537 */
535 538 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
536 539 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
537 540 goto attach_fail;
538 541 }
539 542 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
540 543
541 544 /*
542 545 * Create a taskq for sfp-change
543 546 */
544 547 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
545 548 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
546 549 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
547 550 ixgbe_error(ixgbe, "sfp_taskq create failed");
548 551 goto attach_fail;
549 552 }
550 553 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
551 554
552 555 /*
553 556 * Create a taskq for over-temp
554 557 */
555 558 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
556 559 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
557 560 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
558 561 ixgbe_error(ixgbe, "overtemp_taskq create failed");
559 562 goto attach_fail;
560 563 }
561 564 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
562 565
563 566 /*
564 567 * Create a taskq for processing external PHY interrupts
565 568 */
566 569 (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
567 570 if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
568 571 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
569 572 ixgbe_error(ixgbe, "phy_taskq create failed");
570 573 goto attach_fail;
571 574 }
572 575 ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;
573 576
574 577 /*
575 578 * Initialize driver parameters
576 579 */
577 580 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
578 581 ixgbe_error(ixgbe, "Failed to initialize driver settings");
579 582 goto attach_fail;
580 583 }
581 584
582 585 /*
583 586 * Initialize mutexes for this device.
584 587 	 * Do this before enabling the interrupt handler and
585 588 	 * registering the softint, to avoid the condition where the
586 589 	 * interrupt handler can try to use an uninitialized mutex.
587 590 */
588 591 ixgbe_init_locks(ixgbe);
589 592 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
590 593
591 594 /*
592 595 * Initialize chipset hardware
593 596 */
594 597 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
595 598 ixgbe_error(ixgbe, "Failed to initialize adapter");
596 599 goto attach_fail;
597 600 }
598 601 ixgbe->link_check_complete = B_FALSE;
599 602 ixgbe->link_check_hrtime = gethrtime() +
600 603 (IXGBE_LINK_UP_TIME * 100000000ULL);
601 604 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
602 605
603 606 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
604 607 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
605 608 goto attach_fail;
606 609 }
607 610
608 611 /*
609 612 * Initialize adapter capabilities
610 613 */
611 614 ixgbe_init_params(ixgbe);
612 615
613 616 /*
614 617 * Initialize statistics
615 618 */
616 619 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
617 620 ixgbe_error(ixgbe, "Failed to initialize statistics");
618 621 goto attach_fail;
619 622 }
620 623 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
621 624
622 625 /*
623 626 * Register the driver to the MAC
624 627 */
625 628 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
626 629 ixgbe_error(ixgbe, "Failed to register MAC");
627 630 goto attach_fail;
628 631 }
629 632 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
630 633 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
631 634
632 635 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
633 636 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
634 637 if (ixgbe->periodic_id == 0) {
635 638 ixgbe_error(ixgbe, "Failed to add the link check timer");
636 639 goto attach_fail;
637 640 }
638 641 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
639 642
640 643 /*
641 644 * Now that mutex locks are initialized, and the chip is also
642 645 * initialized, enable interrupts.
643 646 */
644 647 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
645 648 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
646 649 goto attach_fail;
647 650 }
648 651 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
649 652
650 653 ixgbe_log(ixgbe, "%s", ixgbe_ident);
651 654 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
652 655
653 656 return (DDI_SUCCESS);
654 657
655 658 attach_fail:
656 659 ixgbe_unconfigure(devinfo, ixgbe);
657 660 return (DDI_FAILURE);
658 661 }
659 662
660 663 /*
661 664 * ixgbe_detach - Driver detach.
662 665 *
663 666 * The detach() function is the complement of the attach routine.
664 667 * If cmd is set to DDI_DETACH, detach() is used to remove the
665 668 * state associated with a given instance of a device node
666 669 * prior to the removal of that instance from the system.
667 670 *
668 671 * The detach() function will be called once for each instance
669 672 * of the device for which there has been a successful attach()
670 673 * once there are no longer any opens on the device.
671 674 *
672 675  * Interrupt routines are disabled, and all memory allocated by this
673 676  * driver is freed.
674 677 */
675 678 static int
676 679 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
677 680 {
678 681 ixgbe_t *ixgbe;
679 682
680 683 /*
681 684 * Check detach command
682 685 */
683 686 switch (cmd) {
684 687 default:
685 688 return (DDI_FAILURE);
686 689
687 690 case DDI_SUSPEND:
688 691 return (ixgbe_suspend(devinfo));
689 692
690 693 case DDI_DETACH:
691 694 break;
692 695 }
693 696
694 697 /*
695 698 * Get the pointer to the driver private data structure
696 699 */
697 700 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
698 701 if (ixgbe == NULL)
699 702 return (DDI_FAILURE);
700 703
701 704 /*
702 705 * If the device is still running, it needs to be stopped first.
703 706 * This check is necessary because under some specific circumstances,
704 707 * the detach routine can be called without stopping the interface
705 708 * first.
706 709 */
707 710 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
708 711 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
709 712 mutex_enter(&ixgbe->gen_lock);
710 713 ixgbe_stop(ixgbe, B_TRUE);
711 714 mutex_exit(&ixgbe->gen_lock);
712 715 /* Disable and stop the watchdog timer */
713 716 ixgbe_disable_watchdog_timer(ixgbe);
714 717 }
715 718
716 719 /*
717 720 * Check if there are still rx buffers held by the upper layer.
718 721 * If so, fail the detach.
719 722 */
720 723 if (!ixgbe_rx_drain(ixgbe))
721 724 return (DDI_FAILURE);
722 725
723 726 /*
724 727 * Do the remaining unconfigure routines
725 728 */
726 729 ixgbe_unconfigure(devinfo, ixgbe);
727 730
728 731 return (DDI_SUCCESS);
729 732 }
730 733
731 734 /*
732 735 * quiesce(9E) entry point.
733 736 *
734 737 * This function is called when the system is single-threaded at high
735 738 * PIL with preemption disabled. Therefore, this function must not be
736 739 * blocked.
737 740 *
738 741 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
739 742 * DDI_FAILURE indicates an error condition and should almost never happen.
740 743 */
741 744 static int
742 745 ixgbe_quiesce(dev_info_t *devinfo)
743 746 {
744 747 ixgbe_t *ixgbe;
745 748 struct ixgbe_hw *hw;
746 749
747 750 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
748 751
749 752 if (ixgbe == NULL)
750 753 return (DDI_FAILURE);
751 754
752 755 hw = &ixgbe->hw;
753 756
754 757 /*
755 758 * Disable the adapter interrupts
756 759 */
757 760 ixgbe_disable_adapter_interrupts(ixgbe);
758 761
759 762 /*
760 763 * Tell firmware driver is no longer in control
761 764 */
762 765 ixgbe_release_driver_control(hw);
763 766
764 767 /*
765 768 * Reset the chipset
766 769 */
767 770 (void) ixgbe_reset_hw(hw);
768 771
769 772 /*
770 773 * Reset PHY
771 774 */
772 775 (void) ixgbe_reset_phy(hw);
773 776
774 777 return (DDI_SUCCESS);
775 778 }
776 779
777 780 static void
778 781 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
779 782 {
780 783 /*
781 784 * Disable interrupt
782 785 */
783 786 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
784 787 (void) ixgbe_disable_intrs(ixgbe);
785 788 }
786 789
787 790 /*
788 791 * remove the link check timer
789 792 */
790 793 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
791 794 if (ixgbe->periodic_id != NULL) {
792 795 ddi_periodic_delete(ixgbe->periodic_id);
793 796 ixgbe->periodic_id = NULL;
794 797 }
795 798 }
796 799
797 800 /*
798 801 * Unregister MAC
799 802 */
800 803 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
801 804 (void) mac_unregister(ixgbe->mac_hdl);
802 805 }
803 806
804 807 /*
805 808 * Free statistics
806 809 */
807 810 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
808 811 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
809 812 }
810 813
811 814 /*
812 815 * Remove interrupt handlers
813 816 */
814 817 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
815 818 ixgbe_rem_intr_handlers(ixgbe);
816 819 }
817 820
818 821 /*
819 822 * Remove taskq for sfp-status-change
820 823 */
821 824 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
822 825 ddi_taskq_destroy(ixgbe->sfp_taskq);
823 826 }
824 827
825 828 /*
826 829 * Remove taskq for over-temp
827 830 */
828 831 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
829 832 ddi_taskq_destroy(ixgbe->overtemp_taskq);
830 833 }
831 834
832 835 /*
833 836 * Remove taskq for external PHYs
834 837 */
835 838 if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
836 839 ddi_taskq_destroy(ixgbe->phy_taskq);
837 840 }
838 841
839 842 /*
840 843 * Remove interrupts
841 844 */
842 845 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
843 846 ixgbe_rem_intrs(ixgbe);
844 847 }
845 848
846 849 /*
847 850 * Unregister interrupt callback handler
848 851 */
849 852 if (ixgbe->cb_hdl != NULL) {
850 853 (void) ddi_cb_unregister(ixgbe->cb_hdl);
851 854 }
852 855
853 856 /*
854 857 * Remove driver properties
855 858 */
856 859 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
857 860 (void) ddi_prop_remove_all(devinfo);
858 861 }
859 862
860 863 /*
861 864 * Stop the chipset
862 865 */
863 866 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
864 867 mutex_enter(&ixgbe->gen_lock);
865 868 ixgbe_chip_stop(ixgbe);
866 869 mutex_exit(&ixgbe->gen_lock);
867 870 }
868 871
869 872 /*
870 873 * Free register handle
871 874 */
872 875 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
873 876 if (ixgbe->osdep.reg_handle != NULL)
874 877 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
875 878 }
876 879
877 880 /*
878 881 * Free PCI config handle
879 882 */
880 883 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
881 884 if (ixgbe->osdep.cfg_handle != NULL)
882 885 pci_config_teardown(&ixgbe->osdep.cfg_handle);
883 886 }
884 887
885 888 /*
886 889 * Free locks
887 890 */
888 891 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
889 892 ixgbe_destroy_locks(ixgbe);
890 893 }
891 894
892 895 /*
893 896 * Free the rx/tx rings
894 897 */
895 898 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
896 899 ixgbe_free_rings(ixgbe);
897 900 }
898 901
899 902 /*
900 903 * Unregister FMA capabilities
901 904 */
902 905 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
903 906 ixgbe_fm_fini(ixgbe);
904 907 }
905 908
906 909 /*
907 910 * Free the driver data structure
908 911 */
909 912 kmem_free(ixgbe, sizeof (ixgbe_t));
910 913
911 914 ddi_set_driver_private(devinfo, NULL);
912 915 }
913 916
914 917 /*
915 918 * ixgbe_register_mac - Register the driver and its function pointers with
916 919 * the GLD interface.
917 920 */
918 921 static int
919 922 ixgbe_register_mac(ixgbe_t *ixgbe)
920 923 {
921 924 struct ixgbe_hw *hw = &ixgbe->hw;
922 925 mac_register_t *mac;
923 926 int status;
924 927
925 928 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
926 929 return (IXGBE_FAILURE);
927 930
928 931 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
929 932 mac->m_driver = ixgbe;
930 933 mac->m_dip = ixgbe->dip;
931 934 mac->m_src_addr = hw->mac.addr;
932 935 mac->m_callbacks = &ixgbe_m_callbacks;
933 936 mac->m_min_sdu = 0;
934 937 mac->m_max_sdu = ixgbe->default_mtu;
935 938 mac->m_margin = VLAN_TAGSZ;
936 939 mac->m_priv_props = ixgbe_priv_props;
937 940 mac->m_v12n = MAC_VIRT_LEVEL1;
938 941
939 942 status = mac_register(mac, &ixgbe->mac_hdl);
940 943
941 944 mac_free(mac);
942 945
943 946 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
944 947 }
945 948
946 949 /*
947 950 * ixgbe_identify_hardware - Identify the type of the chipset.
948 951 */
949 952 static int
950 953 ixgbe_identify_hardware(ixgbe_t *ixgbe)
951 954 {
952 955 struct ixgbe_hw *hw = &ixgbe->hw;
953 956 struct ixgbe_osdep *osdep = &ixgbe->osdep;
954 957
955 958 /*
956 959 * Get the device id
957 960 */
958 961 hw->vendor_id =
959 962 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
960 963 hw->device_id =
961 964 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
962 965 hw->revision_id =
963 966 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
964 967 hw->subsystem_device_id =
965 968 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
966 969 hw->subsystem_vendor_id =
967 970 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
968 971
969 972 /*
970 973 * Set the mac type of the adapter based on the device id
971 974 */
972 975 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
973 976 return (IXGBE_FAILURE);
974 977 }
975 978
976 979 /*
977 980 * Install adapter capabilities
978 981 */
979 982 switch (hw->mac.type) {
980 983 case ixgbe_mac_82598EB:
981 984 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
982 985 ixgbe->capab = &ixgbe_82598eb_cap;
983 986
984 987 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
985 988 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
986 989 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
987 990 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
988 991 }
989 992 break;
990 993
991 994 case ixgbe_mac_82599EB:
992 995 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
993 996 ixgbe->capab = &ixgbe_82599eb_cap;
994 997
995 998 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
996 999 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
997 1000 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
998 1001 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
999 1002 }
1000 1003 break;
1001 1004
1002 1005 case ixgbe_mac_X540:
1003 1006 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
1004 1007 ixgbe->capab = &ixgbe_X540_cap;
1005 1008 /*
1006 1009 * For now, X540 is all set in its capab structure.
1007 1010 * As other X540 variants show up, things can change here.
1008 1011 */
1009 1012 break;
1010 1013
1011 1014 case ixgbe_mac_X550:
1012 1015 case ixgbe_mac_X550EM_x:
1013 1016 case ixgbe_mac_X550EM_a:
1014 1017 IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
1015 1018 ixgbe->capab = &ixgbe_X550_cap;
1016 1019
1017 1020 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1018 1021 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
1019 1022 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
1020 1023 hw->device_id == IXGBE_DEV_ID_X550EM_A_QSFP ||
1021 1024 hw->device_id == IXGBE_DEV_ID_X550EM_A_QSFP_N) {
1022 1025 ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;
1023 1026 }
1024 1027
1025 1028 /*
1026 1029 * Link detection on X552 SFP+ and X552/X557-AT
1027 1030 */
1028 1031 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1029 1032 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
1030 1033 hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N ||
1031 1034 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
1032 1035 ixgbe->capab->other_intr |=
1033 1036 IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
1034 1037 }
1035 1038 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
1036 1039 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
1037 1040 }
1038 1041 break;
1039 1042
1040 1043 default:
1041 1044 IXGBE_DEBUGLOG_1(ixgbe,
1042 1045 "adapter not supported in ixgbe_identify_hardware(): %d\n",
1043 1046 hw->mac.type);
1044 1047 return (IXGBE_FAILURE);
1045 1048 }
1046 1049
1047 1050 return (IXGBE_SUCCESS);
1048 1051 }
1049 1052
1050 1053 /*
1051 1054 * ixgbe_regs_map - Map the device registers.
1052 1055 *
1053 1056 */
1054 1057 static int
1055 1058 ixgbe_regs_map(ixgbe_t *ixgbe)
1056 1059 {
1057 1060 dev_info_t *devinfo = ixgbe->dip;
1058 1061 struct ixgbe_hw *hw = &ixgbe->hw;
1059 1062 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1060 1063 off_t mem_size;
1061 1064
1062 1065 /*
1063 1066 * First get the size of device registers to be mapped.
1064 1067 */
1065 1068 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
1066 1069 != DDI_SUCCESS) {
1067 1070 return (IXGBE_FAILURE);
1068 1071 }
1069 1072
1070 1073 /*
1071 1074 * Call ddi_regs_map_setup() to map registers
1072 1075 */
1073 1076 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
1074 1077 (caddr_t *)&hw->hw_addr, 0,
1075 1078 mem_size, &ixgbe_regs_acc_attr,
1076 1079 &osdep->reg_handle)) != DDI_SUCCESS) {
1077 1080 return (IXGBE_FAILURE);
1078 1081 }
1079 1082
1080 1083 return (IXGBE_SUCCESS);
1081 1084 }
1082 1085
1083 1086 /*
1084 1087 * ixgbe_init_properties - Initialize driver properties.
1085 1088 */
1086 1089 static void
1087 1090 ixgbe_init_properties(ixgbe_t *ixgbe)
1088 1091 {
1089 1092 /*
1090 1093 * Get conf file properties, including link settings
1091 1094 * jumbo frames, ring number, descriptor number, etc.
1092 1095 */
1093 1096 ixgbe_get_conf(ixgbe);
1094 1097 }
1095 1098
1096 1099 /*
1097 1100 * ixgbe_init_driver_settings - Initialize driver settings.
1098 1101 *
1099 1102 * The settings include hardware function pointers, bus information,
1100 1103 * rx/tx rings settings, link state, and any other parameters that
1101 1104 * need to be setup during driver initialization.
1102 1105 */
1103 1106 static int
1104 1107 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
1105 1108 {
1106 1109 struct ixgbe_hw *hw = &ixgbe->hw;
1107 1110 dev_info_t *devinfo = ixgbe->dip;
1108 1111 ixgbe_rx_ring_t *rx_ring;
1109 1112 ixgbe_rx_group_t *rx_group;
1110 1113 ixgbe_tx_ring_t *tx_ring;
1111 1114 uint32_t rx_size;
1112 1115 uint32_t tx_size;
1113 1116 uint32_t ring_per_group;
1114 1117 int i;
1115 1118
1116 1119 /*
1117 1120 * Initialize chipset specific hardware function pointers
1118 1121 */
1119 1122 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
1120 1123 return (IXGBE_FAILURE);
1121 1124 }
1122 1125
1123 1126 /*
1124 1127 * Get the system page size
1125 1128 */
1126 1129 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
1127 1130
1128 1131 /*
1129 1132 * Set rx buffer size
1130 1133 *
1131 1134 * The IP header alignment room is counted in the calculation.
1132 1135 * The rx buffer size is in unit of 1K that is required by the
1133 1136 	 * The rx buffer size is in units of 1K, as required by the
1134 1137 	 * chipset hardware.
1135 1138 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
1136 1139 ixgbe->rx_buf_size = ((rx_size >> 10) +
1137 1140 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1138 1141
1139 1142 /*
1140 1143 * Set tx buffer size
1141 1144 */
1142 1145 tx_size = ixgbe->max_frame_size;
1143 1146 ixgbe->tx_buf_size = ((tx_size >> 10) +
1144 1147 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
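
The two computations above are the usual shift-and-remainder trick for rounding a byte count up to the 1K granularity the chipset requires. A worked example, assuming a stock 1500-byte MTU (an Ethernet max_frame_size of 1518) and the 2-byte IPHDR_ALIGN_ROOM; the helper name and concrete values are illustrative only:

/* Illustrative only: stock 1500-byte MTU => 1518-byte max frame. */
static uint32_t
ixgbe_example_rx_buf_size(void)
{
	uint32_t rx_size = 1518 + 2;	/* max_frame_size + IPHDR_ALIGN_ROOM */

	return (((rx_size >> 10) +			/* 1520 >> 10 == 1 */
	    ((rx_size & 0x3ff) > 0 ? 1 : 0)) << 10);	/* remainder, +1 => 2048 */
}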
1145 1148
1146 1149 /*
1147 1150 * Initialize rx/tx rings/groups parameters
1148 1151 */
1149 1152 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1150 1153 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1151 1154 rx_ring = &ixgbe->rx_rings[i];
1152 1155 rx_ring->index = i;
1153 1156 rx_ring->ixgbe = ixgbe;
1154 1157 rx_ring->group_index = i / ring_per_group;
1155 1158 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1156 1159 }
1157 1160
1158 1161 for (i = 0; i < ixgbe->num_rx_groups; i++) {
1159 1162 rx_group = &ixgbe->rx_groups[i];
1160 1163 rx_group->index = i;
1161 1164 rx_group->ixgbe = ixgbe;
1165 + list_create(&rx_group->vlans, sizeof (ixgbe_vlan_t),
1166 + offsetof(ixgbe_vlan_t, ixvl_link));
1162 1167 }
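
The list_create() call just above implies a small per-VID node type (declared in ixgbe_sw.h in the full change), linked through ixvl_link. A sketch consistent with that offsetof() usage; the ixvl_vid and ixvl_refs names are assumptions:

/* Assumed shape of the node carried on rx_group->vlans. */
typedef struct ixgbe_vlan {
	list_node_t	ixvl_link;	/* rx_group->vlans linkage */
	uint16_t	ixvl_vid;	/* VLAN ID (assumed field) */
	uint_t		ixvl_refs;	/* clients using this VID (assumed) */
} ixgbe_vlan_t;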
1163 1168
1164 1169 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1165 1170 tx_ring = &ixgbe->tx_rings[i];
1166 1171 tx_ring->index = i;
1167 1172 tx_ring->ixgbe = ixgbe;
1168 1173 if (ixgbe->tx_head_wb_enable)
1169 1174 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1170 1175 else
1171 1176 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1172 1177
1173 1178 tx_ring->ring_size = ixgbe->tx_ring_size;
1174 1179 tx_ring->free_list_size = ixgbe->tx_ring_size +
1175 1180 (ixgbe->tx_ring_size >> 1);
1176 1181 }
1177 1182
1178 1183 /*
1179 1184 * Initialize values of interrupt throttling rate
1180 1185 */
1181 1186 for (i = 1; i < MAX_INTR_VECTOR; i++)
1182 1187 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1183 1188
1184 1189 /*
1185 1190 * The initial link state should be "unknown"
1186 1191 */
1187 1192 ixgbe->link_state = LINK_STATE_UNKNOWN;
1188 1193
1189 1194 return (IXGBE_SUCCESS);
1190 1195 }
1191 1196
1192 1197 /*
1193 1198 * ixgbe_init_locks - Initialize locks.
1194 1199 */
1195 1200 static void
1196 1201 ixgbe_init_locks(ixgbe_t *ixgbe)
1197 1202 {
1198 1203 ixgbe_rx_ring_t *rx_ring;
1199 1204 ixgbe_tx_ring_t *tx_ring;
1200 1205 int i;
1201 1206
1202 1207 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1203 1208 rx_ring = &ixgbe->rx_rings[i];
1204 1209 mutex_init(&rx_ring->rx_lock, NULL,
1205 1210 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1206 1211 }
1207 1212
1208 1213 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1209 1214 tx_ring = &ixgbe->tx_rings[i];
1210 1215 mutex_init(&tx_ring->tx_lock, NULL,
1211 1216 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1212 1217 mutex_init(&tx_ring->recycle_lock, NULL,
1213 1218 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1214 1219 mutex_init(&tx_ring->tcb_head_lock, NULL,
1215 1220 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1216 1221 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1217 1222 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1218 1223 }
1219 1224
1220 1225 mutex_init(&ixgbe->gen_lock, NULL,
1221 1226 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1222 1227
1223 1228 mutex_init(&ixgbe->watchdog_lock, NULL,
1224 1229 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1225 1230 }
1226 1231
1227 1232 /*
1228 1233 * ixgbe_destroy_locks - Destroy locks.
1229 1234 */
1230 1235 static void
1231 1236 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1232 1237 {
1233 1238 ixgbe_rx_ring_t *rx_ring;
1234 1239 ixgbe_tx_ring_t *tx_ring;
1235 1240 int i;
1236 1241
1237 1242 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1238 1243 rx_ring = &ixgbe->rx_rings[i];
1239 1244 mutex_destroy(&rx_ring->rx_lock);
1240 1245 }
1241 1246
1242 1247 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1243 1248 tx_ring = &ixgbe->tx_rings[i];
1244 1249 mutex_destroy(&tx_ring->tx_lock);
1245 1250 mutex_destroy(&tx_ring->recycle_lock);
1246 1251 mutex_destroy(&tx_ring->tcb_head_lock);
1247 1252 mutex_destroy(&tx_ring->tcb_tail_lock);
1248 1253 }
1249 1254
1250 1255 mutex_destroy(&ixgbe->gen_lock);
1251 1256 mutex_destroy(&ixgbe->watchdog_lock);
1252 1257 }
1253 1258
1254 1259 /*
1255 1260 * We need to try and determine which LED index in hardware corresponds to the
1256 1261 * link/activity LED. This is the one that'll be overwritten when we perform
1257 1262 * GLDv3 LED activity.
1258 1263 */
1259 1264 static void
1260 1265 ixgbe_led_init(ixgbe_t *ixgbe)
1261 1266 {
1262 1267 uint32_t reg, i;
1263 1268 struct ixgbe_hw *hw = &ixgbe->hw;
1264 1269
1265 1270 reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1266 1271 for (i = 0; i < 4; i++) {
1267 1272 if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
1268 1273 IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
1269 1274 ixgbe->ixgbe_led_index = i;
1270 1275 return;
1271 1276 }
1272 1277 }
1273 1278
1274 1279 /*
1275 1280 * If we couldn't determine this, we use the default for various MACs
1276 1281 * based on information Intel has inserted into other drivers over the
1277 1282 * years.
1278 1283 */
1279 1284 switch (hw->mac.type) {
1280 1285 case ixgbe_mac_X550EM_a:
1281 1286 ixgbe->ixgbe_led_index = 0;
1282 1287 break;
1283 1288 case ixgbe_mac_X550EM_x:
1284 1289 ixgbe->ixgbe_led_index = 1;
1285 1290 break;
1286 1291 default:
1287 1292 ixgbe->ixgbe_led_index = 2;
1288 1293 break;
1289 1294 }
1290 1295 }
1291 1296
1292 1297 static int
1293 1298 ixgbe_resume(dev_info_t *devinfo)
1294 1299 {
1295 1300 ixgbe_t *ixgbe;
1296 1301 int i;
1297 1302
1298 1303 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1299 1304 if (ixgbe == NULL)
1300 1305 return (DDI_FAILURE);
1301 1306
1302 1307 mutex_enter(&ixgbe->gen_lock);
1303 1308
1304 1309 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1305 1310 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1306 1311 mutex_exit(&ixgbe->gen_lock);
1307 1312 return (DDI_FAILURE);
1308 1313 }
1309 1314
1310 1315 /*
1311 1316 * Enable and start the watchdog timer
1312 1317 */
1313 1318 ixgbe_enable_watchdog_timer(ixgbe);
1314 1319 }
1315 1320
1316 1321 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1317 1322
1318 1323 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1319 1324 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1320 1325 mac_tx_ring_update(ixgbe->mac_hdl,
1321 1326 ixgbe->tx_rings[i].ring_handle);
1322 1327 }
1323 1328 }
1324 1329
1325 1330 mutex_exit(&ixgbe->gen_lock);
1326 1331
1327 1332 return (DDI_SUCCESS);
1328 1333 }
1329 1334
1330 1335 static int
1331 1336 ixgbe_suspend(dev_info_t *devinfo)
1332 1337 {
1333 1338 ixgbe_t *ixgbe;
1334 1339
1335 1340 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1336 1341 if (ixgbe == NULL)
1337 1342 return (DDI_FAILURE);
1338 1343
1339 1344 mutex_enter(&ixgbe->gen_lock);
1340 1345
1341 1346 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1342 1347 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1343 1348 mutex_exit(&ixgbe->gen_lock);
1344 1349 return (DDI_SUCCESS);
1345 1350 }
1346 1351 ixgbe_stop(ixgbe, B_FALSE);
1347 1352
1348 1353 mutex_exit(&ixgbe->gen_lock);
1349 1354
1350 1355 /*
1351 1356 * Disable and stop the watchdog timer
1352 1357 */
1353 1358 ixgbe_disable_watchdog_timer(ixgbe);
1354 1359
1355 1360 return (DDI_SUCCESS);
1356 1361 }
1357 1362
1358 1363 /*
1359 1364 * ixgbe_init - Initialize the device.
1360 1365 */
1361 1366 static int
1362 1367 ixgbe_init(ixgbe_t *ixgbe)
1363 1368 {
1364 1369 struct ixgbe_hw *hw = &ixgbe->hw;
1365 1370 u8 pbanum[IXGBE_PBANUM_LENGTH];
1366 1371 int rv;
1367 1372
1368 1373 mutex_enter(&ixgbe->gen_lock);
1369 1374
1370 1375 /*
1371 1376 * Configure/Initialize hardware
1372 1377 */
1373 1378 rv = ixgbe_init_hw(hw);
1374 1379 if (rv != IXGBE_SUCCESS) {
1375 1380 switch (rv) {
1376 1381
1377 1382 /*
1378 1383 		 * The first three errors do not prevent us from progressing
1379 1384 		 * further, and are mainly advisory in nature. In the case of an
1380 1385 		 * SFP module not being present or not deemed supported by the
1381 1386 		 * common code, we advise the operator of this fact but carry on
1382 1387 		 * instead of failing hard, as SFPs can be inserted or replaced
1383 1388 		 * while the driver is running. In the case of an unknown error,
1384 1389 		 * we fail hard, logging the reason and emitting an FMA event.
1385 1390 */
1386 1391 case IXGBE_ERR_EEPROM_VERSION:
1387 1392 ixgbe_error(ixgbe,
1388 1393 "This Intel 10Gb Ethernet device is pre-release and"
1389 1394 " contains outdated firmware. Please contact your"
1390 1395 " hardware vendor for a replacement.");
1391 1396 break;
1392 1397 case IXGBE_ERR_SFP_NOT_PRESENT:
1393 1398 ixgbe_error(ixgbe,
1394 1399 "No SFP+ module detected on this interface. Please "
1395 1400 "install a supported SFP+ module for this "
1396 1401 "interface to become operational.");
1397 1402 break;
1398 1403 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1399 1404 ixgbe_error(ixgbe,
1400 1405 "Unsupported SFP+ module detected. Please replace "
1401 1406 "it with a supported SFP+ module per Intel "
1402 1407 "documentation, or bypass this check with "
1403 1408 "allow_unsupported_sfp=1 in ixgbe.conf.");
1404 1409 break;
1405 1410 default:
1406 1411 ixgbe_error(ixgbe,
1407 1412 "Failed to initialize hardware. ixgbe_init_hw "
1408 1413 "returned %d", rv);
1409 1414 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1410 1415 goto init_fail;
1411 1416 }
1412 1417 }
1413 1418
1414 1419 /*
1415 1420 * Need to init eeprom before validating the checksum.
1416 1421 */
1417 1422 if (ixgbe_init_eeprom_params(hw) < 0) {
1418 1423 ixgbe_error(ixgbe,
1419 1424 		    "Unable to initialize the eeprom interface.");
1420 1425 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1421 1426 goto init_fail;
1422 1427 }
1423 1428
1424 1429 /*
1425 1430 * NVM validation
1426 1431 */
1427 1432 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1428 1433 /*
1429 1434 * Some PCI-E parts fail the first check due to
1430 1435 * the link being in sleep state. Call it again,
1431 1436 * if it fails a second time it's a real issue.
1432 1437 */
1433 1438 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1434 1439 ixgbe_error(ixgbe,
1435 1440 "Invalid NVM checksum. Please contact "
1436 1441 "the vendor to update the NVM.");
1437 1442 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1438 1443 goto init_fail;
1439 1444 }
1440 1445 }
1441 1446
1442 1447 /*
1443 1448 * Setup default flow control thresholds - enable/disable
1444 1449 * & flow control type is controlled by ixgbe.conf
1445 1450 */
1446 1451 hw->fc.high_water[0] = DEFAULT_FCRTH;
1447 1452 hw->fc.low_water[0] = DEFAULT_FCRTL;
1448 1453 hw->fc.pause_time = DEFAULT_FCPAUSE;
1449 1454 hw->fc.send_xon = B_TRUE;
1450 1455
1451 1456 /*
1452 1457 * Initialize flow control
1453 1458 */
1454 1459 (void) ixgbe_start_hw(hw);
1455 1460
1456 1461 /*
1457 1462 * Initialize link settings
1458 1463 */
1459 1464 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1460 1465
1461 1466 /*
1462 1467 * Initialize the chipset hardware
1463 1468 */
1464 1469 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1465 1470 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1466 1471 goto init_fail;
1467 1472 }
1468 1473
1469 1474 /*
1470 1475 * Read identifying information and place in devinfo.
1471 1476 */
1472 1477 pbanum[0] = '\0';
1473 1478 (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
1474 1479 if (*pbanum != '\0') {
1475 1480 (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
1476 1481 "printed-board-assembly", (char *)pbanum);
1477 1482 }
1478 1483
1479 1484 /*
1480 1485 * Determine LED index.
1481 1486 */
1482 1487 ixgbe_led_init(ixgbe);
1483 1488
1484 1489 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1485 1490 goto init_fail;
1486 1491 }
1487 1492
1488 1493 mutex_exit(&ixgbe->gen_lock);
1489 1494 return (IXGBE_SUCCESS);
1490 1495
1491 1496 init_fail:
1492 1497 /*
1493 1498 * Reset PHY
1494 1499 */
1495 1500 (void) ixgbe_reset_phy(hw);
1496 1501
1497 1502 mutex_exit(&ixgbe->gen_lock);
1498 1503 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1499 1504 return (IXGBE_FAILURE);
1500 1505 }
1501 1506
1502 1507 /*
1503 1508 * ixgbe_chip_start - Initialize and start the chipset hardware.
1504 1509 */
1505 1510 static int
1506 1511 ixgbe_chip_start(ixgbe_t *ixgbe)
1507 1512 {
1508 1513 struct ixgbe_hw *hw = &ixgbe->hw;
1509 1514 int i;
1510 1515
1511 1516 ASSERT(mutex_owned(&ixgbe->gen_lock));
1512 1517
1513 1518 /*
1514 1519 * Get the mac address
1515 1520 	 * This function should handle the SPARC case correctly.
1516 1521 */
1517 1522 if (!ixgbe_find_mac_address(ixgbe)) {
1518 1523 ixgbe_error(ixgbe, "Failed to get the mac address");
1519 1524 return (IXGBE_FAILURE);
1520 1525 }
1521 1526
1522 1527 /*
1523 1528 * Validate the mac address
1524 1529 */
1525 1530 (void) ixgbe_init_rx_addrs(hw);
1526 1531 if (!is_valid_mac_addr(hw->mac.addr)) {
1527 1532 ixgbe_error(ixgbe, "Invalid mac address");
1528 1533 return (IXGBE_FAILURE);
1529 1534 }
1530 1535
1531 1536 /*
1532 1537 * Re-enable relaxed ordering for performance. It is disabled
1533 1538 * by default in the hardware init.
1534 1539 */
1535 1540 if (ixgbe->relax_order_enable == B_TRUE)
1536 1541 ixgbe_enable_relaxed_ordering(hw);
1537 1542
1538 1543 /*
1539 1544 * Setup adapter interrupt vectors
1540 1545 */
1541 1546 ixgbe_setup_adapter_vector(ixgbe);
1542 1547
1543 1548 /*
1544 1549 * Initialize unicast addresses.
1545 1550 */
1546 1551 ixgbe_init_unicst(ixgbe);
1547 1552
1548 1553 /*
1549 1554 * Setup and initialize the mctable structures.
1550 1555 */
1551 1556 ixgbe_setup_multicst(ixgbe);
1552 1557
1553 1558 /*
1554 1559 * Set interrupt throttling rate
1555 1560 */
1556 1561 for (i = 0; i < ixgbe->intr_cnt; i++) {
1557 1562 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1558 1563 }
1559 1564
1560 1565 /*
1561 1566 * Disable Wake-on-LAN
1562 1567 */
1563 1568 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
1564 1569
1565 1570 /*
1566 1571 * Some adapters offer Energy Efficient Ethernet (EEE) support.
1567 1572 * Due to issues with EEE in e1000g/igb, we disable this by default
1568 1573 * as a precautionary measure.
1569 1574 *
1570 1575 * Currently, this is present on a number of the X550 family parts.
1571 1576 */
1572 1577 (void) ixgbe_setup_eee(hw, B_FALSE);
1573 1578
1574 1579 /*
1575 1580 * Turn on any present SFP Tx laser
1576 1581 */
1577 1582 ixgbe_enable_tx_laser(hw);
1578 1583
1579 1584 /*
1580 1585 * Power on the PHY
1581 1586 */
1582 1587 (void) ixgbe_set_phy_power(hw, B_TRUE);
1583 1588
1584 1589 /*
1585 1590 * Save the state of the PHY
1586 1591 */
1587 1592 ixgbe_get_hw_state(ixgbe);
1588 1593
1589 1594 /*
1590 1595 * Make sure driver has control
1591 1596 */
1592 1597 ixgbe_get_driver_control(hw);
1593 1598
1594 1599 return (IXGBE_SUCCESS);
1595 1600 }
1596 1601
1597 1602 /*
1598 1603 * ixgbe_chip_stop - Stop the chipset hardware
1599 1604 */
1600 1605 static void
1601 1606 ixgbe_chip_stop(ixgbe_t *ixgbe)
1602 1607 {
1603 1608 struct ixgbe_hw *hw = &ixgbe->hw;
1604 1609 int rv;
1605 1610
1606 1611 ASSERT(mutex_owned(&ixgbe->gen_lock));
1607 1612
1608 1613 /*
1609 1614 	 * Stop interrupt generation and disable the Tx unit
1610 1615 */
1611 1616 hw->adapter_stopped = B_FALSE;
1612 1617 (void) ixgbe_stop_adapter(hw);
1613 1618
1614 1619 /*
1615 1620 * Reset the chipset
1616 1621 */
1617 1622 (void) ixgbe_reset_hw(hw);
1618 1623
1619 1624 /*
1620 1625 * Reset PHY
1621 1626 */
1622 1627 (void) ixgbe_reset_phy(hw);
1623 1628
1624 1629 /*
1625 1630 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
1626 1631 	 * the PHY while doing so. Otherwise, just power down the PHY.
1627 1632 */
1628 1633 if (hw->phy.ops.enter_lplu != NULL) {
1629 1634 hw->phy.reset_disable = B_TRUE;
1630 1635 rv = hw->phy.ops.enter_lplu(hw);
1631 1636 if (rv != IXGBE_SUCCESS)
1632 1637 ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
1633 1638 hw->phy.reset_disable = B_FALSE;
1634 1639 } else {
1635 1640 (void) ixgbe_set_phy_power(hw, B_FALSE);
1636 1641 }
1637 1642
1638 1643 /*
1639 1644 * Turn off any present SFP Tx laser
1640 1645 * Expected for health and safety reasons
1641 1646 */
1642 1647 ixgbe_disable_tx_laser(hw);
1643 1648
1644 1649 /*
1645 1650 * Tell firmware driver is no longer in control
1646 1651 */
1647 1652 ixgbe_release_driver_control(hw);
1648 1653
1649 1654 }
1650 1655
1651 1656 /*
1652 1657 * ixgbe_reset - Reset the chipset and re-start the driver.
1653 1658 *
1654 1659 * It involves stopping and re-starting the chipset,
1655 1660 * and re-configuring the rx/tx rings.
1656 1661 */
1657 1662 static int
1658 1663 ixgbe_reset(ixgbe_t *ixgbe)
1659 1664 {
1660 1665 int i;
1661 1666
1662 1667 /*
1663 1668 * Disable and stop the watchdog timer
1664 1669 */
1665 1670 ixgbe_disable_watchdog_timer(ixgbe);
1666 1671
1667 1672 mutex_enter(&ixgbe->gen_lock);
1668 1673
1669 1674 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1670 1675 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1671 1676
1672 1677 ixgbe_stop(ixgbe, B_FALSE);
1673 1678
1674 1679 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1675 1680 mutex_exit(&ixgbe->gen_lock);
1676 1681 return (IXGBE_FAILURE);
1677 1682 }
1678 1683
1679 1684 /*
1680 1685 * After resetting, need to recheck the link status.
1681 1686 */
1682 1687 ixgbe->link_check_complete = B_FALSE;
1683 1688 ixgbe->link_check_hrtime = gethrtime() +
1684 1689 (IXGBE_LINK_UP_TIME * 100000000ULL);
1685 1690
1686 1691 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1687 1692
1688 1693 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1689 1694 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1690 1695 mac_tx_ring_update(ixgbe->mac_hdl,
1691 1696 ixgbe->tx_rings[i].ring_handle);
1692 1697 }
1693 1698 }
1694 1699
1695 1700 mutex_exit(&ixgbe->gen_lock);
1696 1701
1697 1702 /*
1698 1703 * Enable and start the watchdog timer
1699 1704 */
1700 1705 ixgbe_enable_watchdog_timer(ixgbe);
1701 1706
1702 1707 return (IXGBE_SUCCESS);
1703 1708 }
1704 1709
1705 1710 /*
1706 1711 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1707 1712 */
1708 1713 static void
1709 1714 ixgbe_tx_clean(ixgbe_t *ixgbe)
1710 1715 {
1711 1716 ixgbe_tx_ring_t *tx_ring;
1712 1717 tx_control_block_t *tcb;
1713 1718 link_list_t pending_list;
1714 1719 uint32_t desc_num;
1715 1720 int i, j;
1716 1721
1717 1722 LINK_LIST_INIT(&pending_list);
1718 1723
1719 1724 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1720 1725 tx_ring = &ixgbe->tx_rings[i];
1721 1726
1722 1727 mutex_enter(&tx_ring->recycle_lock);
1723 1728
1724 1729 /*
1725 1730 * Clean the pending tx data - the pending packets in the
1726 1731 * work_list that have no chance of being transmitted again.
1727 1732 *
1728 1733 * We must ensure the chipset is stopped or the link is down
1729 1734 * before cleaning the transmit packets.
1730 1735 */
1731 1736 desc_num = 0;
1732 1737 for (j = 0; j < tx_ring->ring_size; j++) {
1733 1738 tcb = tx_ring->work_list[j];
1734 1739 if (tcb != NULL) {
1735 1740 desc_num += tcb->desc_num;
1736 1741
1737 1742 tx_ring->work_list[j] = NULL;
1738 1743
1739 1744 ixgbe_free_tcb(tcb);
1740 1745
1741 1746 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1742 1747 }
1743 1748 }
1744 1749
1745 1750 if (desc_num > 0) {
1746 1751 atomic_add_32(&tx_ring->tbd_free, desc_num);
1747 1752 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1748 1753
1749 1754 /*
1750 1755 * Reset the head and tail pointers of the tbd ring;
1751 1756 * Reset the writeback head if it's enabled.
1752 1757 */
1753 1758 tx_ring->tbd_head = 0;
1754 1759 tx_ring->tbd_tail = 0;
1755 1760 if (ixgbe->tx_head_wb_enable)
1756 1761 *tx_ring->tbd_head_wb = 0;
1757 1762
1758 1763 IXGBE_WRITE_REG(&ixgbe->hw,
1759 1764 IXGBE_TDH(tx_ring->index), 0);
1760 1765 IXGBE_WRITE_REG(&ixgbe->hw,
1761 1766 IXGBE_TDT(tx_ring->index), 0);
1762 1767 }
1763 1768
1764 1769 mutex_exit(&tx_ring->recycle_lock);
1765 1770
1766 1771 /*
1767 1772 * Add the tx control blocks in the pending list to
1768 1773 * the free list.
1769 1774 */
1770 1775 ixgbe_put_free_list(tx_ring, &pending_list);
1771 1776 }
1772 1777 }
1773 1778
1774 1779 /*
1775 1780 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1776 1781 * transmitted.
1777 1782 */
1778 1783 static boolean_t
1779 1784 ixgbe_tx_drain(ixgbe_t *ixgbe)
1780 1785 {
1781 1786 ixgbe_tx_ring_t *tx_ring;
1782 1787 boolean_t done;
1783 1788 int i, j;
1784 1789
1785 1790 /*
1786 1791 * Wait for a specific time to allow pending tx packets
1787 1792 * to be transmitted.
1788 1793 *
1789 1794 * Check the counter tbd_free to see if transmission is done.
1790 1795 * No lock protection is needed here.
1791 1796 *
1792 1797 * Return B_TRUE if all pending packets have been transmitted;
1793 1798 * otherwise return B_FALSE.
1794 1799 */
1795 1800 for (i = 0; i < TX_DRAIN_TIME; i++) {
1796 1801
1797 1802 done = B_TRUE;
1798 1803 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1799 1804 tx_ring = &ixgbe->tx_rings[j];
1800 1805 done = done &&
1801 1806 (tx_ring->tbd_free == tx_ring->ring_size);
1802 1807 }
1803 1808
1804 1809 if (done)
1805 1810 break;
1806 1811
1807 1812 msec_delay(1);
1808 1813 }
1809 1814
1810 1815 return (done);
1811 1816 }
1812 1817
1813 1818 /*
1814 1819 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1815 1820 */
1816 1821 static boolean_t
1817 1822 ixgbe_rx_drain(ixgbe_t *ixgbe)
1818 1823 {
1819 1824 boolean_t done = B_TRUE;
1820 1825 int i;
1821 1826
1822 1827 /*
1823 1828 * Poll the rx free list to check whether the rx buffers held by
1824 1829 * the upper layer have been released.
1825 1830 *
1826 1831 * Check the counter rcb_free to see if all pending buffers are
1827 1832 * released. No lock protection is needed here.
1828 1833 *
1829 1834 * Return B_TRUE if all pending buffers have been released;
1830 1835 * otherwise return B_FALSE.
1831 1836 */
1832 1837 for (i = 0; i < RX_DRAIN_TIME; i++) {
1833 1838 done = (ixgbe->rcb_pending == 0);
1834 1839
1835 1840 if (done)
1836 1841 break;
1837 1842
1838 1843 msec_delay(1);
1839 1844 }
1840 1845
1841 1846 return (done);
1842 1847 }
1843 1848
1844 1849 /*
1845 1850 * ixgbe_start - Start the driver/chipset.
1846 1851 */
1847 1852 int
1848 1853 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1849 1854 {
1850 1855 struct ixgbe_hw *hw = &ixgbe->hw;
1851 1856 int i;
1852 1857
1853 1858 ASSERT(mutex_owned(&ixgbe->gen_lock));
1854 1859
1855 1860 if (alloc_buffer) {
1856 1861 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1857 1862 ixgbe_error(ixgbe,
1858 1863 "Failed to allocate software receive rings");
1859 1864 return (IXGBE_FAILURE);
1860 1865 }
1861 1866
1862 1867 /* Allocate buffers for all the rx/tx rings */
1863 1868 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1864 1869 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1865 1870 return (IXGBE_FAILURE);
1866 1871 }
1867 1872
1868 1873 ixgbe->tx_ring_init = B_TRUE;
1869 1874 } else {
1870 1875 ixgbe->tx_ring_init = B_FALSE;
1871 1876 }
1872 1877
1873 1878 for (i = 0; i < ixgbe->num_rx_rings; i++)
1874 1879 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1875 1880 for (i = 0; i < ixgbe->num_tx_rings; i++)
1876 1881 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1877 1882
1878 1883 /*
1879 1884 * Start the chipset hardware
1880 1885 */
1881 1886 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1882 1887 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1883 1888 goto start_failure;
1884 1889 }
1885 1890
1886 1891 /*
1887 1892 * Configure link now for X550
1888 1893 *
1889 1894 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
1890 1895 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
1891 1896 * the resting state of the link would be the maximum speed that
1892 1897 * autonegotiation will allow (usually 10Gb, infrastructure allowing)
1893 1898 * so we never bothered with explicitly setting the link to 10Gb as it
1894 1899 * would already be at that state on driver attach. With X550, we must
1895 1900 * trigger a re-negotiation of the link in order to switch from a LPLU
1896 1901 * 1Gb link to 10Gb (cable and link partner permitting.)
1897 1902 */
1898 1903 if (hw->mac.type == ixgbe_mac_X550 ||
1899 1904 hw->mac.type == ixgbe_mac_X550EM_a ||
1900 1905 hw->mac.type == ixgbe_mac_X550EM_x) {
1901 1906 (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
1902 1907 ixgbe_get_hw_state(ixgbe);
1903 1908 }
1904 1909
1905 1910 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1906 1911 goto start_failure;
1907 1912 }
1908 1913
1909 1914 /*
1910 1915 * Setup the rx/tx rings
1911 1916 */
1912 - ixgbe_setup_rings(ixgbe);
1917 + if (ixgbe_setup_rings(ixgbe) != IXGBE_SUCCESS)
1918 + goto start_failure;
1913 1919
1914 1920 /*
1915 1921 * ixgbe_start() will be called when resetting, however if reset
1916 1922 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1917 1923 * before enabling the interrupts.
1918 1924 */
1919 1925 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1920 1926 | IXGBE_STALL | IXGBE_OVERTEMP));
1921 1927
1922 1928 /*
1923 1929 * Enable adapter interrupts
1924 1930 * The interrupts must be enabled after the driver state is START
1925 1931 */
1926 1932 ixgbe_enable_adapter_interrupts(ixgbe);
1927 1933
1928 1934 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1929 1935 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1930 1936 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1931 1937 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1932 1938
1933 1939 return (IXGBE_SUCCESS);
1934 1940
1935 1941 start_failure:
1936 1942 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1937 1943 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1938 1944 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1939 1945 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1940 1946
1941 1947 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1942 1948
1943 1949 return (IXGBE_FAILURE);
1944 1950 }
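ixgbe_start() takes every rx ring lock in ascending index order, then every tx ring lock, before reprogramming the chip, and both the success and failure paths drop them in exact reverse order. The deadlock safety comes from the consistent acquisition order across every path that takes more than one ring lock; releasing in reverse is convention. A minimal sketch of the same discipline, with a hypothetical demo_nic_t standing in for ixgbe_t:

    #include <sys/mutex.h>

    /* Hypothetical layout, for illustration only. */
    typedef struct demo_nic {
        int         dn_nrx, dn_ntx;
        kmutex_t    *dn_rx_locks;   /* dn_nrx entries */
        kmutex_t    *dn_tx_locks;   /* dn_ntx entries */
    } demo_nic_t;

    static void
    demo_quiesce(demo_nic_t *nic)
    {
        int i;

        /* Acquire in one fixed global order: rx[0..n), then tx[0..n). */
        for (i = 0; i < nic->dn_nrx; i++)
            mutex_enter(&nic->dn_rx_locks[i]);
        for (i = 0; i < nic->dn_ntx; i++)
            mutex_enter(&nic->dn_tx_locks[i]);

        /* ... reprogram hardware while every ring is quiesced ... */

        /* Release in exact reverse order. */
        for (i = nic->dn_ntx - 1; i >= 0; i--)
            mutex_exit(&nic->dn_tx_locks[i]);
        for (i = nic->dn_nrx - 1; i >= 0; i--)
            mutex_exit(&nic->dn_rx_locks[i]);
    }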
1945 1951
1946 1952 /*
1947 1953 * ixgbe_stop - Stop the driver/chipset.
1948 1954 */
1949 1955 void
1950 1956 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1951 1957 {
1952 1958 int i;
1953 1959
1954 1960 ASSERT(mutex_owned(&ixgbe->gen_lock));
1955 1961
1956 1962 /*
1957 1963 * Disable the adapter interrupts
1958 1964 */
1959 1965 ixgbe_disable_adapter_interrupts(ixgbe);
1960 1966
1961 1967 /*
1962 1968 * Drain the pending tx packets
1963 1969 */
1964 1970 (void) ixgbe_tx_drain(ixgbe);
1965 1971
1966 1972 for (i = 0; i < ixgbe->num_rx_rings; i++)
1967 1973 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1968 1974 for (i = 0; i < ixgbe->num_tx_rings; i++)
1969 1975 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1970 1976
1971 1977 /*
1972 1978 * Stop the chipset hardware
1973 1979 */
1974 1980 ixgbe_chip_stop(ixgbe);
1975 1981
1976 1982 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1977 1983 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1978 1984 }
1979 1985
1980 1986 /*
1981 1987 * Clean the pending tx data/resources
1982 1988 */
1983 1989 ixgbe_tx_clean(ixgbe);
1984 1990
1985 1991 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1986 1992 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1987 1993 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1988 1994 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1989 1995
1990 1996 if (ixgbe->link_state == LINK_STATE_UP) {
1991 1997 ixgbe->link_state = LINK_STATE_UNKNOWN;
1992 1998 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1993 1999 }
1994 2000
1995 2001 if (free_buffer) {
1996 2002 /*
1997 2003 * Release the DMA/memory resources of rx/tx rings
1998 2004 */
1999 2005 ixgbe_free_dma(ixgbe);
2000 2006 ixgbe_free_rx_data(ixgbe);
2001 2007 }
2002 2008 }
2003 2009
2004 2010 /*
2005 2011 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
2006 2012 */
2007 2013 /* ARGSUSED */
2008 2014 static int
2009 2015 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
2010 2016 void *arg1, void *arg2)
2011 2017 {
2012 2018 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
2013 2019
2014 2020 switch (cbaction) {
2015 2021 /* IRM callback */
2016 2022 int count;
2017 2023 case DDI_CB_INTR_ADD:
2018 2024 case DDI_CB_INTR_REMOVE:
2019 2025 count = (int)(uintptr_t)cbarg;
2020 2026 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
2021 2027 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
2022 2028 int, ixgbe->intr_cnt);
2023 2029 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
2024 2030 DDI_SUCCESS) {
2025 2031 ixgbe_error(ixgbe,
2026 2032 "IRM CB: Failed to adjust interrupts");
2027 2033 goto cb_fail;
2028 2034 }
2029 2035 break;
2030 2036 default:
2031 2037 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
2032 2038 cbaction);
2033 2039 return (DDI_ENOTSUP);
2034 2040 }
2035 2041 return (DDI_SUCCESS);
2036 2042 cb_fail:
2037 2043 return (DDI_FAILURE);
2038 2044 }
2039 2045
2040 2046 /*
2041 2047 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
2042 2048 */
2043 2049 static int
2044 2050 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
2045 2051 {
2046 2052 int i, rc, actual;
2047 2053
2048 2054 if (count == 0)
2049 2055 return (DDI_SUCCESS);
2050 2056
2051 2057 if ((cbaction == DDI_CB_INTR_ADD &&
2052 2058 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
2053 2059 (cbaction == DDI_CB_INTR_REMOVE &&
2054 2060 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
2055 2061 return (DDI_FAILURE);
2056 2062
2057 2063 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
2058 2064 return (DDI_FAILURE);
2059 2065 }
2060 2066
2061 2067 for (i = 0; i < ixgbe->num_rx_rings; i++)
2062 2068 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
2063 2069 for (i = 0; i < ixgbe->num_tx_rings; i++)
2064 2070 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
2065 2071
2066 2072 mutex_enter(&ixgbe->gen_lock);
2067 2073 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
2068 2074 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
2069 2075 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
2070 2076 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
2071 2077
2072 2078 ixgbe_stop(ixgbe, B_FALSE);
2073 2079 /*
2074 2080 * Disable interrupts
2075 2081 */
2076 2082 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
2077 2083 rc = ixgbe_disable_intrs(ixgbe);
2078 2084 ASSERT(rc == IXGBE_SUCCESS);
2079 2085 }
2080 2086 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
2081 2087
2082 2088 /*
2083 2089 * Remove interrupt handlers
2084 2090 */
2085 2091 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
2086 2092 ixgbe_rem_intr_handlers(ixgbe);
2087 2093 }
2088 2094 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
2089 2095
2090 2096 /*
2091 2097 * Clear vect_map
2092 2098 */
2093 2099 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
2094 2100 switch (cbaction) {
2095 2101 case DDI_CB_INTR_ADD:
2096 2102 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
2097 2103 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
2098 2104 DDI_INTR_ALLOC_NORMAL);
2099 2105 if (rc != DDI_SUCCESS || actual != count) {
2100 2106 ixgbe_log(ixgbe, "Adjust interrupts failed. "
2101 2107 "return: %d, irm cb size: %d, actual: %d",
2102 2108 rc, count, actual);
2103 2109 goto intr_adjust_fail;
2104 2110 }
2105 2111 ixgbe->intr_cnt += count;
2106 2112 break;
2107 2113
2108 2114 case DDI_CB_INTR_REMOVE:
2109 2115 for (i = ixgbe->intr_cnt - count;
2110 2116 i < ixgbe->intr_cnt; i++) {
2111 2117 rc = ddi_intr_free(ixgbe->htable[i]);
2112 2118 ixgbe->htable[i] = NULL;
2113 2119 if (rc != DDI_SUCCESS) {
2114 2120 ixgbe_log(ixgbe, "Adjust interrupts failed. "
2115 2121 "return: %d, irm cb size: %d, actual: %d",
2116 2122 rc, count, actual);
2117 2123 goto intr_adjust_fail;
2118 2124 }
2119 2125 }
2120 2126 ixgbe->intr_cnt -= count;
2121 2127 break;
2122 2128 }
2123 2129
2124 2130 /*
2125 2131 * Get priority for first vector, assume remaining are all the same
2126 2132 */
2127 2133 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
2128 2134 if (rc != DDI_SUCCESS) {
2129 2135 ixgbe_log(ixgbe,
2130 2136 "Get interrupt priority failed: %d", rc);
2131 2137 goto intr_adjust_fail;
2132 2138 }
2133 2139 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
2134 2140 if (rc != DDI_SUCCESS) {
2135 2141 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
2136 2142 goto intr_adjust_fail;
2137 2143 }
2138 2144 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
2139 2145
2140 2146 /*
2141 2147 * Map rings to interrupt vectors
2142 2148 */
2143 2149 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
2144 2150 ixgbe_error(ixgbe,
2145 2151 "IRM CB: Failed to map interrupts to vectors");
2146 2152 goto intr_adjust_fail;
2147 2153 }
2148 2154
2149 2155 /*
2150 2156 * Add interrupt handlers
2151 2157 */
2152 2158 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
2153 2159 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
2154 2160 goto intr_adjust_fail;
2155 2161 }
2156 2162 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
2157 2163
2158 2164 /*
2159 2165 * Now that mutex locks are initialized, and the chip is also
2160 2166 * initialized, enable interrupts.
2161 2167 */
2162 2168 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
2163 2169 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
2164 2170 goto intr_adjust_fail;
2165 2171 }
2166 2172 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
2167 2173 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
2168 2174 ixgbe_error(ixgbe, "IRM CB: Failed to start");
2169 2175 goto intr_adjust_fail;
2170 2176 }
2171 2177 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
2172 2178 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
2173 2179 ixgbe->ixgbe_state |= IXGBE_STARTED;
2174 2180 mutex_exit(&ixgbe->gen_lock);
2175 2181
2176 2182 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2177 2183 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
2178 2184 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
2179 2185 }
2180 2186 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2181 2187 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
2182 2188 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
2183 2189 }
2184 2190
2185 2191 /* Wakeup all Tx rings */
2186 2192 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2187 2193 mac_tx_ring_update(ixgbe->mac_hdl,
2188 2194 ixgbe->tx_rings[i].ring_handle);
2189 2195 }
2190 2196
2191 2197 IXGBE_DEBUGLOG_3(ixgbe,
2192 2198 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
2193 2199 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
2194 2200 return (DDI_SUCCESS);
2195 2201
2196 2202 intr_adjust_fail:
2197 2203 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
2198 2204 mutex_exit(&ixgbe->gen_lock);
2199 2205 return (DDI_FAILURE);
2200 2206 }
2201 2207
2202 2208 /*
2203 2209 * ixgbe_intr_cb_register - Register interrupt callback function.
2204 2210 */
2205 2211 static int
2206 2212 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
2207 2213 {
2208 2214 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
2209 2215 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
2210 2216 return (IXGBE_FAILURE);
2211 2217 }
2212 2218 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
2213 2219 return (IXGBE_SUCCESS);
2214 2220 }
2215 2221
2216 2222 /*
2217 2223 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2218 2224 */
2219 2225 static int
2220 2226 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2221 2227 {
2222 2228 /*
2223 2229 * Allocate memory space for rx rings
2224 2230 */
2225 2231 ixgbe->rx_rings = kmem_zalloc(
2226 2232 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2227 2233 KM_NOSLEEP);
2228 2234
2229 2235 if (ixgbe->rx_rings == NULL) {
2230 2236 return (IXGBE_FAILURE);
2231 2237 }
2232 2238
2233 2239 /*
2234 2240 * Allocate memory space for tx rings
2235 2241 */
2236 2242 ixgbe->tx_rings = kmem_zalloc(
2237 2243 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2238 2244 KM_NOSLEEP);
2239 2245
2240 2246 if (ixgbe->tx_rings == NULL) {
2241 2247 kmem_free(ixgbe->rx_rings,
2242 2248 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2243 2249 ixgbe->rx_rings = NULL;
2244 2250 return (IXGBE_FAILURE);
2245 2251 }
2246 2252
2247 2253 /*
2248 2254 * Allocate memory space for rx ring groups
2249 2255 */
2250 2256 ixgbe->rx_groups = kmem_zalloc(
2251 2257 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2252 2258 KM_NOSLEEP);
2253 2259
2254 2260 if (ixgbe->rx_groups == NULL) {
2255 2261 kmem_free(ixgbe->rx_rings,
2256 2262 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2257 2263 kmem_free(ixgbe->tx_rings,
2258 2264 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2259 2265 ixgbe->rx_rings = NULL;
2260 2266 ixgbe->tx_rings = NULL;
2261 2267 return (IXGBE_FAILURE);
2262 2268 }
2263 2269
2264 2270 return (IXGBE_SUCCESS);
2265 2271 }
2266 2272
2267 2273 /*
2268 2274 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2269 2275 */
2270 2276 static void
2271 2277 ixgbe_free_rings(ixgbe_t *ixgbe)
2272 2278 {
2273 2279 if (ixgbe->rx_rings != NULL) {
2274 2280 kmem_free(ixgbe->rx_rings,
2275 2281 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2276 2282 ixgbe->rx_rings = NULL;
2277 2283 }
2278 2284
2279 2285 if (ixgbe->tx_rings != NULL) {
2280 2286 kmem_free(ixgbe->tx_rings,
2281 2287 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2282 2288 ixgbe->tx_rings = NULL;
2283 2289 }
2284 2290
2291 + for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
2292 + ixgbe_vlan_t *vlp;
2293 + ixgbe_rx_group_t *rx_group = &ixgbe->rx_groups[i];
2294 +
2295 + while ((vlp = list_remove_head(&rx_group->vlans)) != NULL)
2296 + kmem_free(vlp, sizeof (ixgbe_vlan_t));
2297 +
2298 + list_destroy(&rx_group->vlans);
2299 + }
2300 +
2285 2301 if (ixgbe->rx_groups != NULL) {
2286 2302 kmem_free(ixgbe->rx_groups,
2287 2303 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2288 2304 ixgbe->rx_groups = NULL;
2289 2305 }
2290 2306 }
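The vlans teardown added above is the standard illumos list(9F) drain-then-destroy idiom. A hedged sketch of the full list lifecycle follows; the demo_vlan_t layout is illustrative (the real ixgbe_vlan_t lives in ixgbe_sw.h, and only its ixvl_vid field is visible in this diff):

    #include <sys/list.h>
    #include <sys/kmem.h>
    #include <sys/sysmacros.h>

    typedef struct demo_vlan {
        list_node_t dv_node;    /* linkage consumed by list(9F) */
        uint16_t    dv_vid;
    } demo_vlan_t;

    static void
    demo_vlan_lifecycle(void)
    {
        list_t vlans;
        demo_vlan_t *vlp;

        /* The offset tells list(9F) where the linkage sits. */
        list_create(&vlans, sizeof (demo_vlan_t),
            offsetof(demo_vlan_t, dv_node));

        vlp = kmem_zalloc(sizeof (demo_vlan_t), KM_SLEEP);
        vlp->dv_vid = 100;
        list_insert_tail(&vlans, vlp);

        /* Teardown mirrors ixgbe_free_rings(): drain, then destroy. */
        while ((vlp = list_remove_head(&vlans)) != NULL)
            kmem_free(vlp, sizeof (demo_vlan_t));
        list_destroy(&vlans);
    }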
2291 2307
2292 2308 static int
2293 2309 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2294 2310 {
2295 2311 ixgbe_rx_ring_t *rx_ring;
2296 2312 int i;
2297 2313
2298 2314 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2299 2315 rx_ring = &ixgbe->rx_rings[i];
2300 2316 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2301 2317 goto alloc_rx_rings_failure;
2302 2318 }
2303 2319 return (IXGBE_SUCCESS);
2304 2320
2305 2321 alloc_rx_rings_failure:
2306 2322 ixgbe_free_rx_data(ixgbe);
2307 2323 return (IXGBE_FAILURE);
2308 2324 }
2309 2325
2310 2326 static void
2311 2327 ixgbe_free_rx_data(ixgbe_t *ixgbe)
2312 2328 {
2313 2329 ixgbe_rx_ring_t *rx_ring;
2314 2330 ixgbe_rx_data_t *rx_data;
2315 2331 int i;
2316 2332
2317 2333 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2318 2334 rx_ring = &ixgbe->rx_rings[i];
2319 2335
2320 2336 mutex_enter(&ixgbe->rx_pending_lock);
2321 2337 rx_data = rx_ring->rx_data;
2322 2338
2323 2339 if (rx_data != NULL) {
2324 2340 rx_data->flag |= IXGBE_RX_STOPPED;
2325 2341
2326 2342 if (rx_data->rcb_pending == 0) {
2327 2343 ixgbe_free_rx_ring_data(rx_data);
2328 2344 rx_ring->rx_data = NULL;
2329 2345 }
2330 2346 }
2331 2347
2332 2348 mutex_exit(&ixgbe->rx_pending_lock);
2333 2349 }
2334 2350 }
2335 2351
2336 2352 /*
2337 2353 * ixgbe_setup_rings - Setup rx/tx rings.
2338 2354 */
2339 -static void
2355 +static int
2340 2356 ixgbe_setup_rings(ixgbe_t *ixgbe)
2341 2357 {
2342 2358 /*
2343 2359 * Setup the rx/tx rings, including the following:
2344 2360 *
2345 2361 * 1. Setup the descriptor ring and the control block buffers;
2346 2362 * 2. Initialize necessary registers for receive/transmit;
2347 2363 * 3. Initialize software pointers/parameters for receive/transmit;
2348 2364 */
2349 - ixgbe_setup_rx(ixgbe);
2365 + if (ixgbe_setup_rx(ixgbe) != IXGBE_SUCCESS)
2366 + return (IXGBE_FAILURE);
2350 2367
2351 2368 ixgbe_setup_tx(ixgbe);
2369 +
2370 + return (IXGBE_SUCCESS);
2352 2371 }
2353 2372
2354 2373 static void
2355 2374 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2356 2375 {
2357 2376 ixgbe_t *ixgbe = rx_ring->ixgbe;
2358 2377 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2359 2378 struct ixgbe_hw *hw = &ixgbe->hw;
2360 2379 rx_control_block_t *rcb;
2361 2380 union ixgbe_adv_rx_desc *rbd;
2362 2381 uint32_t size;
2363 2382 uint32_t buf_low;
2364 2383 uint32_t buf_high;
2365 2384 uint32_t reg_val;
2366 2385 int i;
2367 2386
2368 2387 ASSERT(mutex_owned(&rx_ring->rx_lock));
2369 2388 ASSERT(mutex_owned(&ixgbe->gen_lock));
2370 2389
2371 2390 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2372 2391 rcb = rx_data->work_list[i];
2373 2392 rbd = &rx_data->rbd_ring[i];
2374 2393
2375 2394 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2376 2395 rbd->read.hdr_addr = 0;
2377 2396 }
2378 2397
2379 2398 /*
2380 2399 * Initialize the length register
2381 2400 */
2382 2401 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2383 2402 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2384 2403
2385 2404 /*
2386 2405 * Initialize the base address registers
2387 2406 */
2388 2407 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2389 2408 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2390 2409 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2391 2410 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2392 2411
2393 2412 /*
2394 2413 * Setup head & tail pointers
2395 2414 */
2396 2415 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2397 2416 rx_data->ring_size - 1);
2398 2417 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2399 2418
2400 2419 rx_data->rbd_next = 0;
2401 2420 rx_data->lro_first = 0;
2402 2421
2403 2422 /*
2404 2423 * Setup the Receive Descriptor Control Register (RXDCTL)
2405 2424 * PTHRESH=32 descriptors (half the internal cache)
2406 2425 * HTHRESH=0 descriptors (to minimize latency on fetch)
2407 2426 * WTHRESH defaults to 1 (writeback each descriptor)
2408 2427 */
2409 2428 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2410 2429 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2411 2430
2412 2431 /* Not a valid value for 82599, X540 or X550 */
2413 2432 if (hw->mac.type == ixgbe_mac_82598EB) {
2414 2433 reg_val |= 0x0020; /* pthresh */
2415 2434 }
2416 2435 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2417 2436
2418 2437 if (hw->mac.type == ixgbe_mac_82599EB ||
2419 2438 hw->mac.type == ixgbe_mac_X540 ||
2420 2439 hw->mac.type == ixgbe_mac_X550 ||
2421 2440 hw->mac.type == ixgbe_mac_X550EM_x ||
2422 2441 hw->mac.type == ixgbe_mac_X550EM_a) {
2423 2442 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2424 2443 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2425 2444 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2426 2445 }
2427 2446
2427 2446
2428 2447 /*
2429 2448 * Setup the Split and Replication Receive Control Register.
2430 2449 * Set the rx buffer size and the advanced descriptor type.
2431 2450 */
2432 2451 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2433 2452 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2434 2453 reg_val |= IXGBE_SRRCTL_DROP_EN;
2435 2454 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2436 2455 }
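The BSIZEPKT field written just above is expressed in 1 KB granules, which is why rx_buf_size is shifted right by IXGBE_SRRCTL_BSIZEPKT_SHIFT (10 in the shared code). A standalone check of the packing, assuming a 2 KB receive buffer:

    #include <assert.h>
    #include <stdint.h>

    #define SRRCTL_BSIZEPKT_SHIFT   10  /* field is in 1 KB units */

    int
    main(void)
    {
        uint32_t rx_buf_size = 2048;

        /* 2048 bytes collapse to two 1 KB granules in the field. */
        assert((rx_buf_size >> SRRCTL_BSIZEPKT_SHIFT) == 2);
        return (0);
    }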
2437 2456
2438 -static void
2457 +static int
2439 2458 ixgbe_setup_rx(ixgbe_t *ixgbe)
2440 2459 {
2441 2460 ixgbe_rx_ring_t *rx_ring;
2442 2461 struct ixgbe_hw *hw = &ixgbe->hw;
2443 2462 uint32_t reg_val;
2444 2463 uint32_t i;
2445 2464 uint32_t psrtype_rss_bit;
2446 2465
2447 2466 /*
2448 2467 * Ensure that Rx is disabled while setting up
2449 2468 * the Rx unit and Rx descriptor ring(s)
2450 2469 */
2451 2470 ixgbe_disable_rx(hw);
2452 2471
2453 2472 /* PSRTYPE must be configured for 82599 */
2454 2473 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2455 2474 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2456 2475 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2457 2476 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2458 2477 reg_val |= IXGBE_PSRTYPE_L2HDR;
2459 2478 reg_val |= 0x80000000;
2460 2479 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2461 2480 } else {
2462 2481 if (ixgbe->num_rx_groups > 32) {
2463 2482 psrtype_rss_bit = 0x20000000;
2464 2483 } else {
2465 2484 psrtype_rss_bit = 0x40000000;
2466 2485 }
2467 2486 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2468 2487 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2469 2488 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2470 2489 reg_val |= IXGBE_PSRTYPE_L2HDR;
2471 2490 reg_val |= psrtype_rss_bit;
2472 2491 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2473 2492 }
2474 2493 }
2475 2494
2476 2495 /*
2477 2496 * Set filter control in FCTRL to determine types of packets are passed
2478 2497 * up to the driver.
2479 2498 * - Pass broadcast packets.
2480 2499 * - Do not pass flow control pause frames (82598-specific)
2481 2500 */
2482 2501 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2483 2502 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */
2484 2503 if (hw->mac.type == ixgbe_mac_82598EB) {
2485 2504 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */
2486 2505 }
2487 2506 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2488 2507
2489 2508 /*
2490 2509 * Hardware checksum settings
2491 2510 */
2492 2511 if (ixgbe->rx_hcksum_enable) {
2493 2512 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2494 2513 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2495 2514 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2496 2515 }
2497 2516
2498 2517 /*
2499 2518 * Setup VMDq and RSS for multiple receive queues
2500 2519 */
2501 2520 switch (ixgbe->classify_mode) {
2502 2521 case IXGBE_CLASSIFY_RSS:
2503 2522 /*
2504 2523 * One group, only RSS is needed when more than
2505 2524 * one ring enabled.
2506 2525 */
2507 2526 ixgbe_setup_rss(ixgbe);
2508 2527 break;
2509 2528
2510 2529 case IXGBE_CLASSIFY_VMDQ:
2511 2530 /*
2512 2531 * Multiple groups, each group has one ring,
2513 2532 * only VMDq is needed.
2514 2533 */
2515 2534 ixgbe_setup_vmdq(ixgbe);
2516 2535 break;
2517 2536
2518 2537 case IXGBE_CLASSIFY_VMDQ_RSS:
2519 2538 /*
2520 2539 * Multiple groups and multiple rings, both
2521 2540 * VMDq and RSS are needed.
2522 2541 */
2523 2542 ixgbe_setup_vmdq_rss(ixgbe);
2524 2543 break;
2525 2544
2526 2545 default:
2527 2546 break;
2528 2547 }
2529 2548
2530 2549 /*
2550 + * Initialize VLAN SW and HW state if VLAN filtering is
2551 + * enabled.
2552 + */
2553 + if (ixgbe->vlft_enabled) {
2554 + if (ixgbe_init_vlan(ixgbe) != IXGBE_SUCCESS)
2555 + return (IXGBE_FAILURE);
2556 + }
2557 +
2558 + /*
2531 2559 * Enable the receive unit. This must be done after filter
2532 2560 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2533 2561 * 82598 is the only adapter which defines this RXCTRL option.
2534 2562 */
2535 2563 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2536 2564 if (hw->mac.type == ixgbe_mac_82598EB)
2537 2565 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2538 2566 reg_val |= IXGBE_RXCTRL_RXEN;
2539 2567 (void) ixgbe_enable_rx_dma(hw, reg_val);
2540 2568
2541 2569 /*
2542 2570 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2543 2571 */
2544 2572 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2545 2573 rx_ring = &ixgbe->rx_rings[i];
2546 2574 ixgbe_setup_rx_ring(rx_ring);
2547 2575 }
2548 2576
2549 2577 /*
2550 2578 * The 82598 controller gives us the RNBC (Receive No Buffer
2551 2579 * Count) register to determine the number of frames dropped
2552 2580 * due to no available descriptors on the destination queue.
2553 2581 * However, this register was removed starting with 82599 and
2554 2582 * it was replaced with the RQSMR/QPRDC registers. The nice
2555 2583 * thing about the new registers is that they allow you to map
2556 2584 * groups of queues to specific stat registers. The bad thing
2557 2585 * is there are only 16 slots in the stat registers, so this
2558 2586 * won't work when we have 32 Rx groups. Instead, we map all
2559 2587 * queues to the zero slot of the stat registers, giving us a
2560 2588 * global counter at QPRDC[0] (with the equivalent semantics
2561 2589 * of RNBC). Perhaps future controllers will have more slots
2562 2590 * and we can implement per-group counters.
2563 2591 */
2564 2592 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2565 2593 uint32_t index = ixgbe->rx_rings[i].hw_index;
2566 2594 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), 0);
2567 2595 }
2568 2596
2569 2597 /*
2570 2598 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2571 2599 * by four bytes if the packet has a VLAN field, so the value set
2572 2600 * here includes the MTU, ethernet header and frame check sequence.
2573 2601 * The register is named MAXFRS on 82599.
2574 2602 */
2575 2603 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2576 2604 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2577 2605 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2578 2606 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2579 2607 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
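To make the MHADD arithmetic concrete: with the default MTU of 1500 the programmed frame size is 1500 + 14 (ethernet header) + 4 (FCS) = 1518, shifted into the MFS field; the hardware adds the 4 VLAN-tag bytes itself. A standalone check, assuming the usual shift of 16 for IXGBE_MHADD_MFS_SHIFT:

    #include <assert.h>
    #include <stdint.h>

    #define ETHER_HDR_LEN   14  /* sizeof (struct ether_header) */
    #define ETHERFCSL       4   /* frame check sequence */
    #define MFS_SHIFT       16  /* assumed IXGBE_MHADD_MFS_SHIFT */

    int
    main(void)
    {
        uint32_t mfs = 1500 + ETHER_HDR_LEN + ETHERFCSL;

        assert(mfs == 1518);
        assert((mfs << MFS_SHIFT) == 0x05EE0000);   /* field value */
        return (0);
    }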
2580 2608
2581 2609 /*
2582 2610 * Setup Jumbo Frame enable bit
2583 2611 */
2584 2612 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2585 2613 if (ixgbe->default_mtu > ETHERMTU)
2586 2614 reg_val |= IXGBE_HLREG0_JUMBOEN;
2587 2615 else
2588 2616 reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2589 2617 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2590 2618
2591 2619 /*
2592 2620 * Setup RSC for multiple receive queues.
2593 2621 */
2594 2622 if (ixgbe->lro_enable) {
2595 2623 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2596 2624 /*
2597 2625 * Make sure rx_buf_size * MAXDESC is not greater
2598 2626 * than 65535.
2599 2627 * Intel recommends 4 for MAXDESC field value.
2600 2628 */
2601 2629 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2602 2630 reg_val |= IXGBE_RSCCTL_RSCEN;
2603 2631 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2604 2632 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2605 2633 else
2606 2634 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2607 2635 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2608 2636 }
2609 2637
2610 2638 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2611 2639 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2612 2640 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2613 2641
2614 2642 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2615 2643 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2616 2644 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2617 2645 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2618 2646
2619 2647 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2620 2648 }
2649 +
2650 + return (IXGBE_SUCCESS);
2621 2651 }
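The MAXDESC choice in the LRO block follows directly from the 65535-byte RSC limit noted in the comment: the recommended MAXDESC of 4 is safe for buffers up to 8 KB, but 4 x 16 KB would exceed the limit, so 16 KB buffers drop to a single descriptor. A standalone sanity check of that arithmetic:

    #include <assert.h>

    int
    main(void)
    {
        /* RSC constraint: rx_buf_size * MAXDESC <= 65535. */
        assert(4 * 8192 <= 65535);  /* MAXDESC_4 fits for 8 KB buffers */
        assert(4 * 16384 > 65535);  /* ... but not for 16 KB buffers */
        assert(1 * 16384 <= 65535); /* hence MAXDESC_1 at 16 KB */
        return (0);
    }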
2622 2652
2623 2653 static void
2624 2654 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2625 2655 {
2626 2656 ixgbe_t *ixgbe = tx_ring->ixgbe;
2627 2657 struct ixgbe_hw *hw = &ixgbe->hw;
2628 2658 uint32_t size;
2629 2659 uint32_t buf_low;
2630 2660 uint32_t buf_high;
2631 2661 uint32_t reg_val;
2632 2662
2633 2663 ASSERT(mutex_owned(&tx_ring->tx_lock));
2634 2664 ASSERT(mutex_owned(&ixgbe->gen_lock));
2635 2665
2636 2666 /*
2637 2667 * Initialize the length register
2638 2668 */
2639 2669 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2640 2670 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2641 2671
2642 2672 /*
2643 2673 * Initialize the base address registers
2644 2674 */
2645 2675 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2646 2676 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2647 2677 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2648 2678 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2649 2679
2650 2680 /*
2651 2681 * Setup head & tail pointers
2652 2682 */
2653 2683 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2654 2684 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2655 2685
2656 2686 /*
2657 2687 * Setup head write-back
2658 2688 */
2659 2689 if (ixgbe->tx_head_wb_enable) {
2660 2690 /*
2661 2691 * The memory of the head write-back is allocated using
2662 2692 * the extra tbd beyond the tail of the tbd ring.
2663 2693 */
2664 2694 tx_ring->tbd_head_wb = (uint32_t *)
2665 2695 ((uintptr_t)tx_ring->tbd_area.address + size);
2666 2696 *tx_ring->tbd_head_wb = 0;
2667 2697
2668 2698 buf_low = (uint32_t)
2669 2699 (tx_ring->tbd_area.dma_address + size);
2670 2700 buf_high = (uint32_t)
2671 2701 ((tx_ring->tbd_area.dma_address + size) >> 32);
2672 2702
2673 2703 /* Set the head write-back enable bit */
2674 2704 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2675 2705
2676 2706 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2677 2707 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2678 2708
2679 2709 /*
2680 2710 * Turn off relaxed ordering for head write back or it will
2681 2711 * cause problems with the tx recycling
2682 2712 */
2683 2713
2684 2714 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2685 2715 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2686 2716 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2687 2717 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2688 2718 if (hw->mac.type == ixgbe_mac_82598EB) {
2689 2719 IXGBE_WRITE_REG(hw,
2690 2720 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2691 2721 } else {
2692 2722 IXGBE_WRITE_REG(hw,
2693 2723 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2694 2724 }
2695 2725 } else {
2696 2726 tx_ring->tbd_head_wb = NULL;
2697 2727 }
2698 2728
2699 2729 tx_ring->tbd_head = 0;
2700 2730 tx_ring->tbd_tail = 0;
2701 2731 tx_ring->tbd_free = tx_ring->ring_size;
2702 2732
2703 2733 if (ixgbe->tx_ring_init == B_TRUE) {
2704 2734 tx_ring->tcb_head = 0;
2705 2735 tx_ring->tcb_tail = 0;
2706 2736 tx_ring->tcb_free = tx_ring->free_list_size;
2707 2737 }
2708 2738
2709 2739 /*
2710 2740 * Initialize the s/w context structure
2711 2741 */
2712 2742 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2713 2743 }
2714 2744
2715 2745 static void
2716 2746 ixgbe_setup_tx(ixgbe_t *ixgbe)
2717 2747 {
2718 2748 struct ixgbe_hw *hw = &ixgbe->hw;
2719 2749 ixgbe_tx_ring_t *tx_ring;
2720 2750 uint32_t reg_val;
2721 2751 int i;
2722 2752
2723 2753 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2724 2754 tx_ring = &ixgbe->tx_rings[i];
2725 2755 ixgbe_setup_tx_ring(tx_ring);
2726 2756 }
2727 2757
2728 2758 /*
2729 2759 * Setup the per-ring statistics mapping. We map all Tx queues
2730 2760 * to slot 0 to stay consistent with Rx.
2731 2761 */
2732 2762 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2733 2763 switch (hw->mac.type) {
2734 2764 case ixgbe_mac_82598EB:
2735 2765 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 0);
2736 2766 break;
2737 2767
2738 2768 default:
2739 2769 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 0);
2740 2770 break;
2741 2771 }
2742 2772 }
2743 2773
2744 2774 /*
2745 2775 * Enable CRC appending and TX padding (for short tx frames)
2746 2776 */
2747 2777 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2748 2778 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2749 2779 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2750 2780
2751 2781 /*
2752 2782 * enable DMA for 82599, X540 and X550 parts
2753 2783 */
2754 2784 if (hw->mac.type == ixgbe_mac_82599EB ||
2755 2785 hw->mac.type == ixgbe_mac_X540 ||
2756 2786 hw->mac.type == ixgbe_mac_X550 ||
2757 2787 hw->mac.type == ixgbe_mac_X550EM_x ||
2758 2788 hw->mac.type == ixgbe_mac_X550EM_a) {
2759 2789 /* DMATXCTL.TE must be set after all Tx config is complete */
2760 2790 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2761 2791 reg_val |= IXGBE_DMATXCTL_TE;
2762 2792 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2763 2793
2764 2794 /* Disable arbiter to set MTQC */
2765 2795 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2766 2796 reg_val |= IXGBE_RTTDCS_ARBDIS;
2767 2797 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2768 2798 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2769 2799 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2770 2800 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2771 2801 }
2772 2802
2773 2803 /*
2774 2804 * Enable tx queues.
2775 2805 * For 82599 this must be done after DMATXCTL.TE is set.
2776 2806 */
2777 2807 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2778 2808 tx_ring = &ixgbe->tx_rings[i];
2779 2809 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2780 2810 reg_val |= IXGBE_TXDCTL_ENABLE;
2781 2811 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2782 2812 }
2783 2813 }
2784 2814
2785 2815 /*
2786 2816 * ixgbe_setup_rss - Setup receive-side scaling feature.
2787 2817 */
2788 2818 static void
2789 2819 ixgbe_setup_rss(ixgbe_t *ixgbe)
2790 2820 {
2791 2821 struct ixgbe_hw *hw = &ixgbe->hw;
2792 2822 uint32_t mrqc;
2793 2823
2794 2824 /*
2795 2825 * Initialize RETA/ERETA table
2796 2826 */
2797 2827 ixgbe_setup_rss_table(ixgbe);
2798 2828
2799 2829 /*
2800 2830 * Enable RSS & perform hash on these packet types
2801 2831 */
2802 2832 mrqc = IXGBE_MRQC_RSSEN |
2803 2833 IXGBE_MRQC_RSS_FIELD_IPV4 |
2804 2834 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2805 2835 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2806 2836 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2807 2837 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2808 2838 IXGBE_MRQC_RSS_FIELD_IPV6 |
2809 2839 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2810 2840 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2811 2841 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2812 2842 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2813 2843 }
2814 2844
2815 2845 /*
2816 2846 * ixgbe_setup_vmdq - Setup MAC classification feature
2817 2847 */
2818 2848 static void
2819 2849 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2820 2850 {
2821 2851 struct ixgbe_hw *hw = &ixgbe->hw;
2822 - uint32_t vmdctl, i, vtctl;
2852 + uint32_t vmdctl, i, vtctl, vlnctl;
2823 2853
2824 2854 /*
2825 2855 * Setup the VMDq Control register, enable VMDq based on
2826 2856 * packet destination MAC address:
2827 2857 */
2828 2858 switch (hw->mac.type) {
2829 2859 case ixgbe_mac_82598EB:
2830 2860 /*
2831 2861 * VMDq Enable = 1;
2832 2862 * VMDq Filter = 0; MAC filtering
2833 2863 * Default VMDq output index = 0;
2834 2864 */
2835 2865 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2836 2866 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2837 2867 break;
2838 2868
2839 2869 case ixgbe_mac_82599EB:
2840 2870 case ixgbe_mac_X540:
2841 2871 case ixgbe_mac_X550:
2842 2872 case ixgbe_mac_X550EM_x:
2843 2873 case ixgbe_mac_X550EM_a:
2844 2874 /*
2845 2875 * Enable VMDq-only.
2846 2876 */
2847 2877 vmdctl = IXGBE_MRQC_VMDQEN;
2848 2878 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2849 2879
2850 2880 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2851 2881 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2852 2882 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2853 2883 }
2854 2884
2855 2885 /*
2856 2886 * Enable Virtualization and Replication.
2857 2887 */
2858 - vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2888 + vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2889 + ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
2890 + vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2859 2891 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2860 2892
2861 2893 /*
2894 + * Enable VLAN filtering and switching (VFTA and VLVF).
2895 + */
2896 + vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2897 + vlnctl |= IXGBE_VLNCTRL_VFE;
2898 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
2899 + ixgbe->vlft_enabled = B_TRUE;
2900 +
2901 + /*
2862 2902 * Enable receiving packets to all VFs
2863 2903 */
2864 2904 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2865 2905 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2866 2906 break;
2867 2907
2868 2908 default:
2869 2909 break;
2870 2910 }
2871 2911 }
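The VT_CTL change above replaces a blind store with a read-modify-write so the default-pool bits the hardware or firmware already established survive the write (and are captured in rx_def_group for the VLVF-bypass logic). A standalone sketch of the idea; the mask and bit values here are illustrative stand-ins for the definitions in ixgbe_type.h:

    #include <assert.h>
    #include <stdint.h>

    #define VT_ENABLE   0x00000001u /* illustrative */
    #define REPLEN      0x40000000u /* illustrative */
    #define POOL_MASK   0x00001f80u /* illustrative default-pool field */

    int
    main(void)
    {
        uint32_t vtctl = 0x00000200u;   /* pretend reset left pool bits set */
        uint32_t def_group = vtctl & POOL_MASK;

        vtctl |= VT_ENABLE | REPLEN;    /* modify, don't overwrite */
        assert((vtctl & POOL_MASK) == def_group);
        return (0);
    }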
2872 2912
2873 2913 /*
2874 2914 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2875 2915 */
2876 2916 static void
2877 2917 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2878 2918 {
2879 2919 struct ixgbe_hw *hw = &ixgbe->hw;
2880 2920 uint32_t i, mrqc;
2881 - uint32_t vtctl, vmdctl;
2921 + uint32_t vtctl, vmdctl, vlnctl;
2882 2922
2883 2923 /*
2884 2924 * Initialize RETA/ERETA table
2885 2925 */
2886 2926 ixgbe_setup_rss_table(ixgbe);
2887 2927
2888 2928 /*
2889 2929 * Enable and setup RSS and VMDq
2890 2930 */
2891 2931 switch (hw->mac.type) {
2892 2932 case ixgbe_mac_82598EB:
2893 2933 /*
2894 2934 * Enable RSS & Setup RSS Hash functions
2895 2935 */
2896 2936 mrqc = IXGBE_MRQC_RSSEN |
2897 2937 IXGBE_MRQC_RSS_FIELD_IPV4 |
2898 2938 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2899 2939 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2900 2940 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2901 2941 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2902 2942 IXGBE_MRQC_RSS_FIELD_IPV6 |
2903 2943 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2904 2944 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2905 2945 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2906 2946 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2907 2947
2908 2948 /*
2909 2949 * Enable and Setup VMDq
2910 2950 * VMDq Filter = 0; MAC filtering
2911 2951 * Default VMDq output index = 0;
2912 2952 */
2913 2953 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2914 2954 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2915 2955 break;
2916 2956
2917 2957 case ixgbe_mac_82599EB:
2918 2958 case ixgbe_mac_X540:
2919 2959 case ixgbe_mac_X550:
2920 2960 case ixgbe_mac_X550EM_x:
2921 2961 case ixgbe_mac_X550EM_a:
2922 2962 /*
2923 2963 * Enable RSS & Setup RSS Hash functions
2924 2964 */
2925 2965 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2926 2966 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2927 2967 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2928 2968 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2929 2969 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2930 2970 IXGBE_MRQC_RSS_FIELD_IPV6 |
2931 2971 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2932 2972 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2933 2973 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2934 2974
2935 2975 /*
2936 2976 * Enable VMDq+RSS.
2937 2977 */
2938 2978 if (ixgbe->num_rx_groups > 32) {
2939 2979 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2940 2980 } else {
2941 2981 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2942 2982 }
2943 2983
2944 2984 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2945 2985
2946 2986 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2947 2987 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2948 2988 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2949 2989 }
2950 2990 break;
2951 2991
2952 2992 default:
2953 2993 break;
2954 2994
2955 2995 }
2956 2996
2957 2997 if (hw->mac.type == ixgbe_mac_82599EB ||
2958 2998 hw->mac.type == ixgbe_mac_X540 ||
2959 2999 hw->mac.type == ixgbe_mac_X550 ||
2960 3000 hw->mac.type == ixgbe_mac_X550EM_x ||
2961 3001 hw->mac.type == ixgbe_mac_X550EM_a) {
2962 3002 /*
2963 3003 * Enable Virtualization and Replication.
2964 3004 */
3005 + vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3006 + ixgbe->rx_def_group = vtctl & IXGBE_VT_CTL_POOL_MASK;
3007 + vtctl |= IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2965 - vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2966 3009 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2967 3010
2968 3011 /*
3012 + * Enable VLAN filtering and switching (VFTA and VLVF).
3013 + */
3014 + vlnctl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3015 + vlnctl |= IXGBE_VLNCTRL_VFE;
3016 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctl);
3017 + ixgbe->vlft_enabled = B_TRUE;
3018 +
3019 + /*
2969 3020 * Enable receiving packets to all VFs
2970 3021 */
2971 3022 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2972 3023 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2973 3024 }
2974 3025 }
2975 3026
2976 3027 /*
2977 3028 * ixgbe_setup_rss_table - Setup RSS table
2978 3029 */
2979 3030 static void
2980 3031 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2981 3032 {
2982 3033 struct ixgbe_hw *hw = &ixgbe->hw;
2983 3034 uint32_t i, j;
2984 3035 uint32_t random;
2985 3036 uint32_t reta;
2986 3037 uint32_t ring_per_group;
2987 3038 uint32_t ring;
2988 3039 uint32_t table_size;
2989 3040 uint32_t index_mult;
2990 3041 uint32_t rxcsum;
2991 3042
2992 3043 /*
2993 3044 * Set multiplier for RETA setup and table size based on MAC type.
2994 3045 * RETA table sizes vary by model:
2995 3046 *
2996 3047 * 82598, 82599, X540: 128 table entries.
2997 3048 * X550: 512 table entries.
2998 3049 */
2999 3050 index_mult = 0x1;
3000 3051 table_size = 128;
3001 3052 switch (ixgbe->hw.mac.type) {
3002 3053 case ixgbe_mac_82598EB:
3003 3054 index_mult = 0x11;
3004 3055 break;
3005 3056 case ixgbe_mac_X550:
3006 3057 case ixgbe_mac_X550EM_x:
3007 3058 case ixgbe_mac_X550EM_a:
3008 3059 table_size = 512;
3009 3060 break;
3010 3061 default:
3011 3062 break;
3012 3063 }
3013 3064
3014 3065 /*
3015 3066 * Fill out RSS redirection table. The configuration of the indices is
3016 3067 * hardware-dependent.
3017 3068 *
3018 3069 * 82598: 8 bits wide containing two 4 bit RSS indices
3019 3070 * 82599, X540: 8 bits wide containing one 4 bit RSS index
3020 3071 * X550: 8 bits wide containing one 6 bit RSS index
3021 3072 */
3022 3073 reta = 0;
3023 3074 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3024 3075
3025 3076 for (i = 0, j = 0; i < table_size; i++, j++) {
3026 3077 if (j == ring_per_group) j = 0;
3027 3078
3028 3079 /*
3029 3080 * The low 8 bits are for hash value (n+0);
3030 3081 * The next 8 bits are for hash value (n+1), etc.
3031 3082 */
3032 3083 ring = (j * index_mult);
3033 3084 reta = reta >> 8;
3034 3085 reta = reta | (((uint32_t)ring) << 24);
3035 3086
3036 3087 if ((i & 3) == 3) {
3037 3088 /*
3038 3089 * The first 128 table entries are programmed into the
3039 3090 * RETA register, with any beyond that (e.g. on X550)
3040 3091 * into ERETA.
3041 3092 */
3042 3093 if (i < 128)
3043 3094 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3044 3095 else
3045 3096 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3046 3097 reta);
3047 3098 reta = 0;
3048 3099 }
3049 3100 }
3050 3101
3051 3102 /*
3052 3103 * Fill out hash function seeds with a random constant
3053 3104 */
3054 3105 for (i = 0; i < 10; i++) {
3055 3106 (void) random_get_pseudo_bytes((uint8_t *)&random,
3056 3107 sizeof (uint32_t));
3057 3108 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3058 3109 }
3059 3110
3060 3111 /*
3061 3112 * Disable Packet Checksum to enable RSS for multiple receive queues.
3062 3113 * It is an adapter hardware limitation that Packet Checksum is
3063 3114 * mutually exclusive with RSS.
3064 3115 */
3065 3116 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3066 3117 rxcsum |= IXGBE_RXCSUM_PCSD;
3067 3118 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3068 3119 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3069 3120 }
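The packing loop above shifts the 32-bit accumulator right by 8 and deposits each new entry in the top byte, so after four iterations entry n+0 ends up in the low byte of the register written. A standalone check for four consecutive rings with an index_mult of 1:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t reta = 0, i;

        for (i = 0; i < 4; i++) {
            reta >>= 8;
            reta |= i << 24;
        }
        assert(reta == 0x03020100); /* entry 0 in the low byte */
        return (0);
    }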
3070 3121
3071 3122 /*
3072 3123 * ixgbe_init_unicst - Initialize the unicast addresses.
3073 3124 */
3074 3125 static void
3075 3126 ixgbe_init_unicst(ixgbe_t *ixgbe)
3076 3127 {
3077 3128 struct ixgbe_hw *hw = &ixgbe->hw;
3078 3129 uint8_t *mac_addr;
3079 3130 int slot;
3080 3131 /*
3081 3132 * Here we should consider two situations:
3082 3133 *
3083 3134 * 1. Chipset is initialized at the first time,
3084 3135 * Clear all the multiple unicast addresses.
3085 3136 *
3086 3137 * 2. Chipset is reset
3087 3138 * Recover the multiple unicast addresses from the
3088 3139 * software data structure to the RAR registers.
3089 3140 */
3090 3141 if (!ixgbe->unicst_init) {
3091 3142 /*
3092 3143 * Initialize the multiple unicast addresses
3093 3144 */
3094 3145 ixgbe->unicst_total = hw->mac.num_rar_entries;
3095 3146 ixgbe->unicst_avail = ixgbe->unicst_total;
3096 3147 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3097 3148 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3098 3149 bzero(mac_addr, ETHERADDRL);
3099 3150 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
3100 3151 ixgbe->unicst_addr[slot].mac.set = 0;
3101 3152 }
3102 3153 ixgbe->unicst_init = B_TRUE;
3103 3154 } else {
3104 3155 /* Re-configure the RAR registers */
3105 3156 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3106 3157 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3107 3158 if (ixgbe->unicst_addr[slot].mac.set == 1) {
3108 3159 (void) ixgbe_set_rar(hw, slot, mac_addr,
3109 3160 ixgbe->unicst_addr[slot].mac.group_index,
3110 3161 IXGBE_RAH_AV);
3111 3162 } else {
3112 3163 bzero(mac_addr, ETHERADDRL);
3113 3164 (void) ixgbe_set_rar(hw, slot, mac_addr, 0, 0);
3114 3165 }
3115 3166 }
3116 3167 }
3117 3168 }
3118 3169
3119 3170 /*
3120 3171 * ixgbe_unicst_find - Find the slot for the specified unicast address
3121 3172 */
3122 3173 int
3123 3174 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3124 3175 {
3125 3176 int slot;
3126 3177
3127 3178 ASSERT(mutex_owned(&ixgbe->gen_lock));
3128 3179
3129 3180 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3130 3181 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3131 3182 mac_addr, ETHERADDRL) == 0)
3132 3183 return (slot);
3133 3184 }
3134 3185
3135 3186 return (-1);
3136 3187 }
3137 3188
3138 3189 /*
3190 + * ixgbe_init_vlan - Restore HW VLAN state to match SW state on restart.
3191 + */
3192 +static int
3193 +ixgbe_init_vlan(ixgbe_t *ixgbe)
3194 +{
3195 + /*
3196 + * The device is starting for the first time; there is nothing
3197 + * to do.
3198 + */
3199 + if (!ixgbe->vlft_init) {
3200 + ixgbe->vlft_init = B_TRUE;
3201 + return (IXGBE_SUCCESS);
3202 + }
3203 +
3204 + for (uint_t i = 0; i < ixgbe->num_rx_groups; i++) {
3205 + int ret;
3206 + boolean_t vlvf_bypass;
3207 + ixgbe_rx_group_t *rxg = &ixgbe->rx_groups[i];
3208 + struct ixgbe_hw *hw = &ixgbe->hw;
3209 +
3210 + if (rxg->aupe) {
3211 + uint32_t vml2flt;
3212 +
3213 + vml2flt = IXGBE_READ_REG(hw, IXGBE_VMOLR(rxg->index));
3214 + vml2flt |= IXGBE_VMOLR_AUPE;
3215 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rxg->index), vml2flt);
3216 + }
3217 +
3218 + vlvf_bypass = (rxg->index == ixgbe->rx_def_group);
3219 + for (ixgbe_vlan_t *vlp = list_head(&rxg->vlans); vlp != NULL;
3220 + vlp = list_next(&rxg->vlans, vlp)) {
3221 + ret = ixgbe_set_vfta(hw, vlp->ixvl_vid, rxg->index,
3222 + B_TRUE, vlvf_bypass);
3223 +
3224 + if (ret != IXGBE_SUCCESS) {
3225 + ixgbe_error(ixgbe, "Failed to program VFTA"
3226 + " for group %u, VID: %u, ret: %d.",
3227 + rxg->index, vlp->ixvl_vid, ret);
3228 + return (IXGBE_FAILURE);
3229 + }
3230 + }
3231 + }
3232 +
3233 + return (IXGBE_SUCCESS);
3234 +}
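ixgbe_set_vfta() ultimately sets a single bit in the 4096-bit VLAN Filter Table Array, which the shared code addresses as 128 32-bit registers: register index vid >> 5, bit index vid & 0x1f. A standalone check of that mapping for VID 100:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint16_t vid = 100;
        uint32_t regno = (vid >> 5) & 0x7f; /* 128 VFTA registers */
        uint32_t bit = vid & 0x1f;          /* 32 VIDs per register */

        assert(regno == 3 && bit == 4);     /* VID 100 -> VFTA[3] bit 4 */
        return (0);
    }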
3235 +
3236 +/*
3139 3237 * ixgbe_multicst_add - Add a multicst address.
3140 3238 */
3141 3239 int
3142 3240 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3143 3241 {
3144 3242 ASSERT(mutex_owned(&ixgbe->gen_lock));
3145 3243
3146 3244 if ((multiaddr[0] & 01) == 0) {
3147 3245 return (EINVAL);
3148 3246 }
3149 3247
3150 3248 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3151 3249 return (ENOENT);
3152 3250 }
3153 3251
3154 3252 bcopy(multiaddr,
3155 3253 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3156 3254 ixgbe->mcast_count++;
3157 3255
3158 3256 /*
3159 3257 * Update the multicast table in the hardware
3160 3258 */
3161 3259 ixgbe_setup_multicst(ixgbe);
3162 3260
3163 3261 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3164 3262 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3165 3263 return (EIO);
3166 3264 }
3167 3265
3168 3266 return (0);
3169 3267 }
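The (multiaddr[0] & 01) test above checks the I/G bit, the least-significant bit of the first address octet, which is set in every valid multicast MAC address. A standalone illustration with a hypothetical address pair:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint8_t ucast[6] = { 0x00, 0x25, 0x90, 0x12, 0x34, 0x56 };

        assert((mcast[0] & 0x01) != 0); /* group address: accepted */
        assert((ucast[0] & 0x01) == 0); /* unicast: rejected with EINVAL */
        return (0);
    }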
3170 3268
3171 3269 /*
3172 3270 * ixgbe_multicst_remove - Remove a multicst address.
3173 3271 */
3174 3272 int
3175 3273 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3176 3274 {
3177 3275 int i;
3178 3276
3179 3277 ASSERT(mutex_owned(&ixgbe->gen_lock));
3180 3278
3181 3279 for (i = 0; i < ixgbe->mcast_count; i++) {
3182 3280 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3183 3281 ETHERADDRL) == 0) {
3184 3282 for (i++; i < ixgbe->mcast_count; i++) {
3185 3283 ixgbe->mcast_table[i - 1] =
3186 3284 ixgbe->mcast_table[i];
3187 3285 }
3188 3286 ixgbe->mcast_count--;
3189 3287 break;
3190 3288 }
3191 3289 }
3192 3290
3193 3291 /*
3194 3292 * Update the multicast table in the hardware
3195 3293 */
3196 3294 ixgbe_setup_multicst(ixgbe);
3197 3295
3198 3296 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3199 3297 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3200 3298 return (EIO);
3201 3299 }
3202 3300
3203 3301 return (0);
3204 3302 }
3205 3303
3206 3304 /*
3207 3305 * ixgbe_setup_multicst - Setup multicast data structures.
3208 3306 *
3209 3307 * This routine initializes all of the multicast related structures
3210 3308 * and saves them in the hardware registers.
3211 3309 */
3212 3310 static void
3213 3311 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3214 3312 {
3215 3313 uint8_t *mc_addr_list;
3216 3314 uint32_t mc_addr_count;
3217 3315 struct ixgbe_hw *hw = &ixgbe->hw;
3218 3316
3219 3317 ASSERT(mutex_owned(&ixgbe->gen_lock));
3220 3318
3221 3319 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3222 3320
3223 3321 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3224 3322 mc_addr_count = ixgbe->mcast_count;
3225 3323
3226 3324 /*
3227 3325 * Update the multicast addresses to the MTA registers
3228 3326 */
3229 3327 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3230 3328 ixgbe_mc_table_itr, TRUE);
3231 3329 }
3232 3330
3233 3331 /*
3234 3332 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3235 3333 *
3236 3334 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3237 3335 * Different chipsets may have different allowed configuration of vmdq and rss.
3238 3336 */
3239 3337 static void
3240 3338 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3241 3339 {
3242 3340 struct ixgbe_hw *hw = &ixgbe->hw;
3243 3341 uint32_t ring_per_group;
3244 3342
3245 3343 switch (hw->mac.type) {
3246 3344 case ixgbe_mac_82598EB:
3247 3345 /*
3248 3346 * 82598 supports the following combination:
3249 3347 * vmdq no. x rss no.
3250 3348 * [5..16] x 1
3251 3349 * [1..4] x [1..16]
3252 3350 * However, 8 rss queues per pool (vmdq) are sufficient for
3253 3351 * most cases.
3254 3352 */
3255 3353 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3256 3354 if (ixgbe->num_rx_groups > 4) {
3257 3355 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3258 3356 } else {
3259 3357 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3260 3358 min(8, ring_per_group);
3261 3359 }
3262 3360
3263 3361 break;
3264 3362
3265 3363 case ixgbe_mac_82599EB:
3266 3364 case ixgbe_mac_X540:
3267 3365 case ixgbe_mac_X550:
3268 3366 case ixgbe_mac_X550EM_x:
3269 3367 case ixgbe_mac_X550EM_a:
3270 3368 /*
3271 3369 * 82599 supports the following combination:
3272 3370 * vmdq no. x rss no.
3273 3371 * [33..64] x [1..2]
3274 3372 * [2..32] x [1..4]
3275 3373 * 1 x [1..16]
3276 3374 * However, 8 rss queues per pool (vmdq) are sufficient for
3277 3375 * most cases.
3278 3376 *
3279 3377 * For now, treat X540 and X550 like the 82599.
3280 3378 */
3281 3379 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3282 3380 if (ixgbe->num_rx_groups == 1) {
3283 3381 ixgbe->num_rx_rings = min(8, ring_per_group);
3284 3382 } else if (ixgbe->num_rx_groups <= 32) {
3285 3383 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3286 3384 min(4, ring_per_group);
3287 3385 } else if (ixgbe->num_rx_groups <= 64) {
3288 3386 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3289 3387 min(2, ring_per_group);
3290 3388 }
3291 3389 break;
3292 3390
3293 3391 default:
3294 3392 break;
3295 3393 }
3296 3394
3297 3395 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3298 3396
3299 3397 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3300 3398 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3301 3399 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3302 3400 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3303 3401 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3304 3402 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3305 3403 } else {
3306 3404 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3307 3405 }
3308 3406
3309 3407 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3310 3408 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3311 3409 }
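
For illustration, a minimal stand-alone sketch (a hypothetical helper, not
driver code) of the classify-mode selection performed above; the
IXGBE_CLASSIFY_* values are the driver's own constants:

	/*
	 * Restate the final classify-mode decision: it depends only on
	 * the (already adjusted) group count and the rings per group.
	 */
	static int
	classify_mode_for(uint32_t num_rx_groups, uint32_t num_rx_rings)
	{
		uint32_t ring_per_group = num_rx_rings / num_rx_groups;

		if (num_rx_groups == 1 && ring_per_group == 1)
			return (IXGBE_CLASSIFY_NONE);	/* single ring */
		if (num_rx_groups != 1 && ring_per_group == 1)
			return (IXGBE_CLASSIFY_VMDQ);	/* pools only */
		if (num_rx_groups != 1 && ring_per_group != 1)
			return (IXGBE_CLASSIFY_VMDQ_RSS); /* pools + RSS */
		return (IXGBE_CLASSIFY_RSS);	/* one group, many rings */
	}

For example, 4 groups with 8 total rings gives ring_per_group = 2 and
IXGBE_CLASSIFY_VMDQ_RSS, while 1 group with 8 rings gives
IXGBE_CLASSIFY_RSS.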
3312 3410
3313 3411 /*
3314 3412 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3315 3413 *
3316 3414 * This routine gets user-configured values out of the configuration
3317 3415 * file ixgbe.conf.
3318 3416 *
3319 3417 * For each configurable value, there is a minimum, a maximum, and a
3320 3418 * default.
3321 3419  * If the user does not configure a value, use the default.
3322 3420  * If the user configures below the minimum, use the minimum.
3323 3421  * If the user configures above the maximum, use the maximum.
3324 3422 */
3325 3423 static void
3326 3424 ixgbe_get_conf(ixgbe_t *ixgbe)
3327 3425 {
3328 3426 struct ixgbe_hw *hw = &ixgbe->hw;
3329 3427 uint32_t flow_control;
3330 3428
3331 3429 /*
3332 3430 * ixgbe driver supports the following user configurations:
3333 3431 *
3334 3432 * Jumbo frame configuration:
3335 3433 * default_mtu
3336 3434 *
3337 3435 * Ethernet flow control configuration:
3338 3436 * flow_control
3339 3437 *
3340 3438  * Multiple ring configurations:
3341 3439 * tx_queue_number
3342 3440 * tx_ring_size
3343 3441 * rx_queue_number
3344 3442 * rx_ring_size
3345 3443 *
3346 3444 * Call ixgbe_get_prop() to get the value for a specific
3347 3445 * configuration parameter.
3348 3446 */
3349 3447
3350 3448 /*
3351 3449 * Jumbo frame configuration - max_frame_size controls host buffer
3352 3450  * allocation, so it includes the MTU, ethernet header, vlan tag and
3353 3451 * frame check sequence.
3354 3452 */
3355 3453 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3356 3454 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3357 3455
3358 3456 ixgbe->max_frame_size = ixgbe->default_mtu +
3359 3457 sizeof (struct ether_vlan_header) + ETHERFCSL;
3360 3458
3361 3459 /*
3362 3460 * Ethernet flow control configuration
3363 3461 */
3364 3462 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3365 3463 ixgbe_fc_none, 3, ixgbe_fc_none);
3366 3464 if (flow_control == 3)
3367 3465 flow_control = ixgbe_fc_default;
3368 3466
3369 3467 /*
3370 3468  * fc.requested_mode is what the user requests. After autoneg,
3371 3469 * fc.current_mode will be the flow_control mode that was negotiated.
3372 3470 */
3373 3471 hw->fc.requested_mode = flow_control;
3374 3472
3375 3473 /*
3376 3474  * Multiple ring configurations
3377 3475 */
3378 3476 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3379 3477 ixgbe->capab->min_tx_que_num,
3380 3478 ixgbe->capab->max_tx_que_num,
3381 3479 ixgbe->capab->def_tx_que_num);
3382 3480 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3383 3481 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3384 3482
3385 3483 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3386 3484 ixgbe->capab->min_rx_que_num,
3387 3485 ixgbe->capab->max_rx_que_num,
3388 3486 ixgbe->capab->def_rx_que_num);
3389 3487 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3390 3488 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3391 3489
3392 3490 /*
3393 3491 * Multiple groups configuration
3394 3492 */
3395 3493 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3396 3494 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3397 3495 ixgbe->capab->def_rx_grp_num);
3398 3496
3399 3497 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3400 3498 0, 1, DEFAULT_MR_ENABLE);
3401 3499
3402 3500 if (ixgbe->mr_enable == B_FALSE) {
3403 3501 ixgbe->num_tx_rings = 1;
3404 3502 ixgbe->num_rx_rings = 1;
3405 3503 ixgbe->num_rx_groups = 1;
3406 3504 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3407 3505 } else {
3408 3506 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3409 3507 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3410 3508 /*
3411 3509 * The combination of num_rx_rings and num_rx_groups
3412 3510  * may not be supported by the h/w. We need to adjust
3413 3511 * them to appropriate values.
3414 3512 */
3415 3513 ixgbe_setup_vmdq_rss_conf(ixgbe);
3416 3514 }
3417 3515
3418 3516 /*
3419 3517 * Tunable used to force an interrupt type. The only use is
3420 3518  * for testing the lesser interrupt types.
3421 3519 * 0 = don't force interrupt type
3422 3520 * 1 = force interrupt type MSI-X
3423 3521 * 2 = force interrupt type MSI
3424 3522 * 3 = force interrupt type Legacy
3425 3523 */
3426 3524 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3427 3525 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3428 3526
3429 3527 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3430 3528 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3431 3529 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3432 3530 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3433 3531 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3434 3532 0, 1, DEFAULT_LSO_ENABLE);
3435 3533 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3436 3534 0, 1, DEFAULT_LRO_ENABLE);
3437 3535 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3438 3536 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3439 3537 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3440 3538 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3441 3539
3442 3540 /* Head Write Back not recommended for 82599, X540 and X550 */
3443 3541 if (hw->mac.type == ixgbe_mac_82599EB ||
3444 3542 hw->mac.type == ixgbe_mac_X540 ||
3445 3543 hw->mac.type == ixgbe_mac_X550 ||
3446 3544 hw->mac.type == ixgbe_mac_X550EM_x ||
3447 3545 hw->mac.type == ixgbe_mac_X550EM_a) {
3448 3546 ixgbe->tx_head_wb_enable = B_FALSE;
3449 3547 }
3450 3548
3451 3549 /*
3452 3550 * ixgbe LSO needs the tx h/w checksum support.
3453 3551 * LSO will be disabled if tx h/w checksum is not
3454 3552 * enabled.
3455 3553 */
3456 3554 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3457 3555 ixgbe->lso_enable = B_FALSE;
3458 3556 }
3459 3557
3460 3558 /*
3461 3559 * ixgbe LRO needs the rx h/w checksum support.
3462 3560 * LRO will be disabled if rx h/w checksum is not
3463 3561 * enabled.
3464 3562 */
3465 3563 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3466 3564 ixgbe->lro_enable = B_FALSE;
3467 3565 }
3468 3566
3469 3567 /*
3470 3568 * ixgbe LRO only supported by 82599, X540 and X550
3471 3569 */
3472 3570 if (hw->mac.type == ixgbe_mac_82598EB) {
3473 3571 ixgbe->lro_enable = B_FALSE;
3474 3572 }
3475 3573 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3476 3574 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3477 3575 DEFAULT_TX_COPY_THRESHOLD);
3478 3576 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3479 3577 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3480 3578 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3481 3579 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3482 3580 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3483 3581 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3484 3582 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3485 3583 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3486 3584 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3487 3585
3488 3586 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3489 3587 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3490 3588 DEFAULT_RX_COPY_THRESHOLD);
3491 3589 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3492 3590 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3493 3591 DEFAULT_RX_LIMIT_PER_INTR);
3494 3592
3495 3593 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3496 3594 ixgbe->capab->min_intr_throttle,
3497 3595 ixgbe->capab->max_intr_throttle,
3498 3596 ixgbe->capab->def_intr_throttle);
3499 3597 /*
3500 3598  * 82599, X540 and X550 require the interrupt throttling rate to be
3501 3599  * a multiple of 8. This is enforced by the register definition.
3502 3600 */
3503 3601 if (hw->mac.type == ixgbe_mac_82599EB ||
3504 3602 hw->mac.type == ixgbe_mac_X540 ||
3505 3603 hw->mac.type == ixgbe_mac_X550 ||
3506 3604 hw->mac.type == ixgbe_mac_X550EM_x ||
3507 3605 hw->mac.type == ixgbe_mac_X550EM_a)
3508 3606 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3509 3607
3510 3608 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3511 3609 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3512 3610 }
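
As a usage sketch, a hypothetical ixgbe.conf fragment exercising the
tunables read above (the property names are the ones listed in the
comment at the top of this function; the values are examples only, not
recommendations):

	# ixgbe.conf - hypothetical example
	default_mtu = 9000;		# jumbo frames
	flow_control = 3;		# 3 maps to ixgbe_fc_default
	tx_queue_number = 8;
	rx_queue_number = 8;
	tx_ring_size = 1024;
	rx_ring_size = 1024;

Each value is clamped to its per-parameter [minimum, maximum] range by
ixgbe_get_prop() before use.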
3513 3611
3514 3612 static void
3515 3613 ixgbe_init_params(ixgbe_t *ixgbe)
3516 3614 {
3517 3615 struct ixgbe_hw *hw = &ixgbe->hw;
3518 3616 ixgbe_link_speed speeds_supported = 0;
3519 3617 boolean_t negotiate;
3520 3618
3521 3619 /*
3522 3620 * Get a list of speeds the adapter supports. If the hw struct hasn't
3523 3621 * been populated with this information yet, retrieve it from the
3524 3622 * adapter and save it to our own variable.
3525 3623 *
3526 3624 * On certain adapters, such as ones which use SFPs, the contents of
3527 3625 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3528 3626 * updated, so we must rely on calling ixgbe_get_link_capabilities()
3529 3627 * in order to ascertain the speeds which we are capable of supporting,
3530 3628 * and in the case of SFP-equipped adapters, which speed we are
3531 3629 * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3532 3630 * we'll go with a default list of speeds as a last resort.
3533 3631 */
3534 3632 speeds_supported = hw->phy.speeds_supported;
3535 3633
3536 3634 if (speeds_supported == 0) {
3537 3635 if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3538 3636 &negotiate) != IXGBE_SUCCESS) {
3539 3637 if (hw->mac.type == ixgbe_mac_82598EB) {
3540 3638 speeds_supported =
3541 3639 IXGBE_LINK_SPEED_82598_AUTONEG;
3542 3640 } else {
3543 3641 speeds_supported =
3544 3642 IXGBE_LINK_SPEED_82599_AUTONEG;
3545 3643 }
3546 3644 }
3547 3645 }
3548 3646 ixgbe->speeds_supported = speeds_supported;
3549 3647
3550 3648 /*
3551 3649 * By default, all supported speeds are enabled and advertised.
3552 3650 */
3553 3651 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3554 3652 ixgbe->param_en_10000fdx_cap = 1;
3555 3653 ixgbe->param_adv_10000fdx_cap = 1;
3556 3654 } else {
3557 3655 ixgbe->param_en_10000fdx_cap = 0;
3558 3656 ixgbe->param_adv_10000fdx_cap = 0;
3559 3657 }
3560 3658
3561 3659 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3562 3660 ixgbe->param_en_5000fdx_cap = 1;
3563 3661 ixgbe->param_adv_5000fdx_cap = 1;
3564 3662 } else {
3565 3663 ixgbe->param_en_5000fdx_cap = 0;
3566 3664 ixgbe->param_adv_5000fdx_cap = 0;
3567 3665 }
3568 3666
3569 3667 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3570 3668 ixgbe->param_en_2500fdx_cap = 1;
3571 3669 ixgbe->param_adv_2500fdx_cap = 1;
3572 3670 } else {
3573 3671 ixgbe->param_en_2500fdx_cap = 0;
3574 3672 ixgbe->param_adv_2500fdx_cap = 0;
3575 3673 }
3576 3674
3577 3675 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3578 3676 ixgbe->param_en_1000fdx_cap = 1;
3579 3677 ixgbe->param_adv_1000fdx_cap = 1;
3580 3678 } else {
3581 3679 ixgbe->param_en_1000fdx_cap = 0;
3582 3680 ixgbe->param_adv_1000fdx_cap = 0;
3583 3681 }
3584 3682
3585 3683 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3586 3684 ixgbe->param_en_100fdx_cap = 1;
3587 3685 ixgbe->param_adv_100fdx_cap = 1;
3588 3686 } else {
3589 3687 ixgbe->param_en_100fdx_cap = 0;
3590 3688 ixgbe->param_adv_100fdx_cap = 0;
3591 3689 }
3592 3690
3593 3691 ixgbe->param_pause_cap = 1;
3594 3692 ixgbe->param_asym_pause_cap = 1;
3595 3693 ixgbe->param_rem_fault = 0;
3596 3694
3597 3695 ixgbe->param_adv_autoneg_cap = 1;
3598 3696 ixgbe->param_adv_pause_cap = 1;
3599 3697 ixgbe->param_adv_asym_pause_cap = 1;
3600 3698 ixgbe->param_adv_rem_fault = 0;
3601 3699
3602 3700 ixgbe->param_lp_10000fdx_cap = 0;
3603 3701 ixgbe->param_lp_5000fdx_cap = 0;
3604 3702 ixgbe->param_lp_2500fdx_cap = 0;
3605 3703 ixgbe->param_lp_1000fdx_cap = 0;
3606 3704 ixgbe->param_lp_100fdx_cap = 0;
3607 3705 ixgbe->param_lp_autoneg_cap = 0;
3608 3706 ixgbe->param_lp_pause_cap = 0;
3609 3707 ixgbe->param_lp_asym_pause_cap = 0;
3610 3708 ixgbe->param_lp_rem_fault = 0;
3611 3709 }
3612 3710
3613 3711 /*
3614 3712 * ixgbe_get_prop - Get a property value out of the configuration file
3615 3713 * ixgbe.conf.
3616 3714 *
3617 3715 * Caller provides the name of the property, a default value, a minimum
3618 3716 * value, and a maximum value.
3619 3717 *
3620 3718  * Returns the configured value of the property, with the default, minimum
3621 3719  * and maximum properly applied.
3622 3720 */
3623 3721 static int
3624 3722 ixgbe_get_prop(ixgbe_t *ixgbe,
3625 3723 char *propname, /* name of the property */
3626 3724 int minval, /* minimum acceptable value */
3627 3725 int maxval, /* maximum acceptable value */
3628 3726 int defval) /* default value */
3629 3727 {
3630 3728 int value;
3631 3729
3632 3730 /*
3633 3731 * Call ddi_prop_get_int() to read the conf settings
3634 3732 */
3635 3733 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3636 3734 DDI_PROP_DONTPASS, propname, defval);
3637 3735 if (value > maxval)
3638 3736 value = maxval;
3639 3737
3640 3738 if (value < minval)
3641 3739 value = minval;
3642 3740
3643 3741 return (value);
3644 3742 }
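
A worked example of the clamping (hypothetical numbers): with minval =
1500, maxval = 15500 and defval = 1500, an absent property yields 1500,
a configured 16000 is clamped down to 15500, and a configured 100 is
raised to 1500. Note that the maximum is applied before the minimum, so
if a caller ever passed minval > maxval, the minimum would win.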
3645 3743
3646 3744 /*
3647 3745  * ixgbe_driver_setup_link - Use the link properties to set up the link.
3648 3746 */
3649 3747 int
3650 3748 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3651 3749 {
3652 3750 struct ixgbe_hw *hw = &ixgbe->hw;
3653 3751 ixgbe_link_speed advertised = 0;
3654 3752
3655 3753 /*
3656 3754 * Assemble a list of enabled speeds to auto-negotiate with.
3657 3755 */
3658 3756 if (ixgbe->param_en_10000fdx_cap == 1)
3659 3757 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3660 3758
3661 3759 if (ixgbe->param_en_5000fdx_cap == 1)
3662 3760 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
3663 3761
3664 3762 if (ixgbe->param_en_2500fdx_cap == 1)
3665 3763 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
3666 3764
3667 3765 if (ixgbe->param_en_1000fdx_cap == 1)
3668 3766 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3669 3767
3670 3768 if (ixgbe->param_en_100fdx_cap == 1)
3671 3769 advertised |= IXGBE_LINK_SPEED_100_FULL;
3672 3770
3673 3771 /*
3674 3772 * As a last resort, autoneg with a default list of speeds.
3675 3773 */
3676 3774 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) {
3677 3775 ixgbe_notice(ixgbe, "Invalid link settings. Setting link "
3678 3776 "to autonegotiate with full capabilities.");
3679 3777
3680 3778 if (hw->mac.type == ixgbe_mac_82598EB)
3681 3779 advertised = IXGBE_LINK_SPEED_82598_AUTONEG;
3682 3780 else
3683 3781 advertised = IXGBE_LINK_SPEED_82599_AUTONEG;
3684 3782 }
3685 3783
3686 3784 if (setup_hw) {
3687 3785 if (ixgbe_setup_link(&ixgbe->hw, advertised,
3688 3786 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3689 3787 ixgbe_notice(ixgbe, "Setup link failed on this "
3690 3788 "device.");
3691 3789 return (IXGBE_FAILURE);
3692 3790 }
3693 3791 }
3694 3792
3695 3793 return (IXGBE_SUCCESS);
3696 3794 }
3697 3795
3698 3796 /*
3699 3797 * ixgbe_driver_link_check - Link status processing.
3700 3798 *
3701 3799 * This function can be called in both kernel context and interrupt context
3702 3800 */
3703 3801 static void
3704 3802 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3705 3803 {
3706 3804 struct ixgbe_hw *hw = &ixgbe->hw;
3707 3805 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3708 3806 boolean_t link_up = B_FALSE;
3709 3807 boolean_t link_changed = B_FALSE;
3710 3808
3711 3809 ASSERT(mutex_owned(&ixgbe->gen_lock));
3712 3810
3713 3811 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3714 3812 if (link_up) {
3715 3813 ixgbe->link_check_complete = B_TRUE;
3716 3814
3717 3815 /* Link is up, enable flow control settings */
3718 3816 (void) ixgbe_fc_enable(hw);
3719 3817
3720 3818 /*
3721 3819  * The link is up; check whether it was marked as down earlier
3722 3820 */
3723 3821 if (ixgbe->link_state != LINK_STATE_UP) {
3724 3822 switch (speed) {
3725 3823 case IXGBE_LINK_SPEED_10GB_FULL:
3726 3824 ixgbe->link_speed = SPEED_10GB;
3727 3825 break;
3728 3826 case IXGBE_LINK_SPEED_5GB_FULL:
3729 3827 ixgbe->link_speed = SPEED_5GB;
3730 3828 break;
3731 3829 case IXGBE_LINK_SPEED_2_5GB_FULL:
3732 3830 ixgbe->link_speed = SPEED_2_5GB;
3733 3831 break;
3734 3832 case IXGBE_LINK_SPEED_1GB_FULL:
3735 3833 ixgbe->link_speed = SPEED_1GB;
3736 3834 break;
3737 3835 case IXGBE_LINK_SPEED_100_FULL:
3738 3836 ixgbe->link_speed = SPEED_100;
3739 3837 }
3740 3838 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3741 3839 ixgbe->link_state = LINK_STATE_UP;
3742 3840 link_changed = B_TRUE;
3743 3841 }
3744 3842 } else {
3745 3843 if (ixgbe->link_check_complete == B_TRUE ||
3746 3844 (ixgbe->link_check_complete == B_FALSE &&
3747 3845 gethrtime() >= ixgbe->link_check_hrtime)) {
3748 3846 /*
3749 3847 * The link is really down
3750 3848 */
3751 3849 ixgbe->link_check_complete = B_TRUE;
3752 3850
3753 3851 if (ixgbe->link_state != LINK_STATE_DOWN) {
3754 3852 ixgbe->link_speed = 0;
3755 3853 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3756 3854 ixgbe->link_state = LINK_STATE_DOWN;
3757 3855 link_changed = B_TRUE;
3758 3856 }
3759 3857 }
3760 3858 }
3761 3859
3762 3860 /*
3763 3861  * If we are in an interrupt context, we need to re-enable the
3764 3862  * interrupt, which was automasked.
3765 3863 */
3766 3864 if (servicing_interrupt() != 0) {
3767 3865 ixgbe->eims |= IXGBE_EICR_LSC;
3768 3866 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3769 3867 }
3770 3868
3771 3869 if (link_changed) {
3772 3870 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3773 3871 }
3774 3872 }
3775 3873
3776 3874 /*
3777 3875 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3778 3876 */
3779 3877 static void
3780 3878 ixgbe_sfp_check(void *arg)
3781 3879 {
3782 3880 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3783 3881 uint32_t eicr = ixgbe->eicr;
3784 3882 struct ixgbe_hw *hw = &ixgbe->hw;
3785 3883
3786 3884 mutex_enter(&ixgbe->gen_lock);
3787 3885 (void) hw->phy.ops.identify_sfp(hw);
3788 3886 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3789 3887 /* clear the interrupt */
3790 3888 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3791 3889
3792 3890 /* if link up, do multispeed fiber setup */
3793 3891 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3794 3892 B_TRUE);
3795 3893 ixgbe_driver_link_check(ixgbe);
3796 3894 ixgbe_get_hw_state(ixgbe);
3797 3895 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3798 3896 /* clear the interrupt */
3799 3897 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3800 3898
3801 3899 /* if link up, do sfp module setup */
3802 3900 (void) hw->mac.ops.setup_sfp(hw);
3803 3901
3804 3902 /* do multispeed fiber setup */
3805 3903 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3806 3904 B_TRUE);
3807 3905 ixgbe_driver_link_check(ixgbe);
3808 3906 ixgbe_get_hw_state(ixgbe);
3809 3907 }
3810 3908 mutex_exit(&ixgbe->gen_lock);
3811 3909
3812 3910 /*
3813 3911 * We need to fully re-check the link later.
3814 3912 */
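	/*
	 * IXGBE_LINK_UP_TIME appears to be expressed in tenths of a
	 * second; multiplying by 100000000 converts it to nanoseconds,
	 * the unit gethrtime() reports.
	 */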
3815 3913 ixgbe->link_check_complete = B_FALSE;
3816 3914 ixgbe->link_check_hrtime = gethrtime() +
3817 3915 (IXGBE_LINK_UP_TIME * 100000000ULL);
3818 3916 }
3819 3917
3820 3918 /*
3821 3919 * ixgbe_overtemp_check - overtemp module processing done in taskq
3822 3920 *
3823 3921  * This routine will only be called on adapters with a temperature sensor.
3824 3922  * The indication of over-temperature can be either the SDP0 interrupt or
3825 3923  * the link status change interrupt.
3826 3924 */
3827 3925 static void
3828 3926 ixgbe_overtemp_check(void *arg)
3829 3927 {
3830 3928 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3831 3929 struct ixgbe_hw *hw = &ixgbe->hw;
3832 3930 uint32_t eicr = ixgbe->eicr;
3833 3931 ixgbe_link_speed speed;
3834 3932 boolean_t link_up;
3835 3933
3836 3934 mutex_enter(&ixgbe->gen_lock);
3837 3935
3838 3936 /* make sure we know current state of link */
3839 3937 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3840 3938
3841 3939 /* check over-temp condition */
3842 3940 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3843 3941 (eicr & IXGBE_EICR_LSC)) {
3844 3942 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3845 3943 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3846 3944
3847 3945 /*
3848 3946 * Disable the adapter interrupts
3849 3947 */
3850 3948 ixgbe_disable_adapter_interrupts(ixgbe);
3851 3949
3852 3950 /*
3853 3951 * Disable Rx/Tx units
3854 3952 */
3855 3953 (void) ixgbe_stop_adapter(hw);
3856 3954
3857 3955 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3858 3956 ixgbe_error(ixgbe,
3859 3957 "Problem: Network adapter has been stopped "
3860 3958 "because it has overheated");
3861 3959 ixgbe_error(ixgbe,
3862 3960 "Action: Restart the computer. "
3863 3961 "If the problem persists, power off the system "
3864 3962 "and replace the adapter");
3865 3963 }
3866 3964 }
3867 3965
3868 3966 /* write to clear the interrupt */
3869 3967 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3870 3968
3871 3969 mutex_exit(&ixgbe->gen_lock);
3872 3970 }
3873 3971
3874 3972 /*
3875 3973 * ixgbe_phy_check - taskq to process interrupts from an external PHY
3876 3974 *
3877 3975 * This routine will only be called on adapters with external PHYs
3878 3976 * (such as X550) that may be trying to raise our attention to some event.
3879 3977 * Currently, this is limited to claiming PHY overtemperature and link status
3880 3978  * change (LSC) events; however, this may expand to include other things in
3881 3979 * future adapters.
3882 3980 */
3883 3981 static void
3884 3982 ixgbe_phy_check(void *arg)
3885 3983 {
3886 3984 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3887 3985 struct ixgbe_hw *hw = &ixgbe->hw;
3888 3986 int rv;
3889 3987
3890 3988 mutex_enter(&ixgbe->gen_lock);
3891 3989
3892 3990 /*
3893 3991 * X550 baseT PHY overtemp and LSC events are handled here.
3894 3992 *
3895 3993 * If an overtemp event occurs, it will be reflected in the
3896 3994 * return value of phy.ops.handle_lasi() and the common code will
3897 3995 * automatically power off the baseT PHY. This is our cue to trigger
3898 3996 * an FMA event.
3899 3997 *
3900 3998 * If a link status change event occurs, phy.ops.handle_lasi() will
3901 3999 * automatically initiate a link setup between the integrated KR PHY
3902 4000 * and the external X557 PHY to ensure that the link speed between
3903 4001 * them matches the link speed of the baseT link.
3904 4002 */
3905 4003 rv = ixgbe_handle_lasi(hw);
3906 4004
3907 4005 if (rv == IXGBE_ERR_OVERTEMP) {
3908 4006 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3909 4007
3910 4008 /*
3911 4009 * Disable the adapter interrupts
3912 4010 */
3913 4011 ixgbe_disable_adapter_interrupts(ixgbe);
3914 4012
3915 4013 /*
3916 4014 * Disable Rx/Tx units
3917 4015 */
3918 4016 (void) ixgbe_stop_adapter(hw);
3919 4017
3920 4018 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3921 4019 ixgbe_error(ixgbe,
3922 4020 "Problem: Network adapter has been stopped due to an "
3923 4021 "overtemperature event being detected.");
3924 4022 ixgbe_error(ixgbe,
3925 4023 "Action: Shut down or restart the computer. If the issue "
3926 4024 "persists, please take action in accordance with the "
3927 4025 "recommendations from your system vendor.");
3928 4026 }
3929 4027
3930 4028 mutex_exit(&ixgbe->gen_lock);
3931 4029 }
3932 4030
3933 4031 /*
3934 4032 * ixgbe_link_timer - timer for link status detection
3935 4033 */
3936 4034 static void
3937 4035 ixgbe_link_timer(void *arg)
3938 4036 {
3939 4037 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3940 4038
3941 4039 mutex_enter(&ixgbe->gen_lock);
3942 4040 ixgbe_driver_link_check(ixgbe);
3943 4041 mutex_exit(&ixgbe->gen_lock);
3944 4042 }
3945 4043
3946 4044 /*
3947 4045 * ixgbe_local_timer - Driver watchdog function.
3948 4046 *
3949 4047  * This function handles the transmit stall check and error recovery.
3950 4048 */
3951 4049 static void
3952 4050 ixgbe_local_timer(void *arg)
3953 4051 {
3954 4052 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3955 4053
3956 4054 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3957 4055 goto out;
3958 4056
3959 4057 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3960 4058 ixgbe->reset_count++;
3961 4059 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3962 4060 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3963 4061 goto out;
3964 4062 }
3965 4063
3966 4064 if (ixgbe_stall_check(ixgbe)) {
3967 4065 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3968 4066 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3969 4067
3970 4068 ixgbe->reset_count++;
3971 4069 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3972 4070 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3973 4071 }
3974 4072
3975 4073 out:
3976 4074 ixgbe_restart_watchdog_timer(ixgbe);
3977 4075 }
3978 4076
3979 4077 /*
3980 4078 * ixgbe_stall_check - Check for transmit stall.
3981 4079 *
3982 4080 * This function checks if the adapter is stalled (in transmit).
3983 4081 *
3984 4082 * It is called each time the watchdog timeout is invoked.
3985 4083 * If the transmit descriptor reclaim continuously fails,
3986 4084 * the watchdog value will increment by 1. If the watchdog
3987 4085  * value exceeds the threshold, the ixgbe is assumed to have
3988 4086  * stalled and needs to be reset.
3989 4087 */
3990 4088 static boolean_t
3991 4089 ixgbe_stall_check(ixgbe_t *ixgbe)
3992 4090 {
3993 4091 ixgbe_tx_ring_t *tx_ring;
3994 4092 boolean_t result;
3995 4093 int i;
3996 4094
3997 4095 if (ixgbe->link_state != LINK_STATE_UP)
3998 4096 return (B_FALSE);
3999 4097
4000 4098 /*
4001 4099 * If any tx ring is stalled, we'll reset the chipset
4002 4100 */
4003 4101 result = B_FALSE;
4004 4102 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4005 4103 tx_ring = &ixgbe->tx_rings[i];
4006 4104 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
4007 4105 tx_ring->tx_recycle(tx_ring);
4008 4106 }
4009 4107
4010 4108 if (tx_ring->recycle_fail > 0)
4011 4109 tx_ring->stall_watchdog++;
4012 4110 else
4013 4111 tx_ring->stall_watchdog = 0;
4014 4112
4015 4113 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
4016 4114 result = B_TRUE;
4017 4115 break;
4018 4116 }
4019 4117 }
4020 4118
4021 4119 if (result) {
4022 4120 tx_ring->stall_watchdog = 0;
4023 4121 tx_ring->recycle_fail = 0;
4024 4122 }
4025 4123
4026 4124 return (result);
4027 4125 }
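
A worked example of the counter above: the watchdog fires roughly once
per second (see ixgbe_arm_watchdog_timer()), and every pass in which a
ring still reports recycle_fail > 0 bumps that ring's stall_watchdog.
Only after STALL_WATCHDOG_TIMEOUT consecutive failing passes, i.e. that
many seconds in a row with no descriptors reclaimed, is a stall declared
and the chip reset by ixgbe_local_timer(). A pass with recycle_fail == 0
zeroes the counter, so transient congestion does not trigger a reset.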
4028 4126
4029 4127
4030 4128 /*
4031 4129 * is_valid_mac_addr - Check if the mac address is valid.
4032 4130 */
4033 4131 static boolean_t
4034 4132 is_valid_mac_addr(uint8_t *mac_addr)
4035 4133 {
4036 4134 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4037 4135 const uint8_t addr_test2[6] =
4038 4136 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4039 4137
4040 4138 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4041 4139 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4042 4140 return (B_FALSE);
4043 4141
4044 4142 return (B_TRUE);
4045 4143 }
4046 4144
4047 4145 static boolean_t
4048 4146 ixgbe_find_mac_address(ixgbe_t *ixgbe)
4049 4147 {
4050 4148 #ifdef __sparc
4051 4149 struct ixgbe_hw *hw = &ixgbe->hw;
4052 4150 uchar_t *bytes;
4053 4151 struct ether_addr sysaddr;
4054 4152 uint_t nelts;
4055 4153 int err;
4056 4154 boolean_t found = B_FALSE;
4057 4155
4058 4156 /*
4059 4157 * The "vendor's factory-set address" may already have
4060 4158 * been extracted from the chip, but if the property
4061 4159 * "local-mac-address" is set we use that instead.
4062 4160 *
4063 4161 * We check whether it looks like an array of 6
4064 4162 * bytes (which it should, if OBP set it). If we can't
4065 4163 * make sense of it this way, we'll ignore it.
4066 4164 */
4067 4165 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
4068 4166 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
4069 4167 if (err == DDI_PROP_SUCCESS) {
4070 4168 if (nelts == ETHERADDRL) {
4071 4169 while (nelts--)
4072 4170 hw->mac.addr[nelts] = bytes[nelts];
4073 4171 found = B_TRUE;
4074 4172 }
4075 4173 ddi_prop_free(bytes);
4076 4174 }
4077 4175
4078 4176 /*
4079 4177 * Look up the OBP property "local-mac-address?". If the user has set
4080 4178 * 'local-mac-address? = false', use "the system address" instead.
4081 4179 */
4082 4180 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
4083 4181 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
4084 4182 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
4085 4183 if (localetheraddr(NULL, &sysaddr) != 0) {
4086 4184 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
4087 4185 found = B_TRUE;
4088 4186 }
4089 4187 }
4090 4188 ddi_prop_free(bytes);
4091 4189 }
4092 4190
4093 4191 /*
4094 4192 * Finally(!), if there's a valid "mac-address" property (created
4095 4193 * if we netbooted from this interface), we must use this instead
4096 4194 * of any of the above to ensure that the NFS/install server doesn't
4097 4195 * get confused by the address changing as illumos takes over!
4098 4196 */
4099 4197 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
4100 4198 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
4101 4199 if (err == DDI_PROP_SUCCESS) {
4102 4200 if (nelts == ETHERADDRL) {
4103 4201 while (nelts--)
4104 4202 hw->mac.addr[nelts] = bytes[nelts];
4105 4203 found = B_TRUE;
4106 4204 }
4107 4205 ddi_prop_free(bytes);
4108 4206 }
4109 4207
4110 4208 if (found) {
4111 4209 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
4112 4210 return (B_TRUE);
4113 4211 }
4114 4212 #else
4115 4213 _NOTE(ARGUNUSED(ixgbe));
4116 4214 #endif
4117 4215
4118 4216 return (B_TRUE);
4119 4217 }
4120 4218
4121 4219 #pragma inline(ixgbe_arm_watchdog_timer)
4122 4220 static void
4123 4221 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
4124 4222 {
4125 4223 /*
4126 4224  * Arm the watchdog timer to fire in one second
4127 4225 */
4128 4226 ixgbe->watchdog_tid =
4129 4227 timeout(ixgbe_local_timer,
4130 4228 (void *)ixgbe, 1 * drv_usectohz(1000000));
4131 4229
4132 4230 }
4133 4231
4134 4232 /*
4135 4233 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
4136 4234 */
4137 4235 void
4138 4236 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
4139 4237 {
4140 4238 mutex_enter(&ixgbe->watchdog_lock);
4141 4239
4142 4240 if (!ixgbe->watchdog_enable) {
4143 4241 ixgbe->watchdog_enable = B_TRUE;
4144 4242 ixgbe->watchdog_start = B_TRUE;
4145 4243 ixgbe_arm_watchdog_timer(ixgbe);
4146 4244 }
4147 4245
4148 4246 mutex_exit(&ixgbe->watchdog_lock);
4149 4247 }
4150 4248
4151 4249 /*
4152 4250 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
4153 4251 */
4154 4252 void
4155 4253 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
4156 4254 {
4157 4255 timeout_id_t tid;
4158 4256
4159 4257 mutex_enter(&ixgbe->watchdog_lock);
4160 4258
4161 4259 ixgbe->watchdog_enable = B_FALSE;
4162 4260 ixgbe->watchdog_start = B_FALSE;
4163 4261 tid = ixgbe->watchdog_tid;
4164 4262 ixgbe->watchdog_tid = 0;
4165 4263
4166 4264 mutex_exit(&ixgbe->watchdog_lock);
4167 4265
4168 4266 if (tid != 0)
4169 4267 (void) untimeout(tid);
4170 4268 }
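
/*
 * Note the pattern above (and in ixgbe_stop_watchdog_timer() below):
 * the timeout id is captured and cleared while watchdog_lock is held,
 * but untimeout(9F) is only called after the lock has been dropped.
 * untimeout() waits for a concurrently executing ixgbe_local_timer()
 * to complete, and that handler re-acquires watchdog_lock through
 * ixgbe_restart_watchdog_timer(), so cancelling the timeout with the
 * lock still held could deadlock.
 */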
4171 4269
4172 4270 /*
4173 4271 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
4174 4272 */
4175 4273 void
4176 4274 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
4177 4275 {
4178 4276 mutex_enter(&ixgbe->watchdog_lock);
4179 4277
4180 4278 if (ixgbe->watchdog_enable) {
4181 4279 if (!ixgbe->watchdog_start) {
4182 4280 ixgbe->watchdog_start = B_TRUE;
4183 4281 ixgbe_arm_watchdog_timer(ixgbe);
4184 4282 }
4185 4283 }
4186 4284
4187 4285 mutex_exit(&ixgbe->watchdog_lock);
4188 4286 }
4189 4287
4190 4288 /*
4191 4289 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
4192 4290 */
4193 4291 static void
4194 4292 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
4195 4293 {
4196 4294 mutex_enter(&ixgbe->watchdog_lock);
4197 4295
4198 4296 if (ixgbe->watchdog_start)
4199 4297 ixgbe_arm_watchdog_timer(ixgbe);
4200 4298
4201 4299 mutex_exit(&ixgbe->watchdog_lock);
4202 4300 }
4203 4301
4204 4302 /*
4205 4303 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
4206 4304 */
4207 4305 void
4208 4306 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
4209 4307 {
4210 4308 timeout_id_t tid;
4211 4309
4212 4310 mutex_enter(&ixgbe->watchdog_lock);
4213 4311
4214 4312 ixgbe->watchdog_start = B_FALSE;
4215 4313 tid = ixgbe->watchdog_tid;
4216 4314 ixgbe->watchdog_tid = 0;
4217 4315
4218 4316 mutex_exit(&ixgbe->watchdog_lock);
4219 4317
4220 4318 if (tid != 0)
4221 4319 (void) untimeout(tid);
4222 4320 }
4223 4321
4224 4322 /*
4225 4323 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
4226 4324 */
4227 4325 static void
4228 4326 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
4229 4327 {
4230 4328 struct ixgbe_hw *hw = &ixgbe->hw;
4231 4329
4232 4330 /*
4233 4331 * mask all interrupts off
4234 4332 */
4235 4333 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
4236 4334
4237 4335 /*
4238 4336 * for MSI-X, also disable autoclear
4239 4337 */
4240 4338 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4241 4339 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
4242 4340 }
4243 4341
4244 4342 IXGBE_WRITE_FLUSH(hw);
4245 4343 }
4246 4344
4247 4345 /*
4248 4346 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
4249 4347 */
4250 4348 static void
4251 4349 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
4252 4350 {
4253 4351 struct ixgbe_hw *hw = &ixgbe->hw;
4254 4352 uint32_t eiac, eiam;
4255 4353 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4256 4354
4257 4355 /* interrupt types to enable */
4258 4356 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
4259 4357 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
4260 4358 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
4261 4359
4262 4360 /* enable automask on "other" causes that this adapter can generate */
4263 4361 eiam = ixgbe->capab->other_intr;
4264 4362
4265 4363 /*
4266 4364 * msi-x mode
4267 4365 */
4268 4366 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4269 4367 /* enable autoclear but not on bits 29:20 */
4270 4368 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
4271 4369
4272 4370 /* general purpose interrupt enable */
4273 4371 gpie |= (IXGBE_GPIE_MSIX_MODE
4274 4372 | IXGBE_GPIE_PBA_SUPPORT
4275 4373 | IXGBE_GPIE_OCD
4276 4374 | IXGBE_GPIE_EIAME);
4277 4375 /*
4278 4376 * non-msi-x mode
4279 4377 */
4280 4378 } else {
4281 4379
4282 4380 /* disable autoclear, leave gpie at default */
4283 4381 eiac = 0;
4284 4382
4285 4383 /*
4286 4384 * General purpose interrupt enable.
4287 4385 * For 82599, X540 and X550, extended interrupt
4288 4386 * automask enable only in MSI or MSI-X mode
4289 4387 */
4290 4388 if ((hw->mac.type == ixgbe_mac_82598EB) ||
4291 4389 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
4292 4390 gpie |= IXGBE_GPIE_EIAME;
4293 4391 }
4294 4392 }
4295 4393
4296 4394 /* Enable specific "other" interrupt types */
4297 4395 switch (hw->mac.type) {
4298 4396 case ixgbe_mac_82598EB:
4299 4397 gpie |= ixgbe->capab->other_gpie;
4300 4398 break;
4301 4399
4302 4400 case ixgbe_mac_82599EB:
4303 4401 case ixgbe_mac_X540:
4304 4402 case ixgbe_mac_X550:
4305 4403 case ixgbe_mac_X550EM_x:
4306 4404 case ixgbe_mac_X550EM_a:
4307 4405 gpie |= ixgbe->capab->other_gpie;
4308 4406
4309 4407 /* Enable RSC Delay 8us when LRO enabled */
4310 4408 if (ixgbe->lro_enable) {
4311 4409 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
4312 4410 }
4313 4411 break;
4314 4412
4315 4413 default:
4316 4414 break;
4317 4415 }
4318 4416
4319 4417 /* write to interrupt control registers */
4320 4418 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4321 4419 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
4322 4420 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
4323 4421 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4324 4422 IXGBE_WRITE_FLUSH(hw);
4325 4423 }
4326 4424
4327 4425 /*
4328 4426 * ixgbe_loopback_ioctl - Loopback support.
4329 4427 */
4330 4428 enum ioc_reply
4331 4429 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
4332 4430 {
4333 4431 lb_info_sz_t *lbsp;
4334 4432 lb_property_t *lbpp;
4335 4433 uint32_t *lbmp;
4336 4434 uint32_t size;
4337 4435 uint32_t value;
4338 4436
4339 4437 if (mp->b_cont == NULL)
4340 4438 return (IOC_INVAL);
4341 4439
4342 4440 switch (iocp->ioc_cmd) {
4343 4441 default:
4344 4442 return (IOC_INVAL);
4345 4443
4346 4444 case LB_GET_INFO_SIZE:
4347 4445 size = sizeof (lb_info_sz_t);
4348 4446 if (iocp->ioc_count != size)
4349 4447 return (IOC_INVAL);
4350 4448
4351 4449 value = sizeof (lb_normal);
4352 4450 value += sizeof (lb_mac);
4353 4451 value += sizeof (lb_external);
4354 4452
4355 4453 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4356 4454 *lbsp = value;
4357 4455 break;
4358 4456
4359 4457 case LB_GET_INFO:
4360 4458 value = sizeof (lb_normal);
4361 4459 value += sizeof (lb_mac);
4362 4460 value += sizeof (lb_external);
4363 4461
4364 4462 size = value;
4365 4463 if (iocp->ioc_count != size)
4366 4464 return (IOC_INVAL);
4367 4465
4368 4466 value = 0;
4369 4467 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4370 4468
4371 4469 lbpp[value++] = lb_normal;
4372 4470 lbpp[value++] = lb_mac;
4373 4471 lbpp[value++] = lb_external;
4374 4472 break;
4375 4473
4376 4474 case LB_GET_MODE:
4377 4475 size = sizeof (uint32_t);
4378 4476 if (iocp->ioc_count != size)
4379 4477 return (IOC_INVAL);
4380 4478
4381 4479 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4382 4480 *lbmp = ixgbe->loopback_mode;
4383 4481 break;
4384 4482
4385 4483 case LB_SET_MODE:
4386 4484 size = 0;
4387 4485 if (iocp->ioc_count != sizeof (uint32_t))
4388 4486 return (IOC_INVAL);
4389 4487
4390 4488 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4391 4489 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
4392 4490 return (IOC_INVAL);
4393 4491 break;
4394 4492 }
4395 4493
4396 4494 iocp->ioc_count = size;
4397 4495 iocp->ioc_error = 0;
4398 4496
4399 4497 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4400 4498 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4401 4499 return (IOC_INVAL);
4402 4500 }
4403 4501
4404 4502 return (IOC_REPLY);
4405 4503 }
4406 4504
4407 4505 /*
4408 4506 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
4409 4507 */
4410 4508 static boolean_t
4411 4509 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
4412 4510 {
4413 4511 if (mode == ixgbe->loopback_mode)
4414 4512 return (B_TRUE);
4415 4513
4416 4514 ixgbe->loopback_mode = mode;
4417 4515
4418 4516 if (mode == IXGBE_LB_NONE) {
4419 4517 /*
4420 4518 * Reset the chip
4421 4519 */
4422 4520 (void) ixgbe_reset(ixgbe);
4423 4521 return (B_TRUE);
4424 4522 }
4425 4523
4426 4524 mutex_enter(&ixgbe->gen_lock);
4427 4525
4428 4526 switch (mode) {
4429 4527 default:
4430 4528 mutex_exit(&ixgbe->gen_lock);
4431 4529 return (B_FALSE);
4432 4530
4433 4531 case IXGBE_LB_EXTERNAL:
4434 4532 break;
4435 4533
4436 4534 case IXGBE_LB_INTERNAL_MAC:
4437 4535 ixgbe_set_internal_mac_loopback(ixgbe);
4438 4536 break;
4439 4537 }
4440 4538
4441 4539 mutex_exit(&ixgbe->gen_lock);
4442 4540
4443 4541 return (B_TRUE);
4444 4542 }
4445 4543
4446 4544 /*
4447 4545 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
4448 4546 */
4449 4547 static void
4450 4548 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
4451 4549 {
4452 4550 struct ixgbe_hw *hw;
4453 4551 uint32_t reg;
4454 4552 uint8_t atlas;
4455 4553
4456 4554 hw = &ixgbe->hw;
4457 4555
4458 4556 /*
4459 4557 * Setup MAC loopback
4460 4558 */
4461 4559 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
4462 4560 reg |= IXGBE_HLREG0_LPBK;
4463 4561 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
4464 4562
4465 4563 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4466 4564 reg &= ~IXGBE_AUTOC_LMS_MASK;
4467 4565 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4468 4566
4469 4567 /*
4470 4568 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
4471 4569 */
4472 4570 switch (hw->mac.type) {
4473 4571 case ixgbe_mac_82598EB:
4474 4572 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4475 4573 &atlas);
4476 4574 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
4477 4575 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4478 4576 atlas);
4479 4577
4480 4578 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4481 4579 &atlas);
4482 4580 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4483 4581 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4484 4582 atlas);
4485 4583
4486 4584 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4487 4585 &atlas);
4488 4586 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4489 4587 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4490 4588 atlas);
4491 4589
4492 4590 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4493 4591 &atlas);
4494 4592 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4495 4593 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4496 4594 atlas);
4497 4595 break;
4498 4596
4499 4597 case ixgbe_mac_82599EB:
4500 4598 case ixgbe_mac_X540:
4501 4599 case ixgbe_mac_X550:
4502 4600 case ixgbe_mac_X550EM_x:
4503 4601 case ixgbe_mac_X550EM_a:
4504 4602 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4505 4603 reg |= (IXGBE_AUTOC_FLU |
4506 4604 IXGBE_AUTOC_10G_KX4);
4507 4605 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4508 4606
4509 4607 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4510 4608 B_FALSE);
4511 4609 break;
4512 4610
4513 4611 default:
4514 4612 break;
4515 4613 }
4516 4614 }
4517 4615
4518 4616 #pragma inline(ixgbe_intr_rx_work)
4519 4617 /*
4520 4618 * ixgbe_intr_rx_work - RX processing of ISR.
4521 4619 */
4522 4620 static void
4523 4621 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4524 4622 {
4525 4623 mblk_t *mp;
4526 4624
4527 4625 mutex_enter(&rx_ring->rx_lock);
4528 4626
4529 4627 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4530 4628 mutex_exit(&rx_ring->rx_lock);
4531 4629
4532 4630 if (mp != NULL)
4533 4631 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4534 4632 rx_ring->ring_gen_num);
4535 4633 }
4536 4634
4537 4635 #pragma inline(ixgbe_intr_tx_work)
4538 4636 /*
4539 4637 * ixgbe_intr_tx_work - TX processing of ISR.
4540 4638 */
4541 4639 static void
4542 4640 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4543 4641 {
4544 4642 ixgbe_t *ixgbe = tx_ring->ixgbe;
4545 4643
4546 4644 /*
4547 4645 * Recycle the tx descriptors
4548 4646 */
4549 4647 tx_ring->tx_recycle(tx_ring);
4550 4648
4551 4649 /*
4552 4650 * Schedule the re-transmit
4553 4651 */
4554 4652 if (tx_ring->reschedule &&
4555 4653 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4556 4654 tx_ring->reschedule = B_FALSE;
4557 4655 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4558 4656 tx_ring->ring_handle);
4559 4657 tx_ring->stat_reschedule++;
4560 4658 }
4561 4659 }
4562 4660
4563 4661 #pragma inline(ixgbe_intr_other_work)
4564 4662 /*
4565 4663 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4566 4664 */
4567 4665 static void
4568 4666 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4569 4667 {
4570 4668 struct ixgbe_hw *hw = &ixgbe->hw;
4571 4669
4572 4670 ASSERT(mutex_owned(&ixgbe->gen_lock));
4573 4671
4574 4672 /*
4575 4673 * handle link status change
4576 4674 */
4577 4675 if (eicr & IXGBE_EICR_LSC) {
4578 4676 ixgbe_driver_link_check(ixgbe);
4579 4677 ixgbe_get_hw_state(ixgbe);
4580 4678 }
4581 4679
4582 4680 /*
4583 4681 * check for fan failure on adapters with fans
4584 4682 */
4585 4683 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4586 4684 (eicr & IXGBE_EICR_GPI_SDP1)) {
4587 4685 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4588 4686
4589 4687 /*
4590 4688 * Disable the adapter interrupts
4591 4689 */
4592 4690 ixgbe_disable_adapter_interrupts(ixgbe);
4593 4691
4594 4692 /*
4595 4693 * Disable Rx/Tx units
4596 4694 */
4597 4695 (void) ixgbe_stop_adapter(&ixgbe->hw);
4598 4696
4599 4697 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4600 4698 ixgbe_error(ixgbe,
4601 4699 "Problem: Network adapter has been stopped "
4602 4700 "because the fan has stopped.\n");
4603 4701 ixgbe_error(ixgbe,
4604 4702 "Action: Replace the adapter.\n");
4605 4703
4606 4704 /* re-enable the interrupt, which was automasked */
4607 4705 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4608 4706 }
4609 4707
4610 4708 /*
4611 4709 * Do SFP check for adapters with hot-plug capability
4612 4710 */
4613 4711 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4614 4712 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
4615 4713 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
4616 4714 ixgbe->eicr = eicr;
4617 4715 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4618 4716 ixgbe_sfp_check, (void *)ixgbe,
4619 4717 DDI_NOSLEEP)) != DDI_SUCCESS) {
4620 4718 ixgbe_log(ixgbe, "No memory available to dispatch "
4621 4719 "taskq for SFP check");
4622 4720 }
4623 4721 }
4624 4722
4625 4723 /*
4626 4724 * Do over-temperature check for adapters with temp sensor
4627 4725 */
4628 4726 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4629 4727 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
4630 4728 (eicr & IXGBE_EICR_LSC))) {
4631 4729 ixgbe->eicr = eicr;
4632 4730 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4633 4731 ixgbe_overtemp_check, (void *)ixgbe,
4634 4732 DDI_NOSLEEP)) != DDI_SUCCESS) {
4635 4733 ixgbe_log(ixgbe, "No memory available to dispatch "
4636 4734 "taskq for overtemp check");
4637 4735 }
4638 4736 }
4639 4737
4640 4738 /*
4641 4739 * Process an external PHY interrupt
4642 4740 */
4643 4741 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4644 4742 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4645 4743 ixgbe->eicr = eicr;
4646 4744 if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
4647 4745 ixgbe_phy_check, (void *)ixgbe,
4648 4746 DDI_NOSLEEP)) != DDI_SUCCESS) {
4649 4747 ixgbe_log(ixgbe, "No memory available to dispatch "
4650 4748 "taskq for PHY check");
4651 4749 }
4652 4750 }
4653 4751 }
4654 4752
4655 4753 /*
4656 4754 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4657 4755 */
4658 4756 static uint_t
4659 4757 ixgbe_intr_legacy(void *arg1, void *arg2)
4660 4758 {
4661 4759 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4662 4760 struct ixgbe_hw *hw = &ixgbe->hw;
4663 4761 ixgbe_tx_ring_t *tx_ring;
4664 4762 ixgbe_rx_ring_t *rx_ring;
4665 4763 uint32_t eicr;
4666 4764 mblk_t *mp;
4667 4765 boolean_t tx_reschedule;
4668 4766 uint_t result;
4669 4767
4670 4768 _NOTE(ARGUNUSED(arg2));
4671 4769
4672 4770 mutex_enter(&ixgbe->gen_lock);
4673 4771 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4674 4772 mutex_exit(&ixgbe->gen_lock);
4675 4773 return (DDI_INTR_UNCLAIMED);
4676 4774 }
4677 4775
4678 4776 mp = NULL;
4679 4777 tx_reschedule = B_FALSE;
4680 4778
4681 4779 /*
4682 4780 * Any bit set in eicr: claim this interrupt
4683 4781 */
4684 4782 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4685 4783
4686 4784 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4687 4785 mutex_exit(&ixgbe->gen_lock);
4688 4786 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4689 4787 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4690 4788 return (DDI_INTR_CLAIMED);
4691 4789 }
4692 4790
4693 4791 if (eicr) {
4694 4792 /*
4695 4793 * For legacy interrupt, we have only one interrupt,
4696 4794 * so we have only one rx ring and one tx ring enabled.
4697 4795 */
4698 4796 ASSERT(ixgbe->num_rx_rings == 1);
4699 4797 ASSERT(ixgbe->num_tx_rings == 1);
4700 4798
4701 4799 /*
4702 4800 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4703 4801 */
4704 4802 if (eicr & 0x1) {
4705 4803 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4706 4804 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4707 4805 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4708 4806 /*
4709 4807 * Clean the rx descriptors
4710 4808 */
4711 4809 rx_ring = &ixgbe->rx_rings[0];
4712 4810 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4713 4811 }
4714 4812
4715 4813 /*
4716 4814 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4717 4815 */
4718 4816 if (eicr & 0x2) {
4719 4817 /*
4720 4818 * Recycle the tx descriptors
4721 4819 */
4722 4820 tx_ring = &ixgbe->tx_rings[0];
4723 4821 tx_ring->tx_recycle(tx_ring);
4724 4822
4725 4823 /*
4726 4824 * Schedule the re-transmit
4727 4825 */
4728 4826 tx_reschedule = (tx_ring->reschedule &&
4729 4827 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4730 4828 }
4731 4829
4732 4830 /* any interrupt type other than tx/rx */
4733 4831 if (eicr & ixgbe->capab->other_intr) {
4734 4832 switch (hw->mac.type) {
4735 4833 case ixgbe_mac_82598EB:
4736 4834 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4737 4835 break;
4738 4836
4739 4837 case ixgbe_mac_82599EB:
4740 4838 case ixgbe_mac_X540:
4741 4839 case ixgbe_mac_X550:
4742 4840 case ixgbe_mac_X550EM_x:
4743 4841 case ixgbe_mac_X550EM_a:
4744 4842 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4745 4843 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4746 4844 break;
4747 4845
4748 4846 default:
4749 4847 break;
4750 4848 }
4751 4849 ixgbe_intr_other_work(ixgbe, eicr);
4752 4850 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4753 4851 }
4754 4852
4755 4853 mutex_exit(&ixgbe->gen_lock);
4756 4854
4757 4855 result = DDI_INTR_CLAIMED;
4758 4856 } else {
4759 4857 mutex_exit(&ixgbe->gen_lock);
4760 4858
4761 4859 /*
4762 4860 * No interrupt cause bits set: don't claim this interrupt.
4763 4861 */
4764 4862 result = DDI_INTR_UNCLAIMED;
4765 4863 }
4766 4864
4767 4865 /* re-enable the interrupts which were automasked */
4768 4866 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4769 4867
4770 4868 /*
4771 4869 * Do the following work outside of the gen_lock
4772 4870 */
4773 4871 if (mp != NULL) {
4774 4872 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4775 4873 rx_ring->ring_gen_num);
4776 4874 }
4777 4875
4778 4876 if (tx_reschedule) {
4779 4877 tx_ring->reschedule = B_FALSE;
4780 4878 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4781 4879 tx_ring->stat_reschedule++;
4782 4880 }
4783 4881
4784 4882 return (result);
4785 4883 }
4786 4884
4787 4885 /*
4788 4886 * ixgbe_intr_msi - Interrupt handler for MSI.
4789 4887 */
4790 4888 static uint_t
4791 4889 ixgbe_intr_msi(void *arg1, void *arg2)
4792 4890 {
4793 4891 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4794 4892 struct ixgbe_hw *hw = &ixgbe->hw;
4795 4893 uint32_t eicr;
4796 4894
4797 4895 _NOTE(ARGUNUSED(arg2));
4798 4896
4799 4897 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4800 4898
4801 4899 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4802 4900 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4803 4901 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4804 4902 return (DDI_INTR_CLAIMED);
4805 4903 }
4806 4904
4807 4905 /*
4808 4906 * For MSI interrupt, we have only one vector,
4809 4907 * so we have only one rx ring and one tx ring enabled.
4810 4908 */
4811 4909 ASSERT(ixgbe->num_rx_rings == 1);
4812 4910 ASSERT(ixgbe->num_tx_rings == 1);
4813 4911
4814 4912 /*
4815 4913 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4816 4914 */
4817 4915 if (eicr & 0x1) {
4818 4916 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4819 4917 }
4820 4918
4821 4919 /*
4822 4920 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4823 4921 */
4824 4922 if (eicr & 0x2) {
4825 4923 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4826 4924 }
4827 4925
4828 4926 /* any interrupt type other than tx/rx */
4829 4927 if (eicr & ixgbe->capab->other_intr) {
4830 4928 mutex_enter(&ixgbe->gen_lock);
4831 4929 switch (hw->mac.type) {
4832 4930 case ixgbe_mac_82598EB:
4833 4931 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4834 4932 break;
4835 4933
4836 4934 case ixgbe_mac_82599EB:
4837 4935 case ixgbe_mac_X540:
4838 4936 case ixgbe_mac_X550:
4839 4937 case ixgbe_mac_X550EM_x:
4840 4938 case ixgbe_mac_X550EM_a:
4841 4939 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4842 4940 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4843 4941 break;
4844 4942
4845 4943 default:
4846 4944 break;
4847 4945 }
4848 4946 ixgbe_intr_other_work(ixgbe, eicr);
4849 4947 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4850 4948 mutex_exit(&ixgbe->gen_lock);
4851 4949 }
4852 4950
4853 4951 /* re-enable the interrupts which were automasked */
4854 4952 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4855 4953
4856 4954 return (DDI_INTR_CLAIMED);
4857 4955 }
4858 4956
4859 4957 /*
4860 4958 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4861 4959 */
4862 4960 static uint_t
4863 4961 ixgbe_intr_msix(void *arg1, void *arg2)
4864 4962 {
4865 4963 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4866 4964 ixgbe_t *ixgbe = vect->ixgbe;
4867 4965 struct ixgbe_hw *hw = &ixgbe->hw;
4868 4966 uint32_t eicr;
4869 4967 int r_idx = 0;
4870 4968
4871 4969 _NOTE(ARGUNUSED(arg2));
4872 4970
4873 4971 /*
4874 4972 * Clean each rx ring that has its bit set in the map
4875 4973 */
4876 4974 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4877 4975 while (r_idx >= 0) {
4878 4976 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4879 4977 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4880 4978 (ixgbe->num_rx_rings - 1));
4881 4979 }
4882 4980
4883 4981 /*
4884 4982 * Clean each tx ring that has its bit set in the map
4885 4983 */
4886 4984 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4887 4985 while (r_idx >= 0) {
4888 4986 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4889 4987 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4890 4988 (ixgbe->num_tx_rings - 1));
4891 4989 }
4892 4990
4893 4991
4894 4992 /*
4895 4993 * Clean other interrupt (link change) that has its bit set in the map
4896 4994 */
4897 4995 if (BT_TEST(vect->other_map, 0) == 1) {
4898 4996 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4899 4997
4900 4998 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4901 4999 DDI_FM_OK) {
4902 5000 ddi_fm_service_impact(ixgbe->dip,
4903 5001 DDI_SERVICE_DEGRADED);
4904 5002 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4905 5003 return (DDI_INTR_CLAIMED);
4906 5004 }
4907 5005
4908 5006 /*
4909 5007 * Check "other" cause bits: any interrupt type other than tx/rx
4910 5008 */
4911 5009 if (eicr & ixgbe->capab->other_intr) {
4912 5010 mutex_enter(&ixgbe->gen_lock);
4913 5011 switch (hw->mac.type) {
4914 5012 case ixgbe_mac_82598EB:
4915 5013 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4916 5014 ixgbe_intr_other_work(ixgbe, eicr);
4917 5015 break;
4918 5016
4919 5017 case ixgbe_mac_82599EB:
4920 5018 case ixgbe_mac_X540:
4921 5019 case ixgbe_mac_X550:
4922 5020 case ixgbe_mac_X550EM_x:
4923 5021 case ixgbe_mac_X550EM_a:
4924 5022 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4925 5023 ixgbe_intr_other_work(ixgbe, eicr);
4926 5024 break;
4927 5025
4928 5026 default:
4929 5027 break;
4930 5028 }
4931 5029 mutex_exit(&ixgbe->gen_lock);
4932 5030 }
4933 5031
4934 5032 /* re-enable the interrupts which were automasked */
4935 5033 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4936 5034 }
4937 5035
4938 5036 return (DDI_INTR_CLAIMED);
4939 5037 }
4940 5038
4941 5039 /*
4942 5040 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4943 5041 *
4944 5042  * The normal sequence is to try MSI-X; if that is not successful, try MSI;
4945 5043  * if that is not successful, try Legacy.
4946 5044  * ixgbe->intr_force can be used to force the sequence to start with
4947 5045  * any of the 3 types.
4948 5046  * If MSI-X is not used, the number of tx/rx rings is forced to 1.
4949 5047 */
4950 5048 static int
4951 5049 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4952 5050 {
4953 5051 dev_info_t *devinfo;
4954 5052 int intr_types;
4955 5053 int rc;
4956 5054
4957 5055 devinfo = ixgbe->dip;
4958 5056
4959 5057 /*
4960 5058 * Get supported interrupt types
4961 5059 */
4962 5060 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4963 5061
4964 5062 if (rc != DDI_SUCCESS) {
4965 5063 ixgbe_log(ixgbe,
4966 5064 "Get supported interrupt types failed: %d", rc);
4967 5065 return (IXGBE_FAILURE);
4968 5066 }
4969 5067 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4970 5068
4971 5069 ixgbe->intr_type = 0;
4972 5070
4973 5071 /*
4974 5072 * Install MSI-X interrupts
4975 5073 */
4976 5074 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4977 5075 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4978 5076 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4979 5077 if (rc == IXGBE_SUCCESS)
4980 5078 return (IXGBE_SUCCESS);
4981 5079
4982 5080 ixgbe_log(ixgbe,
4983 5081 "Allocate MSI-X failed, trying MSI interrupts...");
4984 5082 }
4985 5083
4986 5084 /*
4987 5085 * MSI-X not used, force rings and groups to 1
4988 5086 */
4989 5087 ixgbe->num_rx_rings = 1;
4990 5088 ixgbe->num_rx_groups = 1;
4991 5089 ixgbe->num_tx_rings = 1;
4992 5090 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4993 5091 ixgbe_log(ixgbe,
4994 5092 	    "MSI-X not used, forcing rings and groups to 1");
4995 5093
4996 5094 /*
4997 5095 * Install MSI interrupts
4998 5096 */
4999 5097 if ((intr_types & DDI_INTR_TYPE_MSI) &&
5000 5098 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
5001 5099 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
5002 5100 if (rc == IXGBE_SUCCESS)
5003 5101 return (IXGBE_SUCCESS);
5004 5102
5005 5103 ixgbe_log(ixgbe,
5006 5104 "Allocate MSI failed, trying Legacy interrupts...");
5007 5105 }
5008 5106
5009 5107 /*
5010 5108 * Install legacy interrupts
5011 5109 */
5012 5110 if (intr_types & DDI_INTR_TYPE_FIXED) {
5013 5111 /*
5014 5112 * Disallow legacy interrupts for X550. X550 has a silicon
5015 5113 * bug which prevents Shared Legacy interrupts from working.
5016 5114 * For details, please reference:
5017 5115 *
5018 5116 * Intel Ethernet Controller X550 Specification Update rev. 2.1
5019 5117 * May 2016, erratum 22: PCIe Interrupt Status Bit
5020 5118 */
5021 5119 if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
5022 5120 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
5023 5121 ixgbe->hw.mac.type == ixgbe_mac_X550EM_a ||
5024 5122 ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
5025 5123 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf ||
5026 5124 ixgbe->hw.mac.type == ixgbe_mac_X550EM_a_vf) {
5027 5125 ixgbe_log(ixgbe,
5028 5126 "Legacy interrupts are not supported on this "
5029 5127 "adapter. Please use MSI or MSI-X instead.");
5030 5128 return (IXGBE_FAILURE);
5031 5129 }
5032 5130 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
5033 5131 if (rc == IXGBE_SUCCESS)
5034 5132 return (IXGBE_SUCCESS);
5035 5133
5036 5134 ixgbe_log(ixgbe,
5037 5135 "Allocate Legacy interrupts failed");
5038 5136 }
5039 5137
5040 5138 /*
5041 5139 * If none of the 3 types succeeded, return failure
5042 5140 */
5043 5141 return (IXGBE_FAILURE);
5044 5142 }
5045 5143
5046 5144 /*
5047 5145 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
5048 5146 *
5049 5147 * For legacy and MSI, only 1 handle is needed. For MSI-X,
5050 5148 * if fewer than 2 handles are available, return failure.
5051 5149 * Upon success, this maps the vectors to rx and tx rings for
5052 5150 * interrupts.
5053 5151 */
5054 5152 static int
5055 5153 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
5056 5154 {
5057 5155 dev_info_t *devinfo;
5058 5156 int request, count, actual;
5059 5157 int minimum;
5060 5158 int rc;
5061 5159 uint32_t ring_per_group;
5062 5160
5063 5161 devinfo = ixgbe->dip;
5064 5162
5065 5163 switch (intr_type) {
5066 5164 case DDI_INTR_TYPE_FIXED:
5067 5165 request = 1; /* Request 1 legacy interrupt handle */
5068 5166 minimum = 1;
5069 5167 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
5070 5168 break;
5071 5169
5072 5170 case DDI_INTR_TYPE_MSI:
5073 5171 request = 1; /* Request 1 MSI interrupt handle */
5074 5172 minimum = 1;
5075 5173 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
5076 5174 break;
5077 5175
5078 5176 case DDI_INTR_TYPE_MSIX:
5079 5177 /*
5080 5178 * Best number of vectors for the adapter is
5081 5179 		 * The best number of vectors for the adapter is
5082 5180 		 * (# rx rings + # tx rings); however, we will
5083 5181 		 * limit the request number.
5084 5182 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
5085 5183 if (request > ixgbe->capab->max_ring_vect)
5086 5184 request = ixgbe->capab->max_ring_vect;
5087 5185 minimum = 1;
5088 5186 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
5089 5187 break;
5090 5188
5091 5189 default:
5092 5190 ixgbe_log(ixgbe,
5093 5191 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
5094 5192 intr_type);
5095 5193 return (IXGBE_FAILURE);
5096 5194 }
5097 5195 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
5098 5196 request, minimum);
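	/*
	 * A worked example of the request arithmetic above: with 6 rx and
	 * 4 tx rings, MSI-X requests min(16, 6 + 4) = 10 vectors, further
	 * capped by capab->max_ring_vect; legacy and MSI always request
	 * exactly one handle.
	 */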
5099 5197
5100 5198 /*
5101 5199 * Get number of supported interrupts
5102 5200 */
5103 5201 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5104 5202 if ((rc != DDI_SUCCESS) || (count < minimum)) {
5105 5203 ixgbe_log(ixgbe,
5106 5204 "Get interrupt number failed. Return: %d, count: %d",
5107 5205 rc, count);
5108 5206 return (IXGBE_FAILURE);
5109 5207 }
5110 5208 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
5111 5209
5112 5210 actual = 0;
5113 5211 ixgbe->intr_cnt = 0;
5114 5212 ixgbe->intr_cnt_max = 0;
5115 5213 ixgbe->intr_cnt_min = 0;
5116 5214
5117 5215 /*
5118 5216 * Allocate an array of interrupt handles
5119 5217 */
5120 5218 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
5121 5219 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
5122 5220
5123 5221 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
5124 5222 request, &actual, DDI_INTR_ALLOC_NORMAL);
5125 5223 if (rc != DDI_SUCCESS) {
5126 5224 ixgbe_log(ixgbe, "Allocate interrupts failed. "
5127 5225 "return: %d, request: %d, actual: %d",
5128 5226 rc, request, actual);
5129 5227 goto alloc_handle_fail;
5130 5228 }
5131 5229 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
5132 5230
5133 5231 /*
5134 5232 * upper/lower limit of interrupts
5135 5233 */
5136 5234 ixgbe->intr_cnt = actual;
5137 5235 ixgbe->intr_cnt_max = request;
5138 5236 ixgbe->intr_cnt_min = minimum;
5139 5237
5140 5238 /*
5141 5239 	 * The number of RSS rings per group must not exceed the number of
5142 5240 	 * rx interrupts; otherwise the rx ring count must be adjusted down.
5143 5241 */
5144 5242 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5145 5243 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
5146 5244 if (actual < ring_per_group) {
5147 5245 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
5148 5246 ixgbe_setup_vmdq_rss_conf(ixgbe);
5149 5247 }
5150 5248
5151 5249 /*
5152 5250 	 * Now we know the actual number of vectors. Here we map the vectors
5153 5251 	 * to the "other" interrupt, the rx rings and the tx rings.
5154 5252 */
5155 5253 if (actual < minimum) {
5156 5254 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
5157 5255 actual);
5158 5256 goto alloc_handle_fail;
5159 5257 }
5160 5258
5161 5259 /*
5162 5260 * Get priority for first vector, assume remaining are all the same
5163 5261 */
5164 5262 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
5165 5263 if (rc != DDI_SUCCESS) {
5166 5264 ixgbe_log(ixgbe,
5167 5265 "Get interrupt priority failed: %d", rc);
5168 5266 goto alloc_handle_fail;
5169 5267 }
5170 5268
5171 5269 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
5172 5270 if (rc != DDI_SUCCESS) {
5173 5271 ixgbe_log(ixgbe,
5174 5272 "Get interrupt cap failed: %d", rc);
5175 5273 goto alloc_handle_fail;
5176 5274 }
5177 5275
5178 5276 ixgbe->intr_type = intr_type;
5179 5277
5180 5278 return (IXGBE_SUCCESS);
5181 5279
5182 5280 alloc_handle_fail:
5183 5281 ixgbe_rem_intrs(ixgbe);
5184 5282
5185 5283 return (IXGBE_FAILURE);
5186 5284 }
5187 5285
5188 5286 /*
5189 5287 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
5190 5288 *
5191 5289 * Before adding the interrupt handlers, the interrupt vectors have
5192 5290 * been allocated, and the rx/tx rings have also been allocated.
5193 5291 */
5194 5292 static int
5195 5293 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
5196 5294 {
5197 5295 int vector = 0;
5198 5296 int rc;
5199 5297
5200 5298 switch (ixgbe->intr_type) {
5201 5299 case DDI_INTR_TYPE_MSIX:
5202 5300 /*
5203 5301 * Add interrupt handler for all vectors
5204 5302 */
5205 5303 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
5206 5304 /*
5207 5305 * install pointer to vect_map[vector]
5208 5306 */
5209 5307 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5210 5308 (ddi_intr_handler_t *)ixgbe_intr_msix,
5211 5309 (void *)&ixgbe->vect_map[vector], NULL);
5212 5310
5213 5311 if (rc != DDI_SUCCESS) {
5214 5312 ixgbe_log(ixgbe,
5215 5313 "Add interrupt handler failed. "
5216 5314 "return: %d, vector: %d", rc, vector);
5217 5315 for (vector--; vector >= 0; vector--) {
5218 5316 (void) ddi_intr_remove_handler(
5219 5317 ixgbe->htable[vector]);
5220 5318 }
5221 5319 return (IXGBE_FAILURE);
5222 5320 }
5223 5321 }
5224 5322
5225 5323 break;
5226 5324
5227 5325 case DDI_INTR_TYPE_MSI:
5228 5326 /*
5229 5327 * Add interrupt handlers for the only vector
5230 5328 */
5231 5329 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5232 5330 (ddi_intr_handler_t *)ixgbe_intr_msi,
5233 5331 (void *)ixgbe, NULL);
5234 5332
5235 5333 if (rc != DDI_SUCCESS) {
5236 5334 ixgbe_log(ixgbe,
5237 5335 "Add MSI interrupt handler failed: %d", rc);
5238 5336 return (IXGBE_FAILURE);
5239 5337 }
5240 5338
5241 5339 break;
5242 5340
5243 5341 case DDI_INTR_TYPE_FIXED:
5244 5342 /*
5245 5343 * Add interrupt handlers for the only vector
5246 5344 */
5247 5345 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5248 5346 (ddi_intr_handler_t *)ixgbe_intr_legacy,
5249 5347 (void *)ixgbe, NULL);
5250 5348
5251 5349 if (rc != DDI_SUCCESS) {
5252 5350 ixgbe_log(ixgbe,
5253 5351 "Add legacy interrupt handler failed: %d", rc);
5254 5352 return (IXGBE_FAILURE);
5255 5353 }
5256 5354
5257 5355 break;
5258 5356
5259 5357 default:
5260 5358 return (IXGBE_FAILURE);
5261 5359 }
5262 5360
5263 5361 return (IXGBE_SUCCESS);
5264 5362 }
5265 5363
5266 5364 #pragma inline(ixgbe_map_rxring_to_vector)
5267 5365 /*
5268 5366 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
5269 5367 */
5270 5368 static void
5271 5369 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
5272 5370 {
5273 5371 /*
5274 5372 * Set bit in map
5275 5373 */
5276 5374 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5277 5375
5278 5376 /*
5279 5377 * Count bits set
5280 5378 */
5281 5379 ixgbe->vect_map[v_idx].rxr_cnt++;
5282 5380
5283 5381 /*
5284 5382 * Remember bit position
5285 5383 */
5286 5384 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
5287 5385 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
5288 5386 }
5289 5387
5290 5388 #pragma inline(ixgbe_map_txring_to_vector)
5291 5389 /*
5292 5390 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
5293 5391 */
5294 5392 static void
5295 5393 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
5296 5394 {
5297 5395 /*
5298 5396 * Set bit in map
5299 5397 */
5300 5398 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
5301 5399
5302 5400 /*
5303 5401 * Count bits set
5304 5402 */
5305 5403 ixgbe->vect_map[v_idx].txr_cnt++;
5306 5404
5307 5405 /*
5308 5406 * Remember bit position
5309 5407 */
5310 5408 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
5311 5409 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
5312 5410 }
5313 5411
5314 5412 /*
5315 5413 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
5316 5414 * allocation register (IVAR).
5317 5415 * cause:
5318 5416 * -1 : other cause
5319 5417 * 0 : rx
5320 5418 * 1 : tx
5321 5419 */
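/*
 * A worked example of the IVAR index arithmetic below (derived from this
 * code, not from the datasheet): on 82599-class MACs each IVAR register
 * holds two queue entries, so rx queue 5 (cause 0) lands in bits 23:16
 * of IVAR(2) (index = 16 * (5 & 1) + 8 * 0 = 16), while tx queue 5
 * (cause 1) lands in bits 31:24 of the same register. On 82598, entries
 * are packed four per register with tx entries offset by 64, so tx
 * ring 2 maps to byte 2 of IVAR(16), since ((64 + 2) >> 2) & 0x1F = 16.
 */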
5322 5420 static void
5323 5421 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
5324 5422 int8_t cause)
5325 5423 {
5326 5424 struct ixgbe_hw *hw = &ixgbe->hw;
5327 5425 u32 ivar, index;
5328 5426
5329 5427 switch (hw->mac.type) {
5330 5428 case ixgbe_mac_82598EB:
5331 5429 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5332 5430 if (cause == -1) {
5333 5431 cause = 0;
5334 5432 }
5335 5433 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5336 5434 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5337 5435 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
5338 5436 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
5339 5437 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5340 5438 break;
5341 5439
5342 5440 case ixgbe_mac_82599EB:
5343 5441 case ixgbe_mac_X540:
5344 5442 case ixgbe_mac_X550:
5345 5443 case ixgbe_mac_X550EM_x:
5346 5444 case ixgbe_mac_X550EM_a:
5347 5445 if (cause == -1) {
5348 5446 /* other causes */
5349 5447 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5350 5448 index = (intr_alloc_entry & 1) * 8;
5351 5449 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5352 5450 ivar &= ~(0xFF << index);
5353 5451 ivar |= (msix_vector << index);
5354 5452 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5355 5453 } else {
5356 5454 /* tx or rx causes */
5357 5455 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5358 5456 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5359 5457 ivar = IXGBE_READ_REG(hw,
5360 5458 IXGBE_IVAR(intr_alloc_entry >> 1));
5361 5459 ivar &= ~(0xFF << index);
5362 5460 ivar |= (msix_vector << index);
5363 5461 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5364 5462 ivar);
5365 5463 }
5366 5464 break;
5367 5465
5368 5466 default:
5369 5467 break;
5370 5468 }
5371 5469 }
5372 5470
5373 5471 /*
5374 5472 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
5375 5473 * given interrupt vector allocation register (IVAR).
5376 5474 * cause:
5377 5475 * -1 : other cause
5378 5476 * 0 : rx
5379 5477 * 1 : tx
5380 5478 */
5381 5479 static void
5382 5480 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5383 5481 {
5384 5482 struct ixgbe_hw *hw = &ixgbe->hw;
5385 5483 u32 ivar, index;
5386 5484
5387 5485 switch (hw->mac.type) {
5388 5486 case ixgbe_mac_82598EB:
5389 5487 if (cause == -1) {
5390 5488 cause = 0;
5391 5489 }
5392 5490 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5393 5491 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5394 5492 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
5395 5493 (intr_alloc_entry & 0x3)));
5396 5494 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5397 5495 break;
5398 5496
5399 5497 case ixgbe_mac_82599EB:
5400 5498 case ixgbe_mac_X540:
5401 5499 case ixgbe_mac_X550:
5402 5500 case ixgbe_mac_X550EM_x:
5403 5501 case ixgbe_mac_X550EM_a:
5404 5502 if (cause == -1) {
5405 5503 /* other causes */
5406 5504 index = (intr_alloc_entry & 1) * 8;
5407 5505 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5408 5506 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5409 5507 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5410 5508 } else {
5411 5509 /* tx or rx causes */
5412 5510 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5413 5511 ivar = IXGBE_READ_REG(hw,
5414 5512 IXGBE_IVAR(intr_alloc_entry >> 1));
5415 5513 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5416 5514 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5417 5515 ivar);
5418 5516 }
5419 5517 break;
5420 5518
5421 5519 default:
5422 5520 break;
5423 5521 }
5424 5522 }
5425 5523
5426 5524 /*
5427 5525  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
5428 5526 * given interrupt vector allocation register (IVAR).
5429 5527 * cause:
5430 5528 * -1 : other cause
5431 5529 * 0 : rx
5432 5530 * 1 : tx
5433 5531 */
5434 5532 static void
5435 5533 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5436 5534 {
5437 5535 struct ixgbe_hw *hw = &ixgbe->hw;
5438 5536 u32 ivar, index;
5439 5537
5440 5538 switch (hw->mac.type) {
5441 5539 case ixgbe_mac_82598EB:
5442 5540 if (cause == -1) {
5443 5541 cause = 0;
5444 5542 }
5445 5543 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5446 5544 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5447 5545 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
5448 5546 (intr_alloc_entry & 0x3)));
5449 5547 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5450 5548 break;
5451 5549
5452 5550 case ixgbe_mac_82599EB:
5453 5551 case ixgbe_mac_X540:
5454 5552 case ixgbe_mac_X550:
5455 5553 case ixgbe_mac_X550EM_x:
5456 5554 case ixgbe_mac_X550EM_a:
5457 5555 if (cause == -1) {
5458 5556 /* other causes */
5459 5557 index = (intr_alloc_entry & 1) * 8;
5460 5558 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5461 5559 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5462 5560 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5463 5561 } else {
5464 5562 /* tx or rx causes */
5465 5563 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5466 5564 ivar = IXGBE_READ_REG(hw,
5467 5565 IXGBE_IVAR(intr_alloc_entry >> 1));
5468 5566 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5469 5567 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5470 5568 ivar);
5471 5569 }
5472 5570 break;
5473 5571
5474 5572 default:
5475 5573 break;
5476 5574 }
5477 5575 }
5478 5576
5479 5577 /*
5480 5578  * Convert the driver-maintained rx ring index to the corresponding
5481 5579  * h/w rx ring index.
5482 5580 */
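/*
 * A worked example of the VMDQ_RSS arithmetic below, on an 82599-class
 * MAC with 16 groups of 2 rings each: groups are spaced 4 hw rings
 * apart, so sw ring 5 (group 2, offset 1) maps to hw ring
 * 2 * 4 + 1 = 9. With more than 32 groups the spacing drops to 2.
 */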
5483 5581 static uint32_t
5484 5582 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5485 5583 {
5486 5584
5487 5585 struct ixgbe_hw *hw = &ixgbe->hw;
5488 5586 uint32_t rx_ring_per_group, hw_rx_index;
5489 5587
5490 5588 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5491 5589 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5492 5590 return (sw_rx_index);
5493 5591 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5494 5592 switch (hw->mac.type) {
5495 5593 case ixgbe_mac_82598EB:
5496 5594 return (sw_rx_index);
5497 5595
5498 5596 case ixgbe_mac_82599EB:
5499 5597 case ixgbe_mac_X540:
5500 5598 case ixgbe_mac_X550:
5501 5599 case ixgbe_mac_X550EM_x:
5502 5600 case ixgbe_mac_X550EM_a:
5503 5601 return (sw_rx_index * 2);
5504 5602
5505 5603 default:
5506 5604 break;
5507 5605 }
5508 5606 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5509 5607 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5510 5608
5511 5609 switch (hw->mac.type) {
5512 5610 case ixgbe_mac_82598EB:
5513 5611 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5514 5612 16 + (sw_rx_index % rx_ring_per_group);
5515 5613 return (hw_rx_index);
5516 5614
5517 5615 case ixgbe_mac_82599EB:
5518 5616 case ixgbe_mac_X540:
5519 5617 case ixgbe_mac_X550:
5520 5618 case ixgbe_mac_X550EM_x:
5521 5619 case ixgbe_mac_X550EM_a:
5522 5620 if (ixgbe->num_rx_groups > 32) {
5523 5621 hw_rx_index = (sw_rx_index /
5524 5622 rx_ring_per_group) * 2 +
5525 5623 (sw_rx_index % rx_ring_per_group);
5526 5624 } else {
5527 5625 hw_rx_index = (sw_rx_index /
5528 5626 rx_ring_per_group) * 4 +
5529 5627 (sw_rx_index % rx_ring_per_group);
5530 5628 }
5531 5629 return (hw_rx_index);
5532 5630
5533 5631 default:
5534 5632 break;
5535 5633 }
5536 5634 }
5537 5635
5538 5636 /*
5539 5637 	 * This should never be reached; it just keeps the compiler happy.
5540 5638 */
5541 5639 return (sw_rx_index);
5542 5640 }
5543 5641
5544 5642 /*
5545 5643 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5546 5644 *
5547 5645  * For MSI-X, this maps the rx interrupts, the tx interrupts and the
5548 5646  * other interrupt to vector[0 - (intr_cnt - 1)].
5549 5647 */
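/*
 * A worked example of the round-robin mapping below: with intr_cnt = 4,
 * 8 rx rings and 4 tx rings, "other" shares vector 0, rx rings 0-7 land
 * on vectors 0,1,2,3,0,1,2,3, and tx rings 0-3 then continue on
 * vectors 0,1,2,3.
 */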
5550 5648 static int
5551 5649 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5552 5650 {
5553 5651 int i, vector = 0;
5554 5652
5555 5653 /* initialize vector map */
5556 5654 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5557 5655 for (i = 0; i < ixgbe->intr_cnt; i++) {
5558 5656 ixgbe->vect_map[i].ixgbe = ixgbe;
5559 5657 }
5560 5658
5561 5659 /*
5562 5660 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5563 5661 * tx rings[0] on RTxQ[1].
5564 5662 */
5565 5663 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5566 5664 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5567 5665 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5568 5666 return (IXGBE_SUCCESS);
5569 5667 }
5570 5668
5571 5669 /*
5572 5670 * Interrupts/vectors mapping for MSI-X
5573 5671 */
5574 5672
5575 5673 /*
5576 5674 * Map other interrupt to vector 0,
5577 5675 * Set bit in map and count the bits set.
5578 5676 */
5579 5677 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5580 5678 ixgbe->vect_map[vector].other_cnt++;
5581 5679
5582 5680 /*
5583 5681 * Map rx ring interrupts to vectors
5584 5682 */
5585 5683 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5586 5684 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5587 5685 		vector = (vector + 1) % ixgbe->intr_cnt;
5588 5686 }
5589 5687
5590 5688 /*
5591 5689 * Map tx ring interrupts to vectors
5592 5690 */
5593 5691 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5594 5692 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5595 5693 		vector = (vector + 1) % ixgbe->intr_cnt;
5596 5694 }
5597 5695
5598 5696 return (IXGBE_SUCCESS);
5599 5697 }
5600 5698
5601 5699 /*
5602 5700 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5603 5701 *
5604 5702 * This relies on ring/vector mapping already set up in the
5605 5703 * vect_map[] structures
5606 5704 */
5607 5705 static void
5608 5706 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5609 5707 {
5610 5708 struct ixgbe_hw *hw = &ixgbe->hw;
5611 5709 ixgbe_intr_vector_t *vect; /* vector bitmap */
5612 5710 int r_idx; /* ring index */
5613 5711 int v_idx; /* vector index */
5614 5712 uint32_t hw_index;
5615 5713
5616 5714 /*
5617 5715 * Clear any previous entries
5618 5716 */
5619 5717 switch (hw->mac.type) {
5620 5718 case ixgbe_mac_82598EB:
5621 5719 for (v_idx = 0; v_idx < 25; v_idx++)
5622 5720 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5623 5721 break;
5624 5722
5625 5723 case ixgbe_mac_82599EB:
5626 5724 case ixgbe_mac_X540:
5627 5725 case ixgbe_mac_X550:
5628 5726 case ixgbe_mac_X550EM_x:
5629 5727 case ixgbe_mac_X550EM_a:
5630 5728 for (v_idx = 0; v_idx < 64; v_idx++)
5631 5729 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5632 5730 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5633 5731 break;
5634 5732
5635 5733 default:
5636 5734 break;
5637 5735 }
5638 5736
5639 5737 /*
5640 5738 	 * For non-MSI-X interrupts, rx rings[0] will use RTxQ[0], and
5641 5739 * tx rings[0] will use RTxQ[1].
5642 5740 */
5643 5741 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5644 5742 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5645 5743 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5646 5744 return;
5647 5745 }
5648 5746
5649 5747 /*
5650 5748 * For MSI-X interrupt, "Other" is always on vector[0].
5651 5749 */
5652 5750 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5653 5751
5654 5752 /*
5655 5753 * For each interrupt vector, populate the IVAR table
5656 5754 */
5657 5755 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5658 5756 vect = &ixgbe->vect_map[v_idx];
5659 5757
5660 5758 /*
5661 5759 * For each rx ring bit set
5662 5760 */
5663 5761 r_idx = bt_getlowbit(vect->rx_map, 0,
5664 5762 (ixgbe->num_rx_rings - 1));
5665 5763
5666 5764 while (r_idx >= 0) {
5667 5765 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5668 5766 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5669 5767 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5670 5768 (ixgbe->num_rx_rings - 1));
5671 5769 }
5672 5770
5673 5771 /*
5674 5772 * For each tx ring bit set
5675 5773 */
5676 5774 r_idx = bt_getlowbit(vect->tx_map, 0,
5677 5775 (ixgbe->num_tx_rings - 1));
5678 5776
5679 5777 while (r_idx >= 0) {
5680 5778 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5681 5779 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5682 5780 (ixgbe->num_tx_rings - 1));
5683 5781 }
5684 5782 }
5685 5783 }
5686 5784
5687 5785 /*
5688 5786 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5689 5787 */
5690 5788 static void
5691 5789 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5692 5790 {
5693 5791 int i;
5694 5792 int rc;
5695 5793
5696 5794 for (i = 0; i < ixgbe->intr_cnt; i++) {
5697 5795 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5698 5796 if (rc != DDI_SUCCESS) {
5699 5797 IXGBE_DEBUGLOG_1(ixgbe,
5700 5798 "Remove intr handler failed: %d", rc);
5701 5799 }
5702 5800 }
5703 5801 }
5704 5802
5705 5803 /*
5706 5804 * ixgbe_rem_intrs - Remove the allocated interrupts.
5707 5805 */
5708 5806 static void
5709 5807 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5710 5808 {
5711 5809 int i;
5712 5810 int rc;
5713 5811
5714 5812 for (i = 0; i < ixgbe->intr_cnt; i++) {
5715 5813 rc = ddi_intr_free(ixgbe->htable[i]);
5716 5814 if (rc != DDI_SUCCESS) {
5717 5815 IXGBE_DEBUGLOG_1(ixgbe,
5718 5816 "Free intr failed: %d", rc);
5719 5817 }
5720 5818 }
5721 5819
5722 5820 kmem_free(ixgbe->htable, ixgbe->intr_size);
5723 5821 ixgbe->htable = NULL;
5724 5822 }
5725 5823
5726 5824 /*
5727 5825 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5728 5826 */
5729 5827 static int
5730 5828 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5731 5829 {
5732 5830 int i;
5733 5831 int rc;
5734 5832
5735 5833 /*
5736 5834 * Enable interrupts
5737 5835 */
5738 5836 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5739 5837 /*
5740 5838 * Call ddi_intr_block_enable() for MSI
5741 5839 */
5742 5840 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5743 5841 if (rc != DDI_SUCCESS) {
5744 5842 ixgbe_log(ixgbe,
5745 5843 "Enable block intr failed: %d", rc);
5746 5844 return (IXGBE_FAILURE);
5747 5845 }
5748 5846 } else {
5749 5847 /*
5750 5848 * Call ddi_intr_enable() for Legacy/MSI non block enable
5751 5849 */
5752 5850 for (i = 0; i < ixgbe->intr_cnt; i++) {
5753 5851 rc = ddi_intr_enable(ixgbe->htable[i]);
5754 5852 if (rc != DDI_SUCCESS) {
5755 5853 ixgbe_log(ixgbe,
5756 5854 "Enable intr failed: %d", rc);
5757 5855 return (IXGBE_FAILURE);
5758 5856 }
5759 5857 }
5760 5858 }
5761 5859
5762 5860 return (IXGBE_SUCCESS);
5763 5861 }
5764 5862
5765 5863 /*
5766 5864 * ixgbe_disable_intrs - Disable all the interrupts.
5767 5865 */
5768 5866 static int
5769 5867 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5770 5868 {
5771 5869 int i;
5772 5870 int rc;
5773 5871
5774 5872 /*
5775 5873 * Disable all interrupts
5776 5874 */
5777 5875 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5778 5876 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5779 5877 if (rc != DDI_SUCCESS) {
5780 5878 ixgbe_log(ixgbe,
5781 5879 "Disable block intr failed: %d", rc);
5782 5880 return (IXGBE_FAILURE);
5783 5881 }
5784 5882 } else {
5785 5883 for (i = 0; i < ixgbe->intr_cnt; i++) {
5786 5884 rc = ddi_intr_disable(ixgbe->htable[i]);
5787 5885 if (rc != DDI_SUCCESS) {
5788 5886 ixgbe_log(ixgbe,
5789 5887 "Disable intr failed: %d", rc);
5790 5888 return (IXGBE_FAILURE);
5791 5889 }
5792 5890 }
5793 5891 }
5794 5892
5795 5893 return (IXGBE_SUCCESS);
5796 5894 }
5797 5895
5798 5896 /*
5799 5897 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5800 5898 */
5801 5899 static void
5802 5900 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5803 5901 {
5804 5902 struct ixgbe_hw *hw = &ixgbe->hw;
5805 5903 ixgbe_link_speed speed = 0;
5806 5904 boolean_t link_up = B_FALSE;
5807 5905 uint32_t pcs1g_anlp = 0;
5808 5906
5809 5907 ASSERT(mutex_owned(&ixgbe->gen_lock));
5810 5908 ixgbe->param_lp_1000fdx_cap = 0;
5811 5909 ixgbe->param_lp_100fdx_cap = 0;
5812 5910
5813 5911 /* check for link, don't wait */
5814 5912 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
5815 5913
5816 5914 /*
5817 5915 * Update the observed Link Partner's capabilities. Not all adapters
5818 5916 * can provide full information on the LP's capable speeds, so we
5819 5917 * provide what we can.
5820 5918 */
5821 5919 if (link_up) {
5822 5920 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5823 5921
5824 5922 ixgbe->param_lp_1000fdx_cap =
5825 5923 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5826 5924 ixgbe->param_lp_100fdx_cap =
5827 5925 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5828 5926 }
5829 5927
5830 5928 /*
5831 5929 * Update GLD's notion of the adapter's currently advertised speeds.
5832 5930 	 * Since the common code doesn't always record the current
5833 5931 	 * autonegotiation settings in the phy struct for all parts
5834 5932 	 * (specifically, adapters with SFPs), we first test whether it is 0
5835 5933 	 * and, if so, fall back to the adapter's speed capabilities, which
5836 5934 	 * we saved during instance init in ixgbe_init_params().
5837 5935 *
5838 5936 * Adapters with SFPs will always be shown as advertising all of their
5839 5937 * supported speeds, and adapters with baseT PHYs (where the phy struct
5840 5938 * is maintained by the common code) will always have a factual view of
5841 5939 * their currently-advertised speeds. In the case of SFPs, this is
5842 5940 * acceptable as we default to advertising all speeds that the adapter
5843 5941 * claims to support, and those properties are immutable; unlike on
5844 5942 * baseT (copper) PHYs, where speeds can be enabled or disabled at will.
5845 5943 */
5846 5944 speed = hw->phy.autoneg_advertised;
5847 5945 if (speed == 0)
5848 5946 speed = ixgbe->speeds_supported;
5849 5947
5850 5948 ixgbe->param_adv_10000fdx_cap =
5851 5949 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
5852 5950 ixgbe->param_adv_5000fdx_cap =
5853 5951 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
5854 5952 ixgbe->param_adv_2500fdx_cap =
5855 5953 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
5856 5954 ixgbe->param_adv_1000fdx_cap =
5857 5955 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
5858 5956 ixgbe->param_adv_100fdx_cap =
5859 5957 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
5860 5958 }
5861 5959
5862 5960 /*
5863 5961 * ixgbe_get_driver_control - Notify that driver is in control of device.
5864 5962 */
5865 5963 static void
5866 5964 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5867 5965 {
5868 5966 uint32_t ctrl_ext;
5869 5967
5870 5968 /*
5871 5969 * Notify firmware that driver is in control of device
5872 5970 */
5873 5971 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5874 5972 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5875 5973 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5876 5974 }
5877 5975
5878 5976 /*
5879 5977 * ixgbe_release_driver_control - Notify that driver is no longer in control
5880 5978 * of device.
5881 5979 */
5882 5980 static void
5883 5981 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5884 5982 {
5885 5983 uint32_t ctrl_ext;
5886 5984
5887 5985 /*
5888 5986 * Notify firmware that driver is no longer in control of device
5889 5987 */
5890 5988 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5891 5989 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5892 5990 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5893 5991 }
5894 5992
5895 5993 /*
5896 5994 * ixgbe_atomic_reserve - Atomic decrease operation.
5897 5995 */
5898 5996 int
5899 5997 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5900 5998 {
5901 5999 uint32_t oldval;
5902 6000 uint32_t newval;
5903 6001
5904 6002 /*
5905 6003 * ATOMICALLY
5906 6004 */
5907 6005 do {
5908 6006 oldval = *count_p;
5909 6007 if (oldval < n)
5910 6008 return (-1);
5911 6009 newval = oldval - n;
5912 6010 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5913 6011
5914 6012 return (newval);
5915 6013 }
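/*
 * For example, if *count_p is 3, ixgbe_atomic_reserve(count_p, 5)
 * returns -1 and leaves the count at 3, while a subsequent
 * ixgbe_atomic_reserve(count_p, 2) returns the new value 1. The CAS
 * loop only retries if another thread changed the count between the
 * read and the swap.
 */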
5916 6014
5917 6015 /*
5918 6016 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5919 6017 */
5920 6018 static uint8_t *
5921 6019 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5922 6020 {
5923 6021 uint8_t *addr = *upd_ptr;
5924 6022 uint8_t *new_ptr;
5925 6023
5926 6024 _NOTE(ARGUNUSED(hw));
5927 6025 _NOTE(ARGUNUSED(vmdq));
5928 6026
5929 6027 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5930 6028 *upd_ptr = new_ptr;
5931 6029 return (addr);
5932 6030 }
5933 6031
5934 6032 /*
5935 6033 * FMA support
5936 6034 */
5937 6035 int
5938 6036 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5939 6037 {
5940 6038 ddi_fm_error_t de;
5941 6039
5942 6040 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5943 6041 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5944 6042 return (de.fme_status);
5945 6043 }
5946 6044
5947 6045 int
5948 6046 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5949 6047 {
5950 6048 ddi_fm_error_t de;
5951 6049
5952 6050 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5953 6051 return (de.fme_status);
5954 6052 }
5955 6053
5956 6054 /*
5957 6055 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5958 6056 */
5959 6057 static int
5960 6058 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5961 6059 {
5962 6060 _NOTE(ARGUNUSED(impl_data));
5963 6061 /*
5964 6062 * as the driver can always deal with an error in any dma or
5965 6063 * access handle, we can just return the fme_status value.
5966 6064 */
5967 6065 pci_ereport_post(dip, err, NULL);
5968 6066 return (err->fme_status);
5969 6067 }
5970 6068
5971 6069 static void
5972 6070 ixgbe_fm_init(ixgbe_t *ixgbe)
5973 6071 {
5974 6072 ddi_iblock_cookie_t iblk;
5975 6073 int fma_dma_flag;
5976 6074
5977 6075 /*
5978 6076 * Only register with IO Fault Services if we have some capability
5979 6077 */
5980 6078 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5981 6079 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5982 6080 } else {
5983 6081 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5984 6082 }
5985 6083
5986 6084 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5987 6085 fma_dma_flag = 1;
5988 6086 } else {
5989 6087 fma_dma_flag = 0;
5990 6088 }
5991 6089
5992 6090 ixgbe_set_fma_flags(fma_dma_flag);
5993 6091
5994 6092 if (ixgbe->fm_capabilities) {
5995 6093
5996 6094 /*
5997 6095 * Register capabilities with IO Fault Services
5998 6096 */
5999 6097 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
6000 6098
6001 6099 /*
6002 6100 * Initialize pci ereport capabilities if ereport capable
6003 6101 */
6004 6102 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
6005 6103 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6006 6104 pci_ereport_setup(ixgbe->dip);
6007 6105
6008 6106 /*
6009 6107 * Register error callback if error callback capable
6010 6108 */
6011 6109 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6012 6110 ddi_fm_handler_register(ixgbe->dip,
6013 6111 ixgbe_fm_error_cb, (void*) ixgbe);
6014 6112 }
6015 6113 }
6016 6114
6017 6115 static void
6018 6116 ixgbe_fm_fini(ixgbe_t *ixgbe)
6019 6117 {
6020 6118 /*
6021 6119 * Only unregister FMA capabilities if they are registered
6022 6120 */
6023 6121 if (ixgbe->fm_capabilities) {
6024 6122
6025 6123 /*
6026 6124 * Release any resources allocated by pci_ereport_setup()
6027 6125 */
6028 6126 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
6029 6127 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6030 6128 pci_ereport_teardown(ixgbe->dip);
6031 6129
6032 6130 /*
6033 6131 * Un-register error callback if error callback capable
6034 6132 */
6035 6133 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6036 6134 ddi_fm_handler_unregister(ixgbe->dip);
6037 6135
6038 6136 /*
6039 6137 * Unregister from IO Fault Service
6040 6138 */
6041 6139 ddi_fm_fini(ixgbe->dip);
6042 6140 }
6043 6141 }
6044 6142
6045 6143 void
6046 6144 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
6047 6145 {
6048 6146 uint64_t ena;
6049 6147 char buf[FM_MAX_CLASS];
6050 6148
6051 6149 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6052 6150 ena = fm_ena_generate(0, FM_ENA_FMT1);
6053 6151 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
6054 6152 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
6055 6153 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6056 6154 }
6057 6155 }
6058 6156
6059 6157 static int
6060 6158 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
6061 6159 {
6062 6160 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
6063 6161
6064 6162 mutex_enter(&rx_ring->rx_lock);
6065 6163 rx_ring->ring_gen_num = mr_gen_num;
6066 6164 mutex_exit(&rx_ring->rx_lock);
6067 6165 return (0);
6068 6166 }
6069 6167
6070 6168 /*
6071 6169 * Get the global ring index by a ring index within a group.
6072 6170 */
6073 6171 static int
6074 6172 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
6075 6173 {
6076 6174 ixgbe_rx_ring_t *rx_ring;
6077 6175 int i;
6078 6176
6079 6177 for (i = 0; i < ixgbe->num_rx_rings; i++) {
6080 6178 rx_ring = &ixgbe->rx_rings[i];
6081 6179 if (rx_ring->group_index == gindex)
6082 6180 rindex--;
6083 6181 if (rindex < 0)
6084 6182 return (i);
6085 6183 }
6086 6184
6087 6185 return (-1);
6088 6186 }
6089 6187
6090 6188 /*
6091 6189  * Callback function for the MAC layer to register all rings.
6092 6190 */
6093 6191 /* ARGSUSED */
6094 6192 void
6095 6193 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
6096 6194 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
6097 6195 {
6098 6196 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6099 6197 mac_intr_t *mintr = &infop->mri_intr;
6100 6198
6101 6199 switch (rtype) {
6102 6200 case MAC_RING_TYPE_RX: {
6103 6201 /*
6104 6202 * 'index' is the ring index within the group.
6105 6203 		 * Get the global ring index by searching across the groups.
6106 6204 */
6107 6205 int global_ring_index = ixgbe_get_rx_ring_index(
6108 6206 ixgbe, group_index, ring_index);
6109 6207
6110 6208 ASSERT(global_ring_index >= 0);
6111 6209
6112 6210 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
6113 6211 rx_ring->ring_handle = rh;
6114 6212
6115 6213 infop->mri_driver = (mac_ring_driver_t)rx_ring;
6116 6214 infop->mri_start = ixgbe_ring_start;
6117 6215 infop->mri_stop = NULL;
6118 6216 infop->mri_poll = ixgbe_ring_rx_poll;
6119 6217 infop->mri_stat = ixgbe_rx_ring_stat;
6120 6218
6121 6219 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
6122 6220 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
6123 6221 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
6124 6222 if (ixgbe->intr_type &
6125 6223 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6126 6224 mintr->mi_ddi_handle =
6127 6225 ixgbe->htable[rx_ring->intr_vector];
6128 6226 }
6129 6227
6130 6228 break;
6131 6229 }
6132 6230 case MAC_RING_TYPE_TX: {
6133 6231 ASSERT(group_index == -1);
6134 6232 ASSERT(ring_index < ixgbe->num_tx_rings);
6135 6233
6136 6234 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
6137 6235 tx_ring->ring_handle = rh;
6138 6236
6139 6237 infop->mri_driver = (mac_ring_driver_t)tx_ring;
6140 6238 infop->mri_start = NULL;
6141 6239 infop->mri_stop = NULL;
6142 6240 infop->mri_tx = ixgbe_ring_tx;
6143 6241 infop->mri_stat = ixgbe_tx_ring_stat;
6144 6242 if (ixgbe->intr_type &
6145 6243 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6146 6244 mintr->mi_ddi_handle =
6147 6245 ixgbe->htable[tx_ring->intr_vector];
6148 6246 }
6149 6247 break;
6150 6248 }
6151 6249 default:
6152 6250 break;
6153 6251 }
6154 6252 }
6155 6253
6156 6254 /*
6157 6255  * Callback function for the MAC layer to register all groups.
6158 6256 */
6159 6257 void
6160 6258 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6161 6259 mac_group_info_t *infop, mac_group_handle_t gh)
6162 6260 {
6163 6261 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6262 + struct ixgbe_hw *hw = &ixgbe->hw;
6164 6263
6165 6264 switch (rtype) {
6166 6265 case MAC_RING_TYPE_RX: {
6167 6266 ixgbe_rx_group_t *rx_group;
6168 6267
6169 6268 rx_group = &ixgbe->rx_groups[index];
6170 6269 rx_group->group_handle = gh;
6171 6270
6172 6271 infop->mgi_driver = (mac_group_driver_t)rx_group;
6173 6272 infop->mgi_start = NULL;
6174 6273 infop->mgi_stop = NULL;
6175 6274 infop->mgi_addmac = ixgbe_addmac;
6176 6275 infop->mgi_remmac = ixgbe_remmac;
6276 +
6277 + if ((ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ ||
6278 + ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) &&
6279 + (hw->mac.type == ixgbe_mac_82599EB ||
6280 + hw->mac.type == ixgbe_mac_X540 ||
6281 + hw->mac.type == ixgbe_mac_X550 ||
6282 + hw->mac.type == ixgbe_mac_X550EM_x)) {
6283 + infop->mgi_addvlan = ixgbe_addvlan;
6284 + infop->mgi_remvlan = ixgbe_remvlan;
6285 + } else {
6286 + infop->mgi_addvlan = NULL;
6287 + infop->mgi_remvlan = NULL;
6288 + }
6289 +
6177 6290 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6178 6291
6179 6292 break;
6180 6293 }
6181 6294 case MAC_RING_TYPE_TX:
6182 6295 break;
6183 6296 default:
6184 6297 break;
6185 6298 }
6186 6299 }
6187 6300
6188 6301 /*
6189 6302  * Enable the interrupt on the specified rx ring.
6190 6303 */
6191 6304 int
6192 6305 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6193 6306 {
6194 6307 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6195 6308 ixgbe_t *ixgbe = rx_ring->ixgbe;
6196 6309 int r_idx = rx_ring->index;
6197 6310 int hw_r_idx = rx_ring->hw_index;
6198 6311 int v_idx = rx_ring->intr_vector;
6199 6312
6200 6313 mutex_enter(&ixgbe->gen_lock);
6201 6314 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6202 6315 mutex_exit(&ixgbe->gen_lock);
6203 6316 /*
6204 6317 * Simply return 0.
6205 6318 * Interrupts are being adjusted. ixgbe_intr_adjust()
6206 6319 * will eventually re-enable the interrupt when it's
6207 6320 * done with the adjustment.
6208 6321 */
6209 6322 return (0);
6210 6323 }
6211 6324
6212 6325 /*
6213 6326 	 * Enable the interrupt by setting the VAL bit of the given
6214 6327 	 * interrupt vector allocation register (IVAR) entry.
6215 6328 */
6216 6329 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
6217 6330
6218 6331 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
6219 6332
6220 6333 /*
6221 6334 * Trigger a Rx interrupt on this ring
6222 6335 */
6223 6336 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
6224 6337 IXGBE_WRITE_FLUSH(&ixgbe->hw);
6225 6338
6226 6339 mutex_exit(&ixgbe->gen_lock);
6227 6340
6228 6341 return (0);
6229 6342 }
6230 6343
6231 6344 /*
6232 6345  * Disable the interrupt on the specified rx ring.
6233 6346 */
6234 6347 int
6235 6348 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
6236 6349 {
6237 6350 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6238 6351 ixgbe_t *ixgbe = rx_ring->ixgbe;
6239 6352 int r_idx = rx_ring->index;
6240 6353 int hw_r_idx = rx_ring->hw_index;
6241 6354 int v_idx = rx_ring->intr_vector;
6242 6355
6243 6356 mutex_enter(&ixgbe->gen_lock);
6244 6357 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6245 6358 mutex_exit(&ixgbe->gen_lock);
6246 6359 /*
6247 6360 * Simply return 0.
6248 6361 * In the rare case where an interrupt is being
6249 6362 * disabled while interrupts are being adjusted,
6250 6363 * we don't fail the operation. No interrupts will
6251 6364 * be generated while they are adjusted, and
6252 6365 * ixgbe_intr_adjust() will cause the interrupts
6253 6366 * to be re-enabled once it completes. Note that
6254 6367 * in this case, packets may be delivered to the
6255 6368 	 * stack via interrupts before ixgbe_rx_ring_intr_enable()
6256 6369 * is called again. This is acceptable since interrupt
6257 6370 * adjustment is infrequent, and the stack will be
6258 6371 * able to handle these packets.
6259 6372 */
6260 6373 return (0);
6261 6374 }
6262 6375
6263 6376 /*
6264 6377 	 * Disable the interrupt by clearing the VAL bit of the given
6265 6378 	 * interrupt vector allocation register (IVAR) entry.
6266 6379 */
6267 6380 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6268 6381
6269 6382 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6270 6383
6271 6384 mutex_exit(&ixgbe->gen_lock);
6272 6385
6273 6386 return (0);
6274 6387 }
6388 +
6389 +static ixgbe_vlan_t *
6390 +ixgbe_find_vlan(ixgbe_rx_group_t *rx_group, uint16_t vid)
6391 +{
6392 + for (ixgbe_vlan_t *vlp = list_head(&rx_group->vlans); vlp != NULL;
6393 + vlp = list_next(&rx_group->vlans, vlp)) {
6394 + if (vlp->ixvl_vid == vid)
6395 + return (vlp);
6396 + }
6397 +
6398 + return (NULL);
6399 +}
6400 +
6401 +/*
6402 + * Attempt to use a VLAN HW filter for this group. If the group is
6403 + * interested in untagged packets then set AUPE only. If the group is
6404 + * the default then only set the VFTA. Leave the VLVF slots open for
6405 + * reserved groups to guarantee their use of HW filtering.
6406 + */
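/*
 * Background sketch (standard ixgbe VLAN filtering, not specific to
 * this change): the VFTA is a 4096-bit table indexed by VLAN id, so
 * vid 100 corresponds to bit (100 & 0x1f) = 4 of VFTA[100 >> 5], i.e.
 * VFTA[3], while the 64 VLVF/VLVFB slots additionally bind a vid to a
 * pool bitmap so tagged traffic can be steered to a specific group.
 */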
6407 +static int
6408 +ixgbe_addvlan(mac_group_driver_t gdriver, uint16_t vid)
6409 +{
6410 + ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)gdriver;
6411 + ixgbe_t *ixgbe = rx_group->ixgbe;
6412 + struct ixgbe_hw *hw = &ixgbe->hw;
6413 + ixgbe_vlan_t *vlp;
6414 + int ret;
6415 + boolean_t is_def_grp;
6416 +
6417 + mutex_enter(&ixgbe->gen_lock);
6418 +
6419 + if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6420 + mutex_exit(&ixgbe->gen_lock);
6421 + return (ECANCELED);
6422 + }
6423 +
6424 + /*
6425 + * Let's be sure VLAN filtering is enabled.
6426 + */
6427 + VERIFY3B(ixgbe->vlft_enabled, ==, B_TRUE);
6428 + is_def_grp = (rx_group->index == ixgbe->rx_def_group);
6429 +
6430 + /*
6431 + * VLAN filtering is enabled but we want to receive untagged
6432 + * traffic on this group -- set the AUPE bit on the group and
6433 + * leave the VLAN tables alone.
6434 + */
6435 + if (vid == MAC_VLAN_UNTAGGED) {
6436 + /*
6437 + * We never enable AUPE on the default group; it is
6438 + * redundant. Untagged traffic which passes L2
6439 + * filtering is delivered to the default group if no
6440 + * other group is interested.
6441 + */
6442 + if (!is_def_grp) {
6443 + uint32_t vml2flt;
6444 +
6445 + vml2flt = IXGBE_READ_REG(hw,
6446 + IXGBE_VMOLR(rx_group->index));
6447 + vml2flt |= IXGBE_VMOLR_AUPE;
6448 + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(rx_group->index),
6449 + vml2flt);
6450 + rx_group->aupe = B_TRUE;
6451 + }
6452 +
6453 + mutex_exit(&ixgbe->gen_lock);
6454 + return (0);
6455 + }
6456 +
6457 + vlp = ixgbe_find_vlan(rx_group, vid);
6458 + if (vlp != NULL) {
6459 + /* Only the default group supports multiple clients. */
6460 + VERIFY3B(is_def_grp, ==, B_TRUE);
6461 + vlp->ixvl_refs++;
6462 + mutex_exit(&ixgbe->gen_lock);
6463 + return (0);
6464 + }
6465 +
6466 + /*
6467 + * The default group doesn't require a VLVF entry, only a VFTA
6468 + * entry. All traffic passing L2 filtering (MPSAR + VFTA) is
6469 + * delivered to the default group if no other group is
6470 + * interested. The fourth argument, vlvf_bypass, tells the
6471 + * ixgbe common code to avoid using a VLVF slot if one isn't
6472 + * already allocated to this VLAN.
6473 + *
6474 + * This logic is meant to reserve VLVF slots for use by
6475 + * reserved groups: guaranteeing their use of HW filtering.
6476 + */
6477 + ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE, is_def_grp);
6478 +
6479 + if (ret == IXGBE_SUCCESS) {
6480 + vlp = kmem_zalloc(sizeof (ixgbe_vlan_t), KM_SLEEP);
6481 + vlp->ixvl_vid = vid;
6482 + vlp->ixvl_refs = 1;
6483 + list_insert_tail(&rx_group->vlans, vlp);
6484 + mutex_exit(&ixgbe->gen_lock);
6485 + return (0);
6486 + }
6487 +
6488 + /*
6489 + * We should actually never return ENOSPC because we've set
6490 + * things up so that every reserved group is guaranteed to
6491 + * have a VLVF slot.
6492 + */
6493 + if (ret == IXGBE_ERR_PARAM)
6494 + ret = EINVAL;
6495 + else if (ret == IXGBE_ERR_NO_SPACE)
6496 + ret = ENOSPC;
6497 + else
6498 + ret = EIO;
6499 +
6500 + mutex_exit(&ixgbe->gen_lock);
6501 + return (ret);
6502 +}
6503 +
6504 +/*
6505 + * Attempt to remove the VLAN HW filter associated with this group. If
6506 + * we are removing a HW filter for the default group then we know only
6507 + * the VFTA was set (VLVF is reserved for non-default/reserved
6508 + * groups). If the group wishes to stop receiving untagged traffic
6509 + * then clear the AUPE but leave the VLAN filters alone.
6510 + */
6511 +static int
6512 +ixgbe_remvlan(mac_group_driver_t gdriver, uint16_t vid)
6513 +{
6514 + ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)gdriver;
6515 + ixgbe_t *ixgbe = rx_group->ixgbe;
6516 + struct ixgbe_hw *hw = &ixgbe->hw;
6517 + int ret;
6518 + ixgbe_vlan_t *vlp;
6519 + boolean_t is_def_grp;
6520 +
6521 + mutex_enter(&ixgbe->gen_lock);
6522 +
6523 + if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6524 + mutex_exit(&ixgbe->gen_lock);
6525 + return (ECANCELED);
6526 + }
6527 +
6528 + is_def_grp = (rx_group->index == ixgbe->rx_def_group);
6529 +
6530 + /* See the AUPE comment in ixgbe_addvlan(). */
6531 + if (vid == MAC_VLAN_UNTAGGED) {
6532 + if (!is_def_grp) {
6533 + uint32_t vml2flt;
6534 +
6535 + vml2flt = IXGBE_READ_REG(hw,
6536 + IXGBE_VMOLR(rx_group->index));
6537 + vml2flt &= ~IXGBE_VMOLR_AUPE;
6538 + IXGBE_WRITE_REG(hw,
6539 + IXGBE_VMOLR(rx_group->index), vml2flt);
6540 + rx_group->aupe = B_FALSE;
6541 + }
6542 + mutex_exit(&ixgbe->gen_lock);
6543 + return (0);
6544 + }
6545 +
6546 + vlp = ixgbe_find_vlan(rx_group, vid);
6547 +	if (vlp == NULL) {
6548 +		mutex_exit(&ixgbe->gen_lock);
6549 +		return (ENOENT);
6550 +	}
6549 +
6550 + /*
6551 + * See the comment in ixgbe_addvlan() about is_def_grp and
6552 + * vlvf_bypass.
6553 + */
6554 + if (vlp->ixvl_refs == 1) {
6555 + ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_FALSE,
6556 + is_def_grp);
6557 + } else {
6558 + /*
6559 + * Only the default group can have multiple clients.
6560 + * If there is more than one client, leave the
6561 + * VFTA[vid] bit alone.
6562 + */
6563 + VERIFY3B(is_def_grp, ==, B_TRUE);
6564 + VERIFY3U(vlp->ixvl_refs, >, 1);
6565 + vlp->ixvl_refs--;
6566 + mutex_exit(&ixgbe->gen_lock);
6567 + return (0);
6568 + }
6569 +
6570 + if (ret != IXGBE_SUCCESS) {
6571 + mutex_exit(&ixgbe->gen_lock);
6572 + /* IXGBE_ERR_PARAM should be the only possible error here. */
6573 + if (ret == IXGBE_ERR_PARAM)
6574 + return (EINVAL);
6575 + else
6576 + return (EIO);
6577 + }
6578 +
6579 + VERIFY3U(vlp->ixvl_refs, ==, 1);
6580 + vlp->ixvl_refs = 0;
6581 + list_remove(&rx_group->vlans, vlp);
6582 + kmem_free(vlp, sizeof (ixgbe_vlan_t));
6583 +
6584 + /*
6585 + * Calling ixgbe_set_vfta() on a non-default group may have
6586 + * cleared the VFTA[vid] bit even though the default group
6587 + * still has clients using the vid. This happens because the
6588 + * ixgbe common code doesn't ref count the use of VLANs. Check
6589 + * for any use of vid on the default group and make sure the
6590 + * VFTA[vid] bit is set. This operation is idempotent: setting
6591 + * VFTA[vid] to true if already true won't hurt anything.
6592 + */
6593 + if (!is_def_grp) {
6594 + ixgbe_rx_group_t *defgrp;
6595 +
6596 + defgrp = &ixgbe->rx_groups[ixgbe->rx_def_group];
6597 + vlp = ixgbe_find_vlan(defgrp, vid);
6598 + if (vlp != NULL) {
6599 + /* This shouldn't fail, but if it does return EIO. */
6600 + ret = ixgbe_set_vfta(hw, vid, rx_group->index, B_TRUE,
6601 + B_TRUE);
6602 +			if (ret != IXGBE_SUCCESS) {
6603 +				mutex_exit(&ixgbe->gen_lock);
6604 +				return (EIO);
6605 +			}
6604 + }
6605 + }
6606 +
6607 + mutex_exit(&ixgbe->gen_lock);
6608 + return (0);
6609 +}
6275 6610
6276 6611 /*
6277 6612 * Add a mac address.
6278 6613 */
6279 6614 static int
6280 6615 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6281 6616 {
6282 6617 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6283 6618 ixgbe_t *ixgbe = rx_group->ixgbe;
6284 6619 struct ixgbe_hw *hw = &ixgbe->hw;
6285 6620 int slot, i;
6286 6621
6287 6622 mutex_enter(&ixgbe->gen_lock);
6288 6623
6289 6624 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6290 6625 mutex_exit(&ixgbe->gen_lock);
6291 6626 return (ECANCELED);
6292 6627 }
6293 6628
6294 6629 if (ixgbe->unicst_avail == 0) {
6295 6630 /* no slots available */
6296 6631 mutex_exit(&ixgbe->gen_lock);
6297 6632 return (ENOSPC);
6298 6633 }
6299 6634
6300 6635 /*
6301 6636 	 * The first ixgbe->num_rx_groups slots are reserved, one per group.
6302 6637 	 * The remaining slots are shared by all groups. When adding a MAC
6303 6638 	 * address, the group's reserved slot is checked first, then the
6304 6639 	 * shared slots are searched.
6305 6640 */
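	/*
	 * For example, with 4 rx groups and a 128-entry RAR table (the
	 * 82599 size; other MACs may differ), slots 0-3 are the per-group
	 * primaries and slots 4-127 are shared: a second address on group 1
	 * takes the first free shared slot rather than a reserved one.
	 */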
6306 6641 slot = -1;
6307 6642 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
6308 6643 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
6309 6644 if (ixgbe->unicst_addr[i].mac.set == 0) {
6310 6645 slot = i;
6311 6646 break;
6312 6647 }
6313 6648 }
6314 6649 } else {
6315 6650 slot = rx_group->index;
6316 6651 }
6317 6652
6318 6653 if (slot == -1) {
6319 6654 /* no slots available */
6320 6655 mutex_exit(&ixgbe->gen_lock);
6321 6656 return (ENOSPC);
6322 6657 }
6323 6658
6324 6659 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6325 6660 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
6326 6661 rx_group->index, IXGBE_RAH_AV);
6327 6662 ixgbe->unicst_addr[slot].mac.set = 1;
6328 6663 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
6329 6664 ixgbe->unicst_avail--;
6330 6665
6331 6666 mutex_exit(&ixgbe->gen_lock);
6332 6667
6333 6668 return (0);
6334 6669 }
6335 6670
6336 6671 /*
6337 6672 * Remove a mac address.
6338 6673 */
6339 6674 static int
6340 6675 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
6341 6676 {
6342 6677 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6343 6678 ixgbe_t *ixgbe = rx_group->ixgbe;
6344 6679 struct ixgbe_hw *hw = &ixgbe->hw;
6345 6680 int slot;
6346 6681
6347 6682 mutex_enter(&ixgbe->gen_lock);
6348 6683
6349 6684 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6350 6685 mutex_exit(&ixgbe->gen_lock);
6351 6686 return (ECANCELED);
6352 6687 }
6353 6688
6354 6689 slot = ixgbe_unicst_find(ixgbe, mac_addr);
6355 6690 if (slot == -1) {
6356 6691 mutex_exit(&ixgbe->gen_lock);
6357 6692 return (EINVAL);
6358 6693 }
6359 6694
6360 6695 if (ixgbe->unicst_addr[slot].mac.set == 0) {
6361 6696 mutex_exit(&ixgbe->gen_lock);
6362 6697 return (EINVAL);
6363 6698 }
6364 6699
6365 6700 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6366 6701 (void) ixgbe_clear_rar(hw, slot);
6367 6702 ixgbe->unicst_addr[slot].mac.set = 0;
6368 6703 ixgbe->unicst_avail++;
6369 6704
6370 6705 mutex_exit(&ixgbe->gen_lock);
6371 6706
6372 6707 return (0);
6373 6708 }