diff -pruN ./drivers/net/e1000.lkm81/e1000_compat.h ./drivers/net/e1000/e1000_compat.h --- ./drivers/net/e1000.lkm81/e1000_compat.h 1970-01-01 03:00:00.000000000 +0300 +++ ./drivers/net/e1000/e1000_compat.h 2006-04-06 19:05:40.000000000 +0400 @@ -0,0 +1,18 @@ +#ifndef __E1000_COMPAT_H__ +#define __E1000_COMPAT_H__ + +#define skb_header_cloned(skb) 0 + +typedef u32 pm_message_t; + +typedef int __bitwise pci_power_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) + +#define pci_choose_state(pdev, state) (state) + +#endif /* __E1000_COMPAT_H__ */ diff -pruN ./drivers/net/e1000.lkm81/e1000_ethtool.c ./drivers/net/e1000/e1000_ethtool.c --- ./drivers/net/e1000.lkm81/e1000_ethtool.c 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000_ethtool.c 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gs { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) }, { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) }, { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) }, { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) }, @@ -88,9 +89,9 @@ static const struct e1000_stats e1000_gs { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, - { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, - { "rx_long_byte_count", E1000_STAT(stats.gorcl) } + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) } }; #define E1000_STATS_LEN \ sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) @@ -104,7 +105,7 @@ static const char e1000_gstrings_test[][ static int e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if(hw->media_type == e1000_media_type_copper) { @@ -140,9 +141,9 @@ e1000_get_settings(struct net_device *ne SUPPORTED_FIBRE | SUPPORTED_Autoneg); - ecmd->advertising = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE | - SUPPORTED_Autoneg); + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); ecmd->port = PORT_FIBRE; @@ -170,20 +171,32 @@ e1000_get_settings(struct net_device *ne ecmd->duplex = -1; } - ecmd->autoneg = (hw->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; } static int e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; if(ecmd->autoneg == AUTONEG_ENABLE) { hw->autoneg = 1; - hw->autoneg_advertised = 0x002F; - ecmd->advertising = 0x002F; + if(hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full| + ADVERTISED_Autoneg | + ADVERTISED_TP; + ecmd->advertising = hw->autoneg_advertised; } else if(e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) return -EINVAL; @@ -192,6 +205,7 @@ e1000_set_settings(struct net_device *ne if(netif_running(adapter->netdev)) { e1000_down(adapter); + e1000_reset(adapter); e1000_up(adapter); } else e1000_reset(adapter); @@ -199,12 +213,13 @@ e1000_set_settings(struct net_device *ne return 0; } -static void +static void e1000_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + pause->autoneg = (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -218,11 +233,11 @@ e1000_get_pauseparam(struct net_device * } } -static int +static int e1000_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pause) + struct ethtool_pauseparam *pause) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; adapter->fc_autoneg = pause->autoneg; @@ -246,7 +261,8 @@ e1000_set_pauseparam(struct net_device * e1000_reset(adapter); } else - return e1000_force_mac_fc(hw); + return ((hw->media_type == e1000_media_type_fiber) ? + e1000_setup_link(hw) : e1000_force_mac_fc(hw)); return 0; } @@ -254,14 +270,14 @@ e1000_set_pauseparam(struct net_device * static uint32_t e1000_get_rx_csum(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); return adapter->rx_csum; } static int e1000_set_rx_csum(struct net_device *netdev, uint32_t data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); adapter->rx_csum = data; if(netif_running(netdev)) { @@ -271,7 +287,7 @@ e1000_set_rx_csum(struct net_device *net e1000_reset(adapter); return 0; } - + static uint32_t e1000_get_tx_csum(struct net_device *netdev) { @@ -281,7 +297,7 @@ e1000_get_tx_csum(struct net_device *net static int e1000_set_tx_csum(struct net_device *netdev, uint32_t data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); if(adapter->hw.mac_type < e1000_82543) { if (!data) @@ -301,8 +317,8 @@ e1000_set_tx_csum(struct net_device *net static int e1000_set_tso(struct net_device *netdev, uint32_t data) { - struct e1000_adapter *adapter = netdev->priv; - if ((adapter->hw.mac_type < e1000_82544) || + struct e1000_adapter *adapter = netdev_priv(netdev); + if((adapter->hw.mac_type < e1000_82544) || (adapter->hw.mac_type == e1000_82547)) return data ? 
-EINVAL : 0; @@ -317,14 +333,14 @@ e1000_set_tso(struct net_device *netdev, static uint32_t e1000_get_msglevel(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); return adapter->msg_enable; } static void e1000_set_msglevel(struct net_device *netdev, uint32_t data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); adapter->msg_enable = data; } @@ -337,9 +353,9 @@ e1000_get_regs_len(struct net_device *ne static void e1000_get_regs(struct net_device *netdev, - struct ethtool_regs *regs, void *p) + struct ethtool_regs *regs, void *p) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; uint32_t *regs_buff = p; uint16_t phy_data; @@ -418,12 +434,16 @@ e1000_get_regs(struct net_device *netdev e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if(hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = E1000_READ_REG(hw, MANC); + } } static int e1000_get_eeprom_len(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); return adapter->hw.eeprom.word_size * 2; } @@ -431,14 +451,14 @@ static int e1000_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, uint8_t *bytes) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; uint16_t *eeprom_buff; int first_word, last_word; int ret_val = 0; uint16_t i; - if(eeprom->len == 0) + if(eeprom->len == 0) return -EINVAL; eeprom->magic = hw->vendor_id | (hw->device_id << 16); @@ -446,9 +466,9 @@ e1000_get_eeprom(struct net_device *netd first_word = eeprom->offset >> 1; last_word = (eeprom->offset + eeprom->len - 1) >> 1; - eeprom_buff = kmalloc(sizeof(uint16_t) * + eeprom_buff = kmalloc(sizeof(uint16_t) * (last_word - first_word + 1), GFP_KERNEL); - if (!eeprom_buff) + if(!eeprom_buff) return -ENOMEM; if(hw->eeprom.type == e1000_eeprom_spi) @@ -466,9 +486,8 @@ e1000_get_eeprom(struct net_device *netd for (i = 0; i < last_word - first_word + 1; i++) le16_to_cpus(&eeprom_buff[i]); - - memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset%2), - eeprom->len); + memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); kfree(eeprom_buff); return ret_val; @@ -478,7 +497,7 @@ static int e1000_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, uint8_t *bytes) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; uint16_t *eeprom_buff; void *ptr; @@ -520,6 +539,7 @@ e1000_set_eeprom(struct net_device *netd le16_to_cpus(&eeprom_buff[i]); memcpy(ptr, bytes, eeprom->len); + for (i = 0; i < last_word - first_word + 1; i++) eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); @@ -538,7 +558,7 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); strncpy(drvinfo->driver, e1000_driver_name, 32); strncpy(drvinfo->version, e1000_driver_version, 32); @@ -554,7 +574,7 @@ static void e1000_get_ringparam(struct net_device *netdev, struct ethtool_ringparam 
*ring) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); e1000_mac_type mac_type = adapter->hw.mac_type; struct e1000_desc_ring *txdr = &adapter->tx_ring; struct e1000_desc_ring *rxdr = &adapter->rx_ring; @@ -575,17 +595,19 @@ static int e1000_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { - int err; - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); e1000_mac_type mac_type = adapter->hw.mac_type; struct e1000_desc_ring *txdr = &adapter->tx_ring; struct e1000_desc_ring *rxdr = &adapter->rx_ring; - struct e1000_desc_ring tx_old, tx_new; - struct e1000_desc_ring rx_old, rx_new; + struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new; + int err; tx_old = adapter->tx_ring; rx_old = adapter->rx_ring; - + + if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + if(netif_running(adapter->netdev)) e1000_down(adapter); @@ -600,15 +622,15 @@ e1000_set_ringparam(struct net_device *n E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); if(netif_running(adapter->netdev)) { - /* try to get new resources before deleting old */ + /* Try to get new resources before deleting old */ if((err = e1000_setup_rx_resources(adapter))) goto err_setup_rx; if((err = e1000_setup_tx_resources(adapter))) goto err_setup_tx; /* save the new, restore the old in order to free it, - * then restore the new back again */ - + * then restore the new back again */ + rx_new = adapter->rx_ring; tx_new = adapter->tx_ring; adapter->rx_ring = rx_old; @@ -620,6 +642,7 @@ e1000_set_ringparam(struct net_device *n if((err = e1000_up(adapter))) return err; } + return 0; err_setup_tx: e1000_free_rx_resources(adapter); @@ -630,7 +653,6 @@ err_setup_rx: return err; } - #define REG_PATTERN_TEST(R, M, W) \ { \ uint32_t pat, value; \ @@ -640,6 +662,9 @@ err_setup_rx: E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \ value = E1000_READ_REG(&adapter->hw, R); \ if(value != (test[pat] & W & M)) { \ + DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \ + "0x%08X expected 0x%08X\n", \ + E1000_##R, value, (test[pat] & W & M)); \ *data = (adapter->hw.mac_type < e1000_82543) ? \ E1000_82542_##R : E1000_##R; \ return 1; \ @@ -652,7 +677,9 @@ err_setup_rx: uint32_t value; \ E1000_WRITE_REG(&adapter->hw, R, W & M); \ value = E1000_READ_REG(&adapter->hw, R); \ - if ((W & M) != (value & M)) { \ + if((W & M) != (value & M)) { \ + DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\ + "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \ *data = (adapter->hw.mac_type < e1000_82543) ? \ E1000_82542_##R : E1000_##R; \ return 1; \ @@ -662,18 +689,38 @@ err_setup_rx: static int e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) { - uint32_t value; - uint32_t i; + uint32_t value, before, after; + uint32_t i, toggle; /* The status register is Read Only, so a write should fail. * Some bits that get toggled are ignored. 
*/ - value = (E1000_READ_REG(&adapter->hw, STATUS) & (0xFFFFF833)); - E1000_WRITE_REG(&adapter->hw, STATUS, (0xFFFFFFFF)); - if(value != (E1000_READ_REG(&adapter->hw, STATUS) & (0xFFFFF833))) { + switch (adapter->hw.mac_type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + toggle = 0x7FFFF3FF; + break; + case e1000_82573: + toggle = 0x7FFFF033; + break; + default: + toggle = 0xFFFFF833; + break; + } + + before = E1000_READ_REG(&adapter->hw, STATUS); + value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle); + E1000_WRITE_REG(&adapter->hw, STATUS, toggle); + after = E1000_READ_REG(&adapter->hw, STATUS) & toggle; + if(value != after) { + DPRINTK(DRV, ERR, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); *data = 1; return 1; } + /* restore previous status */ + E1000_WRITE_REG(&adapter->hw, STATUS, before); REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); @@ -755,7 +802,7 @@ e1000_test_intr(int irq, struct pt_regs *regs) { struct net_device *netdev = (struct net_device *) data; - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR); @@ -766,13 +813,16 @@ static int e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) { struct net_device *netdev = adapter->netdev; - uint32_t icr, mask, i=0; + uint32_t mask, i=0, shared_int = TRUE; + uint32_t irq = adapter->pdev->irq; *data = 0; /* Hook up test interrupt handler just for this test */ - if(request_irq(adapter->pdev->irq, &e1000_test_intr, SA_SHIRQ, - netdev->name, netdev)) { + if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) { + shared_int = FALSE; + } else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ, + netdev->name, netdev)){ *data = 1; return -1; } @@ -781,41 +831,28 @@ e1000_intr_test(struct e1000_adapter *ad E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF); msec_delay(10); - /* Interrupts are disabled, so read interrupt cause - * register (icr) twice to verify that there are no interrupts - * pending. icr is clear on read. - */ - icr = E1000_READ_REG(&adapter->hw, ICR); - icr = E1000_READ_REG(&adapter->hw, ICR); - - if(icr != 0) { - /* if icr is non-zero, there is no point - * running other interrupt tests. - */ - *data = 2; - i = 10; - } - /* Test each interrupt */ for(; i < 10; i++) { /* Interrupt to test */ mask = 1 << i; - /* Disable the interrupt to be reported in - * the cause register and then force the same - * interrupt and see if one gets posted. If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - E1000_WRITE_REG(&adapter->hw, IMC, mask); - E1000_WRITE_REG(&adapter->hw, ICS, mask); - msec_delay(10); - - if(adapter->test_icr & mask) { - *data = 3; - break; + if(!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + E1000_WRITE_REG(&adapter->hw, IMC, mask); + E1000_WRITE_REG(&adapter->hw, ICS, mask); + msec_delay(10); + + if(adapter->test_icr & mask) { + *data = 3; + break; + } } /* Enable the interrupt to be reported in @@ -834,20 +871,22 @@ e1000_intr_test(struct e1000_adapter *ad break; } - /* Disable the other interrupts to be reported in - * the cause register and then force the other - * interrupts and see if any get posted. 
If - * an interrupt was posted to the bus, the - * test failed. - */ - adapter->test_icr = 0; - E1000_WRITE_REG(&adapter->hw, IMC, ~mask); - E1000_WRITE_REG(&adapter->hw, ICS, ~mask); - msec_delay(10); + if(!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF); + E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF); + msec_delay(10); - if(adapter->test_icr) { - *data = 5; - break; + if(adapter->test_icr) { + *data = 5; + break; + } } } @@ -856,7 +895,7 @@ e1000_intr_test(struct e1000_adapter *ad msec_delay(10); /* Unhook test interrupt handler */ - free_irq(adapter->pdev->irq, netdev); + free_irq(irq, netdev); return *data; } @@ -915,7 +954,8 @@ e1000_setup_desc_rings(struct e1000_adap /* Setup Tx descriptor ring and Tx buffers */ - txdr->count = 80; + if(!txdr->count) + txdr->count = E1000_DEFAULT_TXD; size = txdr->count * sizeof(struct e1000_buffer); if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) { @@ -970,7 +1010,8 @@ e1000_setup_desc_rings(struct e1000_adap /* Setup Rx descriptor ring and Rx buffers */ - rxdr->count = 80; + if(!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; size = rxdr->count * sizeof(struct e1000_buffer); if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) { @@ -1005,7 +1046,7 @@ e1000_setup_desc_rings(struct e1000_adap struct sk_buff *skb; if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, - GFP_KERNEL))) { + GFP_KERNEL))) { ret_val = 6; goto err_nomem; } @@ -1021,7 +1062,7 @@ e1000_setup_desc_rings(struct e1000_adap return 0; - err_nomem: +err_nomem: e1000_free_desc_rings(adapter); return ret_val; } @@ -1209,6 +1250,9 @@ e1000_set_phy_loopback(struct e1000_adap case e1000_82541_rev_2: case e1000_82547: case e1000_82547_rev_2: + case e1000_82571: + case e1000_82572: + case e1000_82573: return e1000_integrated_phy_loopback(adapter); break; @@ -1306,24 +1350,63 @@ e1000_run_loopback_test(struct e1000_ada struct e1000_desc_ring *txdr = &adapter->test_tx_ring; struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; - int i; + int i, j, k, l, lc, good_cnt, ret_val=0; + unsigned long time; E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1); - for(i = 0; i < 64; i++) { - e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024); - pci_dma_sync_single(pdev, txdr->buffer_info[i].dma, - txdr->buffer_info[i].length, - PCI_DMA_TODEVICE); - } - E1000_WRITE_REG(&adapter->hw, TDT, i); - - msec_delay(200); + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ - pci_dma_sync_single(pdev, rxdr->buffer_info[0].dma, - rxdr->buffer_info[0].length, PCI_DMA_FROMDEVICE); + if(rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; - return e1000_check_lbtest_frame(rxdr->buffer_info[0].skb, 1024); + k = l = 0; + for(j = 0; j <= lc; j++) { /* loop count loop */ + for(i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + pci_dma_sync_single_for_device(pdev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + PCI_DMA_TODEVICE); + if(unlikely(++k == txdr->count)) k = 0; + } + E1000_WRITE_REG(&adapter->hw, TDT, k); + msec_delay(200); + time = jiffies; /* 
set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + pci_dma_sync_single_for_cpu(pdev, + rxdr->buffer_info[l].dma, + rxdr->buffer_info[l].length, + PCI_DMA_FROMDEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].skb, + 1024); + if(!ret_val) + good_cnt++; + if(unlikely(++l == rxdr->count)) l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while (good_cnt < 64 && jiffies < (time + 20)); + if(good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if(jiffies >= (time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; } static int @@ -1342,10 +1425,28 @@ static int e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) { *data = 0; - e1000_check_for_link(&adapter->hw); + if (adapter->hw.media_type == e1000_media_type_internal_serdes) { + int i = 0; + adapter->hw.serdes_link_down = TRUE; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes */ + do { + e1000_check_for_link(&adapter->hw); + if (adapter->hw.serdes_link_down == FALSE) + return *data; + msec_delay(20); + } while (i++ < 3750); - if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { *data = 1; + } else { + e1000_check_for_link(&adapter->hw); + if(adapter->hw.autoneg) /* if auto_neg is set wait for it */ + msec_delay(4000); + + if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { + *data = 1; + } } return *data; } @@ -1357,10 +1458,10 @@ e1000_diag_test_count(struct net_device } static void -e1000_diag_test(struct net_device *netdev, +e1000_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, uint64_t *data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); boolean_t if_running = netif_running(netdev); if(eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -1368,7 +1469,7 @@ e1000_diag_test(struct net_device *netde /* save speed, duplex, autoneg settings */ uint16_t autoneg_advertised = adapter->hw.autoneg_advertised; - uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex; + uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex; uint8_t autoneg = adapter->hw.autoneg; /* Link test performed before hardware reset so autoneg doesn't @@ -1396,10 +1497,11 @@ e1000_diag_test(struct net_device *netde if(e1000_loopback_test(adapter, &data[3])) eth_test->flags |= ETH_TEST_FL_FAILED; - /* restore Autoneg/speed/duplex settings */ + /* restore speed, duplex, autoneg settings */ adapter->hw.autoneg_advertised = autoneg_advertised; - adapter->hw.forced_speed_duplex = forced_speed_duplex; - adapter->hw.autoneg = autoneg; + adapter->hw.forced_speed_duplex = forced_speed_duplex; + adapter->hw.autoneg = autoneg; + e1000_reset(adapter); if(if_running) e1000_up(adapter); @@ -1419,7 +1521,7 @@ e1000_diag_test(struct net_device *netde static void e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; switch(adapter->hw.device_id) { @@ -1427,6 +1529,9 @@ e1000_get_wol(struct net_device *netdev, case E1000_DEV_ID_82543GC_FIBER: case E1000_DEV_ID_82543GC_COPPER: case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case 
E1000_DEV_ID_82545EM_COPPER: wol->supported = 0; wol->wolopts = 0; return; @@ -1461,7 +1566,7 @@ e1000_get_wol(struct net_device *netdev, static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; switch(adapter->hw.device_id) { @@ -1469,6 +1574,9 @@ e1000_set_wol(struct net_device *netdev, case E1000_DEV_ID_82543GC_FIBER: case E1000_DEV_ID_82543GC_COPPER: case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: return wol->wolopts ? -EOPNOTSUPP : 0; case E1000_DEV_ID_82546EB_FIBER: @@ -1519,24 +1627,31 @@ e1000_led_blink_callback(unsigned long d static int e1000_phys_id(struct net_device *netdev, uint32_t data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); - if(!adapter->blink_timer.function) { - init_timer(&adapter->blink_timer); - adapter->blink_timer.function = e1000_led_blink_callback; - adapter->blink_timer.data = (unsigned long) adapter; + if(adapter->hw.mac_type < e1000_82571) { + if(!adapter->blink_timer.function) { + init_timer(&adapter->blink_timer); + adapter->blink_timer.function = e1000_led_blink_callback; + adapter->blink_timer.data = (unsigned long) adapter; + } + e1000_setup_led(&adapter->hw); + mod_timer(&adapter->blink_timer, jiffies); + msleep_interruptible(data * 1000); + del_timer_sync(&adapter->blink_timer); + } + else { + E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE | + E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) | + (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT))); + msleep_interruptible(data * 1000); } - e1000_setup_led(&adapter->hw); - mod_timer(&adapter->blink_timer, jiffies); - - set_current_state(TASK_INTERRUPTIBLE); - - schedule_timeout(data * HZ); - del_timer_sync(&adapter->blink_timer); e1000_led_off(&adapter->hw); clear_bit(E1000_LED_ON, &adapter->led_status); e1000_cleanup_led(&adapter->hw); @@ -1547,7 +1662,7 @@ e1000_phys_id(struct net_device *netdev, static int e1000_nway_reset(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); if(netif_running(netdev)) { e1000_down(adapter); e1000_up(adapter); @@ -1565,14 +1680,14 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); int i; e1000_update_stats(adapter); for(i = 0; i < E1000_STATS_LEN; i++) { char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; - data[i] = (e1000_gstrings_stats[i].sizeof_stat == sizeof(uint64_t)) - ? *(uint64_t *)p : *(uint32_t *)p; + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(uint64_t)) ? 
*(uint64_t *)p : *(uint32_t *)p; } } @@ -1633,7 +1748,7 @@ struct ethtool_ops e1000_ethtool_ops = { .get_ethtool_stats = e1000_get_ethtool_stats, }; -void set_ethtool_ops(struct net_device *netdev) +void e1000_set_ethtool_ops(struct net_device *netdev) { SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); } diff -pruN ./drivers/net/e1000.lkm81/e1000.h ./drivers/net/e1000/e1000.h --- ./drivers/net/e1000.lkm81/e1000.h 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000.h 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -49,11 +49,12 @@ #include #include #include +#include #include #include #include #include -#include +#include #include #include #include @@ -71,15 +72,17 @@ #include #include #include -#include #define BAR_0 0 #define BAR_1 1 #define BAR_5 5 +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} struct e1000_adapter; +#include "e1000_compat.h" #include "e1000_hw.h" #ifdef DBG @@ -98,17 +101,20 @@ struct e1000_adapter; #define E1000_MAX_INTR 10 -/* How many descriptors for TX and RX ? */ +/* TX/RX descriptor defines */ #define E1000_DEFAULT_TXD 256 #define E1000_MAX_TXD 256 #define E1000_MIN_TXD 80 #define E1000_MAX_82544_TXD 4096 + #define E1000_DEFAULT_RXD 256 #define E1000_MAX_RXD 256 #define E1000_MIN_RXD 80 #define E1000_MAX_82544_RXD 4096 /* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ #define E1000_RXBUFFER_2048 2048 #define E1000_RXBUFFER_4096 4096 #define E1000_RXBUFFER_8192 8192 @@ -123,28 +129,30 @@ struct e1000_adapter; #define E1000_TX_HEAD_ADDR_SHIFT 7 #define E1000_PBA_TX_MASK 0xFFFF0000 -/* Flow Control High-Watermark: 5688 bytes below Rx FIFO size */ -#define E1000_FC_HIGH_DIFF 0x1638 - -/* Flow Control Low-Watermark: 5696 bytes below Rx FIFO size */ -#define E1000_FC_LOW_DIFF 0x1640 +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ -/* Flow Control Pause Time: 858 usec */ -#define E1000_FC_PAUSE_TIME 0x0680 +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ /* How many Tx Descriptors do we need to call netif_wake_queue ? */ #define E1000_TX_QUEUE_WAKE 16 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ -#define AUTO_ALL_MODES 0 -#define E1000_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 #ifndef E1000_MASTER_SLAVE /* Switch to override PHY master/slave setting */ #define E1000_MASTER_SLAVE e1000_ms_hw_default #endif +#define E1000_MNG_VLAN_NONE -1 +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1 + /* only works for sizes that are powers of 2 */ #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1))) @@ -152,12 +160,15 @@ struct e1000_adapter; * so a DMA handle can be stored along with the buffer */ struct e1000_buffer { struct sk_buff *skb; - uint64_t dma; - unsigned long length; + dma_addr_t dma; unsigned long time_stamp; - unsigned int next_to_watch; + uint16_t length; + uint16_t next_to_watch; }; +struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; }; +struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; }; + struct e1000_desc_ring { /* pointer to the descriptor ring memory */ void *desc; @@ -173,12 +184,19 @@ struct e1000_desc_ring { unsigned int next_to_clean; /* array of buffer information structs */ struct e1000_buffer *buffer_info; + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; }; #define E1000_DESC_UNUSED(R) \ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) @@ -191,6 +209,7 @@ struct e1000_adapter { struct timer_list watchdog_timer; struct timer_list phy_info_timer; struct vlan_group *vlgrp; + uint16_t mng_vlan_id; uint32_t bd_number; uint32_t rx_buffer_len; uint32_t part_num; @@ -202,13 +221,14 @@ struct e1000_adapter { spinlock_t stats_lock; atomic_t irq_sem; struct work_struct tx_timeout_task; - uint8_t fc_autoneg; + uint8_t fc_autoneg; struct timer_list blink_timer; unsigned long led_status; /* TX */ struct e1000_desc_ring tx_ring; + struct e1000_buffer previous_buffer_info; spinlock_t tx_lock; uint32_t txd_cmd; uint32_t tx_int_delay; @@ -222,16 +242,26 @@ struct e1000_adapter { uint32_t tx_fifo_size; atomic_t tx_fifo_stall; boolean_t pcix_82544; + boolean_t detect_tx_hung; /* RX */ +#ifdef CONFIG_E1000_NAPI + boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done, + int work_to_do); +#else + boolean_t (*clean_rx) (struct e1000_adapter *adapter); +#endif + void (*alloc_rx_buf) (struct e1000_adapter *adapter); struct e1000_desc_ring rx_ring; uint64_t hw_csum_err; uint64_t hw_csum_good; uint32_t rx_int_delay; uint32_t rx_abs_int_delay; boolean_t rx_csum; + boolean_t rx_ps; uint32_t gorcl; uint64_t gorcl_old; + uint16_t rx_ps_bsize0; /* Interrupt Throttle Rate */ uint32_t itr; @@ -254,5 +284,8 @@ struct e1000_adapter { uint32_t pci_state[16]; int msg_enable; +#ifdef CONFIG_PCI_MSI + boolean_t have_msi; +#endif }; #endif /* _E1000_H_ */ diff -pruN ./drivers/net/e1000.lkm81/e1000_hw.c ./drivers/net/e1000/e1000_hw.c --- ./drivers/net/e1000.lkm81/e1000_hw.c 2006-04-06 19:04:00.000000000 +0400 +++ 
./drivers/net/e1000/e1000_hw.c 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -63,9 +63,11 @@ static uint16_t e1000_shift_in_ee_bits(s static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); static void e1000_release_eeprom(struct e1000_hw *hw); static void e1000_standby_eeprom(struct e1000_hw *hw); -static int32_t e1000_id_led_init(struct e1000_hw * hw); static int32_t e1000_set_vco_speed(struct e1000_hw *hw); +static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); static int32_t e1000_set_phy_mode(struct e1000_hw *hw); +static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); +static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); /* IGP cable length table */ static const @@ -79,6 +81,17 @@ uint16_t e1000_igp_cable_length_table[IG 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; +static const +uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; + /****************************************************************************** * Set the phy type member in the hw struct. @@ -90,10 +103,14 @@ e1000_set_phy_type(struct e1000_hw *hw) { DEBUGFUNC("e1000_set_phy_type"); + if(hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + switch(hw->phy_id) { case M88E1000_E_PHY_ID: case M88E1000_I_PHY_ID: case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: hw->phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: @@ -122,16 +139,30 @@ e1000_set_phy_type(struct e1000_hw *hw) static void e1000_phy_init_script(struct e1000_hw *hw) { + uint32_t ret_val; + uint16_t phy_saved_data; + DEBUGFUNC("e1000_phy_init_script"); if(hw->phy_init_script) { msec_delay(20); + /* Save off the current value of register 0x2F5B to be restored at + * the end of this routine. 
*/ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + msec_delay(20); + e1000_write_phy_reg(hw,0x0000,0x0140); msec_delay(5); - if(hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547) { + switch(hw->mac_type) { + case e1000_82541: + case e1000_82547: e1000_write_phy_reg(hw, 0x1F95, 0x0001); e1000_write_phy_reg(hw, 0x1F71, 0xBD21); @@ -149,12 +180,23 @@ e1000_phy_init_script(struct e1000_hw *h e1000_write_phy_reg(hw, 0x1F96, 0x003F); e1000_write_phy_reg(hw, 0x2010, 0x0008); - } else { + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; } e1000_write_phy_reg(hw, 0x0000, 0x3300); + msec_delay(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + if(hw->mac_type == e1000_82547) { uint16_t fused, fine, coarse; @@ -243,6 +285,8 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82546GB_COPPER: case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: hw->mac_type = e1000_82546_rev_3; break; case E1000_DEV_ID_82541EI: @@ -251,6 +295,7 @@ e1000_set_mac_type(struct e1000_hw *hw) break; case E1000_DEV_ID_82541ER: case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: case E1000_DEV_ID_82541GI_MOBILE: hw->mac_type = e1000_82541_rev_2; break; @@ -260,12 +305,32 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82547GI: hw->mac_type = e1000_82547_rev_2; break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + hw->mac_type = e1000_82571; + break; + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + hw->mac_type = e1000_82572; + break; + case E1000_DEV_ID_82573E: + case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: + hw->mac_type = e1000_82573; + break; default: /* Should never have loaded on this device */ return -E1000_ERR_MAC_TYPE; } switch(hw->mac_type) { + case e1000_82571: + case e1000_82572: + case e1000_82573: + hw->eeprom_semaphore_present = TRUE; + /* fall through */ case e1000_82541: case e1000_82547: case e1000_82541_rev_2: @@ -299,21 +364,32 @@ e1000_set_media_type(struct e1000_hw *hw switch (hw->device_id) { case E1000_DEV_ID_82545GM_SERDES: case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: hw->media_type = e1000_media_type_internal_serdes; break; default: - if(hw->mac_type >= e1000_82543) { + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_82573: + /* The STATUS_TBIMODE bit is reserved or reused for the this + * device. 
+ */ + hw->media_type = e1000_media_type_copper; + break; + default: status = E1000_READ_REG(hw, STATUS); - if(status & E1000_STATUS_TBIMODE) { + if (status & E1000_STATUS_TBIMODE) { hw->media_type = e1000_media_type_fiber; /* tbi_compatibility not valid on fiber */ hw->tbi_compatibility_en = FALSE; } else { hw->media_type = e1000_media_type_copper; } - } else { - /* This is an 82542 (fiber only) */ - hw->media_type = e1000_media_type_fiber; + break; } } } @@ -331,6 +407,9 @@ e1000_reset_hw(struct e1000_hw *hw) uint32_t icr; uint32_t manc; uint32_t led_ctrl; + uint32_t timeout; + uint32_t extcnf_ctrl; + int32_t ret_val; DEBUGFUNC("e1000_reset_hw"); @@ -340,6 +419,15 @@ e1000_reset_hw(struct e1000_hw *hw) e1000_pci_clear_mwi(hw); } + if(hw->bus_type == e1000_bus_type_pci_express) { + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + } + /* Clear interrupt mask to stop board from generating interrupts */ DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, IMC, 0xffffffff); @@ -364,10 +452,32 @@ e1000_reset_hw(struct e1000_hw *hw) /* Must reset the PHY before resetting the MAC */ if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); msec_delay(5); } + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. */ + if(hw->mac_type == e1000_82573) { + timeout = 10; + + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + do { + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); + + if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + else + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + + msec_delay(2); + timeout--; + } while(timeout); + } + /* Issue a global reset to the MAC. This will reset the chip's * transmit, receive, DMA, and link units. It will not effect * the current PCI configuration. The global reset bit is self- @@ -421,6 +531,20 @@ e1000_reset_hw(struct e1000_hw *hw) /* Wait for EEPROM reload */ msec_delay(20); break; + case e1000_82573: + udelay(10); + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + /* fall through */ + case e1000_82571: + case e1000_82572: + ret_val = e1000_get_auto_rd_done(hw); + if(ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + break; default: /* Wait for EEPROM reload (it happens automatically) */ msec_delay(5); @@ -428,7 +552,7 @@ e1000_reset_hw(struct e1000_hw *hw) } /* Disable HW ARPs on ASF enabled adapters */ - if(hw->mac_type >= e1000_82540) { + if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { manc = E1000_READ_REG(hw, MANC); manc &= ~(E1000_MANC_ARP_EN); E1000_WRITE_REG(hw, MANC, manc); @@ -481,6 +605,8 @@ e1000_init_hw(struct e1000_hw *hw) uint16_t pcix_stat_hi_word; uint16_t cmd_mmrbc; uint16_t stat_mmrbc; + uint32_t mta_size; + DEBUGFUNC("e1000_init_hw"); /* Initialize Identification LED */ @@ -495,8 +621,8 @@ e1000_init_hw(struct e1000_hw *hw) /* Disabling VLAN filtering. 
*/ DEBUGOUT("Initializing the IEEE VLAN\n"); - E1000_WRITE_REG(hw, VET, 0); - + if (hw->mac_type < e1000_82545_rev_3) + E1000_WRITE_REG(hw, VET, 0); e1000_clear_vfta(hw); /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ @@ -524,14 +650,16 @@ e1000_init_hw(struct e1000_hw *hw) /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); - for(i = 0; i < E1000_MC_TBL_SIZE; i++) + mta_size = E1000_MC_TBL_SIZE; + for(i = 0; i < mta_size; i++) E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); /* Set the PCI priority bit correctly in the CTRL register. This * determines if the adapter gives priority to receives, or if it - * gives equal priority to transmits and receives. + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. */ - if(hw->dma_fairness) { + if(hw->dma_fairness && hw->mac_type <= e1000_82543) { ctrl = E1000_READ_REG(hw, CTRL); E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); } @@ -569,9 +697,44 @@ e1000_init_hw(struct e1000_hw *hw) if(hw->mac_type > e1000_82544) { ctrl = E1000_READ_REG(hw, TXDCTL); ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; + switch (hw->mac_type) { + default: + break; + case e1000_82571: + case e1000_82572: + ctrl |= (1 << 22); + case e1000_82573: + ctrl |= E1000_TXDCTL_COUNT_DESC; + break; + } E1000_WRITE_REG(hw, TXDCTL, ctrl); } + if (hw->mac_type == e1000_82573) { + e1000_enable_tx_pkt_filtering(hw); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82571: + case e1000_82572: + ctrl = E1000_READ_REG(hw, TXDCTL1); + ctrl &= ~E1000_TXDCTL_WTHRESH; + ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB; + ctrl |= (1 << 22); + E1000_WRITE_REG(hw, TXDCTL1, ctrl); + break; + } + + + + if (hw->mac_type == e1000_82573) { + uint32_t gcr = E1000_READ_REG(hw, GCR); + gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, GCR, gcr); + } + /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there @@ -650,7 +813,7 @@ e1000_setup_link(struct e1000_hw *hw) * control setting, then the variable hw->fc will * be initialized based on a value in the EEPROM. */ - if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data) < 0) { + if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data)) { DEBUGOUT("EEPROM Read Error\n"); return -E1000_ERR_EEPROM; } @@ -707,6 +870,7 @@ e1000_setup_link(struct e1000_hw *hw) E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); /* Set the flow control receive threshold registers. Normally, @@ -754,6 +918,14 @@ e1000_setup_fiber_serdes_link(struct e10 DEBUGFUNC("e1000_setup_fiber_serdes_link"); + /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists + * until explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. + */ + if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) + E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK); + /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be * set when the optics detect a signal. On older adapters, it will be * cleared when there is a signal. This applies to fiber media only. 
@@ -877,20 +1049,18 @@ e1000_setup_fiber_serdes_link(struct e10 } /****************************************************************************** -* Detects which PHY is present and the speed and duplex +* Make sure we have a valid PHY and change PHY mode before link setup. * * hw - Struct containing variables accessed by shared code ******************************************************************************/ static int32_t -e1000_setup_copper_link(struct e1000_hw *hw) +e1000_copper_link_preconfig(struct e1000_hw *hw) { uint32_t ctrl; - uint32_t led_ctrl; int32_t ret_val; - uint16_t i; uint16_t phy_data; - DEBUGFUNC("e1000_setup_copper_link"); + DEBUGFUNC("e1000_copper_link_preconfig"); ctrl = E1000_READ_REG(hw, CTRL); /* With 82543, we need to force speed and duplex on the MAC equal to what @@ -904,7 +1074,9 @@ e1000_setup_copper_link(struct e1000_hw } else { ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); E1000_WRITE_REG(hw, CTRL, ctrl); - e1000_phy_hw_reset(hw); + ret_val = e1000_phy_hw_reset(hw); + if(ret_val) + return ret_val; } /* Make sure we have a valid PHY */ @@ -920,7 +1092,8 @@ e1000_setup_copper_link(struct e1000_hw if(ret_val) return ret_val; - if(hw->mac_type == e1000_82545_rev_3) { + if((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); phy_data |= 0x00000008; ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); @@ -931,365 +1104,463 @@ e1000_setup_copper_link(struct e1000_hw hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) hw->phy_reset_disable = FALSE; - if(!hw->phy_reset_disable) { - if (hw->phy_type == e1000_phy_igp) { + return E1000_SUCCESS; +} - ret_val = e1000_phy_reset(hw); - if(ret_val) { - DEBUGOUT("Error Resetting the PHY\n"); - return ret_val; - } - /* Wait 10ms for MAC to configure PHY from eeprom settings */ - msec_delay(15); +/******************************************************************** +* Copper link setup for e1000_phy_igp series. 
+* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + uint32_t led_ctrl; + int32_t ret_val; + uint16_t phy_data; - /* Configure activity LED after PHY reset */ - led_ctrl = E1000_READ_REG(hw, LEDCTL); - led_ctrl &= IGP_ACTIVITY_LED_MASK; - led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); - E1000_WRITE_REG(hw, LEDCTL, led_ctrl); + DEBUGFUNC("e1000_copper_link_igp_setup"); - /* disable lplu d3 during driver init */ - ret_val = e1000_set_d3_lplu_state(hw, FALSE); - if(ret_val) { - DEBUGOUT("Error Disabling LPLU D3\n"); - return ret_val; - } + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } - /* Configure mdi-mdix settings */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, - &phy_data); - if(ret_val) - return ret_val; + /* Wait 10ms for MAC to configure PHY from eeprom settings */ + msec_delay(15); - if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { - hw->dsp_config_state = e1000_dsp_config_disabled; - /* Force MDI for IGP B-0 PHY */ - phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | - IGP01E1000_PSCR_FORCE_MDI_MDIX); - hw->mdix = 1; + /* Configure activity LED after PHY reset */ + led_ctrl = E1000_READ_REG(hw, LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); - } else { - hw->dsp_config_state = e1000_dsp_config_enabled; - phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D3\n"); + return ret_val; + } - switch (hw->mdix) { - case 1: - phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 2: - phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; - break; - case 0: - default: - phy_data |= IGP01E1000_PSCR_AUTO_MDIX; - break; - } - } - ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, - phy_data); - if(ret_val) - return ret_val; + /* disable lplu d0 during driver init */ + ret_val = e1000_set_d0_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; - /* set auto-master slave resolution settings */ - if(hw->autoneg) { - e1000_ms_type phy_ms_setting = hw->master_slave; - - if(hw->ffe_config_state == e1000_ffe_config_active) - hw->ffe_config_state = e1000_ffe_config_enabled; - - if(hw->dsp_config_state == e1000_dsp_config_activated) - hw->dsp_config_state = e1000_dsp_config_enabled; - - /* when autonegotiation advertisment is only 1000Mbps then we - * should disable SmartSpeed and enable Auto MasterSlave - * resolution as hardware default. 
*/ - if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { - /* Disable SmartSpeed */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, - &phy_data); - if(ret_val) - return ret_val; - phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; - ret_val = e1000_write_phy_reg(hw, - IGP01E1000_PHY_PORT_CONFIG, - phy_data); - if(ret_val) - return ret_val; - /* Set auto Master/Slave resolution process */ - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if(ret_val) - return ret_val; - phy_data &= ~CR_1000T_MS_ENABLE; - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if(ret_val) - return ret_val; - } + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); - if(ret_val) - return ret_val; + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; - /* load defaults for future use */ - hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? - ((phy_data & CR_1000T_MS_VALUE) ? - e1000_ms_force_master : - e1000_ms_force_slave) : - e1000_ms_auto; - - switch (phy_ms_setting) { - case e1000_ms_force_master: - phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); - break; - case e1000_ms_force_slave: - phy_data |= CR_1000T_MS_ENABLE; - phy_data &= ~(CR_1000T_MS_VALUE); - break; - case e1000_ms_auto: - phy_data &= ~CR_1000T_MS_ENABLE; - default: - break; - } - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); - if(ret_val) - return ret_val; - } - } else { - /* Enable CRS on TX. This must be set for half-duplex operation. 
*/ - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, - &phy_data); - if(ret_val) - return ret_val; + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if(ret_val) + return ret_val; - phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + /* set auto-master slave resolution settings */ + if(hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; - /* Options: - * MDI/MDI-X = 0 (default) - * 0 - Auto for all speeds - * 1 - MDI mode - * 2 - MDI-X mode - * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) - */ - phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + if(hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; - switch (hw->mdix) { - case 1: - phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; - break; - case 2: - phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; - break; - case 3: - phy_data |= M88E1000_PSCR_AUTO_X_1000T; - break; - case 0: - default: - phy_data |= M88E1000_PSCR_AUTO_X_MODE; - break; - } + if(hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; - /* Options: - * disable_polarity_correction = 0 (default) - * Automatic Correction for Reversed Cable Polarity - * 0 - Disabled - * 1 - Enabled - */ - phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; - if(hw->disable_polarity_correction == 1) - phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; - ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, - phy_data); + /* when autonegotiation advertisment is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. */ + if(hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); if(ret_val) return ret_val; - - /* Force TX_CLK in the Extended PHY Specific Control Register - * to 25MHz clock. - */ - ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - &phy_data); + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + phy_data); if(ret_val) return ret_val; - - phy_data |= M88E1000_EPSCR_TX_CLK_25; - - if (hw->phy_revision < M88E1011_I_REV_4) { - /* Configure Master and Slave downshift values */ - phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); - phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | - M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); - ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, - phy_data); - if(ret_val) - return ret_val; - } - - /* SW Reset the PHY so all changes take effect */ - ret_val = e1000_phy_reset(hw); - if(ret_val) { - DEBUGOUT("Error Resetting the PHY\n"); - return ret_val; - } - } - - /* Options: - * autoneg = 1 (default) - * PHY will advertise value(s) parsed from - * autoneg_advertised and fc - * autoneg = 0 - * PHY will be set to 10H, 10F, 100H, or 100F - * depending on value parsed from forced_speed_duplex. - */ - - /* Is autoneg enabled? This is enabled by default or by software - * override. If so, call e1000_phy_setup_autoneg routine to parse the - * autoneg_advertised and fc options. If autoneg is NOT enabled, then - * the user should have provided a speed/duplex override. If so, then - * call e1000_phy_force_speed_duplex to parse and set this up. 
- */ - if(hw->autoneg) { - /* Perform some bounds checking on the hw->autoneg_advertised - * parameter. If this variable is zero, then set it to the default. - */ - hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; - - /* If autoneg_advertised is zero, we assume it was not defaulted - * by the calling code so we set to advertise full capability. - */ - if(hw->autoneg_advertised == 0) - hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; - - DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); - ret_val = e1000_phy_setup_autoneg(hw); - if(ret_val) { - DEBUGOUT("Error Setting up Auto-Negotiation\n"); - return ret_val; - } - DEBUGOUT("Restarting Auto-Neg\n"); - - /* Restart auto-negotiation by setting the Auto Neg Enable bit and - * the Auto Neg Restart bit in the PHY control register. - */ - ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + /* Set auto Master/Slave resolution process */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); if(ret_val) return ret_val; - - phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); - ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); if(ret_val) return ret_val; - - /* Does the user want to wait for Auto-Neg to complete here, or - * check at a later time (for example, callback routine). - */ - if(hw->wait_autoneg_complete) { - ret_val = e1000_wait_autoneg(hw); - if(ret_val) { - DEBUGOUT("Error while waiting for autoneg to complete\n"); - return ret_val; - } - } - hw->get_link_status = TRUE; - } else { - DEBUGOUT("Forcing speed and duplex\n"); - ret_val = e1000_phy_force_speed_duplex(hw); - if(ret_val) { - DEBUGOUT("Error Forcing Speed and Duplex\n"); - return ret_val; - } } - } /* !hw->phy_reset_disable */ - /* Check link status. Wait up to 100 microseconds for link to become - * valid. - */ - for(i = 0; i < 10; i++) { - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); - if(ret_val) - return ret_val; - ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); if(ret_val) return ret_val; - if(phy_data & MII_SR_LINK_STATUS) { - /* We have link, so we need to finish the config process: - * 1) Set up the MAC to the current PHY speed/duplex - * if we are on 82543. If we - * are on newer silicon, we only need to configure - * collision distance in the Transmit Control Register. - * 2) Set up flow control on the MAC to that established with - * the link partner. - */ - if(hw->mac_type >= e1000_82544) { - e1000_config_collision_dist(hw); - } else { - ret_val = e1000_config_mac_to_phy(hw); - if(ret_val) { - DEBUGOUT("Error configuring MAC to PHY settings\n"); - return ret_val; - } - } - ret_val = e1000_config_fc_after_link_up(hw); - if(ret_val) { - DEBUGOUT("Error Configuring Flow Control\n"); - return ret_val; - } - DEBUGOUT("Valid link established!!!\n"); + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? 
+ e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; - if(hw->phy_type == e1000_phy_igp) { - ret_val = e1000_config_dsp_after_link_change(hw, TRUE); - if(ret_val) { - DEBUGOUT("Error Configuring DSP after link up\n"); - return ret_val; - } - } - DEBUGOUT("Valid link established!!!\n"); - return E1000_SUCCESS; + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; } - udelay(10); + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if(ret_val) + return ret_val; } - DEBUGOUT("Unable to establish link!!!\n"); return E1000_SUCCESS; } -/****************************************************************************** -* Configures PHY autoneg and flow control advertisement settings + +/******************************************************************** +* Copper link setup for e1000_phy_m88 series. * * hw - Struct containing variables accessed by shared code -******************************************************************************/ -int32_t -e1000_phy_setup_autoneg(struct e1000_hw *hw) +*********************************************************************/ +static int32_t +e1000_copper_link_mgp_setup(struct e1000_hw *hw) { int32_t ret_val; - uint16_t mii_autoneg_adv_reg; - uint16_t mii_1000t_ctrl_reg; - - DEBUGFUNC("e1000_phy_setup_autoneg"); + uint16_t phy_data; - /* Read the MII Auto-Neg Advertisement Register (Address 4). */ - ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); - if(ret_val) - return ret_val; + DEBUGFUNC("e1000_copper_link_mgp_setup"); - /* Read the MII 1000Base-T Control Register (Address 9). */ - ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if(hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if(ret_val) return ret_val; - /* Need to parse both autoneg_advertised and fc and set up - * the appropriate PHY registers. First we will parse for - * autoneg_advertised software override. Since we can advertise - * a plethora of combinations, we need to check each bit - * individually. - */ + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - /* First we clear all the 10/100 mb speed bits in the Auto-Neg - * Advertisement Register (Address 4) and the 1000 mb speed bits in - * the 1000Base-T Control Register (Address 9). 
+ /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ - mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; - mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if(hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if(ret_val) + return ret_val; + + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if(ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if(ret_val) + return ret_val; + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if(ret_val) { + DEBUGOUT("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/******************************************************************** +* Setup auto-negotiation and flow control advertisements, +* and then perform auto-negotiation. +* +* hw - Struct containing variables accessed by shared code +*********************************************************************/ +static int32_t +e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t phy_data; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if(hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if(ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if(ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if(ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). 
+ */ + if(hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if(ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = TRUE; + + return E1000_SUCCESS; +} + + +/****************************************************************************** +* Config the MAC and the PHY after link is up. +* 1) Set up the MAC to the current PHY speed/duplex +* if we are on 82543. If we +* are on newer silicon, we only need to configure +* collision distance in the Transmit Control Register. +* 2) Set up flow control on the MAC to that established with +* the link partner. +* 3) Config DSP to improve Gigabit link quality for some PHY revisions. +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + int32_t ret_val; + DEBUGFUNC("e1000_copper_link_postconfig"); + + if(hw->mac_type >= e1000_82544) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if(ret_val) { + DEBUGOUT("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if(ret_val) { + DEBUGOUT("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if(hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, TRUE); + if(ret_val) { + DEBUGOUT("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/****************************************************************************** +* Detects which PHY is present and setup the speed and duplex +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +static int32_t +e1000_setup_copper_link(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t i; + uint16_t phy_data; + + DEBUGFUNC("e1000_setup_copper_link"); + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if(ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) { + ret_val = e1000_copper_link_igp_setup(hw); + if(ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if(ret_val) + return ret_val; + } + + if(hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation */ + ret_val = e1000_copper_link_autoneg(hw); + if(ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. */ + DEBUGOUT("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if(ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. 
+ */ + for(i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if(ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if(ret_val) + return ret_val; + + if(phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if(ret_val) + return ret_val; + + DEBUGOUT("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + DEBUGOUT("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/****************************************************************************** +* Configures PHY autoneg and flow control advertisement settings +* +* hw - Struct containing variables accessed by shared code +******************************************************************************/ +int32_t +e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t mii_autoneg_adv_reg; + uint16_t mii_1000t_ctrl_reg; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if(ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if(ret_val) + return ret_val; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised); @@ -1387,7 +1658,7 @@ e1000_phy_setup_autoneg(struct e1000_hw DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); - ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); if(ret_val) return ret_val; @@ -1542,7 +1813,8 @@ e1000_phy_force_speed_duplex(struct e100 if(mii_status_reg & MII_SR_LINK_STATUS) break; msec_delay(100); } - if((i == 0) && (hw->phy_type == e1000_phy_m88)) { + if((i == 0) && + (hw->phy_type == e1000_phy_m88)) { /* We didn't get link. Reset the DSP and wait again for link. */ ret_val = e1000_phy_reset_dsp(hw); if(ret_val) { @@ -1592,6 +1864,15 @@ e1000_phy_force_speed_duplex(struct e100 ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if(ret_val) return ret_val; + + if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if(ret_val) + return ret_val; + } } return E1000_SUCCESS; } @@ -1638,6 +1919,11 @@ e1000_config_mac_to_phy(struct e1000_hw DEBUGFUNC("e1000_config_mac_to_phy"); + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration.*/ + if (hw->mac_type >= e1000_82544) + return E1000_SUCCESS; + /* Read the Device Control Register and set the bits to Force Speed * and Duplex. 
*/ @@ -1648,45 +1934,25 @@ e1000_config_mac_to_phy(struct e1000_hw /* Set up duplex in the Device Control and Transmit Control * registers depending on negotiated values. */ - if (hw->phy_type == e1000_phy_igp) { - ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, - &phy_data); - if(ret_val) - return ret_val; - - if(phy_data & IGP01E1000_PSSR_FULL_DUPLEX) ctrl |= E1000_CTRL_FD; - else ctrl &= ~E1000_CTRL_FD; - - e1000_config_collision_dist(hw); + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if(ret_val) + return ret_val; - /* Set up speed in the Device Control register depending on - * negotiated values. - */ - if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_1000MBPS) - ctrl |= E1000_CTRL_SPD_1000; - else if((phy_data & IGP01E1000_PSSR_SPEED_MASK) == - IGP01E1000_PSSR_SPEED_100MBPS) - ctrl |= E1000_CTRL_SPD_100; - } else { - ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, - &phy_data); - if(ret_val) - return ret_val; + if(phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; - if(phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; - else ctrl &= ~E1000_CTRL_FD; + e1000_config_collision_dist(hw); - e1000_config_collision_dist(hw); + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; - /* Set up speed in the Device Control register depending on - * negotiated values. - */ - if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) - ctrl |= E1000_CTRL_SPD_1000; - else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) - ctrl |= E1000_CTRL_SPD_100; - } /* Write the configured values back to the Device Control Reg. */ E1000_WRITE_REG(hw, CTRL, ctrl); return E1000_SUCCESS; @@ -1981,6 +2247,7 @@ e1000_check_for_link(struct e1000_hw *hw uint32_t ctrl; uint32_t status; uint32_t rctl; + uint32_t icr; uint32_t signal = 0; int32_t ret_val; uint16_t phy_data; @@ -2030,6 +2297,25 @@ e1000_check_for_link(struct e1000_hw *hw * link-up */ e1000_check_downshift(hw); + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the polarity + * reversal workaround. We disable interrupts first, and upon + * returning, place the devices interrupt state to its previous + * value except for the link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) && + (!hw->autoneg) && + (hw->forced_speed_duplex == e1000_10_full || + hw->forced_speed_duplex == e1000_10_half)) { + E1000_WRITE_REG(hw, IMC, 0xffffffff); + ret_val = e1000_polarity_reversal_workaround(hw); + icr = E1000_READ_REG(hw, ICR); + E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC)); + E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK); + } + } else { /* No link detected */ e1000_config_dsp_after_link_change(hw, FALSE); @@ -2079,7 +2365,7 @@ e1000_check_for_link(struct e1000_hw *hw * at gigabit speed, then TBI compatibility is not needed. If we are * at gigabit speed, we turn on TBI compatibility. 
*/ - if(hw->tbi_compatibility_en) { + if(hw->tbi_compatibility_en) { uint16_t speed, duplex; e1000_get_speed_and_duplex(hw, &speed, &duplex); if(speed != SPEED_1000) { @@ -2434,15 +2720,17 @@ e1000_read_phy_reg(struct e1000_hw *hw, DEBUGFUNC("e1000_read_phy_reg"); - if(hw->phy_type == e1000_phy_igp && + if((hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) && (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (uint16_t)reg_addr); - if(ret_val) + if(ret_val) { return ret_val; + } } - ret_val = e1000_read_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr, + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, phy_data); return ret_val; @@ -2538,15 +2826,17 @@ e1000_write_phy_reg(struct e1000_hw *hw, DEBUGFUNC("e1000_write_phy_reg"); - if(hw->phy_type == e1000_phy_igp && + if((hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) && (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, (uint16_t)reg_addr); - if(ret_val) + if(ret_val) { return ret_val; + } } - ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT & reg_addr, + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, phy_data); return ret_val; @@ -2615,19 +2905,27 @@ e1000_write_phy_reg_ex(struct e1000_hw * return E1000_SUCCESS; } + /****************************************************************************** * Returns the PHY to the power-on reset state * * hw - Struct containing variables accessed by shared code ******************************************************************************/ -void +int32_t e1000_phy_hw_reset(struct e1000_hw *hw) { uint32_t ctrl, ctrl_ext; uint32_t led_ctrl; + int32_t ret_val; DEBUGFUNC("e1000_phy_hw_reset"); + /* In the case of the phy reset being blocked, it's not an error, we + * simply return success without performing the reset. */ + ret_val = e1000_check_phy_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + DEBUGOUT("Resetting Phy...\n"); if(hw->mac_type > e1000_82543) { @@ -2663,6 +2961,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw) led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, LEDCTL, led_ctrl); } + + /* Wait for FW to finish PHY configuration. */ + ret_val = e1000_get_phy_cfg_done(hw); + + return ret_val; } /****************************************************************************** @@ -2680,7 +2983,21 @@ e1000_phy_reset(struct e1000_hw *hw) DEBUGFUNC("e1000_phy_reset"); - if(hw->mac_type != e1000_82541_rev_2) { + /* In the case of the phy reset being blocked, it's not an error, we + * simply return success without performing the reset. */ + ret_val = e1000_check_phy_reset_block(hw); + if (ret_val) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82541_rev_2: + case e1000_82571: + case e1000_82572: + ret_val = e1000_phy_hw_reset(hw); + if(ret_val) + return ret_val; + break; + default: ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); if(ret_val) return ret_val; @@ -2691,9 +3008,10 @@ e1000_phy_reset(struct e1000_hw *hw) return ret_val; udelay(1); - } else e1000_phy_hw_reset(hw); + break; + } - if(hw->phy_type == e1000_phy_igp) + if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) e1000_phy_init_script(hw); return E1000_SUCCESS; @@ -2713,6 +3031,16 @@ e1000_detect_gig_phy(struct e1000_hw *hw DEBUGFUNC("e1000_detect_gig_phy"); + /* The 82571 firmware may still be configuring the PHY. 
In this + * case, we cannot access the PHY until the configuration is done. So + * we explicitly set the PHY values. */ + if(hw->mac_type == e1000_82571 || + hw->mac_type == e1000_82572) { + hw->phy_id = IGP01E1000_I_PHY_ID; + hw->phy_type = e1000_phy_igp_2; + return E1000_SUCCESS; + } + /* Read the PHY ID Registers to identify which PHY is onboard. */ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); if(ret_val) @@ -2747,6 +3075,9 @@ e1000_detect_gig_phy(struct e1000_hw *hw case e1000_82547_rev_2: if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; break; + case e1000_82573: + if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; + break; default: DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); return -E1000_ERR_CONFIG; @@ -2802,7 +3133,7 @@ e1000_phy_igp_get_info(struct e1000_hw * /* The downshift status is checked only once, after link is established, * and it stored in the hw->speed_downgraded parameter. */ - phy_info->downshift = hw->speed_downgraded; + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; /* IGP01E1000 does not need to support it. */ phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; @@ -2841,7 +3172,7 @@ e1000_phy_igp_get_info(struct e1000_hw * if(ret_val) return ret_val; - /* transalte to old method */ + /* Translate to old method */ average = (max_length + min_length) / 2; if(average <= e1000_igp_cable_length_50) @@ -2876,7 +3207,7 @@ e1000_phy_m88_get_info(struct e1000_hw * /* The downshift status is checked only once, after link is established, * and it stored in the hw->speed_downgraded parameter. */ - phy_info->downshift = hw->speed_downgraded; + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if(ret_val) @@ -2892,8 +3223,7 @@ e1000_phy_m88_get_info(struct e1000_hw * /* Check polarity status */ ret_val = e1000_check_polarity(hw, &polarity); if(ret_val) - return ret_val; - + return ret_val; phy_info->cable_polarity = polarity; ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); @@ -2903,9 +3233,9 @@ e1000_phy_m88_get_info(struct e1000_hw * phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >> M88E1000_PSSR_MDIX_SHIFT; - if(phy_data & M88E1000_PSSR_1000MBS) { - /* Cable Length Estimation and Local/Remote Receiver Informatoion - * are only valid at 1000 Mbps + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. 
*/ phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT); @@ -2966,7 +3296,8 @@ e1000_phy_get_info(struct e1000_hw *hw, return -E1000_ERR_CONFIG; } - if(hw->phy_type == e1000_phy_igp) + if(hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) return e1000_phy_igp_get_info(hw, phy_info); else return e1000_phy_m88_get_info(hw, phy_info); @@ -2992,11 +3323,12 @@ e1000_validate_mdi_setting(struct e1000_ * * hw - Struct containing variables accessed by shared code *****************************************************************************/ -void +int32_t e1000_init_eeprom_params(struct e1000_hw *hw) { struct e1000_eeprom_info *eeprom = &hw->eeprom; uint32_t eecd = E1000_READ_REG(hw, EECD); + int32_t ret_val = E1000_SUCCESS; uint16_t eeprom_size; DEBUGFUNC("e1000_init_eeprom_params"); @@ -3011,6 +3343,8 @@ e1000_init_eeprom_params(struct e1000_hw eeprom->opcode_bits = 3; eeprom->address_bits = 6; eeprom->delay_usec = 50; + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; break; case e1000_82540: case e1000_82545: @@ -3027,6 +3361,8 @@ e1000_init_eeprom_params(struct e1000_hw eeprom->word_size = 64; eeprom->address_bits = 6; } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; break; case e1000_82541: case e1000_82541_rev_2: @@ -3055,8 +3391,11 @@ e1000_init_eeprom_params(struct e1000_hw eeprom->address_bits = 6; } } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; break; - default: + case e1000_82571: + case e1000_82572: eeprom->type = e1000_eeprom_spi; eeprom->opcode_bits = 8; eeprom->delay_usec = 1; @@ -3067,40 +3406,60 @@ e1000_init_eeprom_params(struct e1000_hw eeprom->page_size = 8; eeprom->address_bits = 8; } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; break; - } - - if (eeprom->type == e1000_eeprom_spi) { - eeprom->word_size = 64; - if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) { - eeprom_size &= EEPROM_SIZE_MASK; + case e1000_82573: + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + eeprom->use_eerd = TRUE; + eeprom->use_eewr = TRUE; + if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) { + eeprom->type = e1000_eeprom_flash; + eeprom->word_size = 2048; + + /* Ensure that the Autonomous FLASH update bit is cleared due to + * Flash update issue on parts which use a FLASH for NVM. */ + eecd &= ~E1000_EECD_AUPDEN; + E1000_WRITE_REG(hw, EECD, eecd); + } + break; + default: + break; + } - switch (eeprom_size) { - case EEPROM_SIZE_16KB: - eeprom->word_size = 8192; - break; - case EEPROM_SIZE_8KB: - eeprom->word_size = 4096; - break; - case EEPROM_SIZE_4KB: - eeprom->word_size = 2048; - break; - case EEPROM_SIZE_2KB: - eeprom->word_size = 1024; - break; - case EEPROM_SIZE_1KB: - eeprom->word_size = 512; - break; - case EEPROM_SIZE_512B: - eeprom->word_size = 256; - break; - case EEPROM_SIZE_128B: - default: - eeprom->word_size = 64; - break; - } + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to + * 32KB (incremented by powers of 2). + */ + if(hw->mac_type <= e1000_82547_rev_2) { + /* Set to default value for initial eeprom read. 
*/ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if(ret_val) + return ret_val; + eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to 256B) + * is never the result used in the shifting logic below. */ + if(eeprom_size) + eeprom_size++; + } else { + eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); } + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); } + return ret_val; } /****************************************************************************** @@ -3253,24 +3612,30 @@ e1000_acquire_eeprom(struct e1000_hw *hw DEBUGFUNC("e1000_acquire_eeprom"); + if(e1000_get_hw_eeprom_semaphore(hw)) + return -E1000_ERR_EEPROM; + eecd = E1000_READ_REG(hw, EECD); - /* Request EEPROM Access */ - if(hw->mac_type > e1000_82544) { - eecd |= E1000_EECD_REQ; - E1000_WRITE_REG(hw, EECD, eecd); - eecd = E1000_READ_REG(hw, EECD); - while((!(eecd & E1000_EECD_GNT)) && - (i < E1000_EEPROM_GRANT_ATTEMPTS)) { - i++; - udelay(5); - eecd = E1000_READ_REG(hw, EECD); - } - if(!(eecd & E1000_EECD_GNT)) { - eecd &= ~E1000_EECD_REQ; + if (hw->mac_type != e1000_82573) { + /* Request EEPROM Access */ + if(hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); - DEBUGOUT("Could not acquire EEPROM grant\n"); - return -E1000_ERR_EEPROM; + eecd = E1000_READ_REG(hw, EECD); + while((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = E1000_READ_REG(hw, EECD); + } + if(!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, EECD, eecd); + DEBUGOUT("Could not acquire EEPROM grant\n"); + e1000_put_hw_eeprom_semaphore(hw); + return -E1000_ERR_EEPROM; + } } } @@ -3390,6 +3755,8 @@ e1000_release_eeprom(struct e1000_hw *hw eecd &= ~E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); } + + e1000_put_hw_eeprom_semaphore(hw); } /****************************************************************************** @@ -3451,21 +3818,36 @@ e1000_read_eeprom(struct e1000_hw *hw, { struct e1000_eeprom_info *eeprom = &hw->eeprom; uint32_t i = 0; + int32_t ret_val; DEBUGFUNC("e1000_read_eeprom"); /* A check for invalid values: offset too large, too many words, and not * enough words. */ - if((offset > eeprom->word_size) || (words > eeprom->word_size - offset) || + if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { DEBUGOUT("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } - /* Prepare the EEPROM for reading */ - if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) - return -E1000_ERR_EEPROM; + /* FLASH reads without acquiring the semaphore are safe in 82573-based + * controllers. 
+ */ + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || + (hw->mac_type != e1000_82573)) { + /* Prepare the EEPROM for reading */ + if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + } + + if(eeprom->use_eerd == TRUE) { + ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || + (hw->mac_type != e1000_82573)) + e1000_release_eeprom(hw); + return ret_val; + } if(eeprom->type == e1000_eeprom_spi) { uint16_t word_in; @@ -3517,6 +3899,132 @@ e1000_read_eeprom(struct e1000_hw *hw, } /****************************************************************************** + * Reads a 16 bit word from the EEPROM using the EERD register. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_read_eeprom_eerd(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + uint32_t i, eerd = 0; + int32_t error = 0; + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + + E1000_EEPROM_RW_REG_START; + + E1000_WRITE_REG(hw, EERD, eerd); + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); + + if(error) { + break; + } + data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); + + } + + return error; +} + +/****************************************************************************** + * Writes a 16 bit word from the EEPROM using the EEWR register. + * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_write_eeprom_eewr(struct e1000_hw *hw, + uint16_t offset, + uint16_t words, + uint16_t *data) +{ + uint32_t register_value = 0; + uint32_t i = 0; + int32_t error = 0; + + for (i = 0; i < words; i++) { + register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | + ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | + E1000_EEPROM_RW_REG_START; + + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); + if(error) { + break; + } + + E1000_WRITE_REG(hw, EEWR, register_value); + + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); + + if(error) { + break; + } + } + + return error; +} + +/****************************************************************************** + * Polls the status bit (bit 1) of the EERD to determine when the read is done. + * + * hw - Struct containing variables accessed by shared code + *****************************************************************************/ +int32_t +e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) +{ + uint32_t attempts = 100000; + uint32_t i, reg = 0; + int32_t done = E1000_ERR_EEPROM; + + for(i = 0; i < attempts; i++) { + if(eerd == E1000_EEPROM_POLL_READ) + reg = E1000_READ_REG(hw, EERD); + else + reg = E1000_READ_REG(hw, EEWR); + + if(reg & E1000_EEPROM_RW_REG_DONE) { + done = E1000_SUCCESS; + break; + } + udelay(5); + } + + return done; +} + +/*************************************************************************** +* Description: Determines if the onboard NVM is FLASH or EEPROM. 
+* +* hw - Struct containing variables accessed by shared code +****************************************************************************/ +boolean_t +e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) +{ + uint32_t eecd = 0; + + if(hw->mac_type == e1000_82573) { + eecd = E1000_READ_REG(hw, EECD); + + /* Isolate bits 15 & 16 */ + eecd = ((eecd >> 15) & 0x03); + + /* If both bits are set, device is Flash type */ + if(eecd == 0x03) { + return FALSE; + } + } + return TRUE; +} + +/****************************************************************************** * Verifies that the EEPROM has a valid checksum * * hw - Struct containing variables accessed by shared code @@ -3533,6 +4041,25 @@ e1000_validate_eeprom_checksum(struct e1 DEBUGFUNC("e1000_validate_eeprom_checksum"); + if ((hw->mac_type == e1000_82573) && + (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) { + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. */ + e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); + if ((eeprom_data & 0x10) == 0) { + /* Read 0x23 and check bit 15. This bit is a 1 when the checksum + * has already been fixed. If the checksum is still wrong and this + * bit is a 1, we need to return bad checksum. Otherwise, we need + * to set this bit to a 1 and update the checksum. */ + e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); + if ((eeprom_data & 0x8000) == 0) { + eeprom_data |= 0x8000; + e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); + e1000_update_eeprom_checksum(hw); + } + } + } + for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { DEBUGOUT("EEPROM Read Error\n"); @@ -3576,6 +4103,8 @@ e1000_update_eeprom_checksum(struct e100 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { DEBUGOUT("EEPROM Write Error\n"); return -E1000_ERR_EEPROM; + } else if (hw->eeprom.type == e1000_eeprom_flash) { + e1000_commit_shadow_ram(hw); } return E1000_SUCCESS; } @@ -3605,12 +4134,16 @@ e1000_write_eeprom(struct e1000_hw *hw, /* A check for invalid values: offset too large, too many words, and not * enough words. */ - if((offset > eeprom->word_size) || (words > eeprom->word_size - offset) || + if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) || (words == 0)) { DEBUGOUT("\"words\" parameter out of bounds\n"); return -E1000_ERR_EEPROM; } + /* 82573 writes only through eewr */ + if(eeprom->use_eewr == TRUE) + return e1000_write_eeprom_eewr(hw, offset, words, data); + /* Prepare the EEPROM for writing */ if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) return -E1000_ERR_EEPROM; @@ -3781,6 +4314,65 @@ e1000_write_eeprom_microwire(struct e100 } /****************************************************************************** + * Flushes the cached eeprom to NVM. This is done by saving the modified values + * in the eeprom cache and the non modified values in the currently active bank + * to the new bank. 
+ * + * hw - Struct containing variables accessed by shared code + * offset - offset of word in the EEPROM to read + * data - word read from the EEPROM + * words - number of words to read + *****************************************************************************/ +int32_t +e1000_commit_shadow_ram(struct e1000_hw *hw) +{ + uint32_t attempts = 100000; + uint32_t eecd = 0; + uint32_t flop = 0; + uint32_t i = 0; + int32_t error = E1000_SUCCESS; + + /* The flop register will be used to determine if flash type is STM */ + flop = E1000_READ_REG(hw, FLOP); + + if (hw->mac_type == e1000_82573) { + for (i=0; i < attempts; i++) { + eecd = E1000_READ_REG(hw, EECD); + if ((eecd & E1000_EECD_FLUPD) == 0) { + break; + } + udelay(5); + } + + if (i == attempts) { + return -E1000_ERR_EEPROM; + } + + /* If STM opcode located in bits 15:8 of flop, reset firmware */ + if ((flop & 0xFF00) == E1000_STM_OPCODE) { + E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); + } + + /* Perform the flash update */ + E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); + + for (i=0; i < attempts; i++) { + eecd = E1000_READ_REG(hw, EECD); + if ((eecd & E1000_EECD_FLUPD) == 0) { + break; + } + udelay(5); + } + + if (i == attempts) { + return -E1000_ERR_EEPROM; + } + } + + return error; +} + +/****************************************************************************** * Reads the adapter's part number from the EEPROM * * hw - Struct containing variables accessed by shared code @@ -3837,9 +4429,16 @@ e1000_read_mac_addr(struct e1000_hw * hw hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); } - if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) && - (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82571: + if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) hw->perm_mac_addr[5] ^= 0x01; + break; + } for(i = 0; i < NODE_ADDRESS_SIZE; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; @@ -3859,6 +4458,7 @@ void e1000_init_rx_addrs(struct e1000_hw *hw) { uint32_t i; + uint32_t rar_num; DEBUGFUNC("e1000_init_rx_addrs"); @@ -3867,9 +4467,16 @@ e1000_init_rx_addrs(struct e1000_hw *hw) e1000_rar_set(hw, hw->mac_addr, 0); + rar_num = E1000_RAR_ENTRIES; + + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. */ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + rar_num -= 1; /* Zero out the other 15 receive addresses. */ DEBUGOUT("Clearing RAR[1-15]\n"); - for(i = 1; i < E1000_RAR_ENTRIES; i++) { + for(i = 1; i < rar_num; i++) { E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); } @@ -3898,7 +4505,9 @@ e1000_mc_addr_list_update(struct e1000_h { uint32_t hash_value; uint32_t i; - + uint32_t num_rar_entry; + uint32_t num_mta_entry; + DEBUGFUNC("e1000_mc_addr_list_update"); /* Set the new number of MC addresses that we are being requested to use. */ @@ -3906,14 +4515,22 @@ e1000_mc_addr_list_update(struct e1000_h /* Clear RAR[1-15] */ DEBUGOUT(" Clearing RAR[1-15]\n"); - for(i = rar_used_count; i < E1000_RAR_ENTRIES; i++) { + num_rar_entry = E1000_RAR_ENTRIES; + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. 
*/ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + num_rar_entry -= 1; + + for(i = rar_used_count; i < num_rar_entry; i++) { E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); } /* Clear the MTA */ DEBUGOUT(" Clearing MTA\n"); - for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) { + num_mta_entry = E1000_NUM_MTA_REGISTERS; + for(i = 0; i < num_mta_entry; i++) { E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); } @@ -3937,7 +4554,7 @@ e1000_mc_addr_list_update(struct e1000_h /* Place this multicast address in the RAR if there is room, * * else put it in the MTA */ - if(rar_used_count < E1000_RAR_ENTRIES) { + if (rar_used_count < num_rar_entry) { e1000_rar_set(hw, mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), rar_used_count); @@ -3988,6 +4605,7 @@ e1000_hash_mc_addr(struct e1000_hw *hw, } hash_value &= 0xFFF; + return hash_value; } @@ -4092,12 +4710,33 @@ void e1000_clear_vfta(struct e1000_hw *hw) { uint32_t offset; - - for(offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) - E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0); + uint32_t vfta_value = 0; + uint32_t vfta_offset = 0; + uint32_t vfta_bit_in_reg = 0; + + if (hw->mac_type == e1000_82573) { + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096b bit-field, each identifying a single VLAN + * ID. The following operations determine which 32b entry + * (i.e. offset) into the array we want to set the VLAN ID + * (i.e. bit) of the manageability unit. */ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of the + * manageability unit */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + } } -static int32_t +int32_t e1000_id_led_init(struct e1000_hw * hw) { uint32_t ledctl; @@ -4428,6 +5067,18 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw temp = E1000_READ_REG(hw, MGTPRC); temp = E1000_READ_REG(hw, MGTPDC); temp = E1000_READ_REG(hw, MGTPTC); + + if(hw->mac_type <= e1000_82547_rev_2) return; + + temp = E1000_READ_REG(hw, IAC); + temp = E1000_READ_REG(hw, ICRXOC); + temp = E1000_READ_REG(hw, ICRXPTC); + temp = E1000_READ_REG(hw, ICRXATC); + temp = E1000_READ_REG(hw, ICTXPTC); + temp = E1000_READ_REG(hw, ICTXATC); + temp = E1000_READ_REG(hw, ICTXQEC); + temp = E1000_READ_REG(hw, ICTXQMTC); + temp = E1000_READ_REG(hw, ICRXDMTC); } /****************************************************************************** @@ -4587,41 +5238,51 @@ e1000_get_bus_info(struct e1000_hw *hw) { uint32_t status; - if(hw->mac_type < e1000_82543) { + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: hw->bus_type = e1000_bus_type_unknown; hw->bus_speed = e1000_bus_speed_unknown; hw->bus_width = e1000_bus_width_unknown; - return; - } - - status = E1000_READ_REG(hw, STATUS); - hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? - e1000_bus_type_pcix : e1000_bus_type_pci; + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + hw->bus_type = e1000_bus_type_pci_express; + hw->bus_speed = e1000_bus_speed_2500; + hw->bus_width = e1000_bus_width_pciex_4; + break; + default: + status = E1000_READ_REG(hw, STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? 
+ e1000_bus_type_pcix : e1000_bus_type_pci; - if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { - hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? - e1000_bus_speed_66 : e1000_bus_speed_120; - } else if(hw->bus_type == e1000_bus_type_pci) { - hw->bus_speed = (status & E1000_STATUS_PCI66) ? - e1000_bus_speed_66 : e1000_bus_speed_33; - } else { - switch (status & E1000_STATUS_PCIX_SPEED) { - case E1000_STATUS_PCIX_SPEED_66: - hw->bus_speed = e1000_bus_speed_66; - break; - case E1000_STATUS_PCIX_SPEED_100: - hw->bus_speed = e1000_bus_speed_100; - break; - case E1000_STATUS_PCIX_SPEED_133: - hw->bus_speed = e1000_bus_speed_133; - break; - default: - hw->bus_speed = e1000_bus_speed_reserved; - break; + if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if(hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; } - hw->bus_width = (status & E1000_STATUS_BUS64) ? - e1000_bus_width_64 : e1000_bus_width_32; } /****************************************************************************** * Reads a value from one of the devices registers using port I/O (as opposed @@ -4685,7 +5346,9 @@ e1000_get_cable_length(struct e1000_hw * int32_t ret_val; uint16_t agc_value = 0; uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + uint16_t max_agc = 0; uint16_t i, phy_data; + uint16_t cable_length; DEBUGFUNC("e1000_get_cable_length"); @@ -4693,14 +5356,16 @@ e1000_get_cable_length(struct e1000_hw * /* Use old method for Phy older than IGP */ if(hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if(ret_val) return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; /* Convert the enum value to ranged values */ - switch((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> - M88E1000_PSSR_CABLE_LENGTH_SHIFT) { + switch (cable_length) { case e1000_cable_length_50: *min_length = 0; *max_length = e1000_igp_cable_length_50; @@ -4770,6 +5435,40 @@ e1000_get_cable_length(struct e1000_hw * IGP01E1000_AGC_RANGE) : 0; *max_length = e1000_igp_cable_length_table[agc_value] + IGP01E1000_AGC_RANGE; + } else if (hw->phy_type == e1000_phy_igp_2) { + uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of course and + * fine gain values. The result is a number that can be put into + * the lookup table to obtain the approximate cable length. */ + cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Remove min & max AGC values from calculation. 
*/ + if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc]) + min_agc = cur_agc; + if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc]) + max_agc = cur_agc; + + agc_value += e1000_igp_2_cable_length_table[cur_agc]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0; + *max_length = agc_value + IGP02E1000_AGC_RANGE; } return E1000_SUCCESS; @@ -4808,7 +5507,8 @@ e1000_check_polarity(struct e1000_hw *hw return ret_val; *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> M88E1000_PSSR_REV_POLARITY_SHIFT; - } else if(hw->phy_type == e1000_phy_igp) { + } else if(hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) { /* Read the Status register to check the speed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); @@ -4860,15 +5560,15 @@ e1000_check_downshift(struct e1000_hw *h DEBUGFUNC("e1000_check_downshift"); - if(hw->phy_type == e1000_phy_igp) { + if(hw->phy_type == e1000_phy_igp || + hw->phy_type == e1000_phy_igp_2) { ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, &phy_data); if(ret_val) return ret_val; hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0; - } - else if(hw->phy_type == e1000_phy_m88) { + } else if(hw->phy_type == e1000_phy_m88) { ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if(ret_val) @@ -4877,6 +5577,7 @@ e1000_check_downshift(struct e1000_hw *h hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> M88E1000_PSSR_DOWNSHIFT_SHIFT; } + return E1000_SUCCESS; } @@ -4897,7 +5598,7 @@ e1000_config_dsp_after_link_change(struc boolean_t link_up) { int32_t ret_val; - uint16_t phy_data, speed, duplex, i; + uint16_t phy_data, phy_saved_data, speed, duplex, i; uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_PARAM_A, IGP01E1000_PHY_AGC_PARAM_B, @@ -4978,6 +5679,21 @@ e1000_config_dsp_after_link_change(struc } } else { if(hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be restored at + * the end of the routines. */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if(ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if(ret_val) + return ret_val; + + msec_delay_irq(20); + ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIGA); if(ret_val) @@ -5000,10 +5716,33 @@ e1000_config_dsp_after_link_change(struc if(ret_val) return ret_val; + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if(ret_val) + return ret_val; + hw->dsp_config_state = e1000_dsp_config_enabled; } if(hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be restored at + * the end of the routines. 
*/ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if(ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if(ret_val) + return ret_val; + + msec_delay_irq(20); + ret_val = e1000_write_phy_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIGA); if(ret_val) @@ -5017,6 +5756,15 @@ e1000_config_dsp_after_link_change(struc IGP01E1000_IEEE_RESTART_AUTONEG); if(ret_val) return ret_val; + + msec_delay_irq(20); + + /* Now enable the transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if(ret_val) + return ret_val; + hw->ffe_config_state = e1000_ffe_config_enabled; } } @@ -5084,45 +5832,168 @@ e1000_set_d3_lplu_state(struct e1000_hw uint16_t phy_data; DEBUGFUNC("e1000_set_d3_lplu_state"); - if(!((hw->mac_type == e1000_82541_rev_2) || - (hw->mac_type == e1000_82547_rev_2))) + if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) return E1000_SUCCESS; /* During driver activity LPLU should not be used or it will attain link * from the lowest speeds starting from 10Mbps. The capability is used for * Dx transitions and states */ - ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); - if(ret_val) - return ret_val; - - if(!active) { - phy_data &= ~IGP01E1000_GMII_FLEX_SPD; - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); + if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); if(ret_val) return ret_val; + } else { + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + if(ret_val) + return ret_val; + } + + if(!active) { + if(hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); + if(ret_val) + return ret_val; + } else { + phy_data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + if (ret_val) + return ret_val; + } /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during * Dx states where the power conservation is most important. During * driver activity we should enable SmartSpeed, so performance is * maintained. 
*/ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if(ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if(ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if(ret_val) + return ret_val; + } + + } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || + (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + + if(hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); + if(ret_val) + return ret_val; + } else { + phy_data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); if(ret_val) return ret_val; - phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data); if(ret_val) return ret_val; - } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) || - (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || - (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { + } + return E1000_SUCCESS; +} - phy_data |= IGP01E1000_GMII_FLEX_SPD; - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); +/***************************************************************************** + * + * This function sets the lplu d0 state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisment + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. + * hw: Struct containing variables accessed by shared code + * active - true to enable lplu false to disable lplu. + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + ****************************************************************************/ + +int32_t +e1000_set_d0_lplu_state(struct e1000_hw *hw, + boolean_t active) +{ + int32_t ret_val; + uint16_t phy_data; + DEBUGFUNC("e1000_set_d0_lplu_state"); + + if(hw->mac_type <= e1000_82547_rev_2) + return E1000_SUCCESS; + + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); if(ret_val) return ret_val; + if (!active) { + phy_data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + if (ret_val) + return ret_val; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during + * Dx states where the power conservation is most important. During + * driver activity we should enable SmartSpeed, so performance is + * maintained. 
*/ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if(ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if(ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if(ret_val) + return ret_val; + } + + + } else { + + phy_data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + if (ret_val) + return ret_val; + /* When LPLU is enabled we should disable SmartSpeed */ ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); if(ret_val) @@ -5200,6 +6071,303 @@ e1000_set_vco_speed(struct e1000_hw *hw) return E1000_SUCCESS; } + +/***************************************************************************** + * This function reads the cookie from ARC ram. + * + * returns: - E1000_SUCCESS . + ****************************************************************************/ +int32_t +e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) +{ + uint8_t i; + uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; + uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; + + length = (length >> 2); + offset = (offset >> 2); + + for (i = 0; i < length; i++) { + *((uint32_t *) buffer + i) = + E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); + } + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function checks whether the HOST IF is enabled for command operaton + * and also checks whether the previous command is completed. + * It busy waits in case of previous command is not completed. + * + * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or + * timeout + * - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_enable_host_if(struct e1000_hw * hw) +{ + uint32_t hicr; + uint8_t i; + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + return E1000_SUCCESS; +} + +/***************************************************************************** + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient way. + * Also fills up the sum of the buffer in *buffer parameter. + * + * returns - E1000_SUCCESS for success. 
+ ****************************************************************************/ +int32_t +e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, + uint16_t length, uint16_t offset, uint8_t *sum) +{ + uint8_t *tmp; + uint8_t *bufptr = buffer; + uint32_t data; + uint16_t remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { + return -E1000_ERR_PARAM; + } + + tmp = (uint8_t *)&data; + prev_bytes = offset & 0x3; + offset &= 0xFFFC; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); + for (j = prev_bytes; j < sizeof(uint32_t); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(uint32_t); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(uint32_t); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); + } + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function writes the command header after does the checksum calculation. + * + * returns - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_write_cmd_header(struct e1000_hw * hw, + struct e1000_host_mng_command_header * hdr) +{ + uint16_t i; + uint8_t sum; + uint8_t *buffer; + + /* Write the whole command header structure which includes sum of + * the buffer */ + + uint16_t length = sizeof(struct e1000_host_mng_command_header); + + sum = hdr->checksum; + hdr->checksum = 0; + + buffer = (uint8_t *) hdr; + i = length; + while(i--) + sum += buffer[i]; + + hdr->checksum = 0 - sum; + + length >>= 2; + /* The device driver writes the relevant command block into the ram area. */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function indicates to ARC that a new command is pending which completes + * one write operation by the driver. + * + * returns - E1000_SUCCESS for success. + ****************************************************************************/ +int32_t +e1000_mng_write_commit( + struct e1000_hw * hw) +{ + uint32_t hicr; + + hicr = E1000_READ_REG(hw, HICR); + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + + +/***************************************************************************** + * This function checks the mode of the firmware. + * + * returns - TRUE when the mode is IAMT or FALSE. 
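The checksum convention used by e1000_mng_host_if_write() and e1000_mng_write_cmd_header() above: the payload byte sum is accumulated into hdr->checksum, and the header write then stores the two's complement of (payload sum + header bytes), so the command header plus the payload written to the host interface sum to zero modulo 256. A sketch of a check for that invariant, assuming the e1000_host_mng_command_header layout declared in e1000_hw.h below; the helper itself is illustrative only:

static boolean_t
mng_block_checksum_ok(const struct e1000_host_mng_command_header *hdr,
                      const uint8_t *payload, uint16_t length)
{
	const uint8_t *p = (const uint8_t *) hdr;
	uint8_t sum = 0;
	uint16_t i;

	/* header bytes, including the stored checksum field */
	for (i = 0; i < sizeof(*hdr); i++)
		sum += p[i];
	/* payload bytes as written by e1000_mng_host_if_write() */
	for (i = 0; i < length; i++)
		sum += payload[i];

	return sum == 0;
}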
+ ****************************************************************************/ +boolean_t +e1000_check_mng_mode( + struct e1000_hw *hw) +{ + uint32_t fwsm; + + fwsm = E1000_READ_REG(hw, FWSM); + + if((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) + return TRUE; + + return FALSE; +} + + +/***************************************************************************** + * This function writes the dhcp info . + ****************************************************************************/ +int32_t +e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, + uint16_t length) +{ + int32_t ret_val; + struct e1000_host_mng_command_header hdr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr), + &(hdr.checksum)); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val == E1000_SUCCESS) + ret_val = e1000_mng_write_commit(hw); + } + } + return ret_val; +} + + +/***************************************************************************** + * This function calculates the checksum. + * + * returns - checksum of buffer contents. + ****************************************************************************/ +uint8_t +e1000_calculate_mng_checksum(char *buffer, uint32_t length) +{ + uint8_t sum = 0; + uint32_t i; + + if (!buffer) + return 0; + + for (i=0; i < length; i++) + sum += buffer[i]; + + return (uint8_t) (0 - sum); +} + +/***************************************************************************** + * This function checks whether tx pkt filtering needs to be enabled or not. + * + * returns - TRUE for packet filtering or FALSE. 
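e1000_mng_write_dhcp_info() above chains the host-interface helpers (enable, payload write, command header, commit), so a caller only needs the single entry point; e1000_mng_host_if_write() itself rejects a payload that would overrun E1000_HI_MAX_MNG_DATA_LENGTH with -E1000_ERR_PARAM. A minimal usage sketch; the wrapper name and buffer are hypothetical:

static void
example_push_dhcp_cookie(struct e1000_hw *hw, uint8_t *info, uint16_t len)
{
	/* len is bounded inside e1000_mng_host_if_write(); no pre-check needed */
	if (e1000_mng_write_dhcp_info(hw, info, len) != E1000_SUCCESS)
		DEBUGOUT("Writing DHCP info to the management firmware failed\n");
}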
+ ****************************************************************************/ +boolean_t +e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + /* called in init as well as watchdog timer functions */ + + int32_t ret_val, checksum; + boolean_t tx_filter = FALSE; + struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); + uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); + + if (e1000_check_mng_mode(hw)) { + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val == E1000_SUCCESS) { + ret_val = e1000_host_if_read_cookie(hw, buffer); + if (ret_val == E1000_SUCCESS) { + checksum = hdr->checksum; + hdr->checksum = 0; + if ((hdr->signature == E1000_IAMT_SIGNATURE) && + checksum == e1000_calculate_mng_checksum((char *)buffer, + E1000_MNG_DHCP_COOKIE_LENGTH)) { + if (hdr->status & + E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) + tx_filter = TRUE; + } else + tx_filter = TRUE; + } else + tx_filter = TRUE; + } + } + + hw->tx_pkt_filtering = tx_filter; + return tx_filter; +} + /****************************************************************************** * Verifies the hardware needs to allow ARPs to be processed by the host * @@ -5212,6 +6380,7 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw) { uint32_t manc; + uint32_t fwsm, factps; if (hw->asf_firmware_present) { manc = E1000_READ_REG(hw, MANC); @@ -5219,8 +6388,389 @@ e1000_enable_mng_pass_thru(struct e1000_ if (!(manc & E1000_MANC_RCV_TCO_EN) || !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) return FALSE; - if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + if (e1000_arc_subsystem_valid(hw) == TRUE) { + fwsm = E1000_READ_REG(hw, FWSM); + factps = E1000_READ_REG(hw, FACTPS); + + if (((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) && + (factps & E1000_FACTPS_MNGCG)) + return TRUE; + } else + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return TRUE; + } + return FALSE; +} + +static int32_t +e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + int32_t ret_val; + uint16_t mii_status_reg; + uint16_t i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if(ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if(ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if(ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for(i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
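e1000_enable_tx_pkt_filtering() above notes that it is called from init as well as from the watchdog timer, caching its decision in hw->tx_pkt_filtering. A sketch of such a periodic caller; the function name is hypothetical and the real driver's watchdog path does more than this:

static void
example_refresh_tx_filtering(struct e1000_hw *hw)
{
	/* re-reads the management cookie and updates hw->tx_pkt_filtering */
	if (e1000_enable_tx_pkt_filtering(hw))
		DEBUGOUT("Management firmware requires TX packet filtering\n");
}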
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if(ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if(ret_val) + return ret_val; + + if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break; + msec_delay_irq(100); + } + + /* Recommended delay time after link has been lost */ + msec_delay_irq(1000); + + /* Now we will re-enable th transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if(ret_val) + return ret_val; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if(ret_val) + return ret_val; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if(ret_val) + return ret_val; + msec_delay_irq(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if(ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if(ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for(i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if(ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if(ret_val) + return ret_val; + + if(mii_status_reg & MII_SR_LINK_STATUS) break; + msec_delay_irq(100); + } + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Disables PCI-Express master access. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - none. + * + ***************************************************************************/ +void +e1000_set_pci_express_master_disable(struct e1000_hw *hw) +{ + uint32_t ctrl; + + DEBUGFUNC("e1000_set_pci_express_master_disable"); + + if (hw->bus_type != e1000_bus_type_pci_express) + return; + + ctrl = E1000_READ_REG(hw, CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, CTRL, ctrl); +} + +/*************************************************************************** + * + * Enables PCI-Express master access. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - none. + * + ***************************************************************************/ +void +e1000_enable_pciex_master(struct e1000_hw *hw) +{ + uint32_t ctrl; + + DEBUGFUNC("e1000_enable_pciex_master"); + + if (hw->bus_type != e1000_bus_type_pci_express) + return; + + ctrl = E1000_READ_REG(hw, CTRL); + ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, CTRL, ctrl); +} + +/******************************************************************************* + * + * Disables PCI-Express master access and verifies there are no pending requests + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't + * caused the master requests to be disabled. + * E1000_SUCCESS master requests disabled. 
+ * + ******************************************************************************/ +int32_t +e1000_disable_pciex_master(struct e1000_hw *hw) +{ + int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ + + DEBUGFUNC("e1000_disable_pciex_master"); + + if (hw->bus_type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + e1000_set_pci_express_master_disable(hw); + + while(timeout) { + if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + break; + else + udelay(100); + timeout--; + } + + if(!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return E1000_SUCCESS; +} + +/******************************************************************************* + * + * Check for EEPROM Auto Read bit done. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + * + ******************************************************************************/ +int32_t +e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + int32_t timeout = AUTO_READ_DONE_TIMEOUT; + + DEBUGFUNC("e1000_get_auto_rd_done"); + + switch (hw->mac_type) { + default: + msec_delay(5); + break; + case e1000_82571: + case e1000_82572: + case e1000_82573: + while(timeout) { + if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; + else msec_delay(1); + timeout--; + } + + if(!timeout) { + DEBUGOUT("Auto read by HW from EEPROM has not completed.\n"); + return -E1000_ERR_RESET; + } + break; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * Checks if the PHY configuration is done + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + * + ***************************************************************************/ +int32_t +e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + int32_t timeout = PHY_CFG_TIMEOUT; + uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; + + DEBUGFUNC("e1000_get_phy_cfg_done"); + + switch (hw->mac_type) { + default: + msec_delay(10); + break; + case e1000_82571: + case e1000_82572: + while (timeout) { + if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask) + break; + else + msec_delay(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + break; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * + * Using the combination of SMBI and SWESMBI semaphore bits when resetting + * adapter or Eeprom access. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - E1000_ERR_EEPROM if fail to access EEPROM. + * E1000_SUCCESS at any other case. + * + ***************************************************************************/ +int32_t +e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) +{ + int32_t timeout; + uint32_t swsm; + + DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); + + if(!hw->eeprom_semaphore_present) + return E1000_SUCCESS; + + + /* Get the FW semaphore. */ + timeout = hw->eeprom.word_size + 1; + while(timeout) { + swsm = E1000_READ_REG(hw, SWSM); + swsm |= E1000_SWSM_SWESMBI; + E1000_WRITE_REG(hw, SWSM, swsm); + /* if we managed to set the bit we got the semaphore. 
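The PCI Express master-disable, EEPROM auto-read and PHY-configuration waits above all share the same bounded-poll shape: read a status register, stop when a bit reaches the expected state, otherwise delay and decrement a timeout, and report an error when the budget runs out. A generic sketch of that shape, using only the E1000_READ_REG() macro and msec_delay() already used in this file; the helper, its parameters and its error code are illustrative, and each real caller polls its own register and returns its own error:

static int32_t
poll_status_bit_sketch(struct e1000_hw *hw, uint32_t bit, boolean_t want_set,
                       int32_t timeout_ms)
{
	uint32_t reg;

	while (timeout_ms--) {
		reg = E1000_READ_REG(hw, STATUS);	/* register varies per caller */
		if (((reg & bit) != 0) == want_set)
			return E1000_SUCCESS;
		msec_delay(1);
	}
	return -E1000_ERR_RESET;
}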
*/ + swsm = E1000_READ_REG(hw, SWSM); + if(swsm & E1000_SWSM_SWESMBI) + break; + + udelay(50); + timeout--; + } + + if(!timeout) { + /* Release semaphores */ + e1000_put_hw_eeprom_semaphore(hw); + DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/*************************************************************************** + * This function clears HW semaphore bits. + * + * hw: Struct containing variables accessed by shared code + * + * returns: - None. + * + ***************************************************************************/ +void +e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) +{ + uint32_t swsm; + + DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); + + if(!hw->eeprom_semaphore_present) + return; + + swsm = E1000_READ_REG(hw, SWSM); + swsm &= ~(E1000_SWSM_SWESMBI); + E1000_WRITE_REG(hw, SWSM, swsm); +} + +/****************************************************************************** + * Checks if PHY reset is blocked due to SOL/IDER session, for example. + * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to + * the caller to figure out how to deal with it. + * + * hw - Struct containing variables accessed by shared code + * + * returns: - E1000_BLK_PHY_RESET + * E1000_SUCCESS + * + *****************************************************************************/ +int32_t +e1000_check_phy_reset_block(struct e1000_hw *hw) +{ + uint32_t manc = 0; + if(hw->mac_type > e1000_82547_rev_2) + manc = E1000_READ_REG(hw, MANC); + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +uint8_t +e1000_arc_subsystem_valid(struct e1000_hw *hw) +{ + uint32_t fwsm; + + /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC + * may not be provided a DMA clock when no manageability features are + * enabled. We do not want to perform any reads/writes to these registers + * if this is the case. We read FWSM to determine the manageability mode. + */ + switch (hw->mac_type) { + case e1000_82571: + case e1000_82572: + case e1000_82573: + fwsm = E1000_READ_REG(hw, FWSM); + if((fwsm & E1000_FWSM_MODE_MASK) != 0) return TRUE; + break; + default: + break; } return FALSE; } + + + diff -pruN ./drivers/net/e1000.lkm81/e1000_hw.h ./drivers/net/e1000/e1000_hw.h --- ./drivers/net/e1000.lkm81/e1000_hw.h 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000_hw.h 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -57,6 +57,9 @@ typedef enum { e1000_82541_rev_2, e1000_82547, e1000_82547_rev_2, + e1000_82571, + e1000_82572, + e1000_82573, e1000_num_macs } e1000_mac_type; @@ -64,6 +67,8 @@ typedef enum { e1000_eeprom_uninitialized = 0, e1000_eeprom_spi, e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ e1000_num_eeprom_types } e1000_eeprom_type; @@ -96,6 +101,7 @@ typedef enum { e1000_bus_type_unknown = 0, e1000_bus_type_pci, e1000_bus_type_pcix, + e1000_bus_type_pci_express, e1000_bus_type_reserved } e1000_bus_type; @@ -107,6 +113,7 @@ typedef enum { e1000_bus_speed_100, e1000_bus_speed_120, e1000_bus_speed_133, + e1000_bus_speed_2500, e1000_bus_speed_reserved } e1000_bus_speed; @@ -115,6 +122,8 @@ typedef enum { e1000_bus_width_unknown = 0, e1000_bus_width_32, e1000_bus_width_64, + e1000_bus_width_pciex_1, + e1000_bus_width_pciex_4, e1000_bus_width_reserved } e1000_bus_width; @@ -168,6 +177,12 @@ typedef enum { } e1000_downshift; typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { e1000_polarity_reversal_enabled = 0, e1000_polarity_reversal_disabled, e1000_polarity_reversal_undefined = 0xFF @@ -190,6 +205,7 @@ typedef enum { typedef enum { e1000_phy_m88 = 0, e1000_phy_igp, + e1000_phy_igp_2, e1000_phy_undefined = 0xFF } e1000_phy_type; @@ -236,8 +252,19 @@ struct e1000_eeprom_info { uint16_t address_bits; uint16_t delay_usec; uint16_t page_size; + boolean_t use_eerd; + boolean_t use_eewr; }; +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + /* Error Codes */ @@ -248,11 +275,16 @@ struct e1000_eeprom_info { #define E1000_ERR_PARAM 4 #define E1000_ERR_MAC_TYPE 5 #define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 /* Function prototypes */ /* Initialization */ int32_t e1000_reset_hw(struct e1000_hw *hw); int32_t e1000_init_hw(struct e1000_hw *hw); +int32_t e1000_id_led_init(struct e1000_hw * hw); int32_t e1000_set_mac_type(struct e1000_hw *hw); void e1000_set_media_type(struct e1000_hw *hw); @@ -269,7 +301,7 @@ int32_t e1000_force_mac_fc(struct e1000_ /* PHY */ int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); -void e1000_phy_hw_reset(struct e1000_hw *hw); +int32_t e1000_phy_hw_reset(struct e1000_hw *hw); int32_t e1000_phy_reset(struct e1000_hw *hw); int32_t e1000_detect_gig_phy(struct e1000_hw *hw); int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); @@ -281,13 +313,86 @@ int32_t e1000_check_downshift(struct e10 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); /* EEPROM Functions */ -void e1000_init_eeprom_params(struct e1000_hw *hw); +int32_t e1000_init_eeprom_params(struct e1000_hw *hw); +boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); +int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); +int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); +int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); + +/* MNG 
HOST IF functions */ +uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + uint8_t command_id; + uint8_t checksum; + uint16_t reserved1; + uint16_t reserved2; + uint16_t command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie{ + uint32_t signature; + uint16_t vlan_id; + uint8_t reserved0; + uint8_t status; + uint32_t reserved1; + uint8_t checksum; + uint8_t reserved3; + uint16_t reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie{ + uint32_t signature; + uint8_t status; + uint8_t reserved0; + uint16_t vlan_id; + uint32_t reserved1; + uint16_t reserved2; + uint8_t reserved3; + uint8_t checksum; +}; +#endif + +int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, + uint16_t length); +boolean_t e1000_check_mng_mode(struct e1000_hw *hw); +boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); +int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, + uint16_t length, uint16_t offset, uint8_t *sum); +int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, + struct e1000_host_mng_command_header* hdr); + +int32_t e1000_mng_write_commit(struct e1000_hw *hw); + int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); int32_t e1000_read_mac_addr(struct e1000_hw * hw); +int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); +void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); /* Filters (multicast, vlan, receive) */ void e1000_init_rx_addrs(struct e1000_hw *hw); @@ -307,7 +412,6 @@ int32_t e1000_led_off(struct e1000_hw *h /* Adaptive IFS Functions */ /* Everything else */ -uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); void e1000_clear_hw_cntrs(struct e1000_hw *hw); void e1000_reset_adaptive(struct e1000_hw *hw); void e1000_update_adaptive(struct e1000_hw *hw); @@ -324,6 +428,19 @@ void e1000_io_write(struct e1000_hw *hw, void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); +int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, 
boolean_t active); +void e1000_set_pci_express_master_disable(struct e1000_hw *hw); +void e1000_enable_pciex_master(struct e1000_hw *hw); +int32_t e1000_disable_pciex_master(struct e1000_hw *hw); +int32_t e1000_get_auto_rd_done(struct e1000_hw *hw); +int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw); +int32_t e1000_get_software_semaphore(struct e1000_hw *hw); +void e1000_release_software_semaphore(struct e1000_hw *hw); +int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); +int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); +void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); +int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); +uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); #define E1000_READ_REG_IO(a, reg) \ e1000_read_reg_io((a), E1000_##reg) @@ -357,10 +474,23 @@ int32_t e1000_set_d3_lplu_state(struct e #define E1000_DEV_ID_82547GI 0x1075 #define E1000_DEV_ID_82541GI 0x1076 #define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C #define E1000_DEV_ID_82546GB_COPPER 0x1079 #define E1000_DEV_ID_82546GB_FIBER 0x107A #define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A #define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A + +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 #define NODE_ADDRESS_SIZE 6 #define ETH_LENGTH_OF_ADDRESS 6 @@ -373,6 +503,7 @@ int32_t e1000_set_d3_lplu_state(struct e #define E1000_REVISION_0 0 #define E1000_REVISION_1 1 #define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 #define SPEED_10 10 #define SPEED_100 100 @@ -429,6 +560,7 @@ int32_t e1000_set_d3_lplu_state(struct e E1000_IMS_RXSEQ | \ E1000_IMS_LSC) + /* Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. 
We * reserve one of these spots for our directed address, allowing us room for @@ -449,14 +581,74 @@ struct e1000_rx_desc { uint16_t special; }; +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + uint64_t buffer_addr; + uint64_t reserved; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length; + uint16_t vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + uint64_t buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + uint32_t mrq; /* Multiple Rx Queues */ + union { + uint32_t rss; /* RSS Hash */ + struct { + uint16_t ip_id; /* IP id */ + uint16_t csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + uint32_t status_error; /* ext status/error */ + uint16_t length0; /* length of buffer 0 */ + uint16_t vlan; /* VLAN tag */ + } middle; + struct { + uint16_t header_status; + uint16_t length[3]; /* length of buffers 1-3 */ + } upper; + uint64_t reserved; + } wb; /* writeback */ +}; + /* Receive Decriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ @@ -466,9 +658,20 @@ struct e1000_rx_desc { #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ #define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ -#define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ +#define E1000_RXD_SPC_PRI_SHIFT 13 #define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ -#define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF /* mask to determine if packets should be dropped due to frame errors */ #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ @@ -478,6 +681,15 @@ struct e1000_rx_desc { E1000_RXD_ERR_CXE | \ E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + 
E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + /* Transmit Descriptor */ struct e1000_tx_desc { uint64_t buffer_addr; /* Address of the descriptor's data buffer */ @@ -630,6 +842,8 @@ struct e1000_ffvt_entry { #define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX #define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + /* Register Set. (82543, 82544) * * Registers are defined to be 32 bits and should be accessed as 32 bit values. @@ -650,6 +864,7 @@ struct e1000_ffvt_entry { #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ @@ -659,7 +874,14 @@ struct e1000_ffvt_entry { #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ #define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ @@ -668,18 +890,39 @@ struct e1000_ffvt_entry { #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ #define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ #define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ #define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address 
Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ @@ -695,6 +938,14 @@ struct e1000_ffvt_entry { #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ @@ -753,7 +1004,17 @@ struct e1000_ffvt_entry { #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ #define E1000_RA 0x05400 /* Receive Address - RW Array */ #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ @@ -771,6 +1032,24 @@ struct e1000_ffvt_entry { #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power 
State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Inteface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* Register Set (82542) * * Some of the 82542 registers are located at different offsets than they are @@ -785,6 +1064,7 @@ struct e1000_ffvt_entry { #define E1000_82542_CTRL_EXT E1000_CTRL_EXT #define E1000_82542_FLA E1000_FLA #define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL #define E1000_82542_FCAL E1000_FCAL #define E1000_82542_FCAH E1000_FCAH #define E1000_82542_FCT E1000_FCT @@ -802,6 +1082,18 @@ struct e1000_ffvt_entry { #define E1000_82542_RDLEN 0x00118 #define E1000_82542_RDH 0x00120 #define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 #define E1000_82542_FCRTH 0x00160 #define E1000_82542_FCRTL 0x00168 #define E1000_82542_FCTTV E1000_FCTTV @@ -821,6 +1113,18 @@ struct e1000_ffvt_entry { #define E1000_82542_VFTA 0x00600 #define E1000_82542_LEDCTL E1000_LEDCTL #define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_ERT E1000_ERT #define E1000_82542_RXDCTL E1000_RXDCTL #define E1000_82542_RADV E1000_RADV #define E1000_82542_RSRPD E1000_RSRPD @@ -905,6 +1209,45 @@ struct e1000_ffvt_entry { #define E1000_82542_FFMT E1000_FFMT #define E1000_82542_FFVT E1000_FFVT #define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG 
+#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR /* Statistics counters collected by the MAC */ struct e1000_hw_stats { @@ -966,11 +1309,21 @@ struct e1000_hw_stats { uint64_t bptc; uint64_t tsctc; uint64_t tsctfc; + uint64_t iac; + uint64_t icrxptc; + uint64_t icrxatc; + uint64_t ictxptc; + uint64_t ictxatc; + uint64_t ictxqec; + uint64_t ictxqmtc; + uint64_t icrxdmtc; + uint64_t icrxoc; }; /* Structure containing variables used by the shared code (e1000_hw.c) */ struct e1000_hw { - uint8_t *hw_addr; + uint8_t __iomem *hw_addr; + uint8_t *flash_address; e1000_mac_type mac_type; e1000_phy_type phy_type; uint32_t phy_init_script; @@ -985,6 +1338,7 @@ struct e1000_hw { e1000_ms_type original_master_slave; e1000_ffe_config ffe_config_state; uint32_t asf_firmware_present; + uint32_t eeprom_semaphore_present; unsigned long io_base; uint32_t phy_id; uint32_t phy_revision; @@ -1001,6 +1355,8 @@ struct e1000_hw { uint32_t ledctl_default; uint32_t ledctl_mode1; uint32_t ledctl_mode2; + boolean_t tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; uint16_t phy_spd_default; uint16_t autoneg_advertised; uint16_t pci_cmd_word; @@ -1026,11 +1382,13 @@ struct e1000_hw { uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; boolean_t disable_polarity_correction; boolean_t speed_downgraded; + e1000_smart_speed smart_speed; e1000_dsp_config dsp_config_state; boolean_t get_link_status; boolean_t serdes_link_down; boolean_t tbi_compatibility_en; boolean_t tbi_compatibility_on; + boolean_t laa_is_present; boolean_t phy_reset_disable; boolean_t fc_send_xon; boolean_t fc_strict_ieee; @@ -1038,17 +1396,24 @@ struct e1000_hw { boolean_t adaptive_ifs; boolean_t ifs_params_forced; boolean_t in_ifs_mode; + boolean_t mng_reg_access_disabled; }; #define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ #define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ - +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ /* Register Bit Masks */ /* Device Control */ #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ #define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ #define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ #define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ @@ -1062,6 +1427,8 @@ struct e1000_hw { #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ @@ -1081,6 +1448,7 @@ struct e1000_hw { #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ @@ -1090,6 +1458,8 @@ struct e1000_hw { #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ @@ -1120,6 +1490,18 @@ struct e1000_hw { #ifndef E1000_EEPROM_GRANT_ATTEMPTS #define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ #endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 /* EEPROM Read */ #define E1000_EERD_START 0x00000001 /* Start Read */ @@ -1163,6 +1545,10 @@ struct e1000_hw { #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ /* MDI Control */ #define E1000_MDIC_DATA_MASK 0x0000FFFF @@ -1179,18 +1565,22 @@ struct e1000_hw { /* LED Control */ #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F #define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 #define E1000_LEDCTL_LED0_IVRT 0x00000040 #define E1000_LEDCTL_LED0_BLINK 0x00000080 #define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 #define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define 
E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 #define E1000_LEDCTL_LED1_IVRT 0x00004000 #define E1000_LEDCTL_LED1_BLINK 0x00008000 #define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 #define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 #define E1000_LEDCTL_LED2_IVRT 0x00400000 #define E1000_LEDCTL_LED2_BLINK 0x00800000 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 #define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 #define E1000_LEDCTL_LED3_IVRT 0x40000000 #define E1000_LEDCTL_LED3_BLINK 0x80000000 @@ -1230,6 +1620,10 @@ struct e1000_hw { #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ #define E1000_ICR_TXD_LOW 0x00008000 #define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ /* Interrupt Cause Set */ #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ @@ -1247,6 +1641,9 @@ struct e1000_hw { #define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ #define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW #define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ /* Interrupt Mask Set */ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ @@ -1264,6 +1661,9 @@ struct e1000_hw { #define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ #define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW #define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ /* Interrupt Mask Clear */ #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ @@ -1281,6 +1681,9 @@ struct e1000_hw { #define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ #define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW #define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ /* Receive Control */ #define E1000_RCTL_RST 0x00000001 /* Software reset */ @@ -1293,6 +1696,8 @@ struct e1000_hw { #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ #define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ #define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ #define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ @@ -1319,6 +1724,34 @@ struct e1000_hw { #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> 
E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* Receive Descriptor */ #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ @@ -1333,6 +1766,23 @@ struct e1000_hw { #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + /* Receive Descriptor Control */ #define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ #define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ @@ -1346,6 +1796,8 @@ struct e1000_hw { #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ #define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. 
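A worked instance of the PSRCTL usage comment above, for the documented defaults (value0=256, value1=4096, value2=4096, value3=0). ROUNDUP here is a hypothetical round-up-to-multiple macro standing in for whatever the caller uses; only the mask and shift names come from this header:

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static uint32_t
psrctl_default_sketch(void)
{
	uint32_t psrctl = 0;

	psrctl |= (ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
	          E1000_PSRCTL_BSIZE0_MASK;		/* 0x00000002 */
	psrctl |= (ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
	          E1000_PSRCTL_BSIZE1_MASK;		/* 0x00000400 */
	psrctl |= (ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
	          E1000_PSRCTL_BSIZE2_MASK;		/* 0x00040000 */
	psrctl |= (ROUNDUP(0, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
	          E1000_PSRCTL_BSIZE3_MASK;		/* 0x00000000 */

	return psrctl;					/* 0x00040402 */
}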
*/ /* Transmit Configuration Word */ #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ @@ -1379,12 +1831,26 @@ struct e1000_hw { #define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ /* Receive Checksum Control */ #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ #define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 /* Definitions for power management and wakeup registers */ /* Wake Up Control */ @@ -1403,6 +1869,7 @@ struct e1000_hw { #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ #define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ #define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ #define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ @@ -1438,13 +1905,19 @@ struct e1000_hw { #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ #define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address * filtering */ #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host * memory */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ @@ -1455,11 +1928,98 @@ struct e1000_hw { #define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ #define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore 
Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Inteface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + uint8_t command_id; + uint8_t command_length; + uint8_t command_options; /* I/F bits for command, status for return */ + uint8_t checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + /* Wake Up Packet Length */ #define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ #define E1000_MDALIGN 4096 +#define E1000_GCR_BEM32 0x00400000 +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + /* EEPROM Commands - Microwire */ #define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ #define 
EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode */ @@ -1469,27 +2029,26 @@ struct e1000_hw { /* EEPROM Commands - SPI */ #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ -#define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */ -#define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */ -#define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */ -#define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */ -#define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */ -#define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */ -#define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ /* EEPROM Size definitions */ -#define EEPROM_SIZE_16KB 0x1800 -#define EEPROM_SIZE_8KB 0x1400 -#define EEPROM_SIZE_4KB 0x1000 -#define EEPROM_SIZE_2KB 0x0C00 -#define EEPROM_SIZE_1KB 0x0800 -#define EEPROM_SIZE_512B 0x0400 -#define EEPROM_SIZE_128B 0x0000 +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 #define EEPROM_SIZE_MASK 0x1C00 /* EEPROM Word Offsets */ #define EEPROM_COMPAT 0x0003 #define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 #define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. 
*/ #define EEPROM_PHY_CLASS_WORD 0x0007 #define EEPROM_INIT_CONTROL1_REG 0x000A @@ -1500,6 +2059,8 @@ struct e1000_hw { #define EEPROM_FLASH_VERSION 0x0032 #define EEPROM_CHECKSUM_REG 0x003F +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ + /* Word definitions for ID LED Settings */ #define ID_LED_RESERVED_0000 0x0000 #define ID_LED_RESERVED_FFFF 0xFFFF @@ -1598,11 +2159,28 @@ struct e1000_hw { #define IFS_MIN 40 #define IFS_RATIO 4 +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 + /* PBA constants */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ #define E1000_PBA_22K 0x0016 #define E1000_PBA_24K 0x0018 #define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_38K 0x0026 #define E1000_PBA_40K 0x0028 #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ @@ -1655,6 +2233,13 @@ struct e1000_hw { /* Number of milliseconds we wait for auto-negotiation to complete */ #define LINK_UP_TIMEOUT 500 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 40 + #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) /* The carrier extension symbol, as received by the NIC. 
*/ @@ -1727,6 +2312,9 @@ struct e1000_hw { #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ #define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ @@ -1752,6 +2340,7 @@ struct e1000_hw { #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ #define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ #define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ /* IGP01E1000 AGC Registers - stores the cable length values*/ @@ -1760,12 +2349,20 @@ struct e1000_hw { #define IGP01E1000_PHY_AGC_C 0x1472 #define IGP01E1000_PHY_AGC_D 0x1872 +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + /* IGP01E1000 DSP Reset Register */ #define IGP01E1000_PHY_DSP_RESET 0x1F33 #define IGP01E1000_PHY_DSP_SET 0x1F71 #define IGP01E1000_PHY_DSP_FFE 0x1F35 #define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + #define IGP01E1000_PHY_AGC_PARAM_A 0x1171 #define IGP01E1000_PHY_AGC_PARAM_B 0x1271 #define IGP01E1000_PHY_AGC_PARAM_C 0x1471 @@ -1787,8 +2384,7 @@ struct e1000_hw { #define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 -#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ -#define MAX_PHY_MULTI_PAGE_REG 0xF /*Registers that are equal on all pages*/ + /* PHY Control Register */ #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ @@ -2050,20 +2646,30 @@ struct e1000_hw { #define IGP01E1000_MSE_CHANNEL_B 0x0F00 #define IGP01E1000_MSE_CHANNEL_A 0xF000 +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + /* IGP01E1000 DSP reset macros */ #define DSP_RESET_ENABLE 0x0 #define DSP_RESET_DISABLE 0x2 #define E1000_MAX_DSP_RESETS 10 -/* IGP01E1000 AGC Registers */ +/* IGP01E1000 & IGP02E1000 AGC Registers */ #define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 -/* The precision of the length is +/- 10 meters */ +/* The precision error of the cable length is +/- 10 meters */ #define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 /* IGP01E1000 PCS Initialization register */ /* bits 3:6 in the PCS registers stores the channels polarity */ @@ -2091,7 +2697,11 @@ struct e1000_hw { #define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 #define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + /* Bit definitions for valid PHY IDs. 
*/ +/* I = Integrated + * E = External + */ #define M88E1000_E_PHY_ID 0x01410C50 #define M88E1000_I_PHY_ID 0x01410C30 #define M88E1011_I_PHY_ID 0x01410C20 @@ -2099,6 +2709,8 @@ struct e1000_hw { #define M88E1000_12_PHY_ID M88E1000_E_PHY_ID #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID #define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define L1LXT971A_PHY_ID 0x001378E0 /* Miscellaneous PHY bit definitions. */ #define PHY_PREAMBLE 0xFFFFFFFF diff -pruN ./drivers/net/e1000.lkm81/e1000_main.c ./drivers/net/e1000/e1000_main.c --- ./drivers/net/e1000.lkm81/e1000_main.c 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000_main.c 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -27,73 +27,79 @@ *******************************************************************************/ #include "e1000.h" -#include /* Change Log - * - * 5.2.51 5/14/04 - * o set default configuration to 'NAPI disabled'. NAPI enabled driver - * causes kernel panic when the interface is shutdown while data is being - * transferred. - * 5.2.47 5/04/04 - * o fixed ethtool -t implementation - * 5.2.45 4/29/04 - * o fixed ethtool -e implementation - * o Support for ethtool ops [Stephen Hemminger (shemminger@osdl.org)] - * 5.2.42 4/26/04 - * o Added support for the DPRINTK macro for enhanced error logging. Some - * parts of the patch were supplied by Jon Mason. - * o Move the register_netdevice() donw in the probe routine due to a - * loading/unloading test issue. - * o Added a long RX byte count the the extra ethtool data members for BER - * testing purposes. - * 5.2.39 3/12/04 + * 6.0.58 4/20/05 + * o Accepted ethtool cleanup patch from Stephen Hemminger + * 6.0.44+ 2/15/05 + * o applied Anton's patch to resolve tx hang in hardware + * o Applied Andrew Mortons patch - e1000 stops working after resume */ char e1000_driver_name[] = "e1000"; char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -char e1000_driver_version[] = "5.2.52-k4"; -char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation."; +#ifndef CONFIG_E1000_NAPI +#define DRIVERNAPI +#else +#define DRIVERNAPI "-NAPI" +#endif +#define DRV_VERSION "6.1.16-k3"DRIVERNAPI +char e1000_driver_version[] = DRV_VERSION; +char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; /* e1000_pci_tbl - PCI Device ID Table * - * Wildcard entries (PCI_ANY_ID) should come last * Last entry must be all 0s * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } + * Macro expands to... 
+ * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} */ static struct pci_device_id e1000_pci_tbl[] = { - {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x105E), + INTEL_E1000_ETHERNET_DEVICE(0x105F), + INTEL_E1000_ETHERNET_DEVICE(0x1060), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x107D), + INTEL_E1000_ETHERNET_DEVICE(0x107E), + INTEL_E1000_ETHERNET_DEVICE(0x107F), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x108B), + INTEL_E1000_ETHERNET_DEVICE(0x108C), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x109A), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), /* required last entry */ {0,} }; @@ -132,27 +138,26 @@ static int e1000_xmit_frame(struct sk_bu static 
struct net_device_stats * e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); -static inline void e1000_irq_disable(struct e1000_adapter *adapter); -static inline void e1000_irq_enable(struct e1000_adapter *adapter); static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs); static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter); #ifdef CONFIG_E1000_NAPI static int e1000_clean(struct net_device *netdev, int *budget); static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done, int work_to_do); +static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + int *work_done, int work_to_do); #else static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); +static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter); #endif static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter); static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); -void set_ethtool_ops(struct net_device *netdev); +void e1000_set_ethtool_ops(struct net_device *netdev); static void e1000_enter_82542_rst(struct e1000_adapter *adapter); static void e1000_leave_82542_rst(struct e1000_adapter *adapter); -static inline void e1000_rx_checksum(struct e1000_adapter *adapter, - struct e1000_rx_desc *rx_desc, - struct sk_buff *skb); static void e1000_tx_timeout(struct net_device *dev); static void e1000_tx_timeout_task(struct net_device *dev); static void e1000_smartspeed(struct e1000_adapter *adapter); @@ -164,28 +169,20 @@ static void e1000_vlan_rx_add_vid(struct static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr); -static int e1000_suspend(struct pci_dev *pdev, uint32_t state); +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); #ifdef CONFIG_PM static int e1000_resume(struct pci_dev *pdev); #endif #ifdef CONFIG_NET_POLL_CONTROLLER /* for netdump / net console */ -static void e1000_netpoll (struct net_device *dev); +static void e1000_netpoll (struct net_device *netdev); #endif -struct notifier_block e1000_notifier_reboot = { - .notifier_call = e1000_notify_reboot, - .next = NULL, - .priority = 0 -}; - /* Exported from other modules */ extern void e1000_check_options(struct e1000_adapter *adapter); - static struct pci_driver e1000_driver = { .name = e1000_driver_name, .id_table = e1000_pci_tbl, @@ -201,8 +198,9 @@ static struct pci_driver e1000_driver = MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); -static int debug = 3; +static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); @@ -223,9 +221,7 @@ e1000_init_module(void) printk(KERN_INFO "%s\n", e1000_copyright); ret = pci_module_init(&e1000_driver); - if(ret >= 0) { - register_reboot_notifier(&e1000_notifier_reboot); - } + return ret; } @@ -241,12 +237,128 @@ module_init(e1000_init_module); static void __exit e1000_exit_module(void) { - unregister_reboot_notifier(&e1000_notifier_reboot); pci_unregister_driver(&e1000_driver); } 
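[Editorial note -- not part of the patch] The hunk that follows adds e1000_irq_disable()/e1000_irq_enable() to e1000_main.c as inline helpers built around adapter->irq_sem: every disable masks the chip (IMC write) and bumps the counter, while enable only re-arms interrupts (IMS write) once the counter falls back to zero, so nested disable/enable pairs stay balanced. Below is a minimal, self-contained user-space sketch of that refcounted-mask pattern; mask_hw()/unmask_hw() are hypothetical stand-ins for the register writes and irq_sem is modelled as a plain int rather than an atomic_t.

    #include <stdio.h>

    static int irq_sem;     /* models adapter->irq_sem (atomic_t in the driver) */
    static int hw_masked;   /* 1 = interrupts masked at the hardware            */

    static void mask_hw(void)   { hw_masked = 1; }  /* ~ E1000_WRITE_REG(hw, IMC, ~0)              */
    static void unmask_hw(void) { hw_masked = 0; }  /* ~ E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK) */

    static void irq_disable(void)
    {
            irq_sem++;                  /* ~ atomic_inc(&adapter->irq_sem) */
            mask_hw();
    }

    static void irq_enable(void)
    {
            if (--irq_sem == 0)         /* ~ atomic_dec_and_test()         */
                    unmask_hw();
    }

    int main(void)
    {
            irq_disable();              /* outer caller, e.g. e1000_down() */
            irq_disable();              /* nested caller                   */
            irq_enable();               /* count 2 -> 1: still masked      */
            printf("masked after first enable: %d\n", hw_masked);   /* prints 1 */
            irq_enable();               /* count 1 -> 0: unmask            */
            printf("masked after last enable:  %d\n", hw_masked);   /* prints 0 */
            return 0;
    }

The real helpers additionally flush the posted write (E1000_WRITE_FLUSH) and, on disable, wait out any in-flight handler with synchronize_irq(); the sketch only illustrates the counting.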
module_exit(e1000_exit_module); +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ + +static inline void +e1000_irq_disable(struct e1000_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(&adapter->hw, IMC, ~0); + E1000_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ + +static inline void +e1000_irq_enable(struct e1000_adapter *adapter) +{ + if(likely(atomic_dec_and_test(&adapter->irq_sem))) { + E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(&adapter->hw); + } +} +void +e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + uint16_t vid = adapter->hw.mng_cookie.vlan_id; + uint16_t old_vid = adapter->mng_vlan_id; + if(adapter->vlgrp) { + if(!adapter->vlgrp->vlan_devices[vid]) { + if(adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, vid); + adapter->mng_vlan_id = vid; + } else + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + + if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !adapter->vlgrp->vlan_devices[old_vid]) + e1000_vlan_rx_kill_vid(netdev, old_vid); + } + } +} + +/** + * e1000_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is closed. + * + **/ + +static inline void +e1000_release_hw_control(struct e1000_adapter *adapter) +{ + uint32_t ctrl_ext; + uint32_t swsm; + + /* Let firmware take over control of h/w */ + switch (adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm & ~E1000_SWSM_DRV_LOAD); + default: + break; + } +} + +/** + * e1000_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. 
+ * + **/ + +static inline void +e1000_get_hw_control(struct e1000_adapter *adapter) +{ + uint32_t ctrl_ext; + uint32_t swsm; + /* Let firmware know the driver has taken over */ + switch (adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + break; + case e1000_82573: + swsm = E1000_READ_REG(&adapter->hw, SWSM); + E1000_WRITE_REG(&adapter->hw, SWSM, + swsm | E1000_SWSM_DRV_LOAD); + break; + default: + break; + } +} int e1000_up(struct e1000_adapter *adapter) @@ -256,6 +368,14 @@ e1000_up(struct e1000_adapter *adapter) /* hardware has been reset, we need to reload some things */ + /* Reset the PHY if it was previously powered down */ + if(adapter->hw.media_type == e1000_media_type_copper) { + uint16_t mii_reg; + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + if(mii_reg & MII_CR_POWER_DOWN) + e1000_phy_reset(&adapter->hw); + } + e1000_set_multi(netdev); e1000_restore_vlan(adapter); @@ -263,14 +383,31 @@ e1000_up(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - e1000_alloc_rx_buffers(adapter); + adapter->alloc_rx_buf(adapter); +#ifdef CONFIG_PCI_MSI + if(adapter->hw.mac_type > e1000_82547_rev_2) { + adapter->have_msi = TRUE; + if((err = pci_enable_msi(adapter->pdev))) { + DPRINTK(PROBE, ERR, + "Unable to allocate MSI interrupt Error: %d\n", err); + adapter->have_msi = FALSE; + } + } +#endif if((err = request_irq(adapter->pdev->irq, &e1000_intr, SA_SHIRQ | SA_SAMPLE_RANDOM, - netdev->name, netdev))) + netdev->name, netdev))) { + DPRINTK(PROBE, ERR, + "Unable to allocate interrupt Error: %d\n", err); return err; + } mod_timer(&adapter->watchdog_timer, jiffies); + +#ifdef CONFIG_E1000_NAPI + netif_poll_enable(netdev); +#endif e1000_irq_enable(adapter); return 0; @@ -280,12 +417,23 @@ void e1000_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; + boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) && + e1000_check_mng_mode(&adapter->hw); e1000_irq_disable(adapter); free_irq(adapter->pdev->irq, netdev); +#ifdef CONFIG_PCI_MSI + if(adapter->hw.mac_type > e1000_82547_rev_2 && + adapter->have_msi == TRUE) + pci_disable_msi(adapter->pdev); +#endif del_timer_sync(&adapter->tx_fifo_stall_timer); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); + +#ifdef CONFIG_E1000_NAPI + netif_poll_disable(netdev); +#endif adapter->link_speed = 0; adapter->link_duplex = 0; netif_carrier_off(netdev); @@ -294,55 +442,98 @@ e1000_down(struct e1000_adapter *adapter e1000_reset(adapter); e1000_clean_tx_ring(adapter); e1000_clean_rx_ring(adapter); + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is TRUE * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active */ + if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && + adapter->hw.media_type == e1000_media_type_copper && + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) && + !mng_mode_enabled && + !e1000_check_phy_reset_block(&adapter->hw)) { + uint16_t mii_reg; + e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); + mdelay(1); + } } void e1000_reset(struct e1000_adapter *adapter) { + struct net_device *netdev = adapter->netdev; uint32_t pba, manc; + uint16_t 
fc_high_water_mark = E1000_FC_HIGH_DIFF; + uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF; + /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. */ - if(adapter->hw.mac_type < e1000_82547) { - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) - pba = E1000_PBA_40K; - else - pba = E1000_PBA_48K; - } else { - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) - pba = E1000_PBA_22K; - else - pba = E1000_PBA_30K; + switch (adapter->hw.mac_type) { + case e1000_82547: + case e1000_82547_rev_2: + pba = E1000_PBA_30K; + break; + case e1000_82571: + case e1000_82572: + pba = E1000_PBA_38K; + break; + case e1000_82573: + pba = E1000_PBA_12K; + break; + default: + pba = E1000_PBA_48K; + break; + } + + if((adapter->hw.mac_type != e1000_82573) && + (adapter->netdev->mtu > E1000_RXBUFFER_8192)) { + pba -= 8; /* allocate more FIFO for Tx */ + /* send an XOFF when there is enough space in the + * Rx FIFO to hold one extra full size Rx packet + */ + fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + + ETHERNET_FCS_SIZE + 1; + fc_low_water_mark = fc_high_water_mark + 8; + } + + + if(adapter->hw.mac_type == e1000_82547) { adapter->tx_fifo_head = 0; adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; adapter->tx_fifo_size = (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; atomic_set(&adapter->tx_fifo_stall, 0); } + E1000_WRITE_REG(&adapter->hw, PBA, pba); /* flow control settings */ - adapter->hw.fc_high_water = - (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_HIGH_DIFF; - adapter->hw.fc_low_water = - (pba << E1000_PBA_BYTES_SHIFT) - E1000_FC_LOW_DIFF; + adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - + fc_high_water_mark; + adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - + fc_low_water_mark; adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; adapter->hw.fc_send_xon = 1; adapter->hw.fc = adapter->hw.original_fc; + /* Allow time for pending master requests to run */ e1000_reset_hw(&adapter->hw); if(adapter->hw.mac_type >= e1000_82544) E1000_WRITE_REG(&adapter->hw, WUC, 0); - e1000_init_hw(&adapter->hw); - + if(e1000_init_hw(&adapter->hw)) + DPRINTK(PROBE, ERR, "Hardware Error\n"); + e1000_update_mng_vlan(adapter); /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); e1000_reset_adaptive(&adapter->hw); e1000_phy_get_info(&adapter->hw, &adapter->phy_info); - - if(adapter->en_mng_pt) { + if (adapter->en_mng_pt) { manc = E1000_READ_REG(&adapter->hw, MANC); manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); E1000_WRITE_REG(&adapter->hw, MANC, manc); @@ -367,14 +558,12 @@ e1000_probe(struct pci_dev *pdev, { struct net_device *netdev; struct e1000_adapter *adapter; + unsigned long mmio_start, mmio_len; + static int cards_found = 0; - unsigned long mmio_start; - int mmio_len; - int pci_using_dac; - int i; - int err; + int i, err, pci_using_dac; uint16_t eeprom_data; - + uint16_t eeprom_apme_mask = E1000_EEPROM_APME; if((err = pci_enable_device(pdev))) return err; @@ -403,17 +592,12 @@ e1000_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); pci_set_drvdata(pdev, netdev); - adapter = netdev->priv; + adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = pdev; adapter->hw.back = adapter; adapter->msg_enable = (1 << debug) - 1; - rtnl_lock(); - /* we need to set the name early since the DPRINTK macro needs it set */ - if (dev_alloc_name(netdev, netdev->name) < 0) - goto err_free_unlock; - mmio_start = pci_resource_start(pdev, BAR_0); mmio_len = pci_resource_len(pdev, 
BAR_0); @@ -440,7 +624,7 @@ e1000_probe(struct pci_dev *pdev, netdev->set_mac_address = &e1000_set_mac; netdev->change_mtu = &e1000_change_mtu; netdev->do_ioctl = &e1000_ioctl; - set_ethtool_ops(netdev); + e1000_set_ethtool_ops(netdev); netdev->tx_timeout = &e1000_tx_timeout; netdev->watchdog_timeo = 5 * HZ; #ifdef CONFIG_E1000_NAPI @@ -453,6 +637,7 @@ e1000_probe(struct pci_dev *pdev, #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = e1000_netpoll; #endif + strcpy(netdev->name, pci_name(pdev)); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; @@ -465,30 +650,33 @@ e1000_probe(struct pci_dev *pdev, if((err = e1000_sw_init(adapter))) goto err_sw_init; + if((err = e1000_check_phy_reset_block(&adapter->hw))) + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); + if(adapter->hw.mac_type >= e1000_82543) { netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; - } else { - netdev->features = NETIF_F_SG; } #ifdef NETIF_F_TSO -#ifdef BROKEN_ON_NON_IA_ARCHS - /* Disbaled for now until root-cause is found for - * hangs reported against non-IA archs. TSO can be - * enabled using ethtool -K eth tso on */ if((adapter->hw.mac_type >= e1000_82544) && (adapter->hw.mac_type != e1000_82547)) netdev->features |= NETIF_F_TSO; + +#ifdef NETIF_F_TSO_IPV6 + if(adapter->hw.mac_type > e1000_82547_rev_2) + netdev->features |= NETIF_F_TSO_IPV6; #endif #endif - if(pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + /* hard_start_xmit is safe against parallel locking */ + netdev->features |= NETIF_F_LLTX; + adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); /* before reading the EEPROM, reset the controller to @@ -506,10 +694,12 @@ e1000_probe(struct pci_dev *pdev, /* copy the MAC address out of the EEPROM */ - e1000_read_mac_addr(&adapter->hw); + if(e1000_read_mac_addr(&adapter->hw)) + DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); if(!is_valid_ether_addr(netdev->dev_addr)) { + DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; } @@ -538,7 +728,6 @@ e1000_probe(struct pci_dev *pdev, netif_carrier_off(netdev); netif_stop_queue(netdev); - DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); e1000_check_options(adapter); /* Initial Wake on LAN setting @@ -551,8 +740,14 @@ e1000_probe(struct pci_dev *pdev, case e1000_82542_rev2_1: case e1000_82543: break; + case e1000_82544: + e1000_read_eeprom(&adapter->hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; case e1000_82546: case e1000_82546_rev_3: + case e1000_82571: if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) && (adapter->hw.media_type == e1000_media_type_copper)) { e1000_read_eeprom(&adapter->hw, @@ -565,19 +760,27 @@ e1000_probe(struct pci_dev *pdev, EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; } - if(eeprom_data & E1000_EEPROM_APME) + if(eeprom_data & eeprom_apme_mask) adapter->wol |= E1000_WUFC_MAG; /* reset the hardware with the new settings */ - e1000_reset(adapter); - /* since we are holding the rtnl lock already, call the no-lock version */ - if((err = register_netdevice(netdev))) + /* If the controller is 82573 and f/w is AMT, do not set + * DRV_LOAD until the interface is up. For all other cases, + * let the f/w know that the h/w is now under the control + * of the driver. 
*/ + if (adapter->hw.mac_type != e1000_82573 || + !e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + strcpy(netdev->name, "eth%d"); + if((err = register_netdev(netdev))) goto err_register; + DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); + cards_found++; - rtnl_unlock(); return 0; err_register: @@ -585,8 +788,6 @@ err_sw_init: err_eeprom: iounmap(adapter->hw.hw_addr); err_ioremap: -err_free_unlock: - rtnl_unlock(); free_netdev(netdev); err_alloc_etherdev: pci_release_regions(pdev); @@ -607,9 +808,11 @@ static void __devexit e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t manc; + flush_scheduled_work(); + if(adapter->hw.mac_type >= e1000_82540 && adapter->hw.media_type == e1000_media_type_copper) { manc = E1000_READ_REG(&adapter->hw, MANC); @@ -619,14 +822,21 @@ e1000_remove(struct pci_dev *pdev) } } + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. */ + e1000_release_hw_control(adapter); + unregister_netdev(netdev); - e1000_phy_hw_reset(&adapter->hw); + if(!e1000_check_phy_reset_block(&adapter->hw)) + e1000_phy_hw_reset(&adapter->hw); iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); free_netdev(netdev); + + pci_disable_device(pdev); } /** @@ -657,34 +867,38 @@ e1000_sw_init(struct e1000_adapter *adap pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); adapter->rx_buffer_len = E1000_RXBUFFER_2048; + adapter->rx_ps_bsize0 = E1000_RXBUFFER_256; hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; /* identify the MAC */ - if (e1000_set_mac_type(hw)) { + if(e1000_set_mac_type(hw)) { DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); return -EIO; } /* initialize eeprom parameters */ - e1000_init_eeprom_params(hw); + if(e1000_init_eeprom_params(hw)) { + E1000_ERR("EEPROM initialization failed\n"); + return -EIO; + } - if((hw->mac_type == e1000_82541) || - (hw->mac_type == e1000_82547) || - (hw->mac_type == e1000_82541_rev_2) || - (hw->mac_type == e1000_82547_rev_2)) + switch(hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: hw->phy_init_script = 1; + break; + } e1000_set_media_type(hw); - if(hw->mac_type < e1000_82543) - hw->report_tx_early = 0; - else - hw->report_tx_early = 1; - hw->wait_autoneg_complete = FALSE; hw->tbi_compatibility_en = TRUE; hw->adaptive_ifs = TRUE; @@ -720,7 +934,7 @@ e1000_sw_init(struct e1000_adapter *adap static int e1000_open(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); int err; /* allocate transmit descriptors */ @@ -735,8 +949,19 @@ e1000_open(struct net_device *netdev) if((err = e1000_up(adapter))) goto err_up; + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } - return 0; + /* If AMT is enabled, let the firmware know that the network + * interface is now open */ + if (adapter->hw.mac_type == e1000_82573 && + e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + + return E1000_SUCCESS; err_up: e1000_free_rx_resources(adapter); @@ -763,17 +988,51 @@ err_setup_tx: static int e1000_close(struct net_device *netdev) { - struct 
e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); e1000_down(adapter); e1000_free_tx_resources(adapter); e1000_free_rx_resources(adapter); + if((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + } + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed */ + if (adapter->hw.mac_type == e1000_82573 && + e1000_check_mng_mode(&adapter->hw)) + e1000_release_hw_control(adapter); + return 0; } /** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static inline boolean_t +e1000_check_64k_bound(struct e1000_adapter *adapter, + void *start, unsigned long len) +{ + unsigned long begin = (unsigned long) start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 */ + if (adapter->hw.mac_type == e1000_82545 || + adapter->hw.mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; + } + + return TRUE; +} + +/** * e1000_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure * @@ -788,8 +1047,10 @@ e1000_setup_tx_resources(struct e1000_ad int size; size = sizeof(struct e1000_buffer) * txdr->count; - txdr->buffer_info = kmalloc(size, GFP_KERNEL); + txdr->buffer_info = vmalloc(size); if(!txdr->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } memset(txdr->buffer_info, 0, size); @@ -801,9 +1062,42 @@ e1000_setup_tx_resources(struct e1000_ad txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); if(!txdr->desc) { - kfree(txdr->buffer_info); +setup_tx_desc_die: + vfree(txdr->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the transmit descriptor ring\n"); return -ENOMEM; } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " + "at %p\n", txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); + if(!txdr->desc) { + /* Failed allocation, critical failure */ + pci_free_consistent(pdev, txdr->size, olddesc, olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + pci_free_consistent(pdev, txdr->size, txdr->desc, + txdr->dma); + pci_free_consistent(pdev, txdr->size, olddesc, olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + pci_free_consistent(pdev, txdr->size, olddesc, olddma); + } + } memset(txdr->desc, 0, txdr->size); txdr->next_to_use = 0; @@ -824,7 +1118,7 @@ e1000_configure_tx(struct e1000_adapter { uint64_t tdba = adapter->tx_ring.dma; uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc); - uint32_t tctl, tipg; + uint32_t tctl, tipg, tarc; E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL)); E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); @@ -872,16 +1166,30 @@ 
e1000_configure_tx(struct e1000_adapter E1000_WRITE_REG(&adapter->hw, TCTL, tctl); + if (adapter->hw.mac_type == e1000_82571 + || adapter->hw.mac_type == e1000_82572) { + tarc = E1000_READ_REG(&adapter->hw, TARC0); + tarc |= ((1 << 25) /*| (1 << 21) */); + E1000_WRITE_REG(&adapter->hw, TARC0, tarc); + tarc = E1000_READ_REG(&adapter->hw, TARC1); + tarc |= (1 << 25); + if (tctl & E1000_TCTL_MULR) + tarc &= ~(1 << 28); + else + tarc |= (1 << 28); + E1000_WRITE_REG(&adapter->hw, TARC1, tarc); + } + e1000_config_collision_dist(&adapter->hw); /* Setup Transmit Descriptor Settings for eop descriptor */ adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; - if(adapter->hw.report_tx_early == 1) - adapter->txd_cmd |= E1000_TXD_CMD_RS; - else + if(adapter->hw.mac_type < e1000_82543) adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; /* Cache if we're 82544 running in PCI-X because we'll * need this to apply a workaround later in the send path. */ @@ -902,26 +1210,91 @@ e1000_setup_rx_resources(struct e1000_ad { struct e1000_desc_ring *rxdr = &adapter->rx_ring; struct pci_dev *pdev = adapter->pdev; - int size; + int size, desc_len; size = sizeof(struct e1000_buffer) * rxdr->count; - rxdr->buffer_info = kmalloc(size, GFP_KERNEL); + rxdr->buffer_info = vmalloc(size); if(!rxdr->buffer_info) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } memset(rxdr->buffer_info, 0, size); + size = sizeof(struct e1000_ps_page) * rxdr->count; + rxdr->ps_page = kmalloc(size, GFP_KERNEL); + if(!rxdr->ps_page) { + vfree(rxdr->buffer_info); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rxdr->ps_page, 0, size); + + size = sizeof(struct e1000_ps_page_dma) * rxdr->count; + rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); + if(!rxdr->ps_page_dma) { + vfree(rxdr->buffer_info); + kfree(rxdr->ps_page); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; + } + memset(rxdr->ps_page_dma, 0, size); + + if(adapter->hw.mac_type <= e1000_82547_rev_2) + desc_len = sizeof(struct e1000_rx_desc); + else + desc_len = sizeof(union e1000_rx_desc_packet_split); + /* Round up to nearest 4K */ - rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->size = rxdr->count * desc_len; E1000_ROUNDUP(rxdr->size, 4096); rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); if(!rxdr->desc) { - kfree(rxdr->buffer_info); +setup_rx_desc_die: + vfree(rxdr->buffer_info); + kfree(rxdr->ps_page); + kfree(rxdr->ps_page_dma); + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " + "at %p\n", rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); + if(!rxdr->desc) { + /* Failed allocation, critical failure */ + pci_free_consistent(pdev, rxdr->size, olddesc, olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + pci_free_consistent(pdev, rxdr->size, rxdr->desc, + rxdr->dma); + pci_free_consistent(pdev, rxdr->size, olddesc, olddma); + DPRINTK(PROBE, ERR, + "Unable to 
allocate aligned memory " + "for the receive descriptor ring\n"); + vfree(rxdr->buffer_info); + kfree(rxdr->ps_page); + kfree(rxdr->ps_page_dma); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + pci_free_consistent(pdev, rxdr->size, olddesc, olddma); + } + } memset(rxdr->desc, 0, rxdr->size); rxdr->next_to_clean = 0; @@ -931,14 +1304,15 @@ e1000_setup_rx_resources(struct e1000_ad } /** - * e1000_setup_rctl - configure the receive control register + * e1000_setup_rctl - configure the receive control registers * @adapter: Board private structure **/ static void e1000_setup_rctl(struct e1000_adapter *adapter) { - uint32_t rctl; + uint32_t rctl, rfctl; + uint32_t psrctl = 0; rctl = E1000_READ_REG(&adapter->hw, RCTL); @@ -953,22 +1327,69 @@ e1000_setup_rctl(struct e1000_adapter *a else rctl &= ~E1000_RCTL_SBP; - rctl &= ~(E1000_RCTL_SZ_4096); - switch (adapter->rx_buffer_len) { - case E1000_RXBUFFER_2048: - default: - rctl |= E1000_RCTL_SZ_2048; - rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE); - break; - case E1000_RXBUFFER_4096: - rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE; - break; - case E1000_RXBUFFER_8192: - rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE; - break; - case E1000_RXBUFFER_16384: - rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE; - break; + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + if(adapter->hw.mac_type >= e1000_82571) { + /* We can now specify buffers in 1K increments. + * BSIZE and BSEX are ignored in this case. */ + rctl |= adapter->rx_buffer_len << 0x11; + } else { + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + } + +#ifdef CONFIG_E1000_PACKET_SPLIT + /* 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. 
+ */ + adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) + && (adapter->netdev->mtu + < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0)); +#endif + if(adapter->rx_ps) { + /* Configure extra packet-split registers */ + rfctl = E1000_READ_REG(&adapter->hw, RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + /* disable IPv6 packet split support */ + rfctl |= E1000_RFCTL_IPV6_DIS; + E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); + + rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; + + psrctl |= adapter->rx_ps_bsize0 >> + E1000_PSRCTL_BSIZE0_SHIFT; + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + + E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); } E1000_WRITE_REG(&adapter->hw, RCTL, rctl); @@ -985,17 +1406,24 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) { uint64_t rdba = adapter->rx_ring.dma; - uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); - uint32_t rctl; - uint32_t rxcsum; + uint32_t rdlen, rctl, rxcsum, ctrl_ext; - /* make sure receives are disabled while setting up the descriptors */ + if(adapter->rx_ps) { + rdlen = adapter->rx_ring.count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else { + rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + /* disable receives while setting up the descriptors */ rctl = E1000_READ_REG(&adapter->hw, RCTL); E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); /* set the Receive Delay Timer Register */ - E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); if(adapter->hw.mac_type >= e1000_82540) { @@ -1005,8 +1433,15 @@ e1000_configure_rx(struct e1000_adapter 1000000000 / (adapter->itr * 256)); } + if (adapter->hw.mac_type >= e1000_82571) { + /* Reset delay timers after every interrupt */ + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_CANC; + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(&adapter->hw); + } + /* Setup the Base and Length of the Rx Descriptor Ring */ - E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); @@ -1017,15 +1452,28 @@ e1000_configure_rx(struct e1000_adapter E1000_WRITE_REG(&adapter->hw, RDT, 0); /* Enable 82543 Receive Checksum Offload for TCP and UDP */ - if((adapter->hw.mac_type >= e1000_82543) && - (adapter->rx_csum == TRUE)) { + if(adapter->hw.mac_type >= e1000_82543) { rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); - rxcsum |= E1000_RXCSUM_TUOFL; + if(adapter->rx_csum == TRUE) { + rxcsum |= E1000_RXCSUM_TUOFL; + + /* Enable 82571 IPv4 payload checksum for UDP fragments + * Must be used in conjunction with packet-split. 
*/ + if((adapter->hw.mac_type > e1000_82547_rev_2) && + (adapter->rx_ps)) { + rxcsum |= E1000_RXCSUM_IPPCSE; + } + } else { + rxcsum &= ~E1000_RXCSUM_TUOFL; + /* don't need to clear IPPCSE as it defaults to 0 */ + } E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); } - /* Enable Receives */ + if (adapter->hw.mac_type == e1000_82573) + E1000_WRITE_REG(&adapter->hw, ERT, 0x0100); + /* Enable Receives */ E1000_WRITE_REG(&adapter->hw, RCTL, rctl); } @@ -1043,7 +1491,7 @@ e1000_free_tx_resources(struct e1000_ada e1000_clean_tx_ring(adapter); - kfree(adapter->tx_ring.buffer_info); + vfree(adapter->tx_ring.buffer_info); adapter->tx_ring.buffer_info = NULL; pci_free_consistent(pdev, adapter->tx_ring.size, @@ -1052,6 +1500,23 @@ e1000_free_tx_resources(struct e1000_ada adapter->tx_ring.desc = NULL; } +static inline void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if(buffer_info->dma) { + pci_unmap_page(adapter->pdev, + buffer_info->dma, + buffer_info->length, + PCI_DMA_TODEVICE); + buffer_info->dma = 0; + } + if(buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } +} + /** * e1000_clean_tx_ring - Free Tx Buffers * @adapter: board private structure @@ -1062,25 +1527,19 @@ e1000_clean_tx_ring(struct e1000_adapter { struct e1000_desc_ring *tx_ring = &adapter->tx_ring; struct e1000_buffer *buffer_info; - struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* Free all the Tx ring sk_buffs */ + if (likely(adapter->previous_buffer_info.skb != NULL)) { + e1000_unmap_and_free_tx_resource(adapter, + &adapter->previous_buffer_info); + } + for(i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; - if(buffer_info->skb) { - - pci_unmap_page(pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_TODEVICE); - - dev_kfree_skb(buffer_info->skb); - - buffer_info->skb = NULL; - } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); } size = sizeof(struct e1000_buffer) * tx_ring->count; @@ -1112,8 +1571,12 @@ e1000_free_rx_resources(struct e1000_ada e1000_clean_rx_ring(adapter); - kfree(rx_ring->buffer_info); + vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; + kfree(rx_ring->ps_page); + rx_ring->ps_page = NULL; + kfree(rx_ring->ps_page_dma); + rx_ring->ps_page_dma = NULL; pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); @@ -1130,29 +1593,45 @@ e1000_clean_rx_ring(struct e1000_adapter { struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; struct pci_dev *pdev = adapter->pdev; unsigned long size; - unsigned int i; + unsigned int i, j; /* Free all the Rx ring sk_buffs */ for(i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; if(buffer_info->skb) { - + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; pci_unmap_single(pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_FROMDEVICE); + buffer_info->dma, + buffer_info->length, + PCI_DMA_FROMDEVICE); dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; + + for(j = 0; j < PS_PAGE_BUFFERS; j++) { + if(!ps_page->ps_page[j]) break; + pci_unmap_single(pdev, + ps_page_dma->ps_page_dma[j], + PAGE_SIZE, PCI_DMA_FROMDEVICE); + ps_page_dma->ps_page_dma[j] = 0; + put_page(ps_page->ps_page[j]); + ps_page->ps_page[j] = NULL; + } } } size = sizeof(struct e1000_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); + size = sizeof(struct 
e1000_ps_page) * rx_ring->count; + memset(rx_ring->ps_page, 0, size); + size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; + memset(rx_ring->ps_page_dma, 0, size); /* Zero out the descriptor ring */ @@ -1218,7 +1697,7 @@ e1000_leave_82542_rst(struct e1000_adapt static int e1000_set_mac(struct net_device *netdev, void *p) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; if(!is_valid_ether_addr(addr->sa_data)) @@ -1234,6 +1713,22 @@ e1000_set_mac(struct net_device *netdev, e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); + /* With 82571 controllers, LAA may be overwritten (with the default) + * due to controller reset from the other port. */ + if (adapter->hw.mac_type == e1000_82571) { + /* activate the work around */ + adapter->hw.laa_is_present = 1; + + /* Hold a copy of the LAA in RAR[14]. This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] */ + e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, + E1000_RAR_ENTRIES - 1); + } + if(adapter->hw.mac_type == e1000_82542_rev2_0) e1000_leave_82542_rst(adapter); @@ -1253,12 +1748,18 @@ e1000_set_mac(struct net_device *netdev, static void e1000_set_multi(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct dev_mc_list *mc_ptr; + unsigned long flags; uint32_t rctl; uint32_t hash_value; - int i; + int i, rar_entries = E1000_RAR_ENTRIES; + + spin_lock_irqsave(&adapter->tx_lock, flags); + /* reserve RAR[14] for LAA over-write work-around */ + if (adapter->hw.mac_type == e1000_82571) + rar_entries--; /* Check for Promiscuous and All Multicast modes */ @@ -1283,11 +1784,12 @@ e1000_set_multi(struct net_device *netde /* load the first 14 multicast addresses into the exact filters 1-14 * RAR 0 is used for the station MAC address * if there are not 14 addresses, go ahead and clear the filters + * -- with 82571 controllers only 0-13 entries are filled here */ mc_ptr = netdev->mc_list; - for(i = 1; i < E1000_RAR_ENTRIES; i++) { - if(mc_ptr) { + for(i = 1; i < rar_entries; i++) { + if (mc_ptr) { e1000_rar_set(hw, mc_ptr->dmi_addr, i); mc_ptr = mc_ptr->next; } else { @@ -1310,9 +1812,12 @@ e1000_set_multi(struct net_device *netde if(hw->mac_type == e1000_82542_rev2_0) e1000_leave_82542_rst(adapter); + + spin_unlock_irqrestore(&adapter->tx_lock, flags); } -/* need to wait a few seconds after link up to get diagnostic information from the phy */ +/* Need to wait a few seconds after link up to get diagnostic information from + * the phy */ static void e1000_update_phy_info(unsigned long data) @@ -1374,10 +1879,14 @@ e1000_watchdog(unsigned long data) struct e1000_adapter *adapter = (struct e1000_adapter *) data; struct net_device *netdev = adapter->netdev; struct e1000_desc_ring *txdr = &adapter->tx_ring; - unsigned int i; uint32_t link; e1000_check_for_link(&adapter->hw); + if (adapter->hw.mac_type == e1000_82573) { + e1000_enable_tx_pkt_filtering(&adapter->hw); + if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) + e1000_update_mng_vlan(adapter); + } if((adapter->hw.media_type == e1000_media_type_internal_serdes) && !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) @@ -1420,7 +1929,7 @@ 
adapter->tpt_old = adapter->stats.tpt; adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old; adapter->colc_old = adapter->stats.colc; - + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; adapter->gorcl_old = adapter->stats.gorcl; adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; @@ -1454,12 +1963,13 @@ e1000_watchdog(unsigned long data) /* Cause software interrupt to ensure rx ring is cleaned */ E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); - /* Early detection of hung controller */ - i = txdr->next_to_clean; - if(txdr->buffer_info[i].dma && - time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) && - !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF)) - netif_stop_queue(netdev); + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = TRUE; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. Set the appropriate LAA in RAR[0] */ + if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) + e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); /* Reset the timer */ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); @@ -1468,35 +1978,62 @@ e1000_watchdog(unsigned long data) #define E1000_TX_FLAGS_CSUM 0x00000001 #define E1000_TX_FLAGS_VLAN 0x00000002 #define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 -static inline boolean_t +static inline int e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) { #ifdef NETIF_F_TSO struct e1000_context_desc *context_desc; unsigned int i; + uint32_t cmd_length = 0; + uint16_t ipcse = 0, tucse, mss; uint8_t ipcss, ipcso, tucss, tucso, hdr_len; - uint16_t ipcse, tucse, mss; + int err; if(skb_shinfo(skb)->tso_size) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); mss = skb_shinfo(skb)->tso_size; - skb->nh.iph->tot_len = 0; - skb->nh.iph->check = 0; - skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, - skb->nh.iph->daddr, - 0, - IPPROTO_TCP, - 0); + if(skb->protocol == ntohs(ETH_P_IP)) { + skb->nh.iph->tot_len = 0; + skb->nh.iph->check = 0; + skb->h.th->check = + ~csum_tcpudp_magic(skb->nh.iph->saddr, + skb->nh.iph->daddr, + 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb->h.raw - skb->data - 1; +#ifdef NETIF_F_TSO_IPV6 + } else if(skb->protocol == ntohs(ETH_P_IPV6)) { + skb->nh.ipv6h->payload_len = 0; + skb->h.th->check = + ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, + &skb->nh.ipv6h->daddr, + 0, + IPPROTO_TCP, + 0); + ipcse = 0; +#endif + } ipcss = skb->nh.raw - skb->data; ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; - ipcse = skb->h.raw - skb->data - 1; tucss = skb->h.raw - skb->data; tucso = (void *)&(skb->h.th->check) - (void *)skb->data; tucse = 0; + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + i = adapter->tx_ring.next_to_use; context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); @@ -1508,19 +2045,16 @@ e1000_tso(struct e1000_adapter *adapter, context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; - context_desc->cmd_and_length = cpu_to_le32( - E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | - E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP | - 
(skb->len - (hdr_len))); + context_desc->cmd_and_length = cpu_to_le32(cmd_length); if(++i == adapter->tx_ring.count) i = 0; adapter->tx_ring.next_to_use = i; - return TRUE; + return 1; } #endif - return FALSE; + return 0; } static inline boolean_t @@ -1528,22 +2062,21 @@ e1000_tx_csum(struct e1000_adapter *adap { struct e1000_context_desc *context_desc; unsigned int i; - uint8_t css, cso; + uint8_t css; - if(skb->ip_summed == CHECKSUM_HW) { + if(likely(skb->ip_summed == CHECKSUM_HW)) { css = skb->h.raw - skb->data; - cso = (skb->h.raw + skb->csum) - skb->data; i = adapter->tx_ring.next_to_use; context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); context_desc->upper_setup.tcp_fields.tucss = css; - context_desc->upper_setup.tcp_fields.tucso = cso; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); - if(++i == adapter->tx_ring.count) i = 0; + if(unlikely(++i == adapter->tx_ring.count)) i = 0; adapter->tx_ring.next_to_use = i; return TRUE; @@ -1567,7 +2100,6 @@ e1000_tx_map(struct e1000_adapter *adapt unsigned int f; len -= skb->data_len; - i = tx_ring->next_to_use; while(len) { @@ -1576,14 +2108,23 @@ e1000_tx_map(struct e1000_adapter *adapt #ifdef NETIF_F_TSO /* Workaround for premature desc write-backs * in TSO mode. Append 4-byte sentinel desc */ - if(mss && !nr_frags && size == len && size > 8) + if(unlikely(mss && !nr_frags && size == len && size > 8)) size -= 4; #endif + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + /* Workaround for potential 82544 hang in PCI-X. Avoid * terminating buffers within evenly-aligned dwords. */ - if(adapter->pcix_82544 && + if(unlikely(adapter->pcix_82544 && !((unsigned long)(skb->data + offset + size - 1) & 4) && - size > 4) + size > 4)) size -= 4; buffer_info->length = size; @@ -1597,7 +2138,7 @@ e1000_tx_map(struct e1000_adapter *adapt len -= size; offset += size; count++; - if(++i == tx_ring->count) i = 0; + if(unlikely(++i == tx_ring->count)) i = 0; } for(f = 0; f < nr_frags; f++) { @@ -1613,15 +2154,15 @@ e1000_tx_map(struct e1000_adapter *adapt #ifdef NETIF_F_TSO /* Workaround for premature desc write-backs * in TSO mode. Append 4-byte sentinel desc */ - if(mss && f == (nr_frags-1) && size == len && size > 8) + if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) size -= 4; #endif /* Workaround for potential 82544 hang in PCI-X. * Avoid terminating buffers within evenly-aligned * dwords. */ - if(adapter->pcix_82544 && + if(unlikely(adapter->pcix_82544 && !((unsigned long)(frag->page+offset+size-1) & 4) && - size > 4) + size > 4)) size -= 4; buffer_info->length = size; @@ -1636,13 +2177,14 @@ e1000_tx_map(struct e1000_adapter *adapt len -= size; offset += size; count++; - if(++i == tx_ring->count) i = 0; + if(unlikely(++i == tx_ring->count)) i = 0; } } + i = (i == 0) ? 
tx_ring->count - 1 : i - 1; tx_ring->buffer_info[i].skb = skb; tx_ring->buffer_info[first].next_to_watch = i; - + return count; } @@ -1655,18 +2197,21 @@ e1000_tx_queue(struct e1000_adapter *ada uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; unsigned int i; - if(tx_flags & E1000_TX_FLAGS_TSO) { + if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | E1000_TXD_CMD_TSE; - txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; - } + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } - if(tx_flags & E1000_TX_FLAGS_CSUM) { + if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; txd_upper |= E1000_TXD_POPTS_TXSM << 8; } - if(tx_flags & E1000_TX_FLAGS_VLAN) { + if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { txd_lower |= E1000_TXD_CMD_VLE; txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); } @@ -1680,7 +2225,7 @@ e1000_tx_queue(struct e1000_adapter *ada tx_desc->lower.data = cpu_to_le32(txd_lower | buffer_info->length); tx_desc->upper.data = cpu_to_le32(txd_upper); - if(++i == tx_ring->count) i = 0; + if(unlikely(++i == tx_ring->count)) i = 0; } tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); @@ -1733,29 +2278,84 @@ no_fifo_stall_required: return 0; } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +#define MINIMUM_DHCP_PACKET_SIZE 282 +static inline int +e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + uint16_t length, offset; + if(vlan_tx_tag_present(skb)) { + if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + ( adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) + return 0; + } + if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { + struct ethhdr *eth = (struct ethhdr *) skb->data; + if ((htons(ETH_P_IP) == eth->h_proto)) { + const struct iphdr *ip = + (struct iphdr *)((uint8_t *)skb->data+14); + if(IPPROTO_UDP == ip->protocol) { + struct udphdr *udp = + (struct udphdr *)((uint8_t *)ip + + (ip->ihl << 2)); + if(ntohs(udp->dest) == 67) { + offset = (uint8_t *)udp + 8 - skb->data; + length = skb->len - offset; + + return e1000_mng_write_dhcp_info(hw, + (uint8_t *)udp + 8, + length); + } + } + } + } + return 0; +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; - unsigned long flags; unsigned int len = skb->len; - int count = 0; - unsigned int mss = 0; + unsigned long flags; unsigned int nr_frags = 0; + unsigned int mss = 0; + int count = 0; + int tso; unsigned int f; - nr_frags = skb_shinfo(skb)->nr_frags; len -= skb->data_len; - if(skb->len <= 0) { + + if(unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); - return 0; + return NETDEV_TX_OK; } #ifdef NETIF_F_TSO mss = skb_shinfo(skb)->tso_size; + /* TSO Workaround for 82571/2 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + if (mss) { + uint8_t hdr_len; + hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == 
e1000_82572 || + adapter->hw.mac_type ==e1000_82573)) { + unsigned int pull_size; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + printk(KERN_ERR "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return -EFAULT; + } + } + } /* The controller does a simple calculation to * make sure there is enough room in the FIFO before * initiating the DMA for each buffer. The calc is: @@ -1766,62 +2366,97 @@ e1000_xmit_frame(struct sk_buff *skb, st max_per_txd = min(mss << 2, max_per_txd); max_txd_pwr = fls(max_per_txd) - 1; } + if((mss) || (skb->ip_summed == CHECKSUM_HW)) count++; - count++; /*for sentinel desc*/ + count++; #else if(skb->ip_summed == CHECKSUM_HW) count++; #endif - count += TXD_USE_COUNT(len, max_txd_pwr); + if(adapter->pcix_82544) count++; + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + nr_frags = skb_shinfo(skb)->nr_frags; for(f = 0; f < nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, - max_txd_pwr); + max_txd_pwr); if(adapter->pcix_82544) count += nr_frags; - - spin_lock_irqsave(&adapter->tx_lock, flags); - /* need: count + 2 desc gap to keep tail from touching + + if (adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) + e1000_transfer_dhcp_info(adapter, skb); + + local_irq_save(flags); + if (!spin_trylock(&adapter->tx_lock)) { + /* Collision - tell upper layer to requeue */ + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } + + /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2 ) { + if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { netif_stop_queue(netdev); spin_unlock_irqrestore(&adapter->tx_lock, flags); - return 1; + return NETDEV_TX_BUSY; } - spin_unlock_irqrestore(&adapter->tx_lock, flags); - if(adapter->hw.mac_type == e1000_82547) { - if(e1000_82547_fifo_workaround(adapter, skb)) { + if(unlikely(adapter->hw.mac_type == e1000_82547)) { + if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { netif_stop_queue(netdev); mod_timer(&adapter->tx_fifo_stall_timer, jiffies); - return 1; + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; } } - if(adapter->vlgrp && vlan_tx_tag_present(skb)) { + if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { tx_flags |= E1000_TX_FLAGS_VLAN; tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); } first = adapter->tx_ring.next_to_use; - if(e1000_tso(adapter, skb)) + tso = e1000_tso(adapter, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; + } + + if (likely(tso)) tx_flags |= E1000_TX_FLAGS_TSO; - else if(e1000_tx_csum(adapter, skb)) + else if(likely(e1000_tx_csum(adapter, skb))) tx_flags |= E1000_TX_FLAGS_CSUM; - e1000_tx_queue(adapter, - e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. 
*/ + if(likely(skb->protocol == ntohs(ETH_P_IP))) + tx_flags |= E1000_TX_FLAGS_IPV4; + + e1000_tx_queue(adapter, + e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), tx_flags); netdev->trans_start = jiffies; - return 0; + /* Make sure there is space in the ring for the next send. */ + if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) + netif_stop_queue(netdev); + + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; } /** @@ -1832,7 +2467,7 @@ e1000_xmit_frame(struct sk_buff *skb, st static void e1000_tx_timeout(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ schedule_work(&adapter->tx_timeout_task); @@ -1841,12 +2476,10 @@ e1000_tx_timeout(struct net_device *netd static void e1000_tx_timeout_task(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); - netif_device_detach(netdev); e1000_down(adapter); e1000_up(adapter); - netif_device_attach(netdev); } /** @@ -1860,9 +2493,9 @@ e1000_tx_timeout_task(struct net_device static struct net_device_stats * e1000_get_stats(struct net_device *netdev) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); - e1000_update_stats(adapter); + /* only return the current stats */ return &adapter->net_stats; } @@ -1877,40 +2510,63 @@ e1000_get_stats(struct net_device *netde static int e1000_change_mtu(struct net_device *netdev, int new_mtu) { - struct e1000_adapter *adapter = netdev->priv; - int old_mtu = adapter->rx_buffer_len; + struct e1000_adapter *adapter = netdev_priv(netdev); int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || - (max_frame > MAX_JUMBO_FRAME_SIZE)) { - DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); - return -EINVAL; + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); + return -EINVAL; } - if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) { - adapter->rx_buffer_len = E1000_RXBUFFER_2048; - - } else if(adapter->hw.mac_type < e1000_82543) { - DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n"); +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + /* might want this to be bigger enum check... 
*/ + /* 82571 controllers limit jumbo frame size to 10500 bytes */ + if ((adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572) && + max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported " + "on 82571 and 82572 controllers.\n"); return -EINVAL; + } - } else if(max_frame <= E1000_RXBUFFER_4096) { - adapter->rx_buffer_len = E1000_RXBUFFER_4096; - - } else if(max_frame <= E1000_RXBUFFER_8192) { - adapter->rx_buffer_len = E1000_RXBUFFER_8192; + if(adapter->hw.mac_type == e1000_82573 && + max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " + "on 82573\n"); + return -EINVAL; + } + if(adapter->hw.mac_type > e1000_82547_rev_2) { + adapter->rx_buffer_len = max_frame; + E1000_ROUNDUP(adapter->rx_buffer_len, 1024); } else { - adapter->rx_buffer_len = E1000_RXBUFFER_16384; + if(unlikely((adapter->hw.mac_type < e1000_82543) && + (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " + "on 82542\n"); + return -EINVAL; + + } else { + if(max_frame <= E1000_RXBUFFER_2048) { + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + } else if(max_frame <= E1000_RXBUFFER_4096) { + adapter->rx_buffer_len = E1000_RXBUFFER_4096; + } else if(max_frame <= E1000_RXBUFFER_8192) { + adapter->rx_buffer_len = E1000_RXBUFFER_8192; + } else if(max_frame <= E1000_RXBUFFER_16384) { + adapter->rx_buffer_len = E1000_RXBUFFER_16384; + } + } } - if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) { + netdev->mtu = new_mtu; + if(netif_running(netdev)) { e1000_down(adapter); e1000_up(adapter); } - netdev->mtu = new_mtu; adapter->hw.max_frame_size = max_frame; return 0; @@ -1951,8 +2607,6 @@ e1000_update_stats(struct e1000_adapter adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023); adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522); - /* the rest of the counters are only modified here */ - adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS); adapter->stats.mpc += E1000_READ_REG(hw, MPC); adapter->stats.scc += E1000_READ_REG(hw, SCC); @@ -2003,6 +2657,17 @@ e1000_update_stats(struct e1000_adapter adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); } + if(hw->mac_type > e1000_82547_rev_2) { + adapter->stats.iac += E1000_READ_REG(hw, IAC); + adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); + adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); + adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); + adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); + adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); + adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); + adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); + adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); + } /* Fill out the OS statistics structure */ @@ -2017,9 +2682,8 @@ e1000_update_stats(struct e1000_adapter adapter->net_stats.rx_errors = adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + - adapter->stats.rlec + adapter->stats.rnbc + - adapter->stats.mpc + adapter->stats.cexterr; - adapter->net_stats.rx_dropped = adapter->stats.rnbc; + adapter->stats.rlec + adapter->stats.mpc + + adapter->stats.cexterr; adapter->net_stats.rx_length_errors = adapter->stats.rlec; adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; @@ -2055,34 +2719,6 @@ e1000_update_stats(struct e1000_adapter } /** - * e1000_irq_disable - Mask off interrupt generation on the NIC - 
* @adapter: board private structure - **/ - -static inline void -e1000_irq_disable(struct e1000_adapter *adapter) -{ - atomic_inc(&adapter->irq_sem); - E1000_WRITE_REG(&adapter->hw, IMC, ~0); - E1000_WRITE_FLUSH(&adapter->hw); - synchronize_irq(adapter->pdev->irq); -} - -/** - * e1000_irq_enable - Enable default interrupt generation settings - * @adapter: board private structure - **/ - -static inline void -e1000_irq_enable(struct e1000_adapter *adapter) -{ - if(atomic_dec_and_test(&adapter->irq_sem)) { - E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); - E1000_WRITE_FLUSH(&adapter->hw); - } -} - -/** * e1000_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure @@ -2093,23 +2729,23 @@ static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs) { struct net_device *netdev = data; - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - uint32_t icr = E1000_READ_REG(&adapter->hw, ICR); + uint32_t icr = E1000_READ_REG(hw, ICR); #ifndef CONFIG_E1000_NAPI unsigned int i; #endif - if(!icr) + if(unlikely(!icr)) return IRQ_NONE; /* Not our interrupt */ - if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { + if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { hw->get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies); } #ifdef CONFIG_E1000_NAPI - if(netif_rx_schedule_prep(netdev)) { + if(likely(netif_rx_schedule_prep(netdev))) { /* Disable interrupts and register for poll. The flush of the posted write is intentionally left out. @@ -2120,10 +2756,28 @@ e1000_intr(int irq, void *data, struct p __netif_rx_schedule(netdev); } #else + /* Writing IMC and IMS is needed for 82547. + Due to Hub Link bus being occupied, an interrupt + de-assertion message is not able to be sent. + When an interrupt assertion message is generated later, + two messages are re-ordered and sent out. + That causes APIC to think 82547 is in de-assertion + state, while 82547 is in assertion state, resulting + in dead lock. Writing IMC forces 82547 into + de-assertion state. 
+ */ + if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, IMC, ~0); + } + for(i = 0; i < E1000_MAX_INTR; i++) - if(!e1000_clean_rx_irq(adapter) & - !e1000_clean_tx_irq(adapter)) + if(unlikely(!adapter->clean_rx(adapter) & + !e1000_clean_tx_irq(adapter))) break; + + if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) + e1000_irq_enable(adapter); #endif return IRQ_HANDLED; @@ -2138,26 +2792,28 @@ e1000_intr(int irq, void *data, struct p static int e1000_clean(struct net_device *netdev, int *budget) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); int work_to_do = min(*budget, netdev->quota); + int tx_cleaned; int work_done = 0; - - e1000_clean_tx_irq(adapter); - e1000_clean_rx_irq(adapter, &work_done, work_to_do); + + tx_cleaned = e1000_clean_tx_irq(adapter); + adapter->clean_rx(adapter, &work_done, work_to_do); *budget -= work_done; netdev->quota -= work_done; - if(work_done < work_to_do || !netif_running(netdev)) { + if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { + /* If no Tx and not enough Rx work done, exit the polling mode */ netif_rx_complete(netdev); e1000_irq_enable(adapter); return 0; } - return (work_done >= work_to_do); + return 1; } -#endif +#endif /** * e1000_clean_tx_irq - Reclaim resources after transmit completes * @adapter: board private structure @@ -2168,46 +2824,53 @@ e1000_clean_tx_irq(struct e1000_adapter { struct e1000_desc_ring *tx_ring = &adapter->tx_ring; struct net_device *netdev = adapter->netdev; - struct pci_dev *pdev = adapter->pdev; struct e1000_tx_desc *tx_desc, *eop_desc; struct e1000_buffer *buffer_info; unsigned int i, eop; boolean_t cleaned = FALSE; - i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + /* Premature writeback of Tx descriptors clear (free buffers + * and unmap pci_mapping) previous_buffer_info */ + if (likely(adapter->previous_buffer_info.skb != NULL)) { + e1000_unmap_and_free_tx_resource(adapter, + &adapter->previous_buffer_info); + } for(cleaned = FALSE; !cleaned; ) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); - if(buffer_info->dma) { - - pci_unmap_page(pdev, - buffer_info->dma, - buffer_info->length, - PCI_DMA_TODEVICE); - - buffer_info->dma = 0; - } - - if(buffer_info->skb) { - - dev_kfree_skb_any(buffer_info->skb); - - buffer_info->skb = NULL; +#ifdef NETIF_F_TSO + if (!(netdev->features & NETIF_F_TSO)) { +#endif + e1000_unmap_and_free_tx_resource(adapter, + buffer_info); +#ifdef NETIF_F_TSO + } else { + if (cleaned) { + memcpy(&adapter->previous_buffer_info, + buffer_info, + sizeof(struct e1000_buffer)); + memset(buffer_info, 0, + sizeof(struct e1000_buffer)); + } else { + e1000_unmap_and_free_tx_resource( + adapter, buffer_info); + } } +#endif tx_desc->buffer_addr = 0; tx_desc->lower.data = 0; tx_desc->upper.data = 0; - cleaned = (i == eop); - if(++i == tx_ring->count) i = 0; + if(unlikely(++i == tx_ring->count)) i = 0; } eop = tx_ring->buffer_info[i].next_to_watch; @@ -2218,16 +2881,112 @@ e1000_clean_tx_irq(struct e1000_adapter spin_lock(&adapter->tx_lock); - if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) + if(unlikely(cleaned && netif_queue_stopped(netdev) && + netif_carrier_ok(netdev))) netif_wake_queue(netdev); spin_unlock(&adapter->tx_lock); + 
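In the legacy (non-NAPI) interrupt path above, 82547 parts can deadlock when an interrupt de-assertion message is lost on the busy Hub Link bus, so the handler forces de-assertion by masking every source before polling and re-enables interrupts only after the cleanup loop. A condensed, illustrative fragment of that mask/poll/unmask pattern from e1000_intr(), using only symbols that appear in the hunk:

        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
                atomic_inc(&adapter->irq_sem);   /* balanced by e1000_irq_enable() */
                E1000_WRITE_REG(hw, IMC, ~0);    /* mask all sources: forced de-assertion */
        }

        for (i = 0; i < E1000_MAX_INTR; i++)
                /* non-short-circuit '&' so both cleaners run on every pass */
                if (unlikely(!adapter->clean_rx(adapter) &
                             !e1000_clean_tx_irq(adapter)))
                        break;

        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);       /* drops irq_sem and rewrites IMS */

Note that the receive side now goes through the adapter->clean_rx and adapter->alloc_rx_buf function pointers, so the same interrupt and NAPI paths serve both the legacy and the packet-split receive code.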
if(adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = FALSE; + if (tx_ring->buffer_info[i].dma && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) + && !(E1000_READ_REG(&adapter->hw, STATUS) & + E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " dma <%llx>\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + E1000_READ_REG(&adapter->hw, TDH), + E1000_READ_REG(&adapter->hw, TDT), + tx_ring->next_to_use, + i, + (unsigned long long)tx_ring->buffer_info[i].dma, + tx_ring->buffer_info[i].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + netif_stop_queue(netdev); + } + } +#ifdef NETIF_F_TSO + + if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ))) + e1000_unmap_and_free_tx_resource( + adapter, &adapter->previous_buffer_info); + +#endif return cleaned; } /** - * e1000_clean_rx_irq - Send received data up the network stack, + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ + +static inline void +e1000_rx_checksum(struct e1000_adapter *adapter, + uint32_t status_err, uint32_t csum, + struct sk_buff *skb) +{ + uint16_t status = (uint16_t)status_err; + uint8_t errors = (uint8_t)(status_err >> 24); + skb->ip_summed = CHECKSUM_NONE; + + /* 82543 or newer only */ + if(unlikely(adapter->hw.mac_type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if(unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if(unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if(adapter->hw.mac_type <= e1000_82547_rev_2) { + if(!(status & E1000_RXD_STAT_TCPCS)) + return; + } else { + if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (adapter->hw.mac_type > e1000_82547_rev_2) { + /* IP fragment with UDP payload */ + /* Hardware complements the payload checksum, so we undo it + * and then put the value in host order for further stack use. 
+ */ + csum = ntohl(csum ^ 0xFFFF); + skb->csum = csum; + skb->ip_summed = CHECKSUM_HW; + } + adapter->hw_csum_good++; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure **/ @@ -2256,14 +3015,11 @@ e1000_clean_rx_irq(struct e1000_adapter while(rx_desc->status & E1000_RXD_STAT_DD) { buffer_info = &rx_ring->buffer_info[i]; - #ifdef CONFIG_E1000_NAPI if(*work_done >= work_to_do) break; - (*work_done)++; #endif - cleaned = TRUE; pci_unmap_single(pdev, @@ -2274,49 +3030,28 @@ e1000_clean_rx_irq(struct e1000_adapter skb = buffer_info->skb; length = le16_to_cpu(rx_desc->length); - if(!(rx_desc->status & E1000_RXD_STAT_EOP)) { - + if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { /* All receives must fit into a single buffer */ - - E1000_DBG("%s: Receive packet consumed multiple buffers\n", - netdev->name); - + E1000_DBG("%s: Receive packet consumed multiple" + " buffers\n", netdev->name); dev_kfree_skb_irq(skb); - rx_desc->status = 0; - buffer_info->skb = NULL; - - if(++i == rx_ring->count) i = 0; - - rx_desc = E1000_RX_DESC(*rx_ring, i); - continue; + goto next_desc; } - if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { - + if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { last_byte = *(skb->data + length - 1); - if(TBI_ACCEPT(&adapter->hw, rx_desc->status, rx_desc->errors, length, last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, flags); - e1000_tbi_adjust_stats(&adapter->hw, &adapter->stats, length, skb->data); - spin_unlock_irqrestore(&adapter->stats_lock, flags); length--; } else { - dev_kfree_skb_irq(skb); - rx_desc->status = 0; - buffer_info->skb = NULL; - - if(++i == rx_ring->count) i = 0; - - rx_desc = E1000_RX_DESC(*rx_ring, i); - continue; + goto next_desc; } } @@ -2324,45 +3059,175 @@ e1000_clean_rx_irq(struct e1000_adapter skb_put(skb, length - ETHERNET_FCS_SIZE); /* Receive Checksum Offload */ - e1000_rx_checksum(adapter, rx_desc, skb); - + e1000_rx_checksum(adapter, + (uint32_t)(rx_desc->status) | + ((uint32_t)(rx_desc->errors) << 24), + rx_desc->csum, skb); skb->protocol = eth_type_trans(skb, netdev); #ifdef CONFIG_E1000_NAPI - if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) { + if(unlikely(adapter->vlgrp && + (rx_desc->status & E1000_RXD_STAT_VP))) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special & - E1000_RXD_SPC_VLAN_MASK)); + le16_to_cpu(rx_desc->special) & + E1000_RXD_SPC_VLAN_MASK); } else { netif_receive_skb(skb); } #else /* CONFIG_E1000_NAPI */ - if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) { + if(unlikely(adapter->vlgrp && + (rx_desc->status & E1000_RXD_STAT_VP))) { vlan_hwaccel_rx(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special & - E1000_RXD_SPC_VLAN_MASK)); + le16_to_cpu(rx_desc->special) & + E1000_RXD_SPC_VLAN_MASK); } else { netif_rx(skb); } #endif /* CONFIG_E1000_NAPI */ netdev->last_rx = jiffies; +next_desc: rx_desc->status = 0; buffer_info->skb = NULL; - - if(++i == rx_ring->count) i = 0; + if(unlikely(++i == rx_ring->count)) i = 0; rx_desc = E1000_RX_DESC(*rx_ring, i); } - rx_ring->next_to_clean = i; + adapter->alloc_rx_buf(adapter); + + return cleaned; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @adapter: board private structure + **/ + +static boolean_t +#ifdef CONFIG_E1000_NAPI +e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, + int work_to_do) +#else +e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) +#endif +{ + struct 
e1000_desc_ring *rx_ring = &adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + struct sk_buff *skb; + unsigned int i, j; + uint32_t length, staterr; + boolean_t cleaned = FALSE; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + + while(staterr & E1000_RXD_STAT_DD) { + buffer_info = &rx_ring->buffer_info[i]; + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; +#ifdef CONFIG_E1000_NAPI + if(unlikely(*work_done >= work_to_do)) + break; + (*work_done)++; +#endif + cleaned = TRUE; + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, + PCI_DMA_FROMDEVICE); + + skb = buffer_info->skb; + + if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { + E1000_DBG("%s: Packet Split buffers didn't pick up" + " the full packet\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } - e1000_alloc_rx_buffers(adapter); + if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if(unlikely(!length)) { + E1000_DBG("%s: Last part of the packet spanning" + " multiple descriptors\n", netdev->name); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + for(j = 0; j < PS_PAGE_BUFFERS; j++) { + if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) + break; + + pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], + PAGE_SIZE, PCI_DMA_FROMDEVICE); + ps_page_dma->ps_page_dma[j] = 0; + skb_shinfo(skb)->frags[j].page = + ps_page->ps_page[j]; + ps_page->ps_page[j] = NULL; + skb_shinfo(skb)->frags[j].page_offset = 0; + skb_shinfo(skb)->frags[j].size = length; + skb_shinfo(skb)->nr_frags++; + skb->len += length; + skb->data_len += length; + } + + e1000_rx_checksum(adapter, staterr, + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + skb->protocol = eth_type_trans(skb, netdev); + +#ifdef HAVE_RX_ZERO_COPY + if(likely(rx_desc->wb.upper.header_status & + E1000_RXDPS_HDRSTAT_HDRSP)) + skb_shinfo(skb)->zero_copy = TRUE; +#endif +#ifdef CONFIG_E1000_NAPI + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(rx_desc->wb.middle.vlan) & + E1000_RXD_SPC_VLAN_MASK); + } else { + netif_receive_skb(skb); + } +#else /* CONFIG_E1000_NAPI */ + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { + vlan_hwaccel_rx(skb, adapter->vlgrp, + le16_to_cpu(rx_desc->wb.middle.vlan) & + E1000_RXD_SPC_VLAN_MASK); + } else { + netif_rx(skb); + } +#endif /* CONFIG_E1000_NAPI */ + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.middle.status_error &= ~0xFF; + buffer_info->skb = NULL; + if(unlikely(++i == rx_ring->count)) i = 0; + + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + adapter->alloc_rx_buf(adapter); return cleaned; } /** - * e1000_alloc_rx_buffers - Replace used receive buffers + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended * @adapter: address of board private structure **/ @@ -2376,20 +3241,42 @@ e1000_alloc_rx_buffers(struct e1000_adap struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len + 
NET_IP_ALIGN; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while(!buffer_info->skb) { - rx_desc = E1000_RX_DESC(*rx_ring, i); - - skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); + skb = dev_alloc_skb(bufsz); - if(!skb) { + if(unlikely(!skb)) { /* Better luck next round */ break; } + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct sk_buff *oldskb = skb; + DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " + "at %p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = dev_alloc_skb(bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + dev_kfree_skb(oldskb); + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + dev_kfree_skb(skb); + dev_kfree_skb(oldskb); + break; /* while !buffer_info->skb */ + } else { + /* Use new allocation */ + dev_kfree_skb(oldskb); + } + } /* Make buffer alignment 2 beyond a 16 byte boundary * this will result in a 16 byte aligned IP header after * the 14 byte MAC header is removed @@ -2400,28 +3287,133 @@ e1000_alloc_rx_buffers(struct e1000_adap buffer_info->skb = skb; buffer_info->length = adapter->rx_buffer_len; - buffer_info->dma = - pci_map_single(pdev, - skb->data, - adapter->rx_buffer_len, - PCI_DMA_FROMDEVICE); + buffer_info->dma = pci_map_single(pdev, + skb->data, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + DPRINTK(RX_ERR, ERR, + "dma align check failed: %u bytes at %p\n", + adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + dev_kfree_skb(skb); + buffer_info->skb = NULL; + + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + break; /* while !buffer_info->skb */ + } + rx_desc = E1000_RX_DESC(*rx_ring, i); rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); - if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) { + if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). 
*/ wmb(); - E1000_WRITE_REG(&adapter->hw, RDT, i); } - if(++i == rx_ring->count) i = 0; + if(unlikely(++i == rx_ring->count)) i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @adapter: address of board private structure + **/ + +static void +e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) +{ + struct e1000_desc_ring *rx_ring = &adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct e1000_ps_page_dma *ps_page_dma; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; + + while(!buffer_info->skb) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for(j = 0; j < PS_PAGE_BUFFERS; j++) { + if(unlikely(!ps_page->ps_page[j])) { + ps_page->ps_page[j] = + alloc_page(GFP_ATOMIC); + if(unlikely(!ps_page->ps_page[j])) + goto no_buffers; + ps_page_dma->ps_page_dma[j] = + pci_map_page(pdev, + ps_page->ps_page[j], + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't + * change because each write-back erases this info. + */ + rx_desc->read.buffer_addr[j+1] = + cpu_to_le64(ps_page_dma->ps_page_dma[j]); + } + + skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); + + if(unlikely(!skb)) + break; + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + skb->dev = netdev; + + buffer_info->skb = skb; + buffer_info->length = adapter->rx_ps_bsize0; + buffer_info->dma = pci_map_single(pdev, skb->data, + adapter->rx_ps_bsize0, + PCI_DMA_FROMDEVICE); + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + /* Hardware increments by 16 bytes, but packet split + * descriptors are 32 bytes...so we increment tail + * twice as much. 
+ */ + E1000_WRITE_REG(&adapter->hw, RDT, i<<1); + } + + if(unlikely(++i == rx_ring->count)) i = 0; buffer_info = &rx_ring->buffer_info[i]; + ps_page = &rx_ring->ps_page[i]; + ps_page_dma = &rx_ring->ps_page_dma[i]; } +no_buffers: rx_ring->next_to_use = i; } @@ -2510,11 +3502,12 @@ e1000_ioctl(struct net_device *netdev, s static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); struct mii_ioctl_data *data = if_mii(ifr); int retval; uint16_t mii_reg; uint16_t spddplx; + unsigned long flags; if(adapter->hw.media_type != e1000_media_type_copper) return -EOPNOTSUPP; @@ -2524,41 +3517,54 @@ e1000_mii_ioctl(struct net_device *netde data->phy_id = adapter->hw.phy_addr; break; case SIOCGMIIREG: - if (!capable(CAP_NET_ADMIN)) + if(!capable(CAP_NET_ADMIN)) return -EPERM; - if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, - &data->val_out)) + spin_lock_irqsave(&adapter->stats_lock, flags); + if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); break; case SIOCSMIIREG: - if (!capable(CAP_NET_ADMIN)) + if(!capable(CAP_NET_ADMIN)) return -EPERM; - if (data->reg_num & ~(0x1F)) + if(data->reg_num & ~(0x1F)) return -EFAULT; mii_reg = data->val_in; - if (e1000_write_phy_reg(&adapter->hw, data->reg_num, - data->val_in)) + spin_lock_irqsave(&adapter->stats_lock, flags); + if(e1000_write_phy_reg(&adapter->hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); return -EIO; - if (adapter->hw.phy_type == e1000_phy_m88) { + } + if(adapter->hw.phy_type == e1000_phy_m88) { switch (data->reg_num) { case PHY_CTRL: - if(data->val_in & MII_CR_AUTO_NEG_EN) { + if(mii_reg & MII_CR_POWER_DOWN) + break; + if(mii_reg & MII_CR_AUTO_NEG_EN) { adapter->hw.autoneg = 1; adapter->hw.autoneg_advertised = 0x2F; } else { - if (data->val_in & 0x40) + if (mii_reg & 0x40) spddplx = SPEED_1000; - else if (data->val_in & 0x2000) + else if (mii_reg & 0x2000) spddplx = SPEED_100; else spddplx = SPEED_10; - spddplx += (data->val_in & 0x100) + spddplx += (mii_reg & 0x100) ? 
FULL_DUPLEX : HALF_DUPLEX; retval = e1000_set_spd_dplx(adapter, spddplx); - if(retval) + if(retval) { + spin_unlock_irqrestore( + &adapter->stats_lock, + flags); return retval; + } } if(netif_running(adapter->netdev)) { e1000_down(adapter); @@ -2568,11 +3574,27 @@ e1000_mii_ioctl(struct net_device *netde break; case M88E1000_PHY_SPEC_CTRL: case M88E1000_EXT_PHY_SPEC_CTRL: - if (e1000_phy_reset(&adapter->hw)) + if(e1000_phy_reset(&adapter->hw)) { + spin_unlock_irqrestore( + &adapter->stats_lock, flags); return -EIO; + } + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if(mii_reg & MII_CR_POWER_DOWN) + break; + if(netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else + e1000_reset(adapter); break; } } + spin_unlock_irqrestore(&adapter->stats_lock, flags); break; default: return -EOPNOTSUPP; @@ -2580,47 +3602,14 @@ e1000_mii_ioctl(struct net_device *netde return E1000_SUCCESS; } -/** - * e1000_rx_checksum - Receive Checksum Offload for 82543 - * @adapter: board private structure - * @rx_desc: receive descriptor - * @sk_buff: socket buffer with received data - **/ - -static inline void -e1000_rx_checksum(struct e1000_adapter *adapter, - struct e1000_rx_desc *rx_desc, - struct sk_buff *skb) -{ - /* 82543 or newer only */ - if((adapter->hw.mac_type < e1000_82543) || - /* Ignore Checksum bit is set */ - (rx_desc->status & E1000_RXD_STAT_IXSM) || - /* TCP Checksum has not been calculated */ - (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) { - skb->ip_summed = CHECKSUM_NONE; - return; - } - - /* At this point we know the hardware did the TCP checksum */ - /* now look at the TCP checksum error bit */ - if(rx_desc->errors & E1000_RXD_ERR_TCPE) { - /* let the stack verify checksum errors */ - skb->ip_summed = CHECKSUM_NONE; - adapter->hw_csum_err++; - } else { - /* TCP checksum is good */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - adapter->hw_csum_good++; - } -} - void e1000_pci_set_mwi(struct e1000_hw *hw) { struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); - pci_set_mwi(adapter->pdev); + if(ret_val) + DPRINTK(PROBE, ERR, "Error in setting MWI\n"); } void @@ -2662,7 +3651,7 @@ e1000_io_write(struct e1000_hw *hw, unsi static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t ctrl, rctl; e1000_irq_disable(adapter); @@ -2670,29 +3659,30 @@ e1000_vlan_rx_register(struct net_device if(grp) { /* enable VLAN tag insert/strip */ - ctrl = E1000_READ_REG(&adapter->hw, CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); /* enable VLAN receive filtering */ - rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl |= E1000_RCTL_VFE; rctl &= ~E1000_RCTL_CFIEN; E1000_WRITE_REG(&adapter->hw, RCTL, rctl); + e1000_update_mng_vlan(adapter); } else { /* disable VLAN tag insert/strip */ - ctrl = E1000_READ_REG(&adapter->hw, CTRL); ctrl &= ~E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); /* disable VLAN filtering */ - rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl &= ~E1000_RCTL_VFE; E1000_WRITE_REG(&adapter->hw, RCTL, rctl); + if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } } e1000_irq_enable(adapter); @@ -2701,11 +3691,13 @@ e1000_vlan_rx_register(struct net_device static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) { - struct 
e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t vfta, index; - + if((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return; /* add VID to filter table */ - index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); vfta |= (1 << (vid & 0x1F)); @@ -2715,7 +3707,7 @@ e1000_vlan_rx_add_vid(struct net_device static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) { - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t vfta, index; e1000_irq_disable(adapter); @@ -2725,8 +3717,11 @@ e1000_vlan_rx_kill_vid(struct net_device e1000_irq_enable(adapter); - /* remove VID from filter table*/ - + if((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return; + /* remove VID from filter table */ index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); vfta &= ~(1 << (vid & 0x1F)); @@ -2753,6 +3748,13 @@ e1000_set_spd_dplx(struct e1000_adapter { adapter->hw.autoneg = 0; + /* Fiber NICs only allow 1000 gbps Full duplex */ + if((adapter->hw.media_type == e1000_media_type_fiber) && + spddplx != (SPEED_1000 + DUPLEX_FULL)) { + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; + } + switch(spddplx) { case SPEED_10 + DUPLEX_HALF: adapter->hw.forced_speed_duplex = e1000_10_half; @@ -2772,33 +3774,17 @@ e1000_set_spd_dplx(struct e1000_adapter break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); return -EINVAL; } return 0; } static int -e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p) -{ - struct pci_dev *pdev = NULL; - - switch(event) { - case SYS_DOWN: - case SYS_HALT: - case SYS_POWER_OFF: - while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { - if(pci_dev_driver(pdev) == &e1000_driver) - e1000_suspend(pdev, 3); - } - } - return NOTIFY_DONE; -} - -static int -e1000_suspend(struct pci_dev *pdev, uint32_t state) +e1000_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t ctrl, ctrl_ext, rctl, manc, status; uint32_t wufc = adapter->wol; @@ -2841,6 +3827,9 @@ e1000_suspend(struct pci_dev *pdev, uint E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); } + /* Allow time for pending master requests to run */ + e1000_disable_pciex_master(&adapter->hw); + E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, WUFC, wufc); pci_enable_wake(pdev, 3, 1); @@ -2865,8 +3854,12 @@ e1000_suspend(struct pci_dev *pdev, uint } } - state = (state > 0) ? 3 : 0; - pci_set_power_state(pdev, state); + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
*/ + e1000_release_hw_control(adapter); + + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -2876,14 +3869,16 @@ static int e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); - struct e1000_adapter *adapter = netdev->priv; - uint32_t manc; + struct e1000_adapter *adapter = netdev_priv(netdev); + uint32_t manc, ret_val; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev, adapter->pci_state); + ret_val = pci_enable_device(pdev); + pci_set_master(pdev); - pci_enable_wake(pdev, 3, 0); - pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); e1000_reset(adapter); E1000_WRITE_REG(&adapter->hw, WUS, ~0); @@ -2900,22 +3895,33 @@ e1000_resume(struct pci_dev *pdev) E1000_WRITE_REG(&adapter->hw, MANC, manc); } + /* If the controller is 82573 and f/w is AMT, do not set + * DRV_LOAD until the interface is up. For all other cases, + * let the f/w know that the h/w is now under the control + * of the driver. */ + if (adapter->hw.mac_type != e1000_82573 || + !e1000_check_mng_mode(&adapter->hw)) + e1000_get_hw_control(adapter); + return 0; } #endif - #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling 'interrupt' - used by things like netconsole to send skbs * without having to re-enable interrupts. It's not called while * the interrupt routine is executing. */ - -static void e1000_netpoll (struct net_device *dev) +static void +e1000_netpoll(struct net_device *netdev) { - struct e1000_adapter *adapter = dev->priv; + struct e1000_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); - e1000_intr (adapter->pdev->irq, dev, NULL); + e1000_intr(adapter->pdev->irq, netdev, NULL); + e1000_clean_tx_irq(adapter); +#ifndef CONFIG_E1000_NAPI + adapter->clean_rx(adapter); +#endif enable_irq(adapter->pdev->irq); } #endif diff -pruN ./drivers/net/e1000.lkm81/e1000_osdep.h ./drivers/net/e1000/e1000_osdep.h --- ./drivers/net/e1000.lkm81/e1000_osdep.h 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000_osdep.h 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -46,9 +46,15 @@ /* Don't mdelay in interrupt context! */ \ BUG(); \ } else { \ - set_current_state(TASK_UNINTERRUPTIBLE); \ - schedule_timeout((x * HZ)/1000 + 2); \ + msleep(x); \ } } while(0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) #endif #define PCI_COMMAND_REGISTER PCI_COMMAND @@ -95,6 +101,29 @@ typedef enum { (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ ((offset) << 2))) +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? 
E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) #endif /* _E1000_OSDEP_H_ */ diff -pruN ./drivers/net/e1000.lkm81/e1000_param.c ./drivers/net/e1000/e1000_param.c --- ./drivers/net/e1000.lkm81/e1000_param.c 2006-04-06 19:04:00.000000000 +0400 +++ ./drivers/net/e1000/e1000_param.c 2006-04-06 19:05:40.000000000 +0400 @@ -1,7 +1,7 @@ /******************************************************************************* - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -34,31 +34,21 @@ #define E1000_MAX_NIC 32 -#define OPTION_UNSET -1 +#define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 -/* Module Parameters are always initialized to -1, so that the driver - * can tell the difference between no user specified value or the - * user asking for the default value. - * The true default values are loaded in when e1000_check_options is called. - * - * This is a GCC extension to ANSI C. - * See the item "Labeled Elements in Initializers" in the section - * "Extensions to the C Language Family" of the GCC documentation. - */ - -#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } - /* All parameters are treated the same, as an integer array of values. * This macro just reduces the need to repeat the same declaration code * over and over (plus this helps to avoid typo bugs). */ -#define E1000_PARAM(X, S) \ -static const int __devinitdata X[E1000_MAX_NIC + 1] = E1000_PARAM_INIT; \ -MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \ -MODULE_PARM_DESC(X, S); +#define E1000_PARAM_INIT { [0 ... 
E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static int num_##X = 0; \ + module_param_array_named(X, X, int, num_##X, 0); \ + MODULE_PARM_DESC(X, desc); /* Transmit Descriptor Count * @@ -212,7 +202,7 @@ E1000_PARAM(InterruptThrottleRate, "Inte #define MAX_TXABSDELAY 0xFFFF #define MIN_TXABSDELAY 0 -#define DEFAULT_ITR 1 +#define DEFAULT_ITR 8000 #define MAX_ITR 100000 #define MIN_ITR 100 @@ -235,7 +225,7 @@ struct e1000_option { static int __devinit e1000_validate_option(int *value, struct e1000_option *opt, - struct e1000_adapter *adapter) + struct e1000_adapter *adapter) { if(*value == OPTION_UNSET) { *value = opt->def; @@ -256,7 +246,7 @@ e1000_validate_option(int *value, struct case range_option: if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) { DPRINTK(PROBE, INFO, - "%s set to %i\n", opt->name, *value); + "%s set to %i\n", opt->name, *value); return 0; } break; @@ -305,7 +295,6 @@ e1000_check_options(struct e1000_adapter DPRINTK(PROBE, NOTICE, "Warning: no configuration for board #%i\n", bd); DPRINTK(PROBE, NOTICE, "Using defaults for all values\n"); - bd = E1000_MAX_NIC; } { /* Transmit Descriptor Count */ @@ -322,9 +311,14 @@ e1000_check_options(struct e1000_adapter opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD; - tx_ring->count = TxDescriptors[bd]; - e1000_validate_option(&tx_ring->count, &opt, adapter); - E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + E1000_ROUNDUP(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } } { /* Receive Descriptor Count */ struct e1000_option opt = { @@ -340,9 +334,14 @@ e1000_check_options(struct e1000_adapter opt.arg.r.max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : E1000_MAX_82544_RXD; - rx_ring->count = RxDescriptors[bd]; - e1000_validate_option(&rx_ring->count, &opt, adapter); - E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + E1000_ROUNDUP(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } } { /* Checksum Offload Enable/Disable */ struct e1000_option opt = { @@ -352,9 +351,13 @@ e1000_check_options(struct e1000_adapter .def = OPTION_ENABLED }; - int rx_csum = XsumRX[bd]; - e1000_validate_option(&rx_csum, &opt, adapter); - adapter->rx_csum = rx_csum; + if (num_XsumRX > bd) { + int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } } { /* Flow Control */ @@ -374,9 +377,13 @@ e1000_check_options(struct e1000_adapter .p = fc_list }} }; - int fc = FlowControl[bd]; - e1000_validate_option(&fc, &opt, adapter); - adapter->hw.fc = adapter->hw.original_fc = fc; + if (num_FlowControl > bd) { + int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = opt.def; + } } { /* Transmit Interrupt Delay */ struct e1000_option opt = { @@ -388,8 +395,13 @@ e1000_check_options(struct e1000_adapter .max = MAX_TXDELAY }} }; - adapter->tx_int_delay = TxIntDelay[bd]; - e1000_validate_option(&adapter->tx_int_delay, &opt, adapter); + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } } { /* Transmit Absolute Interrupt Delay */ struct e1000_option opt = { @@ -401,8 +413,13 @@ e1000_check_options(struct e1000_adapter .max = MAX_TXABSDELAY }} }; - adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; - e1000_validate_option(&adapter->tx_abs_int_delay, &opt, adapter); + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } } { /* Receive Interrupt Delay */ struct e1000_option opt = { @@ -414,8 +431,13 @@ e1000_check_options(struct e1000_adapter .max = MAX_RXDELAY }} }; - adapter->rx_int_delay = RxIntDelay[bd]; - e1000_validate_option(&adapter->rx_int_delay, &opt, adapter); + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } } { /* Receive Absolute Interrupt Delay */ struct e1000_option opt = { @@ -427,8 +449,13 @@ e1000_check_options(struct e1000_adapter .max = MAX_RXABSDELAY }} }; - adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; - e1000_validate_option(&adapter->rx_abs_int_delay, &opt, adapter); + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } } { /* Interrupt Throttling Rate */ struct e1000_option opt = { @@ -440,21 +467,24 @@ e1000_check_options(struct e1000_adapter .max = MAX_ITR }} }; - adapter->itr = InterruptThrottleRate[bd]; - switch(adapter->itr) { - case -1: - adapter->itr = 1; - break; - case 0: - DPRINTK(PROBE, INFO, "%s turned off\n", opt.name); - break; - case 1: - DPRINTK(PROBE, INFO, - "%s set to dynamic mode\n", opt.name); - break; 
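The e1000_param.c hunks convert the old MODULE_PARM() arrays to module_param_array_named() with a per-parameter num_<Name> count, so each option is applied to board bd only when the user actually supplied that many values (num_X > bd) and otherwise falls back to the option-table default; the old clamping of bd to E1000_MAX_NIC is dropped. For InterruptThrottleRate, the default moves from dynamic (1) to a fixed 8000 interrupts/s, with 0 still meaning throttling off, 1 still meaning dynamic, and other values validated against MIN_ITR..MAX_ITR. A rough expansion of the new E1000_PARAM(X, desc) macro for one parameter, restated from the hunk (the description string is illustrative):

        static int __devinitdata TxDescriptors[E1000_MAX_NIC + 1] =
                { [0 ... E1000_MAX_NIC] = OPTION_UNSET };
        static int num_TxDescriptors = 0;
        module_param_array_named(TxDescriptors, TxDescriptors, int,
                                 num_TxDescriptors, 0);
        MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");

Values are given as a comma-separated list, one entry per adapter in probe order, so boards beyond the supplied count simply keep the driver defaults.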
- default: - e1000_validate_option(&adapter->itr, &opt, adapter); - break; + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch(adapter->itr) { + case 0: + DPRINTK(PROBE, INFO, "%s turned off\n", + opt.name); + break; + case 1: + DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", + opt.name); + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + break; + } + } else { + adapter->itr = opt.def; } } @@ -482,19 +512,20 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) { int bd = adapter->bd_number; - bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; - - if((Speed[bd] != OPTION_UNSET)) { + if(num_Speed > bd) { DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " "parameter ignored\n"); } - if((Duplex[bd] != OPTION_UNSET)) { + + if(num_Duplex > bd) { DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " "parameter ignored\n"); } - if((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) { - DPRINTK(PROBE, INFO, "AutoNeg other than Full/1000 is " - "not valid for fiber adapters, parameter ignored\n"); + + if((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " + "not valid for fiber adapters, " + "parameter ignored\n"); } } @@ -510,7 +541,6 @@ e1000_check_copper_options(struct e1000_ { int speed, dplx; int bd = adapter->bd_number; - bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd; { /* Speed */ struct e1000_opt_list speed_list[] = {{ 0, "" }, @@ -527,8 +557,12 @@ e1000_check_copper_options(struct e1000_ .p = speed_list }} }; - speed = Speed[bd]; - e1000_validate_option(&speed, &opt, adapter); + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } } { /* Duplex */ struct e1000_opt_list dplx_list[] = {{ 0, "" }, @@ -544,11 +578,15 @@ e1000_check_copper_options(struct e1000_ .p = dplx_list }} }; - dplx = Duplex[bd]; - e1000_validate_option(&dplx, &opt, adapter); + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } } - if(AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) { + if((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { DPRINTK(PROBE, INFO, "AutoNeg specified along with Speed or Duplex, " "parameter ignored\n"); @@ -605,30 +643,30 @@ e1000_check_copper_options(struct e1000_ switch (speed + dplx) { case 0: adapter->hw.autoneg = adapter->fc_autoneg = 1; - if(Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET) + if((num_Speed > bd) && (speed != 0 || dplx != 0)) DPRINTK(PROBE, INFO, "Speed and duplex autonegotiation enabled\n"); break; case HALF_DUPLEX: DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n"); - DPRINTK(PROBE, INFO, - "Using Autonegotiation at Half Duplex only\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Half Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | ADVERTISE_100_HALF; break; case FULL_DUPLEX: DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n"); - DPRINTK(PROBE, INFO, - "Using Autonegotiation at Full Duplex only\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "Full Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | ADVERTISE_100_FULL | ADVERTISE_1000_FULL; break; case SPEED_10: - DPRINTK(PROBE, INFO, - "10 Mbps Speed specified without Duplex\n"); + DPRINTK(PROBE, INFO, "10 Mbps Speed specified 
" + "without Duplex\n"); DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | @@ -647,10 +685,10 @@ e1000_check_copper_options(struct e1000_ adapter->hw.autoneg_advertised = 0; break; case SPEED_100: - DPRINTK(PROBE, INFO, - "100 Mbps Speed specified without Duplex\n"); - DPRINTK(PROBE, INFO, - "Using Autonegotiation at 100 Mbps only\n"); + DPRINTK(PROBE, INFO, "100 Mbps Speed specified " + "without Duplex\n"); + DPRINTK(PROBE, INFO, "Using Autonegotiation at " + "100 Mbps only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | ADVERTISE_100_FULL; @@ -668,10 +706,11 @@ e1000_check_copper_options(struct e1000_ adapter->hw.autoneg_advertised = 0; break; case SPEED_1000: + DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " + "Duplex\n"); DPRINTK(PROBE, INFO, - "1000 Mbps Speed specified without Duplex\n"); - DPRINTK(PROBE, INFO, - "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + "Using Autonegotiation at 1000 Mbps " + "Full Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; break; @@ -679,7 +718,8 @@ e1000_check_copper_options(struct e1000_ DPRINTK(PROBE, INFO, "Half Duplex is not supported at 1000 Mbps\n"); DPRINTK(PROBE, INFO, - "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); + "Using Autonegotiation at 1000 Mbps " + "Full Duplex only\n"); adapter->hw.autoneg = adapter->fc_autoneg = 1; adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; break; @@ -696,8 +736,8 @@ e1000_check_copper_options(struct e1000_ /* Speed, AutoNeg and MDI/MDI-X must all play nice */ if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { DPRINTK(PROBE, INFO, - "Speed, AutoNeg and MDI-X specifications are " - "incompatible. Setting MDI-X to a compatible value.\n"); + "Speed, AutoNeg and MDI-X specifications are " + "incompatible. Setting MDI-X to a compatible value.\n"); } }