r8169: modify the flow of the hw reset.

- Disable tx and rx by resetting the hw, so replace rtl8169_asic_down
  with rtl8169_hw_reset.
- RxConfig bits 0 ~ 5 have to be cleared before the hw reset to avoid
  receiving spurious data (a standalone sketch of this masking follows
  the sign-offs).
- Certain chips (RTL_GIGA_MAC_VER_27/28/31) need to wait for the tx
  queues to drain before reset; the others assert StopReq instead.
- Remove the hw reset done before hw_start; it is already performed in
  the close, down, and device probe paths.
- Move rtl8169_init_ring_indexes into rtl_hw_reset. The tx and rx
  indexes only need to be zeroed when the hw is reset.

Signed-off-by: Hayes Wang <hayeswang@realtek.com>
Acked-by: Francois Romieu <romieu@fr.zoreil.com>
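
A minimal standalone sketch of the RxConfig masking described in the second bullet, assuming the Accept* flags occupy bits 0 ~ 5 as the driver defines them (the sample register value is arbitrary):

/*
 * Minimal userspace sketch (not driver code) of the RxConfig masking in
 * the second bullet.  The Accept* values are assumed to occupy bits
 * 0 ~ 5 as the driver's flags do; the sample register value is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	AcceptAllPhys	= 0x01,	/* bit 0 */
	AcceptMyPhys	= 0x02,	/* bit 1 */
	AcceptMulticast	= 0x04,	/* bit 2 */
	AcceptBroadcast	= 0x08,	/* bit 3 */
	AcceptRunt	= 0x10,	/* bit 4 */
	AcceptErr	= 0x20,	/* bit 5 */
};

int main(void)
{
	/* Pretend this was read back from the RxConfig register. */
	uint32_t rxcfg = 0x0000e70f;

	printf("RxConfig before: 0x%08x\n", (unsigned)rxcfg);

	/* Clear bits 0 ~ 5 so the receiver accepts nothing while the
	 * chip reset is in flight, mirroring rtl_rx_close() below. */
	rxcfg &= ~(uint32_t)(AcceptErr | AcceptRunt | AcceptBroadcast |
			     AcceptMulticast | AcceptMyPhys | AcceptAllPhys);

	printf("RxConfig after:  0x%08x\n", (unsigned)rxcfg);
	return 0;
}

The intent, per the bullet above, is that the receiver stops accepting frames of any kind before the chip reset starts, so no spurious data lands in the rx ring.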
Authored by Hayes Wang on 2011-07-06 15:58:03 +08:00; committed by Francois Romieu
parent 4f6b00e5f1
commit 92fc43b415


@@ -1071,13 +1071,6 @@ static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
 	RTL_W16(IntrStatus, 0xffff);
 }
 
-static void rtl8169_asic_down(void __iomem *ioaddr)
-{
-	RTL_W8(ChipCmd, 0x00);
-	rtl8169_irq_mask_and_ack(ioaddr);
-	RTL_R16(CPlusCmd);
-}
-
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3337,6 +3330,11 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
 	}
 }
 
+static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
+{
+	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
 static void rtl_hw_reset(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3349,8 +3347,10 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
 	for (i = 0; i < 100; i++) {
 		if ((RTL_R8(ChipCmd) & CmdReset) == 0)
 			break;
-		msleep_interruptible(1);
+		udelay(100);
 	}
+
+	rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -3732,6 +3732,16 @@ static int rtl8169_open(struct net_device *dev)
 	goto out;
 }
 
+static void rtl_rx_close(struct rtl8169_private *tp)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	u32 rxcfg = RTL_R32(RxConfig);
+
+	rxcfg &= ~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast |
+		   AcceptMyPhys | AcceptAllPhys);
+	RTL_W32(RxConfig, rxcfg);
+}
+
 static void rtl8169_hw_reset(struct rtl8169_private *tp)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -3739,19 +3749,19 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
 	/* Disable interrupts */
 	rtl8169_irq_mask_and_ack(ioaddr);
 
+	rtl_rx_close(tp);
+
 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
 		while (RTL_R8(TxPoll) & NPQ)
 			udelay(20);
-
+	} else {
+		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+		udelay(100);
 	}
 
-	/* Reset the chipset */
-	RTL_W8(ChipCmd, CmdReset);
-
-	/* PCI commit */
-	RTL_R8(ChipCmd);
+	rtl_hw_reset(tp);
 }
 
 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
@@ -3771,8 +3781,6 @@ static void rtl_hw_start(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	rtl_hw_reset(tp);
-
 	tp->hw_start(dev);
 
 	netif_start_queue(dev);
@@ -4581,11 +4589,6 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
 	return -ENOMEM;
 }
 
-static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
-{
-	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
-}
-
 static int rtl8169_init_ring(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
@@ -4713,7 +4716,7 @@ static void rtl8169_reset_task(struct work_struct *work)
 
 	rtl8169_tx_clear(tp);
 
-	rtl8169_init_ring_indexes(tp);
+	rtl8169_hw_reset(tp);
 	rtl_hw_start(dev);
 	netif_wake_queue(dev);
 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5127,7 +5130,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
		 * the chip, so just exit the loop.
		 */
		if (unlikely(!netif_running(dev))) {
-			rtl8169_asic_down(ioaddr);
+			rtl8169_hw_reset(tp);
			break;
		}
 
@@ -5250,7 +5253,7 @@ static void rtl8169_down(struct net_device *dev)
 
 	spin_lock_irq(&tp->lock);
 
-	rtl8169_asic_down(ioaddr);
+	rtl8169_hw_reset(tp);
 	/*
 	 * At this point device interrupts can not be enabled in any function,
 	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
@@ -5504,7 +5507,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
 
		spin_lock_irq(&tp->lock);
 
-		rtl8169_asic_down(ioaddr);
+		rtl8169_hw_reset(tp);
 
		spin_unlock_irq(&tp->lock);