Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--  drivers/net/skge.c  |  110
1 file changed, 64 insertions(+), 46 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 8fecf1b817f..39c6677dff5 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -105,7 +105,8 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -671,7 +672,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		switch (mode) {
 		case LED_MODE_OFF:
@@ -742,7 +743,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 				     PHY_M_LED_MO_RX(MO_LED_ON));
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 }
 
 /* blink LED's for finding board */
@@ -1316,7 +1317,7 @@ static void xm_phy_init(struct skge_port *skge)
 	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
 
 	/* Poll PHY for status changes */
-	schedule_delayed_work(&skge->link_thread, LINK_HZ);
+	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void xm_check_link(struct net_device *dev)
@@ -1391,10 +1392,9 @@ static void xm_check_link(struct net_device *dev)
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(struct work_struct *work)
+static void xm_link_timer(unsigned long arg)
 {
-	struct skge_port *skge =
-		container_of(work, struct skge_port, link_thread.work);
+	struct skge_port *skge = (struct skge_port *) arg;
 	struct net_device *dev = skge->netdev;
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
@@ -1414,13 +1414,13 @@ static void xm_link_timer(struct work_struct *work)
 		goto nochange;
 	}
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock(&hw->phy_lock);
 	xm_check_link(dev);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock(&hw->phy_lock);
 
 nochange:
 	if (netif_running(dev))
-		schedule_delayed_work(&skge->link_thread, LINK_HZ);
+		mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void genesis_mac_init(struct skge_hw *hw, int port)
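
The hunks above replace the delayed-work based PHY poll with an ordinary kernel timer (the matching setup_timer() call appears further down, in skge_devinit()). For reference, a minimal sketch of that pre-4.15 timer pattern outside of skge might look as follows; poll_demo, demo_poll and the module boilerplate are made-up names, not part of the patch:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_demo;

/* runs in softirq context, like xm_link_timer() after this patch */
static void demo_poll(unsigned long arg)
{
	printk(KERN_INFO "poll tick, cookie=%lx\n", arg);

	/* re-arm ourselves, as xm_link_timer() does with LINK_HZ */
	mod_timer(&poll_demo, jiffies + HZ);
}

static int __init demo_init(void)
{
	/* bind callback and context word, then start the first tick */
	setup_timer(&poll_demo, demo_poll, 0UL);
	mod_timer(&poll_demo, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&poll_demo);	/* as skge_down() now does */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the callback now runs in softirq context rather than in a workqueue thread, it can no longer sleep, which is why hw->phy_mutex becomes the spinlock hw->phy_lock throughout this patch.
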
@@ -2323,7 +2323,7 @@ static void skge_phy_reset(struct skge_port *skge)
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		genesis_reset(hw, port);
 		genesis_mac_init(hw, port);
@@ -2331,7 +2331,7 @@ static void skge_phy_reset(struct skge_port *skge)
 		yukon_reset(hw, port);
 		yukon_init(hw, port);
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	dev->set_multicast_list(dev);
 }
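
All process-context users of the old hw->phy_mutex (skge_led() and skge_phy_reset() above, the MII ioctl and skge_up() below) now take hw->phy_lock with spin_lock_bh(), while the softirq-side users (the link timer and the PHY tasklet) take the plain spin_lock(). A rough sketch of that split, with made-up names demo_lock/demo_state/demo_timer_fn/demo_update, assuming the usual rule that _bh callers must keep the softirq side off the local CPU:

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_state;

/* softirq context (timer or tasklet): BHs are already disabled here,
 * so a plain spin_lock is sufficient. */
static void demo_timer_fn(unsigned long arg)
{
	spin_lock(&demo_lock);
	demo_state++;
	spin_unlock(&demo_lock);
}

/* process context (open/ioctl/ethtool paths): the _bh variant keeps the
 * timer/tasklet from running on this CPU while the lock is held. */
static void demo_update(int val)
{
	spin_lock_bh(&demo_lock);
	demo_state = val;
	spin_unlock_bh(&demo_lock);
}
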
@@ -2354,12 +2354,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		/* fallthru */
 	case SIOCGMIIREG: {
 		u16 val = 0;
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
 		else
 			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		data->val_out = val;
 		break;
 	}
@@ -2368,14 +2368,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
 		else
 			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		break;
 	}
 	return err;
@@ -2481,12 +2481,12 @@ static int skge_up(struct net_device *dev)
 		goto free_rx_ring;
 
 	/* Initialize MAC */
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
 	else
 		yukon_mac_init(hw, port);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	/* Configure RAMbuffers */
 	chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2504,6 +2504,11 @@ static int skge_up(struct net_device *dev)
 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
 	skge_led(skge, LED_MODE_ON);
 
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
+
 	netif_poll_enable(dev);
 	return 0;
 
@@ -2531,7 +2536,14 @@ static int skge_down(struct net_device *dev)
 
 	netif_stop_queue(dev);
 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
-		cancel_delayed_work(&skge->link_thread);
+		del_timer_sync(&skge->link_timer);
+
+	netif_poll_disable(dev);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask &= ~portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
 	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2575,8 +2587,10 @@ static int skge_down(struct net_device *dev)
 
 	skge_led(skge, LED_MODE_OFF);
 
-	netif_poll_disable(dev);
+	netif_tx_lock_bh(dev);
 	skge_tx_clean(dev);
+	netif_tx_unlock_bh(dev);
+
 	skge_rx_clean(skge);
 
 	kfree(skge->rx_ring.start);
@@ -2721,7 +2735,6 @@ static void skge_tx_clean(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_element *e;
 
-	netif_tx_lock_bh(dev);
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 		skge_tx_free(skge, e, td->control);
@@ -2730,7 +2743,6 @@ static void skge_tx_clean(struct net_device *dev)
 
 	skge->tx_ring.to_clean = e;
 	netif_wake_queue(dev);
-	netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -3049,7 +3061,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 
 	spin_lock_irqsave(&hw->hw_lock, flags);
 	__netif_rx_complete(dev);
-	hw->intr_mask |= irqmask[skge->port];
+	hw->intr_mask |= napimask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 	skge_read32(hw, B0_IMSK);
 	spin_unlock_irqrestore(&hw->hw_lock, flags);
@@ -3160,28 +3172,29 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrupt from PHY are handled in work queue
+ * Interrupt from PHY are handled in tasklet (softirq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(struct work_struct *work)
+static void skge_extirq(unsigned long arg)
 {
-	struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
+	struct skge_hw *hw = (struct skge_hw *) arg;
 	int port;
 
-	mutex_lock(&hw->phy_mutex);
 	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
-		struct skge_port *skge = netdev_priv(dev);
 
 		if (netif_running(dev)) {
+			struct skge_port *skge = netdev_priv(dev);
+
+			spin_lock(&hw->phy_lock);
 			if (hw->chip_id != CHIP_ID_GENESIS)
 				yukon_phy_intr(skge);
 			else if (hw->phy_type == SK_PHY_BCOM)
 				bcom_phy_intr(skge);
+			spin_unlock(&hw->phy_lock);
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
@@ -3206,7 +3219,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		schedule_work(&hw->phy_work);
+		tasklet_schedule(&hw->phy_task);
 	}
 
 	if (status & (IS_XA1_F|IS_R1_F)) {
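
The interrupt handler now defers PHY work to a tasklet instead of a workqueue. For reference, a cut-down sketch of that pattern (demo_dev, demo_task_fn, demo_irq and demo_setup are illustrative names, not skge code):

#include <linux/interrupt.h>

struct demo_dev {
	struct tasklet_struct task;
	/* ... */
};

/* deferred work: runs in softirq context shortly after the hard IRQ */
static void demo_task_fn(unsigned long arg)
{
	struct demo_dev *d = (struct demo_dev *) arg;

	/* slow, spin-wait style register access goes here */
	(void) d;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_dev *d = dev_id;

	/* keep the hard IRQ handler short: just defer the heavy work */
	tasklet_schedule(&d->task);
	return IRQ_HANDLED;
}

static void demo_setup(struct demo_dev *d)
{
	/* bind handler and context word once, e.g. at probe time */
	tasklet_init(&d->task, demo_task_fn, (unsigned long) d);
}

skge does the same with hw->phy_task: tasklet_init() in skge_probe(), tasklet_schedule() here when IS_EXT_REG fires, and tasklet_disable() in skge_remove().
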
@@ -3282,23 +3295,28 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
 
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 
-	/* disable Rx */
-	ctrl = gma_read16(hw, port, GM_GP_CTRL);
-	gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+	if (!netif_running(dev)) {
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+	} else {
+		/* disable Rx */
+		spin_lock_bh(&hw->phy_lock);
+		ctrl = gma_read16(hw, port, GM_GP_CTRL);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
 
-	memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
-	memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
 
-	if (netif_running(dev)) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
 		else {
 			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
 			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
 		}
-	}
 
-	gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		spin_unlock_bh(&hw->phy_lock);
+	}
 
 	return 0;
 }
@@ -3413,10 +3431,9 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
-	hw->intr_mask = IS_HW_ERR | IS_PORT_1;
-	if (hw->ports > 1)
-		hw->intr_mask |= IS_PORT_2;
+	hw->intr_mask = IS_HW_ERR;
 
+	/* Use PHY IRQ for all but fiber based Genesis board */
 	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
 		hw->intr_mask |= IS_EXT_REG;
 
@@ -3484,14 +3501,12 @@ static int skge_reset(struct skge_hw *hw)
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	mutex_lock(&hw->phy_mutex);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			genesis_reset(hw, i);
 		else
 			yukon_reset(hw, i);
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3539,6 +3554,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->netdev = dev;
 	skge->hw = hw;
 	skge->msg_enable = netif_msg_init(debug, default_msg);
+
 	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
 	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
 
@@ -3555,7 +3571,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->port = port;
 
 	/* Only used for Genesis XMAC */
-	INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
+	setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
 
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3637,9 +3653,9 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 	hw->pdev = pdev;
-	mutex_init(&hw->phy_mutex);
-	INIT_WORK(&hw->phy_work, skge_extirq);
 	spin_lock_init(&hw->hw_lock);
+	spin_lock_init(&hw->phy_lock);
+	tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3725,6 +3741,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	tasklet_disable(&hw->phy_task);
+
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);