author     Thomas Klein <osstklei@de.ibm.com>   2006-11-03 11:48:23 -0500
committer  Jeff Garzik <jeff@garzik.org>        2006-11-06 02:55:07 -0500
commit     a1d261c561522151cb96c75f1dd1a51cf17665cf (patch)
tree       30be5e219405b5cc739ab5b83b0e91ad4dbb89cb /drivers
parent     07fd06b3bc1589e44aefd02eb28700a51b3c9d12 (diff)
[PATCH] ehea: 64K page support fix
This patch fixes 64k page support by using PAGE_MASK and appropriate pagesize defines in several places.
Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
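
For context on the mechanics: the control-block allocations switch from H_CB_ALIGNMENT to PAGE_SIZE, the alignment check in ehea_h_register_rpage_mr() tests log_pageaddr against ~PAGE_MASK instead of the hard-coded 0xfff, and the memory-region registration loop steps in units of EHEA_PAGESIZE rather than PAGE_SIZE. Below is a minimal userspace sketch of the masking arithmetic those changes rely on; the DEMO_* constants and the sample address are illustrative stand-ins for a 64 KiB page configuration, not the kernel's own macros.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for PAGE_SIZE/PAGE_MASK on a 64 KiB-page
 * configuration; not taken from the kernel headers. */
#define DEMO_PAGE_SHIFT 16
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)    /* 0x10000 */
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
        uint64_t addr = 0x12345678ULL;               /* sample address */

        /* Round down to the start of the page containing addr. */
        uint64_t page_base = addr & DEMO_PAGE_MASK;  /* 0x12340000 */

        /* Offset of addr within its page. */
        uint64_t offset = addr & ~DEMO_PAGE_MASK;    /* 0x5678 */

        /* A non-zero offset means addr is not page aligned; this is the
         * style of check the patch uses in ehea_h_register_rpage_mr()
         * in place of the 4 KiB-only test (log_pageaddr & 0xfff). */
        printf("base=%#llx offset=%#llx aligned=%d\n",
               (unsigned long long)page_base,
               (unsigned long long)offset,
               offset == 0);
        return 0;
}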
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c |  2
-rw-r--r--  drivers/net/ehea/ehea_main.c    | 26
-rw-r--r--  drivers/net/ehea/ehea_phyp.c    |  2
-rw-r--r--  drivers/net/ehea/ehea_phyp.h    |  6
-rw-r--r--  drivers/net/ehea/ehea_qmr.c     | 13
5 files changed, 26 insertions, 23 deletions
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 82eb2fb8c75e..9f57c2e78ced 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -238,7 +238,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
         data[i++] = port->port_res[0].swqe_refill_th;
         data[i++] = port->resets;
 
-        cb6 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb6) {
                 ehea_error("no mem for cb6");
                 return;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 4538c99733fd..6ad696101418 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -92,7 +92,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 
         memset(stats, 0, sizeof(*stats));
 
-        cb2 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb2) {
                 ehea_error("no mem for cb2");
                 goto out;
@@ -586,8 +586,8 @@ int ehea_sense_port_attr(struct ehea_port *port)
         u64 hret;
         struct hcp_ehea_port_cb0 *cb0;
 
-        cb0 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC);   /* May be called via */
+        cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);        /* May be called via */
         if (!cb0) {                                  /* ehea_neq_tasklet() */
                 ehea_error("no mem for cb0");
                 ret = -ENOMEM;
                 goto out;
@@ -670,7 +670,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
         u64 hret;
         int ret = 0;
 
-        cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb4) {
                 ehea_error("no mem for cb4");
                 ret = -ENOMEM;
@@ -985,7 +985,7 @@ static int ehea_configure_port(struct ehea_port *port)
         struct hcp_ehea_port_cb0 *cb0;
 
         ret = -ENOMEM;
-        cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb0)
                 goto out;
 
@@ -1443,7 +1443,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
                 goto out;
         }
 
-        cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb0) {
                 ehea_error("no mem for cb0");
                 ret = -ENOMEM;
@@ -1501,7 +1501,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
         if ((enable && port->promisc) || (!enable && !port->promisc))
                 return;
 
-        cb7 = kzalloc(H_CB_ALIGNMENT, GFP_ATOMIC);
+        cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
         if (!cb7) {
                 ehea_error("no mem for cb7");
                 goto out;
@@ -1870,7 +1870,7 @@ static void ehea_vlan_rx_register(struct net_device *dev,
 
         port->vgrp = grp;
 
-        cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb1) {
                 ehea_error("no mem for cb1");
                 goto out;
@@ -1899,7 +1899,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
         int index;
         u64 hret;
 
-        cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb1) {
                 ehea_error("no mem for cb1");
                 goto out;
@@ -1935,7 +1935,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
         if (port->vgrp)
                 port->vgrp->vlan_devices[vid] = NULL;
 
-        cb1 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb1) {
                 ehea_error("no mem for cb1");
                 goto out;
@@ -1968,7 +1968,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
         u64 dummy64 = 0;
         struct hcp_modify_qp_cb0* cb0;
 
-        cb0 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb0) {
                 ret = -ENOMEM;
                 goto out;
@@ -2269,7 +2269,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
         u64 hret;
         int ret;
 
-        cb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb) {
                 ret = -ENOMEM;
                 goto out;
@@ -2340,7 +2340,7 @@ static int ehea_setup_single_port(struct ehea_port *port,
                 goto out;
 
         /* Enable Jumbo frames */
-        cb4 = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!cb4) {
                 ehea_error("no mem for cb4");
         } else {
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 0b51a8cea077..0cfc2bc1a27b 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -506,7 +506,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
                              const u8 pagesize, const u8 queue_type,
                              const u64 log_pageaddr, const u64 count)
 {
-        if ((count > 1) && (log_pageaddr & 0xfff)) {
+        if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
                 ehea_error("not on pageboundary");
                 return H_PARAMETER;
         }
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index fa51e3b5bb05..919f94b75933 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -81,14 +81,16 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
 static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
                                  u64 paddr_user)
 {
-        epas->kernel.addr = ioremap(paddr_kernel, PAGE_SIZE);
+        /* To support 64k pages we must round to 64k page boundary */
+        epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
+                            (paddr_kernel & ~PAGE_MASK);
         epas->user.addr = paddr_user;
 }
 
 static inline void hcp_epas_dtor(struct h_epas *epas)
 {
         if (epas->kernel.addr)
-                iounmap(epas->kernel.addr);
+                iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
 
         epas->user.addr = 0;
         epas->kernel.addr = 0;
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 161559315c0e..72ef7bde3346 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -512,7 +512,7 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
 
         start = KERNELBASE;
         end = (u64)high_memory;
-        nr_pages = (end - start) / PAGE_SIZE;
+        nr_pages = (end - start) / EHEA_PAGESIZE;
 
         pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
         if (!pt) {
@@ -538,9 +538,9 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
                 if (nr_pages > 1) {
                         u64 num_pages = min(nr_pages, (u64)512);
                         for (i = 0; i < num_pages; i++)
-                                pt[i] = virt_to_abs((void*)(((u64)start)
-                                                            + ((k++) *
-                                                               PAGE_SIZE)));
+                                pt[i] = virt_to_abs((void*)(((u64)start) +
+                                                            ((k++) *
+                                                             EHEA_PAGESIZE)));
 
                         hret = ehea_h_register_rpage_mr(adapter->handle,
                                                         adapter->mr.handle, 0,
@@ -548,8 +548,9 @@ int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
                                                         num_pages);
                         nr_pages -= num_pages;
                 } else {
-                        u64 abs_adr = virt_to_abs((void*)(((u64)start)
-                                                          + (k * PAGE_SIZE)));
+                        u64 abs_adr = virt_to_abs((void*)(((u64)start) +
+                                                          (k * EHEA_PAGESIZE)));
+
                         hret = ehea_h_register_rpage_mr(adapter->handle,
                                                         adapter->mr.handle, 0,
                                                         0, abs_adr,1);
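
A closing note on the ehea_phyp.h hunk above: hcp_epas_ctor() now rounds paddr_kernel down to the page boundary before ioremap() and adds the in-page offset back to the returned pointer, and hcp_epas_dtor() masks the stored address down again before iounmap(), since only the page-aligned mapping was created. The sketch below mirrors that round-down/re-offset pattern in plain C; demo_map()/demo_unmap() and the sample address are hypothetical stand-ins, not ioremap()/iounmap() or anything from the driver.

#include <stdint.h>
#include <stdio.h>

/* Illustrative 64 KiB page constants, as in the earlier sketch. */
#define DEMO_PAGE_SIZE 0x10000ULL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

/* Hypothetical mapping helpers that, like ioremap()/iounmap() in this
 * context, are assumed to deal only in page-aligned addresses. */
static uint64_t demo_map(uint64_t page_base)
{
        printf("map   %#llx\n", (unsigned long long)page_base);
        return page_base;                 /* identity mapping for the demo */
}

static void demo_unmap(uint64_t page_base)
{
        printf("unmap %#llx\n", (unsigned long long)page_base);
}

int main(void)
{
        uint64_t paddr = 0x3fe801c00ULL;  /* hypothetical, not 64 KiB aligned */

        /* ctor side: map the containing page, then re-add the in-page
         * offset so the caller still sees the exact target address. */
        uint64_t addr = demo_map(paddr & DEMO_PAGE_MASK) +
                        (paddr & ~DEMO_PAGE_MASK);

        /* dtor side: mask the stored address back to the page boundary
         * before unmapping, because that is what was actually mapped. */
        demo_unmap(addr & DEMO_PAGE_MASK);
        return 0;
}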