78 files changed, 2190 insertions, 695 deletions
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 9876d80d85dd..e0ac74e5d4c4 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -1,6 +1,6 @@
 menu "Host processor type and features"
 
-source "arch/i386/Kconfig.cpu"
+source "arch/x86/Kconfig.cpu"
 
 endmenu
 
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 0178df306939..b01dfb00e5f8 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -9,6 +9,7 @@ ELF_ARCH := $(SUBARCH)
 ELF_FORMAT := elf32-$(SUBARCH)
 OBJCOPYFLAGS := -O binary -R .note -R .comment -S
 HEADER_ARCH := x86
+CHECKFLAGS += -D__i386__
 
 ifeq ("$(origin SUBARCH)", "command line")
 ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
@@ -26,10 +27,8 @@ AFLAGS += -DCONFIG_X86_32
 CONFIG_X86_32 := y
 export CONFIG_X86_32
 
-ARCH_KERNEL_DEFINES += -U__$(SUBARCH)__ -U$(SUBARCH)
-
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
-include $(srctree)/arch/i386/Makefile.cpu
+include $(srctree)/arch/x86/Makefile_32.cpu
 
 # prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
 cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index fe5316f0c6a5..8ed362f93582 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -6,12 +6,9 @@ START := 0x60000000
 
 _extra_flags_ = -fno-builtin -m64
 
-#We #undef __x86_64__ for kernelspace, not for userspace where
-#it's needed for headers to work!
-ARCH_KERNEL_DEFINES = -U__$(SUBARCH)__
 KBUILD_CFLAGS += $(_extra_flags_)
 
-CHECKFLAGS += -m64
+CHECKFLAGS += -m64 -D__x86_64__
 KBUILD_AFLAGS += -m64
 LDFLAGS += -m elf_x86_64
 KBUILD_CPPFLAGS += -m64
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 8456397f5f4d..59822dee438a 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -165,7 +165,7 @@ static void __init kmap_init(void)
 kmap_prot = PAGE_KERNEL;
 }
 
-static void init_highmem(void)
+static void __init init_highmem(void)
 {
 pgd_t *pgd;
 pud_t *pud;
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 9657c89fdf31..bd3da8a61f64 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -155,7 +155,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 if (err)
 return err;
 
-n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+n = copy_to_user(buf, fpregs, sizeof(fpregs));
 if(n > 0)
 return -EFAULT;
 
@@ -168,7 +168,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 long fpregs[HOST_FP_SIZE];
 
 BUG_ON(sizeof(*buf) != sizeof(fpregs));
-n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+n = copy_from_user(fpregs, buf, sizeof(fpregs));
 if (n > 0)
 return -EFAULT;
 
@@ -185,7 +185,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 if (err)
 return err;
 
-n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+n = copy_to_user(buf, fpregs, sizeof(fpregs));
 if(n > 0)
 return -EFAULT;
 
@@ -198,7 +198,7 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
 long fpregs[HOST_XFP_SIZE];
 
 BUG_ON(sizeof(*buf) != sizeof(fpregs));
-n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+n = copy_from_user(fpregs, buf, sizeof(fpregs));
 if (n > 0)
 return -EFAULT;
 
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index a3cfeed17af4..b7631b0e9ddc 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -154,7 +154,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 if (err)
 return err;
 
-n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+n = copy_to_user(buf, fpregs, sizeof(fpregs));
 if(n > 0)
 return -EFAULT;
 
@@ -167,7 +167,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
 long fpregs[HOST_FP_SIZE];
 
 BUG_ON(sizeof(*buf) != sizeof(fpregs));
-n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+n = copy_from_user(fpregs, buf, sizeof(fpregs));
 if (n > 0)
 return -EFAULT;
 
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ae7e0161ce46..79b514b381b1 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -435,7 +435,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 error:
 flush_gart();
-gart_unmap_sg(dev, sg, nents, dir);
+gart_unmap_sg(dev, sg, out, dir);
 /* When it was forced or merged try again in a dumb way */
 if (force_iommu || iommu_merge) {
 out = dma_map_sg_nonforce(dev, sg, nents, dir);
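The one-line change above narrows the error-path cleanup to the entries that were actually mapped so far ('out') instead of everything the caller asked for ('nents'). A minimal, stand-alone C sketch of that rule; map_one() and unmap_range() are hypothetical stand-ins, not the GART API:

#include <stdio.h>

static int map_one(int i) { return i == 5 ? -1 : 0; }	/* pretend entry 5 fails */
static void unmap_range(int n) { printf("unmapping %d mapped entries\n", n); }

static int map_sg(int nents)
{
	int out;

	for (out = 0; out < nents; out++)
		if (map_one(out))
			goto error;
	return out;
error:
	unmap_range(out);	/* undo only the 'out' mapped entries, not 'nents' */
	return 0;
}

int main(void) { return map_sg(8) ? 0 : 1; }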
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 54dc05439009..e47a9309eb48 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1443,8 +1443,11 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 cfqq = *async_cfqq;
 }
 
-if (!cfqq)
+if (!cfqq) {
 cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+if (!cfqq)
+return NULL;
+}
 
 /*
 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -2053,7 +2056,7 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 del_timer_sync(&cfqd->idle_slice_timer);
 del_timer_sync(&cfqd->idle_class_timer);
-blk_sync_queue(cfqd->queue);
+kblockd_flush_work(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f84093b97f70..cae0a852619e 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -581,7 +581,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
 {
 int ret;
 
-switch (arg) {
+switch (cmd) {
 case HDIO_GET_UNMASKINTR:
 case HDIO_GET_MULTCOUNT:
 case HDIO_GET_KEEPSETTINGS:
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b01dee3ae7f3..56f2646612e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,7 +39,7 @@
 
 static void blk_unplug_work(struct work_struct *work);
 static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void drive_stat_acct(struct request *rq, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 retval = atomic_dec_and_test(&bqt->refcnt);
 if (retval) {
 BUG_ON(bqt->busy);
-BUG_ON(!list_empty(&bqt->busy_list));
 
 kfree(bqt->tag_index);
 bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 if (init_tag_map(q, tags, depth))
 goto fail;
 
-INIT_LIST_HEAD(&tags->busy_list);
 tags->busy = 0;
 atomic_set(&tags->refcnt, 1);
 return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 */
 q->queue_tags = tags;
 q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+INIT_LIST_HEAD(&q->tag_busy_list);
 return 0;
 fail:
 kfree(tags);
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 bqt->tag_index[tag] = NULL;
 
-/*
-* We use test_and_clear_bit's memory ordering properties here.
-* The tag_map bit acts as a lock for tag_index[bit], so we need
-* a barrer before clearing the bit (precisely: release semantics).
-* Could use clear_bit_unlock when it is merged.
-*/
-if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+if (unlikely(!test_bit(tag, bqt->tag_map))) {
 printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
 __FUNCTION__, tag);
 return;
 }
-
+/*
+* The tag_map bit acts as a lock for tag_index[bit], so we need
+* unlock memory barrier semantics.
+*/
+clear_bit_unlock(tag, bqt->tag_map);
 bqt->busy--;
 }
 
@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 if (tag >= bqt->max_depth)
 return 1;
 
-} while (test_and_set_bit(tag, bqt->tag_map));
+} while (test_and_set_bit_lock(tag, bqt->tag_map));
 /*
-* We rely on test_and_set_bit providing lock memory ordering semantics
-* (could use test_and_set_bit_lock when it is merged).
+* We need lock ordering semantics given by test_and_set_bit_lock.
+* See blk_queue_end_tag for details.
 */
 
 rq->cmd_flags |= REQ_QUEUED;
 rq->tag = tag;
 bqt->tag_index[tag] = rq;
 blkdev_dequeue_request(rq);
-list_add(&rq->queuelist, &bqt->busy_list);
+list_add(&rq->queuelist, &q->tag_busy_list);
 bqt->busy++;
 return 0;
 }
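The two hunks above replace the open-coded barrier comments with the then-new locked bit operations: the tag_map bit is taken with acquire semantics and dropped with release semantics, so stores to tag_index[tag] cannot leak outside the locked region. A rough user-space analogue using C11 atomics in place of test_and_set_bit_lock()/clear_bit_unlock(); names and the 64-tag limit here are illustrative, not the block layer's:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong tag_map;		/* one lock bit per tag, up to 64 tags here */
static void *tag_index[64];		/* each slot is protected by its tag_map bit */

static bool tag_try_lock(unsigned int tag)
{
	unsigned long bit = 1UL << tag;
	/* acquire on success, like test_and_set_bit_lock() */
	return !(atomic_fetch_or_explicit(&tag_map, bit, memory_order_acquire) & bit);
}

static void tag_unlock(unsigned int tag)
{
	/* release, like clear_bit_unlock(); writes to tag_index[tag] stay inside */
	atomic_fetch_and_explicit(&tag_map, ~(1UL << tag), memory_order_release);
}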
@@ -1145,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 **/
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
-struct blk_queue_tag *bqt = q->queue_tags;
 struct list_head *tmp, *n;
 struct request *rq;
 
-list_for_each_safe(tmp, n, &bqt->busy_list) {
+list_for_each_safe(tmp, n, &q->tag_busy_list) {
 rq = list_entry_rq(tmp);
 
 if (rq->tag == -1) {
@@ -1738,6 +1734,7 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 del_timer_sync(&q->unplug_timer);
+kblockd_flush_work(&q->unplug_work);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -2341,7 +2338,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 if (blk_rq_tagged(rq))
 blk_queue_end_tag(q, rq);
 
-drive_stat_acct(rq, rq->nr_sectors, 1);
+drive_stat_acct(rq, 1);
 __elv_add_request(q, rq, where, 0);
 blk_start_queueing(q);
 spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2736,7 +2733,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int new_io)
 {
 int rw = rq_data_dir(rq);
 
@@ -2758,7 +2755,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 */
 static inline void add_request(struct request_queue * q, struct request * req)
 {
-drive_stat_acct(req, req->nr_sectors, 1);
+drive_stat_acct(req, 1);
 
 /*
 * elevator indicated where it wants this request to be
@@ -3015,7 +3012,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 req->biotail = bio;
 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 req->ioprio = ioprio_best(req->ioprio, prio);
-drive_stat_acct(req, nr_sectors, 0);
+drive_stat_acct(req, 0);
 if (!attempt_back_merge(q, req))
 elv_merged_request(q, req, el_ret);
 goto out;
@@ -3042,7 +3039,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 req->sector = req->hard_sector = bio->bi_sector;
 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
 req->ioprio = ioprio_best(req->ioprio, prio);
-drive_stat_acct(req, nr_sectors, 0);
+drive_stat_acct(req, 0);
 if (!attempt_front_merge(q, req))
 elv_merged_request(q, req, el_ret);
 goto out;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3891cdc6bd3d..e512903b8dbb 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4722,6 +4722,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
 * data in this function or read data in ata_sg_clean.
 */
 offset = lsg->offset + lsg->length - qc->pad_len;
+sg_init_table(psg, 1);
 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
 qc->pad_len, offset_in_page(offset));
 
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 5a6fe17fc638..7d704968765f 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1,20 +1,20 @@
 /*
-* Disk Array driver for HP SA 5xxx and 6xxx Controllers
-* Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
+* Disk Array driver for HP Smart Array controllers.
+* (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
+* the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-* NON INFRINGEMENT. See the GNU General Public License for more details.
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+* 02111-1307, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 4aca7ddfdddf..63ee6c076cb3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1,20 +1,20 @@
 /*
-* Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
-* Copyright 2001 Compaq Computer Corporation
+* Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+* (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
+* the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-* NON INFRINGEMENT. See the GNU General Public License for more details.
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+* Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
+* 02111-1307, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index 5e7e06c07d6c..d9c2c586502f 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -1,20 +1,20 @@
 /*
-* Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
-* Copyright 2001 Compaq Computer Corporation
+* Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+* (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
+* the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
-* NON INFRINGEMENT. See the GNU General Public License for more details.
+* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+* Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
+* 02111-1307, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0e937f64a789..20070b7c573d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -41,7 +41,7 @@
 */
 static inline int uncached_access(struct file *file, unsigned long addr)
 {
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 /*
 * On the PPro and successors, the MTRRs are used to set
 * memory types for physical addresses outside main memory,
@@ -57,7 +57,7 @@ static inline int uncached_access(struct file *file, unsigned long addr)
 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
 && addr >= __pa(high_memory);
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__arch_um__)
 /*
 * This is broken because it can generate memory type aliases,
 * which can cause cache corruptions
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 476012b6dfac..48c1775ef5b3 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1843,6 +1843,7 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
 int msglen;
 u16 errcode;
 u16 datahandle;
+u32 data;
 
 if (!card) {
 printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n",
@@ -1860,9 +1861,26 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
 return 0;
 }
 datahandle = nccip->datahandle;
+
+/*
+* Here we copy pointer skb->data into the 32-bit 'Data' field.
+* The 'Data' field is not used in practice in linux kernel
+* (neither in 32 or 64 bit), but should have some value,
+* since a CAPI message trace will display it.
+*
+* The correct value in the 32 bit case is the address of the
+* data, in 64 bit it makes no sense, we use 0 there.
+*/
+
+#ifdef CONFIG_64BIT
+data = 0;
+#else
+data = (unsigned long) skb->data;
+#endif
+
 capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++,
 nccip->ncci, /* adr */
-(u32) skb->data, /* Data */
+data, /* Data */
 skb->len, /* DataLength */
 datahandle, /* DataHandle */
 0 /* Flags */
@@ -2123,7 +2141,10 @@ static int capidrv_delcontr(u16 contr)
 printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr);
 return -1;
 }
-#warning FIXME: maybe a race condition the card should be removed here from global list /kkeil
+
+/* FIXME: maybe a race condition the card should be removed
+* here from global list /kkeil
+*/
 spin_unlock_irqrestore(&global_lock, flags);
 
 del_timer(&card->listentimer);
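The new CONFIG_64BIT guard above exists because a kernel pointer no longer fits the CAPI message's 32-bit Data field on 64-bit builds, so the patch stores 0 there instead of a truncated address. A stand-alone user-space illustration of the same guard; this is hypothetical demonstration code, not the CAPI API:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char payload[16];
	uint32_t data;

	if (sizeof(void *) > sizeof(uint32_t))
		data = 0;			/* 64-bit: the field is informational only */
	else
		data = (uintptr_t)payload;	/* 32-bit: the address still fits */

	printf("Data field: 0x%08x\n", data);
	return 0;
}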
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 926576156578..77a6e4bf503d 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -52,7 +52,7 @@ const struct raid6_calls * const raid6_algos[] = {
 &raid6_intx16,
 &raid6_intx32,
 #endif
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 &raid6_mmxx1,
 &raid6_mmxx2,
 &raid6_sse1x1,
@@ -60,7 +60,7 @@ const struct raid6_calls * const raid6_algos[] = {
 &raid6_sse2x1,
 &raid6_sse2x2,
 #endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && !defined(__arch_um__)
 &raid6_sse2x1,
 &raid6_sse2x2,
 &raid6_sse2x4,
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
index 6181a5a3365a..d4e4a1bd70ad 100644
--- a/drivers/md/raid6mmx.c
+++ b/drivers/md/raid6mmx.c
@@ -16,7 +16,7 @@
 * MMX implementation of RAID-6 syndrome functions
 */
 
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
index f0a1ba8f40ba..0666237276ff 100644
--- a/drivers/md/raid6sse1.c
+++ b/drivers/md/raid6sse1.c
@@ -21,7 +21,7 @@
 * worthwhile as a separate implementation.
 */
 
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
index 0f019762a7c3..b034ad868039 100644
--- a/drivers/md/raid6sse2.c
+++ b/drivers/md/raid6sse2.c
@@ -17,7 +17,7 @@
 *
 */
 
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
 
 #include "raid6.h"
 #include "raid6x86.h"
@@ -161,7 +161,7 @@ const struct raid6_calls raid6_sse2x2 = {
 
 #endif
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__arch_um__)
 
 /*
 * Unrolled-by-4 SSE2 implementation
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
index 9111950414ff..99fea7a70ca7 100644
--- a/drivers/md/raid6x86.h
+++ b/drivers/md/raid6x86.h
@@ -19,7 +19,7 @@
 #ifndef LINUX_RAID_RAID6X86_H
 #define LINUX_RAID_RAID6X86_H
 
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
 
 #ifdef __KERNEL__ /* Real code */
 
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index fc72e1fadb6a..f2070a19cfa7 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
 }
 
 /* Convert back to virtual address */
-host->data_ptr = (u16*)sg_virt(sg);
+host->data_ptr = (u16*)sg_virt(data->sg);
 host->data_cnt = 0;
 
 clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 86b8641b4664..867cb7345b5f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -166,13 +166,14 @@ config NET_SB1000
 If you don't have this card, of course say N.
 
 config IP1000
 tristate "IP1000 Gigabit Ethernet support"
 depends on PCI && EXPERIMENTAL
----help---
-This driver supports IP1000 gigabit Ethernet cards.
+select MII
+---help---
+This driver supports IP1000 gigabit Ethernet cards.
 
 To compile this driver as a module, choose M here: the module
 will be called ipg. This is recommended.
 
 source "drivers/net/arcnet/Kconfig"
 
@@ -1880,6 +1881,30 @@ config FEC2
 Say Y here if you want to use the second built-in 10/100 Fast
 ethernet controller on some Motorola ColdFire processors.
 
+config FEC_MPC52xx
+tristate "MPC52xx FEC driver"
+depends on PPC_MPC52xx
+select PPC_BESTCOMM
+select PPC_BESTCOMM_FEC
+select CRC32
+select PHYLIB
+---help---
+This option enables support for the MPC5200's on-chip
+Fast Ethernet Controller
+If compiled as module, it will be called 'fec_mpc52xx.ko'.
+
+config FEC_MPC52xx_MDIO
+bool "MPC52xx FEC MDIO bus driver"
+depends on FEC_MPC52xx
+default y
+---help---
+The MPC5200's FEC can connect to the Ethernet either with
+an external MII PHY chip or 10 Mbps 7-wire interface
+(Motorola? industry standard).
+If your board uses an external PHY connected to FEC, enable this.
+If not sure, enable.
+If compiled as module, it will be called 'fec_mpc52xx_phy.ko'.
+
 config NE_H8300
 tristate "NE2000 compatible support for H8/300"
 depends on H8300
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 593262065c9b..0e5fde4a1b2c 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -96,6 +96,10 @@ obj-$(CONFIG_SHAPER) += shaper.o
 obj-$(CONFIG_HP100) += hp100.o
 obj-$(CONFIG_SMC9194) += smc9194.o
 obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
 obj-$(CONFIG_68360_ENET) += 68360enet.o
 obj-$(CONFIG_WD80x3) += wd.o 8390.o
 obj-$(CONFIG_EL2) += 3c503.o 8390.o
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d2499bb07c13..473f78de4be0 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -122,7 +122,8 @@ struct e1000_buffer {
 u16 next_to_watch;
 };
 /* RX */
-struct page *page;
+/* arrays of page information for packet split */
+struct e1000_ps_page *ps_pages;
 };
 
 };
@@ -142,8 +143,6 @@ struct e1000_ring {
 /* array of buffer information structs */
 struct e1000_buffer *buffer_info;
 
-/* arrays of page information for packet split */
-struct e1000_ps_page *ps_pages;
 struct sk_buff *rx_skb_top;
 
 struct e1000_queue_stats stats;
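The header change above moves the packet-split page array from one ring-wide allocation into each buffer_info entry; the netdev.c hunks below do the matching per-buffer kcalloc, lookup and teardown. A compressed sketch of that layout change, with struct names abridged from the driver and the types left opaque:

struct e1000_ps_page;			/* page + dma handle in the driver */
struct e1000_buffer;

/* before: one flat, ring-wide array indexed by (i * PS_PAGE_BUFFERS + j) */
struct e1000_ring_before {
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_pages;
};

/* after: each buffer_info entry owns its own PS_PAGE_BUFFERS-sized array */
struct e1000_buffer_after {
	struct e1000_ps_page *ps_pages;	/* kcalloc'd in e1000e_setup_rx_resources() */
};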
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 033e124d1c1f..4fd2e23720b6 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-+ j];
-if (j < adapter->rx_ps_pages) {
-if (!ps_page->page) {
-ps_page->page = alloc_page(GFP_ATOMIC);
-if (!ps_page->page) {
-adapter->alloc_rx_buff_failed++;
-goto no_buffers;
-}
-ps_page->dma = pci_map_page(pdev,
-ps_page->page,
-0, PAGE_SIZE,
-PCI_DMA_FROMDEVICE);
-if (pci_dma_mapping_error(
-ps_page->dma)) {
-dev_err(&adapter->pdev->dev,
-"RX DMA page map failed\n");
-adapter->rx_dma_failed++;
-goto no_buffers;
-}
-}
-/*
-* Refresh the desc even if buffer_addrs
-* didn't change because each write-back
-* erases this info.
-*/
-rx_desc->read.buffer_addr[j+1] =
-cpu_to_le64(ps_page->dma);
-} else {
-rx_desc->read.buffer_addr[j+1] = ~0;
-}
+ps_page = &buffer_info->ps_pages[j];
+if (j >= adapter->rx_ps_pages) {
+/* all unused desc entries get hw null ptr */
+rx_desc->read.buffer_addr[j+1] = ~0;
+continue;
+}
+if (!ps_page->page) {
+ps_page->page = alloc_page(GFP_ATOMIC);
+if (!ps_page->page) {
+adapter->alloc_rx_buff_failed++;
+goto no_buffers;
+}
+ps_page->dma = pci_map_page(pdev,
+ps_page->page,
+0, PAGE_SIZE,
+PCI_DMA_FROMDEVICE);
+if (pci_dma_mapping_error(ps_page->dma)) {
+dev_err(&adapter->pdev->dev,
+"RX DMA page map failed\n");
+adapter->rx_dma_failed++;
+goto no_buffers;
+}
+}
+/*
+* Refresh the desc even if buffer_addrs
+* didn't change because each write-back
+* erases this info.
+*/
+rx_desc->read.buffer_addr[j+1] =
+cpu_to_le64(ps_page->dma);
 }
 
 skb = netdev_alloc_skb(netdev,
@@ -334,94 +333,6 @@ no_buffers:
 }
 
 /**
-* e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
-*
-* @adapter: address of board private structure
-* @cleaned_count: number of buffers to allocate this pass
-**/
-static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
-int cleaned_count)
-{
-struct net_device *netdev = adapter->netdev;
-struct pci_dev *pdev = adapter->pdev;
-struct e1000_ring *rx_ring = adapter->rx_ring;
-struct e1000_rx_desc *rx_desc;
-struct e1000_buffer *buffer_info;
-struct sk_buff *skb;
-unsigned int i;
-unsigned int bufsz = 256 -
-16 /*for skb_reserve */ -
-NET_IP_ALIGN;
-
-i = rx_ring->next_to_use;
-buffer_info = &rx_ring->buffer_info[i];
-
-while (cleaned_count--) {
-skb = buffer_info->skb;
-if (skb) {
-skb_trim(skb, 0);
-goto check_page;
-}
-
-skb = netdev_alloc_skb(netdev, bufsz);
-if (!skb) {
-/* Better luck next round */
-adapter->alloc_rx_buff_failed++;
-break;
-}
-
-/* Make buffer alignment 2 beyond a 16 byte boundary
-* this will result in a 16 byte aligned IP header after
-* the 14 byte MAC header is removed
-*/
-skb_reserve(skb, NET_IP_ALIGN);
-
-buffer_info->skb = skb;
-check_page:
-/* allocate a new page if necessary */
-if (!buffer_info->page) {
-buffer_info->page = alloc_page(GFP_ATOMIC);
-if (!buffer_info->page) {
-adapter->alloc_rx_buff_failed++;
-break;
-}
-}
-
-if (!buffer_info->dma)
-buffer_info->dma = pci_map_page(pdev,
-buffer_info->page, 0,
-PAGE_SIZE,
-PCI_DMA_FROMDEVICE);
-if (pci_dma_mapping_error(buffer_info->dma)) {
-dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
-adapter->rx_dma_failed++;
-break;
-}
-
-rx_desc = E1000_RX_DESC(*rx_ring, i);
-rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
-i++;
-if (i == rx_ring->count)
-i = 0;
-buffer_info = &rx_ring->buffer_info[i];
-}
-
-if (rx_ring->next_to_use != i) {
-rx_ring->next_to_use = i;
-if (i-- == 0)
-i = (rx_ring->count - 1);
-
-/* Force memory writes to complete before letting h/w
-* know there are new descriptors to fetch. (Only
-* applicable for weak-ordered memory model archs,
-* such as IA-64). */
-wmb();
-writel(i, adapter->hw.hw_addr + rx_ring->tail);
-}
-}
-
-/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
@@ -495,10 +406,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 goto next_desc;
 }
 
-/* adjust length to remove Ethernet CRC */
-length -= 4;
-
-/* probably a little skewed due to removing CRC */
 total_rx_bytes += length;
 total_rx_packets++;
 
@@ -554,15 +461,6 @@ next_desc:
 return cleaned;
 }
 
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
-u16 length)
-{
-bi->page = NULL;
-skb->len += length;
-skb->data_len += length;
-skb->truesize += length;
-}
-
 static void e1000_put_txbuf(struct e1000_adapter *adapter,
 struct e1000_buffer *buffer_info)
 {
@@ -699,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 }
 
 /**
-* e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
-* @adapter: board private structure
-*
-* the return value indicates whether actual cleaning was done, there
-* is no guarantee that everything was cleaned
-**/
-static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
-int *work_done, int work_to_do)
-{
-struct net_device *netdev = adapter->netdev;
-struct pci_dev *pdev = adapter->pdev;
-struct e1000_ring *rx_ring = adapter->rx_ring;
-struct e1000_rx_desc *rx_desc, *next_rxd;
-struct e1000_buffer *buffer_info, *next_buffer;
-u32 length;
-unsigned int i;
-int cleaned_count = 0;
-bool cleaned = 0;
-unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
-i = rx_ring->next_to_clean;
-rx_desc = E1000_RX_DESC(*rx_ring, i);
-buffer_info = &rx_ring->buffer_info[i];
-
-while (rx_desc->status & E1000_RXD_STAT_DD) {
-struct sk_buff *skb;
-u8 status;
-
-if (*work_done >= work_to_do)
-break;
-(*work_done)++;
-
-status = rx_desc->status;
-skb = buffer_info->skb;
-buffer_info->skb = NULL;
-
-i++;
-if (i == rx_ring->count)
-i = 0;
-next_rxd = E1000_RX_DESC(*rx_ring, i);
-prefetch(next_rxd);
-
-next_buffer = &rx_ring->buffer_info[i];
-
-cleaned = 1;
-cleaned_count++;
-pci_unmap_page(pdev,
-buffer_info->dma,
-PAGE_SIZE,
-PCI_DMA_FROMDEVICE);
-buffer_info->dma = 0;
-
-length = le16_to_cpu(rx_desc->length);
-
-/* errors is only valid for DD + EOP descriptors */
-if ((status & E1000_RXD_STAT_EOP) &&
-(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
-/* recycle both page and skb */
-buffer_info->skb = skb;
-/* an error means any chain goes out the window too */
-if (rx_ring->rx_skb_top)
-dev_kfree_skb(rx_ring->rx_skb_top);
-rx_ring->rx_skb_top = NULL;
-goto next_desc;
-}
-
-#define rxtop rx_ring->rx_skb_top
-if (!(status & E1000_RXD_STAT_EOP)) {
-/* this descriptor is only the beginning (or middle) */
-if (!rxtop) {
-/* this is the beginning of a chain */
-rxtop = skb;
-skb_fill_page_desc(rxtop, 0, buffer_info->page,
-0, length);
-} else {
-/* this is the middle of a chain */
-skb_fill_page_desc(rxtop,
-skb_shinfo(rxtop)->nr_frags,
-buffer_info->page, 0,
-length);
-/* re-use the skb, only consumed the page */
-buffer_info->skb = skb;
-}
-e1000_consume_page(buffer_info, rxtop, length);
-goto next_desc;
-} else {
-if (rxtop) {
-/* end of the chain */
-skb_fill_page_desc(rxtop,
-skb_shinfo(rxtop)->nr_frags,
-buffer_info->page, 0, length);
-/* re-use the current skb, we only consumed the
-* page */
-buffer_info->skb = skb;
-skb = rxtop;
-rxtop = NULL;
-e1000_consume_page(buffer_info, skb, length);
-} else {
-/* no chain, got EOP, this buf is the packet
-* copybreak to save the put_page/alloc_page */
-if (length <= copybreak &&
-skb_tailroom(skb) >= length) {
-u8 *vaddr;
-vaddr = kmap_atomic(buffer_info->page,
-KM_SKB_DATA_SOFTIRQ);
-memcpy(skb_tail_pointer(skb),
-vaddr, length);
-kunmap_atomic(vaddr,
-KM_SKB_DATA_SOFTIRQ);
-/* re-use the page, so don't erase
-* buffer_info->page */
-skb_put(skb, length);
-} else {
-skb_fill_page_desc(skb, 0,
-buffer_info->page, 0,
-length);
-e1000_consume_page(buffer_info, skb,
-length);
-}
-}
-}
-
-/* Receive Checksum Offload XXX recompute due to CRC strip? */
-e1000_rx_checksum(adapter,
-(u32)(status) |
-((u32)(rx_desc->errors) << 24),
-le16_to_cpu(rx_desc->csum), skb);
-
-pskb_trim(skb, skb->len - 4);
-
-/* probably a little skewed due to removing CRC */
-total_rx_bytes += skb->len;
-total_rx_packets++;
-
-/* eth type trans needs skb->data to point to something */
-if (!pskb_may_pull(skb, ETH_HLEN)) {
-ndev_err(netdev, "__pskb_pull_tail failed.\n");
-dev_kfree_skb(skb);
-goto next_desc;
-}
-
-e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
-
-next_desc:
-rx_desc->status = 0;
-
-/* return some buffers to hardware, one at a time is too slow */
-if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-adapter->alloc_rx_buf(adapter, cleaned_count);
-cleaned_count = 0;
-}
-
-/* use prefetched values */
-rx_desc = next_rxd;
-buffer_info = next_buffer;
-}
-rx_ring->next_to_clean = i;
-
-cleaned_count = e1000_desc_unused(rx_ring);
-if (cleaned_count)
-adapter->alloc_rx_buf(adapter, cleaned_count);
-
-adapter->total_rx_packets += total_rx_packets;
-adapter->total_rx_bytes += total_rx_bytes;
-return cleaned;
-}
-
-/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
@@ -953,7 +683,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 ((length + l1) <= adapter->rx_ps_bsize0)) {
 u8 *vaddr;
 
-ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
+ps_page = &buffer_info->ps_pages[0];
 
 /* there is no documentation about how to call
 * kmap_atomic, so we can't hold the mapping
@@ -965,8 +695,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
 pci_dma_sync_single_for_device(pdev, ps_page->dma,
 PAGE_SIZE, PCI_DMA_FROMDEVICE);
-/* remove the CRC */
-l1 -= 4;
+
 skb_put(skb, l1);
 goto copydone;
 } /* if */
@@ -977,7 +706,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 if (!length)
 break;
 
-ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
+ps_page = &buffer_info->ps_pages[j];
 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 PCI_DMA_FROMDEVICE);
 ps_page->dma = 0;
@@ -988,10 +717,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 skb->truesize += length;
 }
 
-/* strip the ethernet crc, problem is we're using pages now so
-* this whole operation can get a little cpu intensive */
-pskb_trim(skb, skb->len - 4);
-
 copydone:
 total_rx_bytes += skb->len;
 total_rx_packets++;
@@ -1043,7 +768,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 struct e1000_buffer *buffer_info;
 struct e1000_ps_page *ps_page;
 struct pci_dev *pdev = adapter->pdev;
-unsigned long size;
 unsigned int i, j;
 
 /* Free all the Rx ring sk_buffs */
@@ -1054,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 pci_unmap_single(pdev, buffer_info->dma,
 adapter->rx_buffer_len,
 PCI_DMA_FROMDEVICE);
-else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
-pci_unmap_page(pdev, buffer_info->dma,
-PAGE_SIZE, PCI_DMA_FROMDEVICE);
 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
 pci_unmap_single(pdev, buffer_info->dma,
 adapter->rx_ps_bsize0,
@@ -1064,19 +785,13 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 buffer_info->dma = 0;
 }
 
-if (buffer_info->page) {
-put_page(buffer_info->page);
-buffer_info->page = NULL;
-}
-
 if (buffer_info->skb) {
 dev_kfree_skb(buffer_info->skb);
 buffer_info->skb = NULL;
 }
 
 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-+ j];
+ps_page = &buffer_info->ps_pages[j];
 if (!ps_page->page)
 break;
 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +808,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 rx_ring->rx_skb_top = NULL;
 }
 
-size = sizeof(struct e1000_buffer) * rx_ring->count;
-memset(rx_ring->buffer_info, 0, size);
-size = sizeof(struct e1000_ps_page)
-* (rx_ring->count * PS_PAGE_BUFFERS);
-memset(rx_ring->ps_pages, 0, size);
-
 /* Zero out the descriptor ring */
 memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -1421,7 +1130,8 @@ err:
 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 {
 struct e1000_ring *rx_ring = adapter->rx_ring;
-int size, desc_len, err = -ENOMEM;
+struct e1000_buffer *buffer_info;
+int i, size, desc_len, err = -ENOMEM;
 
 size = sizeof(struct e1000_buffer) * rx_ring->count;
 rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1139,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 goto err;
 memset(rx_ring->buffer_info, 0, size);
 
-rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
-sizeof(struct e1000_ps_page),
-GFP_KERNEL);
-if (!rx_ring->ps_pages)
-goto err;
+for (i = 0; i < rx_ring->count; i++) {
+buffer_info = &rx_ring->buffer_info[i];
+buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+sizeof(struct e1000_ps_page),
+GFP_KERNEL);
+if (!buffer_info->ps_pages)
+goto err_pages;
+}
 
 desc_len = sizeof(union e1000_rx_desc_packet_split);
 
@@ -1443,16 +1156,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 
 err = e1000_alloc_ring_dma(adapter, rx_ring);
 if (err)
-goto err;
+goto err_pages;
 
 rx_ring->next_to_clean = 0;
 rx_ring->next_to_use = 0;
 rx_ring->rx_skb_top = NULL;
 
 return 0;
+
+err_pages:
+for (i = 0; i < rx_ring->count; i++) {
+buffer_info = &rx_ring->buffer_info[i];
+kfree(buffer_info->ps_pages);
+}
 err:
 vfree(rx_ring->buffer_info);
-kfree(rx_ring->ps_pages);
 ndev_err(adapter->netdev,
 "Unable to allocate memory for the transmit descriptor ring\n");
 return err;
@@ -1518,15 +1236,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
 {
 struct pci_dev *pdev = adapter->pdev;
 struct e1000_ring *rx_ring = adapter->rx_ring;
+int i;
 
 e1000_clean_rx_ring(adapter);
 
+for (i = 0; i < rx_ring->count; i++) {
+kfree(rx_ring->buffer_info[i].ps_pages);
+}
+
 vfree(rx_ring->buffer_info);
1525 | rx_ring->buffer_info = NULL; | 1248 | rx_ring->buffer_info = NULL; |
1526 | 1249 | ||
1527 | kfree(rx_ring->ps_pages); | ||
1528 | rx_ring->ps_pages = NULL; | ||
1529 | |||
1530 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, | 1250 | dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, |
1531 | rx_ring->dma); | 1251 | rx_ring->dma); |
1532 | rx_ring->desc = NULL; | 1252 | rx_ring->desc = NULL; |
@@ -2032,9 +1752,11 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | |||
2032 | 1752 | ||
2033 | ew32(RFCTL, rfctl); | 1753 | ew32(RFCTL, rfctl); |
2034 | 1754 | ||
2035 | /* disable the stripping of CRC because it breaks | 1755 | /* Enable Packet split descriptors */ |
2036 | * BMC firmware connected over SMBUS */ | 1756 | rctl |= E1000_RCTL_DTYP_PS; |
2037 | rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */; | 1757 | |
1758 | /* Enable hardware CRC frame stripping */ | ||
1759 | rctl |= E1000_RCTL_SECRC; | ||
2038 | 1760 | ||
2039 | psrctl |= adapter->rx_ps_bsize0 >> | 1761 | psrctl |= adapter->rx_ps_bsize0 >> |
2040 | E1000_PSRCTL_BSIZE0_SHIFT; | 1762 | E1000_PSRCTL_BSIZE0_SHIFT; |
@@ -2077,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) | |||
2077 | sizeof(union e1000_rx_desc_packet_split); | 1799 | sizeof(union e1000_rx_desc_packet_split); |
2078 | adapter->clean_rx = e1000_clean_rx_irq_ps; | 1800 | adapter->clean_rx = e1000_clean_rx_irq_ps; |
2079 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; | 1801 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; |
2080 | } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) { | ||
2081 | rdlen = rx_ring->count * | ||
2082 | sizeof(struct e1000_rx_desc); | ||
2083 | adapter->clean_rx = e1000_clean_rx_irq_jumbo; | ||
2084 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo; | ||
2085 | } else { | 1802 | } else { |
2086 | rdlen = rx_ring->count * | 1803 | rdlen = rx_ring->count * |
2087 | sizeof(struct e1000_rx_desc); | 1804 | sizeof(struct e1000_rx_desc); |
@@ -2326,8 +2043,11 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2326 | struct e1000_mac_info *mac = &adapter->hw.mac; | 2043 | struct e1000_mac_info *mac = &adapter->hw.mac; |
2327 | struct e1000_hw *hw = &adapter->hw; | 2044 | struct e1000_hw *hw = &adapter->hw; |
2328 | u32 tx_space, min_tx_space, min_rx_space; | 2045 | u32 tx_space, min_tx_space, min_rx_space; |
2046 | u32 pba; | ||
2329 | u16 hwm; | 2047 | u16 hwm; |
2330 | 2048 | ||
2049 | ew32(PBA, adapter->pba); | ||
2050 | |||
2331 | if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { | 2051 | if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { |
2332 | /* To maintain wire speed transmits, the Tx FIFO should be | 2052 | /* To maintain wire speed transmits, the Tx FIFO should be |
2333 | * large enough to accommodate two full transmit packets, | 2053 | * large enough to accommodate two full transmit packets, |
@@ -2335,11 +2055,11 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2335 | * the Rx FIFO should be large enough to accommodate at least | 2055 | * the Rx FIFO should be large enough to accommodate at least |
2336 | * one full receive packet and is similarly rounded up and | 2056 | * one full receive packet and is similarly rounded up and |
2337 | * expressed in KB. */ | 2057 | * expressed in KB. */ |
2338 | adapter->pba = er32(PBA); | 2058 | pba = er32(PBA); |
2339 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | 2059 | /* upper 16 bits has Tx packet buffer allocation size in KB */ |
2340 | tx_space = adapter->pba >> 16; | 2060 | tx_space = pba >> 16; |
2341 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | 2061 | /* lower 16 bits has Rx packet buffer allocation size in KB */ |
2342 | adapter->pba &= 0xffff; | 2062 | pba &= 0xffff; |
2343 | /* the tx fifo also stores 16 bytes of information about the tx | 2063 | /* the tx fifo also stores 16 bytes of information about the tx |
2344 | * but don't include ethernet FCS because hardware appends it */ | 2064 | * but don't include ethernet FCS because hardware appends it */ |
2345 | min_tx_space = (mac->max_frame_size + | 2065 | min_tx_space = (mac->max_frame_size + |
@@ -2355,20 +2075,21 @@ void e1000e_reset(struct e1000_adapter *adapter) | |||
2355 | /* If current Tx allocation is less than the min Tx FIFO size, | 2075 | /* If current Tx allocation is less than the min Tx FIFO size, |
2356 | * and the min Tx FIFO size is less than the current Rx FIFO | 2076 | * and the min Tx FIFO size is less than the current Rx FIFO |
2357 | * allocation, take space away from current Rx allocation */ | 2077 | * allocation, take space away from current Rx allocation */ |
2358 | if (tx_space < min_tx_space && | 2078 | if ((tx_space < min_tx_space) && |
2359 | ((min_tx_space - tx_space) < adapter->pba)) { | 2079 | ((min_tx_space - tx_space) < pba)) { |
2360 | adapter->pba -= - (min_tx_space - tx_space); | 2080 | pba -= min_tx_space - tx_space; |
2361 | 2081 | ||
2362 | /* if short on rx space, rx wins and must trump tx | 2082 | /* if short on rx space, rx wins and must trump tx |
2363 | * adjustment or use Early Receive if available */ | 2083 | * adjustment or use Early Receive if available */ |
2364 | if ((adapter->pba < min_rx_space) && | 2084 | if ((pba < min_rx_space) && |
2365 | (!(adapter->flags & FLAG_HAS_ERT))) | 2085 | (!(adapter->flags & FLAG_HAS_ERT))) |
2366 | /* ERT enabled in e1000_configure_rx */ | 2086 | /* ERT enabled in e1000_configure_rx */ |
2367 | adapter->pba = min_rx_space; | 2087 | pba = min_rx_space; |
2368 | } | 2088 | } |
2089 | |||
2090 | ew32(PBA, pba); | ||
2369 | } | 2091 | } |
2370 | 2092 | ||
2371 | ew32(PBA, adapter->pba); | ||
2372 | 2093 | ||
2373 | /* flow control settings */ | 2094 | /* flow control settings */ |
2374 | /* The high water mark must be low enough to fit one full frame | 2095 | /* The high water mark must be low enough to fit one full frame |
@@ -3624,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |||
3624 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | 3345 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
3625 | * means we reserve 2 more, this pushes us to allocate from the next | 3346 | * means we reserve 2 more, this pushes us to allocate from the next |
3626 | * larger slab size. | 3347 | * larger slab size. |
3627 | * i.e. RXBUFFER_2048 --> size-4096 slab | 3348 | * i.e. RXBUFFER_2048 --> size-4096 slab */ |
3628 | * however with the new *_jumbo* routines, jumbo receives will use | ||
3629 | * fragmented skbs */ | ||
3630 | 3349 | ||
3631 | if (max_frame <= 256) | 3350 | if (max_frame <= 256) |
3632 | adapter->rx_buffer_len = 256; | 3351 | adapter->rx_buffer_len = 256; |
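The e1000e hunks above replace the single ring-wide ps_pages array with a small per-buffer array allocated with kcalloc() and freed buffer by buffer, drop the separate jumbo receive path, and let the hardware strip the Ethernet CRC (E1000_RCTL_SECRC) instead of trimming it with pskb_trim() in software. Below is a minimal sketch of the allocate-then-unwind shape used by the new setup path; the structure and symbol names (demo_*, PS_PAGES_PER_BUF) are simplified stand-ins, not the driver's own.

/*
 * Illustrative only: per-buffer page-array allocation with unwinding on
 * failure, mirroring the shape of e1000e_setup_rx_resources() above.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#define PS_PAGES_PER_BUF 3

struct demo_ps_page {
	struct page *page;
	dma_addr_t dma;
};

struct demo_buffer {
	struct demo_ps_page *ps_pages;
};

static int demo_setup_rx_buffers(struct demo_buffer **out, int count)
{
	struct demo_buffer *bufs;
	int i;

	bufs = vmalloc(count * sizeof(*bufs));
	if (!bufs)
		return -ENOMEM;
	memset(bufs, 0, count * sizeof(*bufs));

	for (i = 0; i < count; i++) {
		bufs[i].ps_pages = kcalloc(PS_PAGES_PER_BUF,
					   sizeof(struct demo_ps_page),
					   GFP_KERNEL);
		if (!bufs[i].ps_pages)
			goto err_pages;
	}

	*out = bufs;
	return 0;

err_pages:
	/* kfree(NULL) is a no-op, so walking the whole array is safe */
	for (i = 0; i < count; i++)
		kfree(bufs[i].ps_pages);
	vfree(bufs);
	return -ENOMEM;
}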
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 4b4b74e47a67..f78e5bf7cb33 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0079" | 43 | #define DRV_VERSION "EHEA_0080" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 0a7e78925540..f0319f1e8e05 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -33,6 +33,9 @@ | |||
33 | #include <linux/if.h> | 33 | #include <linux/if.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <linux/if_ether.h> | 35 | #include <linux/if_ether.h> |
36 | #include <linux/notifier.h> | ||
37 | #include <linux/reboot.h> | ||
38 | |||
36 | #include <net/ip.h> | 39 | #include <net/ip.h> |
37 | 40 | ||
38 | #include "ehea.h" | 41 | #include "ehea.h" |
@@ -3295,6 +3298,20 @@ static int __devexit ehea_remove(struct of_device *dev) | |||
3295 | return 0; | 3298 | return 0; |
3296 | } | 3299 | } |
3297 | 3300 | ||
3301 | static int ehea_reboot_notifier(struct notifier_block *nb, | ||
3302 | unsigned long action, void *unused) | ||
3303 | { | ||
3304 | if (action == SYS_RESTART) { | ||
3305 | ehea_info("Reboot: freeing all eHEA resources"); | ||
3306 | ibmebus_unregister_driver(&ehea_driver); | ||
3307 | } | ||
3308 | return NOTIFY_DONE; | ||
3309 | } | ||
3310 | |||
3311 | static struct notifier_block ehea_reboot_nb = { | ||
3312 | .notifier_call = ehea_reboot_notifier, | ||
3313 | }; | ||
3314 | |||
3298 | static int check_module_parm(void) | 3315 | static int check_module_parm(void) |
3299 | { | 3316 | { |
3300 | int ret = 0; | 3317 | int ret = 0; |
@@ -3351,6 +3368,8 @@ int __init ehea_module_init(void) | |||
3351 | if (ret) | 3368 | if (ret) |
3352 | goto out; | 3369 | goto out; |
3353 | 3370 | ||
3371 | register_reboot_notifier(&ehea_reboot_nb); | ||
3372 | |||
3354 | ret = ibmebus_register_driver(&ehea_driver); | 3373 | ret = ibmebus_register_driver(&ehea_driver); |
3355 | if (ret) { | 3374 | if (ret) { |
3356 | ehea_error("failed registering eHEA device driver on ebus"); | 3375 | ehea_error("failed registering eHEA device driver on ebus"); |
@@ -3362,6 +3381,7 @@ int __init ehea_module_init(void) | |||
3362 | if (ret) { | 3381 | if (ret) { |
3363 | ehea_error("failed to register capabilities attribute, ret=%d", | 3382 | ehea_error("failed to register capabilities attribute, ret=%d", |
3364 | ret); | 3383 | ret); |
3384 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3365 | ibmebus_unregister_driver(&ehea_driver); | 3385 | ibmebus_unregister_driver(&ehea_driver); |
3366 | goto out; | 3386 | goto out; |
3367 | } | 3387 | } |
@@ -3375,6 +3395,7 @@ static void __exit ehea_module_exit(void) | |||
3375 | flush_scheduled_work(); | 3395 | flush_scheduled_work(); |
3376 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); | 3396 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); |
3377 | ibmebus_unregister_driver(&ehea_driver); | 3397 | ibmebus_unregister_driver(&ehea_driver); |
3398 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3378 | ehea_destroy_busmap(); | 3399 | ehea_destroy_busmap(); |
3379 | } | 3400 | } |
3380 | 3401 | ||
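The ehea change above registers a reboot notifier so that resources held by firmware are released before the machine restarts. Below is a minimal, self-contained sketch of the same notifier pattern in a generic module; the demo_* names and the cleanup hook are hypothetical placeholders, not ehea code.

/* Minimal sketch of the reboot-notifier pattern used above. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static void demo_release_resources(void)
{
	/* placeholder for driver-specific teardown */
}

static int demo_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	/* only tear down when the system is actually restarting */
	if (action == SYS_RESTART)
		demo_release_resources();
	return NOTIFY_DONE;
}

static struct notifier_block demo_reboot_nb = {
	.notifier_call = demo_reboot_notifier,
};

static int __init demo_init(void)
{
	return register_reboot_notifier(&demo_reboot_nb);
}

static void __exit demo_exit(void)
{
	unregister_reboot_notifier(&demo_reboot_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");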
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c new file mode 100644 index 000000000000..fc1cf0b742b0 --- /dev/null +++ b/drivers/net/fec_mpc52xx.c | |||
@@ -0,0 +1,1112 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller | ||
3 | * | ||
4 | * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and | ||
5 | * now maintained by Sylvain Munaut <tnt@246tNt.com> | ||
6 | * | ||
7 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
8 | * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com> | ||
9 | * Copyright (C) 2003-2004 MontaVista, Software, Inc. | ||
10 | * | ||
11 | * This file is licensed under the terms of the GNU General Public License | ||
12 | * version 2. This program is licensed "as is" without any warranty of any | ||
13 | * kind, whether express or implied. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/crc32.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/of_device.h> | ||
28 | #include <linux/of_platform.h> | ||
29 | |||
30 | #include <linux/netdevice.h> | ||
31 | #include <linux/etherdevice.h> | ||
32 | #include <linux/ethtool.h> | ||
33 | #include <linux/skbuff.h> | ||
34 | |||
35 | #include <asm/io.h> | ||
36 | #include <asm/delay.h> | ||
37 | #include <asm/mpc52xx.h> | ||
38 | |||
39 | #include <sysdev/bestcomm/bestcomm.h> | ||
40 | #include <sysdev/bestcomm/fec.h> | ||
41 | |||
42 | #include "fec_mpc52xx.h" | ||
43 | |||
44 | #define DRIVER_NAME "mpc52xx-fec" | ||
45 | |||
46 | static irqreturn_t mpc52xx_fec_interrupt(int, void *); | ||
47 | static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *); | ||
48 | static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *); | ||
49 | static void mpc52xx_fec_stop(struct net_device *dev); | ||
50 | static void mpc52xx_fec_start(struct net_device *dev); | ||
51 | static void mpc52xx_fec_reset(struct net_device *dev); | ||
52 | |||
53 | static u8 mpc52xx_fec_mac_addr[6]; | ||
54 | module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0); | ||
55 | MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe"); | ||
56 | |||
57 | #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \ | ||
58 | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN ) | ||
59 | static int debug = -1; /* the above default */ | ||
60 | module_param(debug, int, 0); | ||
61 | MODULE_PARM_DESC(debug, "debugging messages level"); | ||
62 | |||
63 | static void mpc52xx_fec_tx_timeout(struct net_device *dev) | ||
64 | { | ||
65 | dev_warn(&dev->dev, "transmit timed out\n"); | ||
66 | |||
67 | mpc52xx_fec_reset(dev); | ||
68 | |||
69 | dev->stats.tx_errors++; | ||
70 | |||
71 | netif_wake_queue(dev); | ||
72 | } | ||
73 | |||
74 | static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac) | ||
75 | { | ||
76 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
77 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
78 | |||
79 | out_be32(&fec->paddr1, *(u32 *)(&mac[0])); | ||
80 | out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE); | ||
81 | } | ||
82 | |||
83 | static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac) | ||
84 | { | ||
85 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
86 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
87 | |||
88 | *(u32 *)(&mac[0]) = in_be32(&fec->paddr1); | ||
89 | *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16; | ||
90 | } | ||
91 | |||
92 | static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr) | ||
93 | { | ||
94 | struct sockaddr *sock = addr; | ||
95 | |||
96 | memcpy(dev->dev_addr, sock->sa_data, dev->addr_len); | ||
97 | |||
98 | mpc52xx_fec_set_paddr(dev, sock->sa_data); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s) | ||
103 | { | ||
104 | while (!bcom_queue_empty(s)) { | ||
105 | struct bcom_fec_bd *bd; | ||
106 | struct sk_buff *skb; | ||
107 | |||
108 | skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd); | ||
109 | dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_FROM_DEVICE); | ||
110 | kfree_skb(skb); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk) | ||
115 | { | ||
116 | while (!bcom_queue_full(rxtsk)) { | ||
117 | struct sk_buff *skb; | ||
118 | struct bcom_fec_bd *bd; | ||
119 | |||
120 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
121 | if (skb == NULL) | ||
122 | return -EAGAIN; | ||
123 | |||
124 | /* zero out the initial receive buffers to aid debugging */ | ||
125 | memset(skb->data, 0, FEC_RX_BUFFER_SIZE); | ||
126 | |||
127 | bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk); | ||
128 | |||
129 | bd->status = FEC_RX_BUFFER_SIZE; | ||
130 | bd->skb_pa = dma_map_single(&dev->dev, skb->data, | ||
131 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
132 | |||
133 | bcom_submit_next_buffer(rxtsk, skb); | ||
134 | } | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | /* based on generic_adjust_link from fs_enet-main.c */ | ||
140 | static void mpc52xx_fec_adjust_link(struct net_device *dev) | ||
141 | { | ||
142 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
143 | struct phy_device *phydev = priv->phydev; | ||
144 | int new_state = 0; | ||
145 | |||
146 | if (phydev->link != PHY_DOWN) { | ||
147 | if (phydev->duplex != priv->duplex) { | ||
148 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
149 | u32 rcntrl; | ||
150 | u32 tcntrl; | ||
151 | |||
152 | new_state = 1; | ||
153 | priv->duplex = phydev->duplex; | ||
154 | |||
155 | rcntrl = in_be32(&fec->r_cntrl); | ||
156 | tcntrl = in_be32(&fec->x_cntrl); | ||
157 | |||
158 | rcntrl &= ~FEC_RCNTRL_DRT; | ||
159 | tcntrl &= ~FEC_TCNTRL_FDEN; | ||
160 | if (phydev->duplex == DUPLEX_FULL) | ||
161 | tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */ | ||
162 | else | ||
163 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
164 | |||
165 | out_be32(&fec->r_cntrl, rcntrl); | ||
166 | out_be32(&fec->x_cntrl, tcntrl); | ||
167 | } | ||
168 | |||
169 | if (phydev->speed != priv->speed) { | ||
170 | new_state = 1; | ||
171 | priv->speed = phydev->speed; | ||
172 | } | ||
173 | |||
174 | if (priv->link == PHY_DOWN) { | ||
175 | new_state = 1; | ||
176 | priv->link = phydev->link; | ||
177 | netif_schedule(dev); | ||
178 | netif_carrier_on(dev); | ||
179 | netif_start_queue(dev); | ||
180 | } | ||
181 | |||
182 | } else if (priv->link) { | ||
183 | new_state = 1; | ||
184 | priv->link = PHY_DOWN; | ||
185 | priv->speed = 0; | ||
186 | priv->duplex = -1; | ||
187 | netif_stop_queue(dev); | ||
188 | netif_carrier_off(dev); | ||
189 | } | ||
190 | |||
191 | if (new_state && netif_msg_link(priv)) | ||
192 | phy_print_status(phydev); | ||
193 | } | ||
194 | |||
195 | static int mpc52xx_fec_init_phy(struct net_device *dev) | ||
196 | { | ||
197 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
198 | struct phy_device *phydev; | ||
199 | char phy_id[BUS_ID_SIZE]; | ||
200 | |||
201 | snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, | ||
202 | (unsigned int)dev->base_addr, priv->phy_addr); | ||
203 | |||
204 | priv->link = PHY_DOWN; | ||
205 | priv->speed = 0; | ||
206 | priv->duplex = -1; | ||
207 | |||
208 | phydev = phy_connect(dev, phy_id, &mpc52xx_fec_adjust_link, 0, PHY_INTERFACE_MODE_MII); | ||
209 | if (IS_ERR(phydev)) { | ||
210 | dev_err(&dev->dev, "phy_connect failed\n"); | ||
211 | return PTR_ERR(phydev); | ||
212 | } | ||
213 | dev_info(&dev->dev, "attached phy %i to driver %s\n", | ||
214 | phydev->addr, phydev->drv->name); | ||
215 | |||
216 | priv->phydev = phydev; | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | static int mpc52xx_fec_phy_start(struct net_device *dev) | ||
222 | { | ||
223 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
224 | int err; | ||
225 | |||
226 | if (!priv->has_phy) | ||
227 | return 0; | ||
228 | |||
229 | err = mpc52xx_fec_init_phy(dev); | ||
230 | if (err) { | ||
231 | dev_err(&dev->dev, "mpc52xx_fec_init_phy failed\n"); | ||
232 | return err; | ||
233 | } | ||
234 | |||
235 | /* reset phy - this also wakes it from PDOWN */ | ||
236 | phy_write(priv->phydev, MII_BMCR, BMCR_RESET); | ||
237 | phy_start(priv->phydev); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static void mpc52xx_fec_phy_stop(struct net_device *dev) | ||
243 | { | ||
244 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
245 | |||
246 | if (!priv->has_phy) | ||
247 | return; | ||
248 | |||
249 | phy_disconnect(priv->phydev); | ||
250 | /* power down phy */ | ||
251 | phy_stop(priv->phydev); | ||
252 | phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN); | ||
253 | } | ||
254 | |||
255 | static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv, | ||
256 | struct mii_ioctl_data *mii_data, int cmd) | ||
257 | { | ||
258 | if (!priv->has_phy) | ||
259 | return -ENOTSUPP; | ||
260 | |||
261 | return phy_mii_ioctl(priv->phydev, mii_data, cmd); | ||
262 | } | ||
263 | |||
264 | static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv) | ||
265 | { | ||
266 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
267 | |||
268 | if (!priv->has_phy) | ||
269 | return; | ||
270 | |||
271 | out_be32(&fec->mii_speed, priv->phy_speed); | ||
272 | } | ||
273 | |||
274 | static int mpc52xx_fec_open(struct net_device *dev) | ||
275 | { | ||
276 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
277 | int err = -EBUSY; | ||
278 | |||
279 | if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED, | ||
280 | DRIVER_NAME "_ctrl", dev)) { | ||
281 | dev_err(&dev->dev, "ctrl interrupt request failed\n"); | ||
282 | goto out; | ||
283 | } | ||
284 | if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0, | ||
285 | DRIVER_NAME "_rx", dev)) { | ||
286 | dev_err(&dev->dev, "rx interrupt request failed\n"); | ||
287 | goto free_ctrl_irq; | ||
288 | } | ||
289 | if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0, | ||
290 | DRIVER_NAME "_tx", dev)) { | ||
291 | dev_err(&dev->dev, "tx interrupt request failed\n"); | ||
292 | goto free_2irqs; | ||
293 | } | ||
294 | |||
295 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
296 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
297 | |||
298 | err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
299 | if (err) { | ||
300 | dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n"); | ||
301 | goto free_irqs; | ||
302 | } | ||
303 | |||
304 | err = mpc52xx_fec_phy_start(dev); | ||
305 | if (err) | ||
306 | goto free_skbs; | ||
307 | |||
308 | bcom_enable(priv->rx_dmatsk); | ||
309 | bcom_enable(priv->tx_dmatsk); | ||
310 | |||
311 | mpc52xx_fec_start(dev); | ||
312 | |||
313 | netif_start_queue(dev); | ||
314 | |||
315 | return 0; | ||
316 | |||
317 | free_skbs: | ||
318 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
319 | |||
320 | free_irqs: | ||
321 | free_irq(priv->t_irq, dev); | ||
322 | free_2irqs: | ||
323 | free_irq(priv->r_irq, dev); | ||
324 | free_ctrl_irq: | ||
325 | free_irq(dev->irq, dev); | ||
326 | out: | ||
327 | |||
328 | return err; | ||
329 | } | ||
330 | |||
331 | static int mpc52xx_fec_close(struct net_device *dev) | ||
332 | { | ||
333 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
334 | |||
335 | netif_stop_queue(dev); | ||
336 | |||
337 | mpc52xx_fec_stop(dev); | ||
338 | |||
339 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
340 | |||
341 | free_irq(dev->irq, dev); | ||
342 | free_irq(priv->r_irq, dev); | ||
343 | free_irq(priv->t_irq, dev); | ||
344 | |||
345 | mpc52xx_fec_phy_stop(dev); | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | /* This will only be invoked if your driver is _not_ in XOFF state. | ||
351 | * What this means is that you need not check it, and that this | ||
352 | * invariant will hold if you make sure that the netif_*_queue() | ||
353 | * calls are done at the proper times. | ||
354 | */ | ||
355 | static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
356 | { | ||
357 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
358 | struct bcom_fec_bd *bd; | ||
359 | |||
360 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
361 | if (net_ratelimit()) | ||
362 | dev_err(&dev->dev, "transmit queue overrun\n"); | ||
363 | return 1; | ||
364 | } | ||
365 | |||
366 | spin_lock_irq(&priv->lock); | ||
367 | dev->trans_start = jiffies; | ||
368 | |||
369 | bd = (struct bcom_fec_bd *) | ||
370 | bcom_prepare_next_buffer(priv->tx_dmatsk); | ||
371 | |||
372 | bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC; | ||
373 | bd->skb_pa = dma_map_single(&dev->dev, skb->data, skb->len, DMA_TO_DEVICE); | ||
374 | |||
375 | bcom_submit_next_buffer(priv->tx_dmatsk, skb); | ||
376 | |||
377 | if (bcom_queue_full(priv->tx_dmatsk)) { | ||
378 | netif_stop_queue(dev); | ||
379 | } | ||
380 | |||
381 | spin_unlock_irq(&priv->lock); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | /* This handles BestComm transmit task interrupts | ||
387 | */ | ||
388 | static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | ||
389 | { | ||
390 | struct net_device *dev = dev_id; | ||
391 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
392 | |||
393 | spin_lock(&priv->lock); | ||
394 | |||
395 | while (bcom_buffer_done(priv->tx_dmatsk)) { | ||
396 | struct sk_buff *skb; | ||
397 | struct bcom_fec_bd *bd; | ||
398 | skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL, | ||
399 | (struct bcom_bd **)&bd); | ||
400 | dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_TO_DEVICE); | ||
401 | |||
402 | dev_kfree_skb_irq(skb); | ||
403 | } | ||
404 | |||
405 | netif_wake_queue(dev); | ||
406 | |||
407 | spin_unlock(&priv->lock); | ||
408 | |||
409 | return IRQ_HANDLED; | ||
410 | } | ||
411 | |||
412 | static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) | ||
413 | { | ||
414 | struct net_device *dev = dev_id; | ||
415 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
416 | |||
417 | while (bcom_buffer_done(priv->rx_dmatsk)) { | ||
418 | struct sk_buff *skb; | ||
419 | struct sk_buff *rskb; | ||
420 | struct bcom_fec_bd *bd; | ||
421 | u32 status; | ||
422 | |||
423 | rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status, | ||
424 | (struct bcom_bd **)&bd); | ||
425 | dma_unmap_single(&dev->dev, bd->skb_pa, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
426 | |||
427 | /* Test for errors in received frame */ | ||
428 | if (status & BCOM_FEC_RX_BD_ERRORS) { | ||
429 | /* Drop packet and reuse the buffer */ | ||
430 | bd = (struct bcom_fec_bd *) | ||
431 | bcom_prepare_next_buffer(priv->rx_dmatsk); | ||
432 | |||
433 | bd->status = FEC_RX_BUFFER_SIZE; | ||
434 | bd->skb_pa = dma_map_single(&dev->dev, rskb->data, | ||
435 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
436 | |||
437 | bcom_submit_next_buffer(priv->rx_dmatsk, rskb); | ||
438 | |||
439 | dev->stats.rx_dropped++; | ||
440 | |||
441 | continue; | ||
442 | } | ||
443 | |||
444 | /* skbs are allocated on open, so now we allocate a new one, | ||
445 | * and remove the old (with the packet) */ | ||
446 | skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE); | ||
447 | if (skb) { | ||
448 | /* Process the received skb */ | ||
449 | int length = status & BCOM_FEC_RX_BD_LEN_MASK; | ||
450 | |||
451 | skb_put(rskb, length - 4); /* length without CRC32 */ | ||
452 | |||
453 | rskb->dev = dev; | ||
454 | rskb->protocol = eth_type_trans(rskb, dev); | ||
455 | |||
456 | netif_rx(rskb); | ||
457 | dev->last_rx = jiffies; | ||
458 | } else { | ||
459 | /* Can't get a new one : reuse the same & drop pkt */ | ||
460 | dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n"); | ||
461 | dev->stats.rx_dropped++; | ||
462 | |||
463 | skb = rskb; | ||
464 | } | ||
465 | |||
466 | bd = (struct bcom_fec_bd *) | ||
467 | bcom_prepare_next_buffer(priv->rx_dmatsk); | ||
468 | |||
469 | bd->status = FEC_RX_BUFFER_SIZE; | ||
470 | bd->skb_pa = dma_map_single(&dev->dev, rskb->data, | ||
471 | FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE); | ||
472 | |||
473 | bcom_submit_next_buffer(priv->rx_dmatsk, skb); | ||
474 | } | ||
475 | |||
476 | return IRQ_HANDLED; | ||
477 | } | ||
478 | |||
479 | static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id) | ||
480 | { | ||
481 | struct net_device *dev = dev_id; | ||
482 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
483 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
484 | u32 ievent; | ||
485 | |||
486 | ievent = in_be32(&fec->ievent); | ||
487 | |||
488 | ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */ | ||
489 | if (!ievent) | ||
490 | return IRQ_NONE; | ||
491 | |||
492 | out_be32(&fec->ievent, ievent); /* clear pending events */ | ||
493 | |||
494 | if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { | ||
495 | if (ievent & ~FEC_IEVENT_TFINT) | ||
496 | dev_dbg(&dev->dev, "ievent: %08x\n", ievent); | ||
497 | return IRQ_HANDLED; | ||
498 | } | ||
499 | |||
500 | if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) | ||
501 | dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); | ||
502 | if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) | ||
503 | dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); | ||
504 | |||
505 | mpc52xx_fec_reset(dev); | ||
506 | |||
507 | netif_wake_queue(dev); | ||
508 | return IRQ_HANDLED; | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * Get the current statistics. | ||
513 | * This may be called with the card open or closed. | ||
514 | */ | ||
515 | static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev) | ||
516 | { | ||
517 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
518 | struct net_device_stats *stats = &dev->stats; | ||
519 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
520 | |||
521 | stats->rx_bytes = in_be32(&fec->rmon_r_octets); | ||
522 | stats->rx_packets = in_be32(&fec->rmon_r_packets); | ||
523 | stats->rx_errors = in_be32(&fec->rmon_r_crc_align) + | ||
524 | in_be32(&fec->rmon_r_undersize) + | ||
525 | in_be32(&fec->rmon_r_oversize) + | ||
526 | in_be32(&fec->rmon_r_frag) + | ||
527 | in_be32(&fec->rmon_r_jab); | ||
528 | |||
529 | stats->tx_bytes = in_be32(&fec->rmon_t_octets); | ||
530 | stats->tx_packets = in_be32(&fec->rmon_t_packets); | ||
531 | stats->tx_errors = in_be32(&fec->rmon_t_crc_align) + | ||
532 | in_be32(&fec->rmon_t_undersize) + | ||
533 | in_be32(&fec->rmon_t_oversize) + | ||
534 | in_be32(&fec->rmon_t_frag) + | ||
535 | in_be32(&fec->rmon_t_jab); | ||
536 | |||
537 | stats->multicast = in_be32(&fec->rmon_r_mc_pkt); | ||
538 | stats->collisions = in_be32(&fec->rmon_t_col); | ||
539 | |||
540 | /* detailed rx_errors: */ | ||
541 | stats->rx_length_errors = in_be32(&fec->rmon_r_undersize) | ||
542 | + in_be32(&fec->rmon_r_oversize) | ||
543 | + in_be32(&fec->rmon_r_frag) | ||
544 | + in_be32(&fec->rmon_r_jab); | ||
545 | stats->rx_over_errors = in_be32(&fec->r_macerr); | ||
546 | stats->rx_crc_errors = in_be32(&fec->ieee_r_crc); | ||
547 | stats->rx_frame_errors = in_be32(&fec->ieee_r_align); | ||
548 | stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop); | ||
549 | stats->rx_missed_errors = in_be32(&fec->rmon_r_drop); | ||
550 | |||
551 | /* detailed tx_errors: */ | ||
552 | stats->tx_aborted_errors = 0; | ||
553 | stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr); | ||
554 | stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop); | ||
555 | stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe); | ||
556 | stats->tx_window_errors = in_be32(&fec->ieee_t_lcol); | ||
557 | |||
558 | return stats; | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Read MIB counters in order to reset them, | ||
563 | * then zero all the stats fields in memory | ||
564 | */ | ||
565 | static void mpc52xx_fec_reset_stats(struct net_device *dev) | ||
566 | { | ||
567 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
568 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
569 | |||
570 | out_be32(&fec->mib_control, FEC_MIB_DISABLE); | ||
571 | memset_io(&fec->rmon_t_drop, 0, (__force u32)&fec->reserved10 - | ||
572 | (__force u32)&fec->rmon_t_drop); | ||
573 | out_be32(&fec->mib_control, 0); | ||
574 | |||
575 | memset(&dev->stats, 0, sizeof(dev->stats)); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Set or clear the multicast filter for this adaptor. | ||
580 | */ | ||
581 | static void mpc52xx_fec_set_multicast_list(struct net_device *dev) | ||
582 | { | ||
583 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
584 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
585 | u32 rx_control; | ||
586 | |||
587 | rx_control = in_be32(&fec->r_cntrl); | ||
588 | |||
589 | if (dev->flags & IFF_PROMISC) { | ||
590 | rx_control |= FEC_RCNTRL_PROM; | ||
591 | out_be32(&fec->r_cntrl, rx_control); | ||
592 | } else { | ||
593 | rx_control &= ~FEC_RCNTRL_PROM; | ||
594 | out_be32(&fec->r_cntrl, rx_control); | ||
595 | |||
596 | if (dev->flags & IFF_ALLMULTI) { | ||
597 | out_be32(&fec->gaddr1, 0xffffffff); | ||
598 | out_be32(&fec->gaddr2, 0xffffffff); | ||
599 | } else { | ||
600 | u32 crc; | ||
601 | int i; | ||
602 | struct dev_mc_list *dmi; | ||
603 | u32 gaddr1 = 0x00000000; | ||
604 | u32 gaddr2 = 0x00000000; | ||
605 | |||
606 | dmi = dev->mc_list; | ||
607 | for (i=0; i<dev->mc_count; i++) { | ||
608 | crc = ether_crc_le(6, dmi->dmi_addr) >> 26; | ||
609 | if (crc >= 32) | ||
610 | gaddr1 |= 1 << (crc-32); | ||
611 | else | ||
612 | gaddr2 |= 1 << crc; | ||
613 | dmi = dmi->next; | ||
614 | } | ||
615 | out_be32(&fec->gaddr1, gaddr1); | ||
616 | out_be32(&fec->gaddr2, gaddr2); | ||
617 | } | ||
618 | } | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * mpc52xx_fec_hw_init | ||
623 | * @dev: network device | ||
624 | * | ||
625 | * Set up various hardware settings, only needed once on start | ||
626 | */ | ||
627 | static void mpc52xx_fec_hw_init(struct net_device *dev) | ||
628 | { | ||
629 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
630 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
631 | int i; | ||
632 | |||
633 | /* Whack a reset. We should wait for this. */ | ||
634 | out_be32(&fec->ecntrl, FEC_ECNTRL_RESET); | ||
635 | for (i = 0; i < FEC_RESET_DELAY; ++i) { | ||
636 | if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0) | ||
637 | break; | ||
638 | udelay(1); | ||
639 | } | ||
640 | if (i == FEC_RESET_DELAY) | ||
641 | dev_err(&dev->dev, "FEC Reset timeout!\n"); | ||
642 | |||
643 | /* set pause to 0x20 frames */ | ||
644 | out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20); | ||
645 | |||
646 | /* high service request will be deasserted when there's < 7 bytes in fifo | ||
647 | * low service request will be deasserted when there's < 4*7 bytes in fifo | ||
648 | */ | ||
649 | out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
650 | out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7); | ||
651 | |||
652 | /* alarm when <= x bytes in FIFO */ | ||
653 | out_be32(&fec->rfifo_alarm, 0x0000030c); | ||
654 | out_be32(&fec->tfifo_alarm, 0x00000100); | ||
655 | |||
656 | /* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */ | ||
657 | out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B); | ||
658 | |||
659 | /* enable crc generation */ | ||
660 | out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC); | ||
661 | out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */ | ||
662 | out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */ | ||
663 | |||
664 | /* set phy speed. | ||
665 | * this can't be done in phy driver, since it needs to be called | ||
666 | * before fec stuff (even on resume) */ | ||
667 | mpc52xx_fec_phy_hw_init(priv); | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * mpc52xx_fec_start | ||
672 | * @dev: network device | ||
673 | * | ||
674 | * This function is called to start or restart the FEC during a link | ||
675 | * change. This happens on fifo errors or when switching between half | ||
676 | * and full duplex. | ||
677 | */ | ||
678 | static void mpc52xx_fec_start(struct net_device *dev) | ||
679 | { | ||
680 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
681 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
682 | u32 rcntrl; | ||
683 | u32 tcntrl; | ||
684 | u32 tmp; | ||
685 | |||
686 | /* clear sticky error bits */ | ||
687 | tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF; | ||
688 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp); | ||
689 | out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp); | ||
690 | |||
691 | /* FIFOs will reset on mpc52xx_fec_enable */ | ||
692 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET); | ||
693 | |||
694 | /* Set station address. */ | ||
695 | mpc52xx_fec_set_paddr(dev, dev->dev_addr); | ||
696 | |||
697 | mpc52xx_fec_set_multicast_list(dev); | ||
698 | |||
699 | /* set max frame len, enable flow control, select mii mode */ | ||
700 | rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */ | ||
701 | rcntrl |= FEC_RCNTRL_FCE; | ||
702 | |||
703 | if (priv->has_phy) | ||
704 | rcntrl |= FEC_RCNTRL_MII_MODE; | ||
705 | |||
706 | if (priv->duplex == DUPLEX_FULL) | ||
707 | tcntrl = FEC_TCNTRL_FDEN; /* FD enable */ | ||
708 | else { | ||
709 | rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */ | ||
710 | tcntrl = 0; | ||
711 | } | ||
712 | out_be32(&fec->r_cntrl, rcntrl); | ||
713 | out_be32(&fec->x_cntrl, tcntrl); | ||
714 | |||
715 | /* Clear any outstanding interrupt. */ | ||
716 | out_be32(&fec->ievent, 0xffffffff); | ||
717 | |||
718 | /* Enable interrupts we wish to service. */ | ||
719 | out_be32(&fec->imask, FEC_IMASK_ENABLE); | ||
720 | |||
721 | /* And last, enable the transmit and receive processing. */ | ||
722 | out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN); | ||
723 | out_be32(&fec->r_des_active, 0x01000000); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * mpc52xx_fec_stop | ||
728 | * @dev: network device | ||
729 | * | ||
730 | * stop all activity on fec and empty dma buffers | ||
731 | */ | ||
732 | static void mpc52xx_fec_stop(struct net_device *dev) | ||
733 | { | ||
734 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
735 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
736 | unsigned long timeout; | ||
737 | |||
738 | /* disable all interrupts */ | ||
739 | out_be32(&fec->imask, 0); | ||
740 | |||
741 | /* Disable the rx task. */ | ||
742 | bcom_disable(priv->rx_dmatsk); | ||
743 | |||
744 | /* Wait for tx queue to drain, but only if we're in process context */ | ||
745 | if (!in_interrupt()) { | ||
746 | timeout = jiffies + msecs_to_jiffies(2000); | ||
747 | while (time_before(jiffies, timeout) && | ||
748 | !bcom_queue_empty(priv->tx_dmatsk)) | ||
749 | msleep(100); | ||
750 | |||
751 | if (time_after_eq(jiffies, timeout)) | ||
752 | dev_err(&dev->dev, "queues didn't drain\n"); | ||
753 | #if 1 | ||
754 | if (time_after_eq(jiffies, timeout)) { | ||
755 | dev_err(&dev->dev, " tx: index: %i, outdex: %i\n", | ||
756 | priv->tx_dmatsk->index, | ||
757 | priv->tx_dmatsk->outdex); | ||
758 | dev_err(&dev->dev, " rx: index: %i, outdex: %i\n", | ||
759 | priv->rx_dmatsk->index, | ||
760 | priv->rx_dmatsk->outdex); | ||
761 | } | ||
762 | #endif | ||
763 | } | ||
764 | |||
765 | bcom_disable(priv->tx_dmatsk); | ||
766 | |||
767 | /* Stop FEC */ | ||
768 | out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN); | ||
769 | |||
770 | return; | ||
771 | } | ||
772 | |||
773 | /* reset fec and bestcomm tasks */ | ||
774 | static void mpc52xx_fec_reset(struct net_device *dev) | ||
775 | { | ||
776 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
777 | struct mpc52xx_fec __iomem *fec = priv->fec; | ||
778 | |||
779 | mpc52xx_fec_stop(dev); | ||
780 | |||
781 | out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status)); | ||
782 | out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO); | ||
783 | |||
784 | mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk); | ||
785 | |||
786 | mpc52xx_fec_hw_init(dev); | ||
787 | |||
788 | phy_stop(priv->phydev); | ||
789 | phy_write(priv->phydev, MII_BMCR, BMCR_RESET); | ||
790 | phy_start(priv->phydev); | ||
791 | |||
792 | bcom_fec_rx_reset(priv->rx_dmatsk); | ||
793 | bcom_fec_tx_reset(priv->tx_dmatsk); | ||
794 | |||
795 | mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk); | ||
796 | |||
797 | bcom_enable(priv->rx_dmatsk); | ||
798 | bcom_enable(priv->tx_dmatsk); | ||
799 | |||
800 | mpc52xx_fec_start(dev); | ||
801 | } | ||
802 | |||
803 | |||
804 | /* ethtool interface */ | ||
805 | static void mpc52xx_fec_get_drvinfo(struct net_device *dev, | ||
806 | struct ethtool_drvinfo *info) | ||
807 | { | ||
808 | strcpy(info->driver, DRIVER_NAME); | ||
809 | } | ||
810 | |||
811 | static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
812 | { | ||
813 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
814 | return phy_ethtool_gset(priv->phydev, cmd); | ||
815 | } | ||
816 | |||
817 | static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
818 | { | ||
819 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
820 | return phy_ethtool_sset(priv->phydev, cmd); | ||
821 | } | ||
822 | |||
823 | static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) | ||
824 | { | ||
825 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
826 | return priv->msg_enable; | ||
827 | } | ||
828 | |||
829 | static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) | ||
830 | { | ||
831 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
832 | priv->msg_enable = level; | ||
833 | } | ||
834 | |||
835 | static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { | ||
836 | .get_drvinfo = mpc52xx_fec_get_drvinfo, | ||
837 | .get_settings = mpc52xx_fec_get_settings, | ||
838 | .set_settings = mpc52xx_fec_set_settings, | ||
839 | .get_link = ethtool_op_get_link, | ||
840 | .get_msglevel = mpc52xx_fec_get_msglevel, | ||
841 | .set_msglevel = mpc52xx_fec_set_msglevel, | ||
842 | }; | ||
843 | |||
844 | |||
845 | static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
846 | { | ||
847 | struct mpc52xx_fec_priv *priv = netdev_priv(dev); | ||
848 | |||
849 | return mpc52xx_fec_phy_mii_ioctl(priv, if_mii(rq), cmd); | ||
850 | } | ||
851 | |||
852 | /* ======================================================================== */ | ||
853 | /* OF Driver */ | ||
854 | /* ======================================================================== */ | ||
855 | |||
856 | static int __devinit | ||
857 | mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match) | ||
858 | { | ||
859 | int rv; | ||
860 | struct net_device *ndev; | ||
861 | struct mpc52xx_fec_priv *priv = NULL; | ||
862 | struct resource mem; | ||
863 | const phandle *ph; | ||
864 | |||
865 | phys_addr_t rx_fifo; | ||
866 | phys_addr_t tx_fifo; | ||
867 | |||
868 | /* Get the ether ndev & its private zone */ | ||
869 | ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv)); | ||
870 | if (!ndev) | ||
871 | return -ENOMEM; | ||
872 | |||
873 | priv = netdev_priv(ndev); | ||
874 | |||
875 | /* Reserve FEC control zone */ | ||
876 | rv = of_address_to_resource(op->node, 0, &mem); | ||
877 | if (rv) { | ||
878 | printk(KERN_ERR DRIVER_NAME ": " | ||
879 | "Error while parsing device node resource\n" ); | ||
880 | return rv; | ||
881 | } | ||
882 | if ((mem.end - mem.start + 1) != sizeof(struct mpc52xx_fec)) { | ||
883 | printk(KERN_ERR DRIVER_NAME | ||
884 | " - invalid resource size (%lx != %x), check mpc52xx_devices.c\n", | ||
885 | (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec)); | ||
886 | return -EINVAL; | ||
887 | } | ||
888 | |||
889 | if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), DRIVER_NAME)) | ||
890 | return -EBUSY; | ||
891 | |||
892 | /* Init ether ndev with what we have */ | ||
893 | ndev->open = mpc52xx_fec_open; | ||
894 | ndev->stop = mpc52xx_fec_close; | ||
895 | ndev->hard_start_xmit = mpc52xx_fec_hard_start_xmit; | ||
896 | ndev->do_ioctl = mpc52xx_fec_ioctl; | ||
897 | ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops; | ||
898 | ndev->get_stats = mpc52xx_fec_get_stats; | ||
899 | ndev->set_mac_address = mpc52xx_fec_set_mac_address; | ||
900 | ndev->set_multicast_list = mpc52xx_fec_set_multicast_list; | ||
901 | ndev->tx_timeout = mpc52xx_fec_tx_timeout; | ||
902 | ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT; | ||
903 | ndev->base_addr = mem.start; | ||
904 | |||
905 | priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */ | ||
906 | |||
907 | spin_lock_init(&priv->lock); | ||
908 | |||
909 | /* ioremap the zones */ | ||
910 | priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec)); | ||
911 | |||
912 | if (!priv->fec) { | ||
913 | rv = -ENOMEM; | ||
914 | goto probe_error; | ||
915 | } | ||
916 | |||
917 | /* Bestcomm init */ | ||
918 | rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data); | ||
919 | tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data); | ||
920 | |||
921 | priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE); | ||
922 | priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo); | ||
923 | |||
924 | if (!priv->rx_dmatsk || !priv->tx_dmatsk) { | ||
925 | printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" ); | ||
926 | rv = -ENOMEM; | ||
927 | goto probe_error; | ||
928 | } | ||
929 | |||
930 | /* Get the IRQs we need, one by one */ | ||
931 | /* Control */ | ||
932 | ndev->irq = irq_of_parse_and_map(op->node, 0); | ||
933 | |||
934 | /* RX */ | ||
935 | priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk); | ||
936 | |||
937 | /* TX */ | ||
938 | priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk); | ||
939 | |||
940 | /* MAC address init */ | ||
941 | if (!is_zero_ether_addr(mpc52xx_fec_mac_addr)) | ||
942 | memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6); | ||
943 | else | ||
944 | mpc52xx_fec_get_paddr(ndev, ndev->dev_addr); | ||
945 | |||
946 | priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT); | ||
947 | priv->duplex = DUPLEX_FULL; | ||
948 | |||
949 | /* is the phy present in device tree? */ | ||
950 | ph = of_get_property(op->node, "phy-handle", NULL); | ||
951 | if (ph) { | ||
952 | const unsigned int *prop; | ||
953 | struct device_node *phy_dn; | ||
954 | priv->has_phy = 1; | ||
955 | |||
956 | phy_dn = of_find_node_by_phandle(*ph); | ||
957 | prop = of_get_property(phy_dn, "reg", NULL); | ||
958 | priv->phy_addr = *prop; | ||
959 | |||
960 | of_node_put(phy_dn); | ||
961 | |||
962 | /* Phy speed */ | ||
963 | priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1; | ||
964 | } else { | ||
965 | dev_info(&ndev->dev, "can't find \"phy-handle\" in device" | ||
966 | " tree, using 7-wire mode\n"); | ||
967 | } | ||
968 | |||
969 | /* Hardware init */ | ||
970 | mpc52xx_fec_hw_init(ndev); | ||
971 | |||
972 | mpc52xx_fec_reset_stats(ndev); | ||
973 | |||
974 | /* Register the new network device */ | ||
975 | rv = register_netdev(ndev); | ||
976 | if (rv < 0) | ||
977 | goto probe_error; | ||
978 | |||
979 | /* We're done ! */ | ||
980 | dev_set_drvdata(&op->dev, ndev); | ||
981 | |||
982 | return 0; | ||
983 | |||
984 | |||
985 | /* Error handling - free everything that might be allocated */ | ||
986 | probe_error: | ||
987 | |||
988 | irq_dispose_mapping(ndev->irq); | ||
989 | |||
990 | if (priv->rx_dmatsk) | ||
991 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
992 | if (priv->tx_dmatsk) | ||
993 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
994 | |||
995 | if (priv->fec) | ||
996 | iounmap(priv->fec); | ||
997 | |||
998 | release_mem_region(mem.start, sizeof(struct mpc52xx_fec)); | ||
999 | |||
1000 | free_netdev(ndev); | ||
1001 | |||
1002 | return rv; | ||
1003 | } | ||
1004 | |||
1005 | static int | ||
1006 | mpc52xx_fec_remove(struct of_device *op) | ||
1007 | { | ||
1008 | struct net_device *ndev; | ||
1009 | struct mpc52xx_fec_priv *priv; | ||
1010 | |||
1011 | ndev = dev_get_drvdata(&op->dev); | ||
1012 | priv = netdev_priv(ndev); | ||
1013 | |||
1014 | unregister_netdev(ndev); | ||
1015 | |||
1016 | irq_dispose_mapping(ndev->irq); | ||
1017 | |||
1018 | bcom_fec_rx_release(priv->rx_dmatsk); | ||
1019 | bcom_fec_tx_release(priv->tx_dmatsk); | ||
1020 | |||
1021 | iounmap(priv->fec); | ||
1022 | |||
1023 | release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec)); | ||
1024 | |||
1025 | free_netdev(ndev); | ||
1026 | |||
1027 | dev_set_drvdata(&op->dev, NULL); | ||
1028 | return 0; | ||
1029 | } | ||
1030 | |||
1031 | #ifdef CONFIG_PM | ||
1032 | static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state) | ||
1033 | { | ||
1034 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1035 | |||
1036 | if (netif_running(dev)) | ||
1037 | mpc52xx_fec_close(dev); | ||
1038 | |||
1039 | return 0; | ||
1040 | } | ||
1041 | |||
1042 | static int mpc52xx_fec_of_resume(struct of_device *op) | ||
1043 | { | ||
1044 | struct net_device *dev = dev_get_drvdata(&op->dev); | ||
1045 | |||
1046 | mpc52xx_fec_hw_init(dev); | ||
1047 | mpc52xx_fec_reset_stats(dev); | ||
1048 | |||
1049 | if (netif_running(dev)) | ||
1050 | mpc52xx_fec_open(dev); | ||
1051 | |||
1052 | return 0; | ||
1053 | } | ||
1054 | #endif | ||
1055 | |||
1056 | static struct of_device_id mpc52xx_fec_match[] = { | ||
1057 | { | ||
1058 | .type = "network", | ||
1059 | .compatible = "mpc5200-fec", | ||
1060 | }, | ||
1061 | { } | ||
1062 | }; | ||
1063 | |||
1064 | MODULE_DEVICE_TABLE(of, mpc52xx_fec_match); | ||
1065 | |||
1066 | static struct of_platform_driver mpc52xx_fec_driver = { | ||
1067 | .owner = THIS_MODULE, | ||
1068 | .name = DRIVER_NAME, | ||
1069 | .match_table = mpc52xx_fec_match, | ||
1070 | .probe = mpc52xx_fec_probe, | ||
1071 | .remove = mpc52xx_fec_remove, | ||
1072 | #ifdef CONFIG_PM | ||
1073 | .suspend = mpc52xx_fec_of_suspend, | ||
1074 | .resume = mpc52xx_fec_of_resume, | ||
1075 | #endif | ||
1076 | }; | ||
1077 | |||
1078 | |||
1079 | /* ======================================================================== */ | ||
1080 | /* Module */ | ||
1081 | /* ======================================================================== */ | ||
1082 | |||
1083 | static int __init | ||
1084 | mpc52xx_fec_init(void) | ||
1085 | { | ||
1086 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1087 | int ret; | ||
1088 | ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver); | ||
1089 | if (ret) { | ||
1090 | printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n"); | ||
1091 | return ret; | ||
1092 | } | ||
1093 | #endif | ||
1094 | return of_register_platform_driver(&mpc52xx_fec_driver); | ||
1095 | } | ||
1096 | |||
1097 | static void __exit | ||
1098 | mpc52xx_fec_exit(void) | ||
1099 | { | ||
1100 | of_unregister_platform_driver(&mpc52xx_fec_driver); | ||
1101 | #ifdef CONFIG_FEC_MPC52xx_MDIO | ||
1102 | of_unregister_platform_driver(&mpc52xx_fec_mdio_driver); | ||
1103 | #endif | ||
1104 | } | ||
1105 | |||
1106 | |||
1107 | module_init(mpc52xx_fec_init); | ||
1108 | module_exit(mpc52xx_fec_exit); | ||
1109 | |||
1110 | MODULE_LICENSE("GPL"); | ||
1111 | MODULE_AUTHOR("Dale Farnsworth"); | ||
1112 | MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC"); | ||
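The multicast filter in mpc52xx_fec_set_multicast_list() above hashes each address with ether_crc_le() and uses the top six bits of the result to pick one bit in the two 32-bit group-address registers (gaddr1 for hash values 32..63, gaddr2 for 0..31). The user-space sketch below reproduces that selection; the CRC loop is my reading of ether_crc_le() (reflected polynomial 0xEDB88320, initial value 0xFFFFFFFF, no final inversion) and should be treated as an assumption rather than a reference implementation.

/* Standalone sketch of the group-address hash used above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc_le_sketch(const uint8_t *mac, int len)
{
	uint32_t crc = 0xFFFFFFFF;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= mac[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t hash = ether_crc_le_sketch(mac, 6) >> 26;   /* 0..63 */
	uint32_t gaddr1 = 0, gaddr2 = 0;

	/* same register/bit selection as the driver's loop */
	if (hash >= 32)
		gaddr1 |= 1u << (hash - 32);
	else
		gaddr2 |= 1u << hash;

	printf("hash=%u gaddr1=%08x gaddr2=%08x\n",
	       (unsigned)hash, (unsigned)gaddr1, (unsigned)gaddr2);
	return 0;
}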
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h new file mode 100644 index 000000000000..8b1f75397b9a --- /dev/null +++ b/drivers/net/fec_mpc52xx.h | |||
@@ -0,0 +1,313 @@ | |||
1 | /* | ||
2 | * drivers/net/fec_mpc52xx.h | ||
3 | * | ||
4 | * Driver for the MPC5200 Fast Ethernet Controller | ||
5 | * | ||
6 | * Author: Dale Farnsworth <dfarnsworth@mvista.com> | ||
7 | * | ||
8 | * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under | ||
9 | * the terms of the GNU General Public License version 2. This program | ||
10 | * is licensed "as is" without any warranty of any kind, whether express | ||
11 | * or implied. | ||
12 | */ | ||
13 | |||
14 | #ifndef __DRIVERS_NET_MPC52XX_FEC_H__ | ||
15 | #define __DRIVERS_NET_MPC52XX_FEC_H__ | ||
16 | |||
17 | #include <linux/phy.h> | ||
18 | |||
19 | /* Tunable constant */ | ||
20 | /* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */ | ||
21 | #define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */ | ||
22 | #define FEC_RX_NUM_BD 256 | ||
23 | #define FEC_TX_NUM_BD 64 | ||
24 | |||
25 | #define FEC_RESET_DELAY 50 /* uS */ | ||
26 | |||
27 | #define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000) | ||
28 | |||
29 | struct mpc52xx_fec_priv { | ||
30 | int duplex; | ||
31 | int r_irq; | ||
32 | int t_irq; | ||
33 | struct mpc52xx_fec __iomem *fec; | ||
34 | struct bcom_task *rx_dmatsk; | ||
35 | struct bcom_task *tx_dmatsk; | ||
36 | spinlock_t lock; | ||
37 | int msg_enable; | ||
38 | |||
39 | int has_phy; | ||
40 | unsigned int phy_speed; | ||
41 | unsigned int phy_addr; | ||
42 | struct phy_device *phydev; | ||
43 | enum phy_state link; | ||
44 | int speed; | ||
45 | }; | ||
46 | |||
47 | |||
48 | /* ======================================================================== */ | ||
49 | /* Hardware register sets & bits */ | ||
50 | /* ======================================================================== */ | ||
51 | |||
52 | struct mpc52xx_fec { | ||
53 | u32 fec_id; /* FEC + 0x000 */ | ||
54 | u32 ievent; /* FEC + 0x004 */ | ||
55 | u32 imask; /* FEC + 0x008 */ | ||
56 | |||
57 | u32 reserved0[1]; /* FEC + 0x00C */ | ||
58 | u32 r_des_active; /* FEC + 0x010 */ | ||
59 | u32 x_des_active; /* FEC + 0x014 */ | ||
60 | u32 r_des_active_cl; /* FEC + 0x018 */ | ||
61 | u32 x_des_active_cl; /* FEC + 0x01C */ | ||
62 | u32 ivent_set; /* FEC + 0x020 */ | ||
63 | u32 ecntrl; /* FEC + 0x024 */ | ||
64 | |||
65 | u32 reserved1[6]; /* FEC + 0x028-03C */ | ||
66 | u32 mii_data; /* FEC + 0x040 */ | ||
67 | u32 mii_speed; /* FEC + 0x044 */ | ||
68 | u32 mii_status; /* FEC + 0x048 */ | ||
69 | |||
70 | u32 reserved2[5]; /* FEC + 0x04C-05C */ | ||
71 | u32 mib_data; /* FEC + 0x060 */ | ||
72 | u32 mib_control; /* FEC + 0x064 */ | ||
73 | |||
74 | u32 reserved3[6]; /* FEC + 0x068-7C */ | ||
75 | u32 r_activate; /* FEC + 0x080 */ | ||
76 | u32 r_cntrl; /* FEC + 0x084 */ | ||
77 | u32 r_hash; /* FEC + 0x088 */ | ||
78 | u32 r_data; /* FEC + 0x08C */ | ||
79 | u32 ar_done; /* FEC + 0x090 */ | ||
80 | u32 r_test; /* FEC + 0x094 */ | ||
81 | u32 r_mib; /* FEC + 0x098 */ | ||
82 | u32 r_da_low; /* FEC + 0x09C */ | ||
83 | u32 r_da_high; /* FEC + 0x0A0 */ | ||
84 | |||
85 | u32 reserved4[7]; /* FEC + 0x0A4-0BC */ | ||
86 | u32 x_activate; /* FEC + 0x0C0 */ | ||
87 | u32 x_cntrl; /* FEC + 0x0C4 */ | ||
88 | u32 backoff; /* FEC + 0x0C8 */ | ||
89 | u32 x_data; /* FEC + 0x0CC */ | ||
90 | u32 x_status; /* FEC + 0x0D0 */ | ||
91 | u32 x_mib; /* FEC + 0x0D4 */ | ||
92 | u32 x_test; /* FEC + 0x0D8 */ | ||
93 | u32 fdxfc_da1; /* FEC + 0x0DC */ | ||
94 | u32 fdxfc_da2; /* FEC + 0x0E0 */ | ||
95 | u32 paddr1; /* FEC + 0x0E4 */ | ||
96 | u32 paddr2; /* FEC + 0x0E8 */ | ||
97 | u32 op_pause; /* FEC + 0x0EC */ | ||
98 | |||
99 | u32 reserved5[4]; /* FEC + 0x0F0-0FC */ | ||
100 | u32 instr_reg; /* FEC + 0x100 */ | ||
101 | u32 context_reg; /* FEC + 0x104 */ | ||
102 | u32 test_cntrl; /* FEC + 0x108 */ | ||
103 | u32 acc_reg; /* FEC + 0x10C */ | ||
104 | u32 ones; /* FEC + 0x110 */ | ||
105 | u32 zeros; /* FEC + 0x114 */ | ||
106 | u32 iaddr1; /* FEC + 0x118 */ | ||
107 | u32 iaddr2; /* FEC + 0x11C */ | ||
108 | u32 gaddr1; /* FEC + 0x120 */ | ||
109 | u32 gaddr2; /* FEC + 0x124 */ | ||
110 | u32 random; /* FEC + 0x128 */ | ||
111 | u32 rand1; /* FEC + 0x12C */ | ||
112 | u32 tmp; /* FEC + 0x130 */ | ||
113 | |||
114 | u32 reserved6[3]; /* FEC + 0x134-13C */ | ||
115 | u32 fifo_id; /* FEC + 0x140 */ | ||
116 | u32 x_wmrk; /* FEC + 0x144 */ | ||
117 | u32 fcntrl; /* FEC + 0x148 */ | ||
118 | u32 r_bound; /* FEC + 0x14C */ | ||
119 | u32 r_fstart; /* FEC + 0x150 */ | ||
120 | u32 r_count; /* FEC + 0x154 */ | ||
121 | u32 r_lag; /* FEC + 0x158 */ | ||
122 | u32 r_read; /* FEC + 0x15C */ | ||
123 | u32 r_write; /* FEC + 0x160 */ | ||
124 | u32 x_count; /* FEC + 0x164 */ | ||
125 | u32 x_lag; /* FEC + 0x168 */ | ||
126 | u32 x_retry; /* FEC + 0x16C */ | ||
127 | u32 x_write; /* FEC + 0x170 */ | ||
128 | u32 x_read; /* FEC + 0x174 */ | ||
129 | |||
130 | u32 reserved7[2]; /* FEC + 0x178-17C */ | ||
131 | u32 fm_cntrl; /* FEC + 0x180 */ | ||
132 | u32 rfifo_data; /* FEC + 0x184 */ | ||
133 | u32 rfifo_status; /* FEC + 0x188 */ | ||
134 | u32 rfifo_cntrl; /* FEC + 0x18C */ | ||
135 | u32 rfifo_lrf_ptr; /* FEC + 0x190 */ | ||
136 | u32 rfifo_lwf_ptr; /* FEC + 0x194 */ | ||
137 | u32 rfifo_alarm; /* FEC + 0x198 */ | ||
138 | u32 rfifo_rdptr; /* FEC + 0x19C */ | ||
139 | u32 rfifo_wrptr; /* FEC + 0x1A0 */ | ||
140 | u32 tfifo_data; /* FEC + 0x1A4 */ | ||
141 | u32 tfifo_status; /* FEC + 0x1A8 */ | ||
142 | u32 tfifo_cntrl; /* FEC + 0x1AC */ | ||
143 | u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */ | ||
144 | u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */ | ||
145 | u32 tfifo_alarm; /* FEC + 0x1B8 */ | ||
146 | u32 tfifo_rdptr; /* FEC + 0x1BC */ | ||
147 | u32 tfifo_wrptr; /* FEC + 0x1C0 */ | ||
148 | |||
149 | u32 reset_cntrl; /* FEC + 0x1C4 */ | ||
150 | u32 xmit_fsm; /* FEC + 0x1C8 */ | ||
151 | |||
152 | u32 reserved8[3]; /* FEC + 0x1CC-1D4 */ | ||
153 | u32 rdes_data0; /* FEC + 0x1D8 */ | ||
154 | u32 rdes_data1; /* FEC + 0x1DC */ | ||
155 | u32 r_length; /* FEC + 0x1E0 */ | ||
156 | u32 x_length; /* FEC + 0x1E4 */ | ||
157 | u32 x_addr; /* FEC + 0x1E8 */ | ||
158 | u32 cdes_data; /* FEC + 0x1EC */ | ||
159 | u32 status; /* FEC + 0x1F0 */ | ||
160 | u32 dma_control; /* FEC + 0x1F4 */ | ||
161 | u32 des_cmnd; /* FEC + 0x1F8 */ | ||
162 | u32 data; /* FEC + 0x1FC */ | ||
163 | |||
164 | u32 rmon_t_drop; /* FEC + 0x200 */ | ||
165 | u32 rmon_t_packets; /* FEC + 0x204 */ | ||
166 | u32 rmon_t_bc_pkt; /* FEC + 0x208 */ | ||
167 | u32 rmon_t_mc_pkt; /* FEC + 0x20C */ | ||
168 | u32 rmon_t_crc_align; /* FEC + 0x210 */ | ||
169 | u32 rmon_t_undersize; /* FEC + 0x214 */ | ||
170 | u32 rmon_t_oversize; /* FEC + 0x218 */ | ||
171 | u32 rmon_t_frag; /* FEC + 0x21C */ | ||
172 | u32 rmon_t_jab; /* FEC + 0x220 */ | ||
173 | u32 rmon_t_col; /* FEC + 0x224 */ | ||
174 | u32 rmon_t_p64; /* FEC + 0x228 */ | ||
175 | u32 rmon_t_p65to127; /* FEC + 0x22C */ | ||
176 | u32 rmon_t_p128to255; /* FEC + 0x230 */ | ||
177 | u32 rmon_t_p256to511; /* FEC + 0x234 */ | ||
178 | u32 rmon_t_p512to1023; /* FEC + 0x238 */ | ||
179 | u32 rmon_t_p1024to2047; /* FEC + 0x23C */ | ||
180 | u32 rmon_t_p_gte2048; /* FEC + 0x240 */ | ||
181 | u32 rmon_t_octets; /* FEC + 0x244 */ | ||
182 | u32 ieee_t_drop; /* FEC + 0x248 */ | ||
183 | u32 ieee_t_frame_ok; /* FEC + 0x24C */ | ||
184 | u32 ieee_t_1col; /* FEC + 0x250 */ | ||
185 | u32 ieee_t_mcol; /* FEC + 0x254 */ | ||
186 | u32 ieee_t_def; /* FEC + 0x258 */ | ||
187 | u32 ieee_t_lcol; /* FEC + 0x25C */ | ||
188 | u32 ieee_t_excol; /* FEC + 0x260 */ | ||
189 | u32 ieee_t_macerr; /* FEC + 0x264 */ | ||
190 | u32 ieee_t_cserr; /* FEC + 0x268 */ | ||
191 | u32 ieee_t_sqe; /* FEC + 0x26C */ | ||
192 | u32 t_fdxfc; /* FEC + 0x270 */ | ||
193 | u32 ieee_t_octets_ok; /* FEC + 0x274 */ | ||
194 | |||
195 | u32 reserved9[2]; /* FEC + 0x278-27C */ | ||
196 | u32 rmon_r_drop; /* FEC + 0x280 */ | ||
197 | u32 rmon_r_packets; /* FEC + 0x284 */ | ||
198 | u32 rmon_r_bc_pkt; /* FEC + 0x288 */ | ||
199 | u32 rmon_r_mc_pkt; /* FEC + 0x28C */ | ||
200 | u32 rmon_r_crc_align; /* FEC + 0x290 */ | ||
201 | u32 rmon_r_undersize; /* FEC + 0x294 */ | ||
202 | u32 rmon_r_oversize; /* FEC + 0x298 */ | ||
203 | u32 rmon_r_frag; /* FEC + 0x29C */ | ||
204 | u32 rmon_r_jab; /* FEC + 0x2A0 */ | ||
205 | |||
206 | u32 rmon_r_resvd_0; /* FEC + 0x2A4 */ | ||
207 | |||
208 | u32 rmon_r_p64; /* FEC + 0x2A8 */ | ||
209 | u32 rmon_r_p65to127; /* FEC + 0x2AC */ | ||
210 | u32 rmon_r_p128to255; /* FEC + 0x2B0 */ | ||
211 | u32 rmon_r_p256to511; /* FEC + 0x2B4 */ | ||
212 | u32 rmon_r_p512to1023; /* FEC + 0x2B8 */ | ||
213 | u32 rmon_r_p1024to2047; /* FEC + 0x2BC */ | ||
214 | u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */ | ||
215 | u32 rmon_r_octets; /* FEC + 0x2C4 */ | ||
216 | u32 ieee_r_drop; /* FEC + 0x2C8 */ | ||
217 | u32 ieee_r_frame_ok; /* FEC + 0x2CC */ | ||
218 | u32 ieee_r_crc; /* FEC + 0x2D0 */ | ||
219 | u32 ieee_r_align; /* FEC + 0x2D4 */ | ||
220 | u32 r_macerr; /* FEC + 0x2D8 */ | ||
221 | u32 r_fdxfc; /* FEC + 0x2DC */ | ||
222 | u32 ieee_r_octets_ok; /* FEC + 0x2E0 */ | ||
223 | |||
224 | u32 reserved10[7]; /* FEC + 0x2E4-2FC */ | ||
225 | |||
226 | u32 reserved11[64]; /* FEC + 0x300-3FF */ | ||
227 | }; | ||
228 | |||
229 | #define FEC_MIB_DISABLE 0x80000000 | ||
230 | |||
231 | #define FEC_IEVENT_HBERR 0x80000000 | ||
232 | #define FEC_IEVENT_BABR 0x40000000 | ||
233 | #define FEC_IEVENT_BABT 0x20000000 | ||
234 | #define FEC_IEVENT_GRA 0x10000000 | ||
235 | #define FEC_IEVENT_TFINT 0x08000000 | ||
236 | #define FEC_IEVENT_MII 0x00800000 | ||
237 | #define FEC_IEVENT_LATE_COL 0x00200000 | ||
238 | #define FEC_IEVENT_COL_RETRY_LIM 0x00100000 | ||
239 | #define FEC_IEVENT_XFIFO_UN 0x00080000 | ||
240 | #define FEC_IEVENT_XFIFO_ERROR 0x00040000 | ||
241 | #define FEC_IEVENT_RFIFO_ERROR 0x00020000 | ||
242 | |||
243 | #define FEC_IMASK_HBERR 0x80000000 | ||
244 | #define FEC_IMASK_BABR 0x40000000 | ||
245 | #define FEC_IMASK_BABT 0x20000000 | ||
246 | #define FEC_IMASK_GRA 0x10000000 | ||
247 | #define FEC_IMASK_MII 0x00800000 | ||
248 | #define FEC_IMASK_LATE_COL 0x00200000 | ||
249 | #define FEC_IMASK_COL_RETRY_LIM 0x00100000 | ||
250 | #define FEC_IMASK_XFIFO_UN 0x00080000 | ||
251 | #define FEC_IMASK_XFIFO_ERROR 0x00040000 | ||
252 | #define FEC_IMASK_RFIFO_ERROR 0x00020000 | ||
253 | |||
254 | /* all but MII, which is enabled separately */ | ||
255 | #define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \ | ||
256 | FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \ | ||
257 | FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \ | ||
258 | FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR) | ||
259 | |||
260 | #define FEC_RCNTRL_MAX_FL_SHIFT 16 | ||
261 | #define FEC_RCNTRL_LOOP 0x01 | ||
262 | #define FEC_RCNTRL_DRT 0x02 | ||
263 | #define FEC_RCNTRL_MII_MODE 0x04 | ||
264 | #define FEC_RCNTRL_PROM 0x08 | ||
265 | #define FEC_RCNTRL_BC_REJ 0x10 | ||
266 | #define FEC_RCNTRL_FCE 0x20 | ||
267 | |||
268 | #define FEC_TCNTRL_GTS 0x00000001 | ||
269 | #define FEC_TCNTRL_HBC 0x00000002 | ||
270 | #define FEC_TCNTRL_FDEN 0x00000004 | ||
271 | #define FEC_TCNTRL_TFC_PAUSE 0x00000008 | ||
272 | #define FEC_TCNTRL_RFC_PAUSE 0x00000010 | ||
273 | |||
274 | #define FEC_ECNTRL_RESET 0x00000001 | ||
275 | #define FEC_ECNTRL_ETHER_EN 0x00000002 | ||
276 | |||
277 | #define FEC_MII_DATA_ST 0x40000000 /* Start frame */ | ||
278 | #define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */ | ||
279 | #define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */ | ||
280 | #define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */ | ||
281 | #define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */ | ||
282 | #define FEC_MII_DATA_TA 0x00020000 /* Turnaround */ | ||
283 | #define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */ | ||
284 | |||
285 | #define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA) | ||
286 | #define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA) | ||
287 | |||
288 | #define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */ | ||
289 | #define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */ | ||
290 | |||
291 | #define FEC_PADDR2_TYPE 0x8808 | ||
292 | |||
293 | #define FEC_OP_PAUSE_OPCODE 0x00010000 | ||
294 | |||
295 | #define FEC_FIFO_WMRK_256B 0x3 | ||
296 | |||
297 | #define FEC_FIFO_STATUS_ERR 0x00400000 | ||
298 | #define FEC_FIFO_STATUS_UF 0x00200000 | ||
299 | #define FEC_FIFO_STATUS_OF 0x00100000 | ||
300 | |||
301 | #define FEC_FIFO_CNTRL_FRAME 0x08000000 | ||
302 | #define FEC_FIFO_CNTRL_LTG_7 0x07000000 | ||
303 | |||
304 | #define FEC_RESET_CNTRL_RESET_FIFO 0x02000000 | ||
305 | #define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000 | ||
306 | |||
307 | #define FEC_XMIT_FSM_APPEND_CRC 0x02000000 | ||
308 | #define FEC_XMIT_FSM_ENABLE_CRC 0x01000000 | ||
309 | |||
310 | |||
311 | extern struct of_platform_driver mpc52xx_fec_mdio_driver; | ||
312 | |||
313 | #endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */ | ||
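The MII frame macros above are combined into a single 32-bit write to the mii_data register; a minimal sketch of building a read request, mirroring the MDIO bus driver added below (phy_id and reg are the PHY address and register number, fec a mapped struct mpc52xx_fec):

	u32 request = FEC_MII_READ_FRAME;	/* ST | OP_RD | TA */

	request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
	request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
	out_be32(&fec->mii_data, request);
	/* completion is signalled by FEC_IEVENT_MII in ievent; the result is
	 * then in_be32(&fec->mii_data) & FEC_MII_DATA_DATAMSK */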
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c new file mode 100644 index 000000000000..ba6e8b218e0a --- /dev/null +++ b/drivers/net/fec_mpc52xx_phy.c | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver | ||
3 | * | ||
4 | * Copyright (C) 2007 Domen Puncer, Telargo, Inc. | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/netdevice.h> | ||
14 | #include <linux/phy.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <asm/mpc52xx.h> | ||
18 | #include "fec_mpc52xx.h" | ||
19 | |||
20 | struct mpc52xx_fec_mdio_priv { | ||
21 | struct mpc52xx_fec __iomem *regs; | ||
22 | }; | ||
23 | |||
24 | static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
25 | { | ||
26 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
27 | struct mpc52xx_fec __iomem *fec; | ||
28 | int tries = 100; | ||
29 | u32 request = FEC_MII_READ_FRAME; | ||
30 | |||
31 | fec = priv->regs; | ||
32 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
33 | |||
34 | request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
35 | request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
36 | |||
37 | out_be32(&priv->regs->mii_data, request); | ||
38 | |||
39 | /* wait for it to finish; this takes about 23 us on a lite5200b */ | ||
40 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
41 | udelay(5); | ||
42 | |||
43 | if (tries == 0) | ||
44 | return -ETIMEDOUT; | ||
45 | |||
46 | return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK; | ||
47 | } | ||
48 | |||
49 | static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data) | ||
50 | { | ||
51 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
52 | struct mpc52xx_fec __iomem *fec; | ||
53 | u32 value = data; | ||
54 | int tries = 100; | ||
55 | |||
56 | fec = priv->regs; | ||
57 | out_be32(&fec->ievent, FEC_IEVENT_MII); | ||
58 | |||
59 | value |= FEC_MII_WRITE_FRAME; | ||
60 | value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK; | ||
61 | value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK; | ||
62 | |||
63 | out_be32(&priv->regs->mii_data, value); | ||
64 | |||
65 | /* wait for request to finish */ | ||
66 | while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries) | ||
67 | udelay(5); | ||
68 | |||
69 | if (tries == 0) | ||
70 | return -ETIMEDOUT; | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match) | ||
76 | { | ||
77 | struct device *dev = &of->dev; | ||
78 | struct device_node *np = of->node; | ||
79 | struct device_node *child = NULL; | ||
80 | struct mii_bus *bus; | ||
81 | struct mpc52xx_fec_mdio_priv *priv; | ||
82 | struct resource res = {}; | ||
83 | int err; | ||
84 | int i; | ||
85 | |||
86 | bus = kzalloc(sizeof(*bus), GFP_KERNEL); | ||
87 | if (bus == NULL) | ||
88 | return -ENOMEM; | ||
89 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
90 | if (priv == NULL) { | ||
91 | err = -ENOMEM; | ||
92 | goto out_free; | ||
93 | } | ||
94 | |||
95 | bus->name = "mpc52xx MII bus"; | ||
96 | bus->read = mpc52xx_fec_mdio_read; | ||
97 | bus->write = mpc52xx_fec_mdio_write; | ||
98 | |||
99 | /* setup irqs */ | ||
100 | bus->irq = kmalloc(sizeof(bus->irq[0]) * PHY_MAX_ADDR, GFP_KERNEL); | ||
101 | if (bus->irq == NULL) { | ||
102 | err = -ENOMEM; | ||
103 | goto out_free; | ||
104 | } | ||
105 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
106 | bus->irq[i] = PHY_POLL; | ||
107 | |||
108 | while ((child = of_get_next_child(np, child)) != NULL) { | ||
109 | int irq = irq_of_parse_and_map(child, 0); | ||
110 | if (irq != NO_IRQ) { | ||
111 | const u32 *id = of_get_property(child, "reg", NULL); | ||
112 | bus->irq[*id] = irq; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /* setup registers */ | ||
117 | err = of_address_to_resource(np, 0, &res); | ||
118 | if (err) | ||
119 | goto out_free; | ||
120 | priv->regs = ioremap(res.start, res.end - res.start + 1); | ||
121 | if (priv->regs == NULL) { | ||
122 | err = -ENOMEM; | ||
123 | goto out_free; | ||
124 | } | ||
125 | |||
126 | bus->id = res.start; | ||
127 | bus->priv = priv; | ||
128 | |||
129 | bus->dev = dev; | ||
130 | dev_set_drvdata(dev, bus); | ||
131 | |||
132 | /* set MII speed */ | ||
133 | out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1); | ||
134 | |||
135 | /* enable MII interrupt */ | ||
136 | out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII); | ||
137 | |||
138 | err = mdiobus_register(bus); | ||
139 | if (err) | ||
140 | goto out_unmap; | ||
141 | |||
142 | return 0; | ||
143 | |||
144 | out_unmap: | ||
145 | iounmap(priv->regs); | ||
146 | out_free: | ||
147 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
148 | if (bus->irq[i] != PHY_POLL) | ||
149 | irq_dispose_mapping(bus->irq[i]); | ||
150 | kfree(bus->irq); | ||
151 | kfree(priv); | ||
152 | kfree(bus); | ||
153 | |||
154 | return err; | ||
155 | } | ||
156 | |||
157 | static int mpc52xx_fec_mdio_remove(struct of_device *of) | ||
158 | { | ||
159 | struct device *dev = &of->dev; | ||
160 | struct mii_bus *bus = dev_get_drvdata(dev); | ||
161 | struct mpc52xx_fec_mdio_priv *priv = bus->priv; | ||
162 | int i; | ||
163 | |||
164 | mdiobus_unregister(bus); | ||
165 | dev_set_drvdata(dev, NULL); | ||
166 | |||
167 | iounmap(priv->regs); | ||
168 | for (i=0; i<PHY_MAX_ADDR; i++) | ||
169 | if (bus->irq[i]) | ||
170 | irq_dispose_mapping(bus->irq[i]); | ||
171 | kfree(priv); | ||
172 | kfree(bus->irq); | ||
173 | kfree(bus); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | |||
179 | static struct of_device_id mpc52xx_fec_mdio_match[] = { | ||
180 | { | ||
181 | .type = "mdio", | ||
182 | .compatible = "mpc5200b-fec-phy", | ||
183 | }, | ||
184 | {}, | ||
185 | }; | ||
186 | |||
187 | struct of_platform_driver mpc52xx_fec_mdio_driver = { | ||
188 | .name = "mpc5200b-fec-phy", | ||
189 | .probe = mpc52xx_fec_mdio_probe, | ||
190 | .remove = mpc52xx_fec_mdio_remove, | ||
191 | .match_table = mpc52xx_fec_mdio_match, | ||
192 | }; | ||
193 | |||
194 | /* exported so the fec driver can register it: the mdio bus has to be registered before the fec itself */ | ||
195 | EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver); | ||
196 | |||
197 | |||
198 | MODULE_LICENSE("Dual BSD/GPL"); | ||
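Note that the MDIO bus driver is only exported here; registering it is left to the main FEC ethernet driver, so that the bus exists before the ethernet device probes its PHY. A hedged sketch of the expected call sequence in that driver's module init (the surrounding function and the ethernet platform driver's name are assumptions, not taken from this patch):

	/* sketch only: register the MDIO bus driver first, then the FEC itself */
	err = of_register_platform_driver(&mpc52xx_fec_mdio_driver);
	if (err)
		return err;
	return of_register_platform_driver(&mpc52xx_fec_driver);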
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c index 4dbdfaaf37bf..a1e4508717c8 100644 --- a/drivers/net/irda/au1k_ir.c +++ b/drivers/net/irda/au1k_ir.c | |||
@@ -627,19 +627,16 @@ static int au1k_irda_rx(struct net_device *dev) | |||
627 | } | 627 | } |
628 | 628 | ||
629 | 629 | ||
630 | void au1k_irda_interrupt(int irq, void *dev_id) | 630 | static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id) |
631 | { | 631 | { |
632 | struct net_device *dev = (struct net_device *) dev_id; | 632 | struct net_device *dev = dev_id; |
633 | |||
634 | if (dev == NULL) { | ||
635 | printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); | ||
636 | return; | ||
637 | } | ||
638 | 633 | ||
639 | writel(0, IR_INT_CLEAR); /* ack irda interrupts */ | 634 | writel(0, IR_INT_CLEAR); /* ack irda interrupts */ |
640 | 635 | ||
641 | au1k_irda_rx(dev); | 636 | au1k_irda_rx(dev); |
642 | au1k_tx_ack(dev); | 637 | au1k_tx_ack(dev); |
638 | |||
639 | return IRQ_HANDLED; | ||
643 | } | 640 | } |
644 | 641 | ||
645 | 642 | ||
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index 73dcbb7296da..ad134a61302a 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c | |||
@@ -274,7 +274,7 @@ static int tc574_probe(struct pcmcia_device *link) | |||
274 | spin_lock_init(&lp->window_lock); | 274 | spin_lock_init(&lp->window_lock); |
275 | link->io.NumPorts1 = 32; | 275 | link->io.NumPorts1 = 32; |
276 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 276 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
277 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 277 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
278 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 278 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
279 | link->irq.Handler = &el3_interrupt; | 279 | link->irq.Handler = &el3_interrupt; |
280 | link->irq.Instance = dev; | 280 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 32076ca6a9e1..a98fe07cce70 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c | |||
@@ -188,7 +188,7 @@ static int tc589_probe(struct pcmcia_device *link) | |||
188 | spin_lock_init(&lp->lock); | 188 | spin_lock_init(&lp->lock); |
189 | link->io.NumPorts1 = 16; | 189 | link->io.NumPorts1 = 16; |
190 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; | 190 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; |
191 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 191 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
192 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 192 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
193 | link->irq.Handler = &el3_interrupt; | 193 | link->irq.Handler = &el3_interrupt; |
194 | link->irq.Instance = dev; | 194 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index a95a2cae6b23..8d910a372f89 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -158,7 +158,7 @@ static int axnet_probe(struct pcmcia_device *link) | |||
158 | info = PRIV(dev); | 158 | info = PRIV(dev); |
159 | info->p_dev = link; | 159 | info->p_dev = link; |
160 | link->priv = dev; | 160 | link->priv = dev; |
161 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 161 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
162 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 162 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
163 | link->conf.Attributes = CONF_ENABLE_IRQ; | 163 | link->conf.Attributes = CONF_ENABLE_IRQ; |
164 | link->conf.IntType = INT_MEMORY_AND_IO; | 164 | link->conf.IntType = INT_MEMORY_AND_IO; |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 62844677c784..8c719b4df544 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -249,7 +249,7 @@ static int fmvj18x_probe(struct pcmcia_device *link) | |||
249 | link->io.IOAddrLines = 5; | 249 | link->io.IOAddrLines = 5; |
250 | 250 | ||
251 | /* Interrupt setup */ | 251 | /* Interrupt setup */ |
252 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 252 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
253 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 253 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
254 | link->irq.Handler = &fjn_interrupt; | 254 | link->irq.Handler = &fjn_interrupt; |
255 | link->irq.Instance = dev; | 255 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 9d45e9696e16..db6a97d1d7b1 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -254,7 +254,7 @@ static int pcnet_probe(struct pcmcia_device *link) | |||
254 | info->p_dev = link; | 254 | info->p_dev = link; |
255 | link->priv = dev; | 255 | link->priv = dev; |
256 | 256 | ||
257 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 257 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
258 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 258 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
259 | link->conf.Attributes = CONF_ENABLE_IRQ; | 259 | link->conf.Attributes = CONF_ENABLE_IRQ; |
260 | link->conf.IntType = INT_MEMORY_AND_IO; | 260 | link->conf.IntType = INT_MEMORY_AND_IO; |
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 58d716fd17cf..c9868e9dac4c 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c | |||
@@ -328,7 +328,7 @@ static int smc91c92_probe(struct pcmcia_device *link) | |||
328 | link->io.NumPorts1 = 16; | 328 | link->io.NumPorts1 = 16; |
329 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; | 329 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; |
330 | link->io.IOAddrLines = 4; | 330 | link->io.IOAddrLines = 4; |
331 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; | 331 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; |
332 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 332 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
333 | link->irq.Handler = &smc_interrupt; | 333 | link->irq.Handler = &smc_interrupt; |
334 | link->irq.Instance = dev; | 334 | link->irq.Instance = dev; |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index c3b69602e275..1f09bea6db5a 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -886,7 +886,7 @@ xirc2ps_config(struct pcmcia_device * link) | |||
886 | } | 886 | } |
887 | printk(KNOT_XIRC "no ports available\n"); | 887 | printk(KNOT_XIRC "no ports available\n"); |
888 | } else { | 888 | } else { |
889 | link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE; | 889 | link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING; |
890 | link->io.NumPorts1 = 16; | 890 | link->io.NumPorts1 = 16; |
891 | for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { | 891 | for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { |
892 | link->io.BasePort1 = ioaddr; | 892 | link->io.BasePort1 = ioaddr; |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index e8960f294a6e..b94fa7ef1955 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -392,7 +392,9 @@ struct rtl8169_private { | |||
392 | void __iomem *mmio_addr; /* memory map physical address */ | 392 | void __iomem *mmio_addr; /* memory map physical address */ |
393 | struct pci_dev *pci_dev; /* Index of PCI device */ | 393 | struct pci_dev *pci_dev; /* Index of PCI device */ |
394 | struct net_device *dev; | 394 | struct net_device *dev; |
395 | #ifdef CONFIG_R8169_NAPI | ||
395 | struct napi_struct napi; | 396 | struct napi_struct napi; |
397 | #endif | ||
396 | spinlock_t lock; /* spin lock flag */ | 398 | spinlock_t lock; /* spin lock flag */ |
397 | u32 msg_enable; | 399 | u32 msg_enable; |
398 | int chipset; | 400 | int chipset; |
@@ -2989,13 +2991,16 @@ static void rtl8169_down(struct net_device *dev) | |||
2989 | { | 2991 | { |
2990 | struct rtl8169_private *tp = netdev_priv(dev); | 2992 | struct rtl8169_private *tp = netdev_priv(dev); |
2991 | void __iomem *ioaddr = tp->mmio_addr; | 2993 | void __iomem *ioaddr = tp->mmio_addr; |
2992 | unsigned int poll_locked = 0; | ||
2993 | unsigned int intrmask; | 2994 | unsigned int intrmask; |
2994 | 2995 | ||
2995 | rtl8169_delete_timer(dev); | 2996 | rtl8169_delete_timer(dev); |
2996 | 2997 | ||
2997 | netif_stop_queue(dev); | 2998 | netif_stop_queue(dev); |
2998 | 2999 | ||
3000 | #ifdef CONFIG_R8169_NAPI | ||
3001 | napi_disable(&tp->napi); | ||
3002 | #endif | ||
3003 | |||
2999 | core_down: | 3004 | core_down: |
3000 | spin_lock_irq(&tp->lock); | 3005 | spin_lock_irq(&tp->lock); |
3001 | 3006 | ||
@@ -3009,11 +3014,6 @@ core_down: | |||
3009 | 3014 | ||
3010 | synchronize_irq(dev->irq); | 3015 | synchronize_irq(dev->irq); |
3011 | 3016 | ||
3012 | if (!poll_locked) { | ||
3013 | napi_disable(&tp->napi); | ||
3014 | poll_locked++; | ||
3015 | } | ||
3016 | |||
3017 | /* Give a racing hard_start_xmit a few cycles to complete. */ | 3017 | /* Give a racing hard_start_xmit a few cycles to complete. */ |
3018 | synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ | 3018 | synchronize_sched(); /* FIXME: should this be synchronize_irq()? */ |
3019 | 3019 | ||
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 9741d613ba6f..a3ff270593f1 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -2214,9 +2214,7 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2214 | struct dev_mc_list *dmi; | 2214 | struct dev_mc_list *dmi; |
2215 | struct ucc_fast *uf_regs; | 2215 | struct ucc_fast *uf_regs; |
2216 | struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; | 2216 | struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; |
2217 | u8 tempaddr[6]; | 2217 | int i; |
2218 | u8 *mcptr, *tdptr; | ||
2219 | int i, j; | ||
2220 | 2218 | ||
2221 | ugeth = netdev_priv(dev); | 2219 | ugeth = netdev_priv(dev); |
2222 | 2220 | ||
@@ -2255,19 +2253,10 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
2255 | if (!(dmi->dmi_addr[0] & 1)) | 2253 | if (!(dmi->dmi_addr[0] & 1)) |
2256 | continue; | 2254 | continue; |
2257 | 2255 | ||
2258 | /* The address in dmi_addr is LSB first, | ||
2259 | * and taddr is MSB first. We have to | ||
2260 | * copy bytes MSB first from dmi_addr. | ||
2261 | */ | ||
2262 | mcptr = (u8 *) dmi->dmi_addr + 5; | ||
2263 | tdptr = (u8 *) tempaddr; | ||
2264 | for (j = 0; j < 6; j++) | ||
2265 | *tdptr++ = *mcptr--; | ||
2266 | |||
2267 | /* Ask CPM to run CRC and set bit in | 2256 | /* Ask CPM to run CRC and set bit in |
2268 | * filter mask. | 2257 | * filter mask. |
2269 | */ | 2258 | */ |
2270 | hw_add_addr_in_hash(ugeth, tempaddr); | 2259 | hw_add_addr_in_hash(ugeth, dmi->dmi_addr); |
2271 | } | 2260 | } |
2272 | } | 2261 | } |
2273 | } | 2262 | } |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 9d9ff76a9bc6..5058e60e5703 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2391,7 +2391,7 @@ out_requeue: | |||
2391 | if (b43_debug(dev, B43_DBG_PWORK_FAST)) | 2391 | if (b43_debug(dev, B43_DBG_PWORK_FAST)) |
2392 | delay = msecs_to_jiffies(50); | 2392 | delay = msecs_to_jiffies(50); |
2393 | else | 2393 | else |
2394 | delay = round_jiffies(HZ * 15); | 2394 | delay = round_jiffies_relative(HZ * 15); |
2395 | queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); | 2395 | queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay); |
2396 | out: | 2396 | out: |
2397 | mutex_unlock(&wl->mutex); | 2397 | mutex_unlock(&wl->mutex); |
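This hunk and the similar ones below (b43legacy, ipw2100, ipw2200, rt2x00) fix a misuse of round_jiffies(): queue_delayed_work() takes a relative delay in jiffies, while round_jiffies() rounds an absolute jiffies timestamp, so rounding a small relative value as if it were absolute produces a meaningless delay. round_jiffies_relative() is the variant meant for delays. A short sketch of the intended usage of each (wq, dev and timer are placeholders):

	/* relative delay, e.g. for queue_delayed_work()/schedule_delayed_work() */
	queue_delayed_work(wq, &dev->periodic_work,
			   round_jiffies_relative(15 * HZ));

	/* absolute expiry time, e.g. for a timer */
	mod_timer(&timer, round_jiffies(jiffies + 15 * HZ));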
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index d09479e816cd..f0e56dfc9ecf 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -2260,7 +2260,7 @@ out_requeue: | |||
2260 | if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) | 2260 | if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST)) |
2261 | delay = msecs_to_jiffies(50); | 2261 | delay = msecs_to_jiffies(50); |
2262 | else | 2262 | else |
2263 | delay = round_jiffies(HZ); | 2263 | delay = round_jiffies_relative(HZ); |
2264 | queue_delayed_work(dev->wl->hw->workqueue, | 2264 | queue_delayed_work(dev->wl->hw->workqueue, |
2265 | &dev->periodic_work, delay); | 2265 | &dev->periodic_work, delay); |
2266 | out: | 2266 | out: |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index a6c7904de282..8d53d08b9691 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -1769,7 +1769,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) | |||
1769 | if (priv->stop_rf_kill) { | 1769 | if (priv->stop_rf_kill) { |
1770 | priv->stop_rf_kill = 0; | 1770 | priv->stop_rf_kill = 0; |
1771 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 1771 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
1772 | round_jiffies(HZ)); | 1772 | round_jiffies_relative(HZ)); |
1773 | } | 1773 | } |
1774 | 1774 | ||
1775 | deferred = 1; | 1775 | deferred = 1; |
@@ -2086,7 +2086,8 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | |||
2086 | /* Make sure the RF Kill check timer is running */ | 2086 | /* Make sure the RF Kill check timer is running */ |
2087 | priv->stop_rf_kill = 0; | 2087 | priv->stop_rf_kill = 0; |
2088 | cancel_delayed_work(&priv->rf_kill); | 2088 | cancel_delayed_work(&priv->rf_kill); |
2089 | queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ)); | 2089 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
2090 | round_jiffies_relative(HZ)); | ||
2090 | } | 2091 | } |
2091 | 2092 | ||
2092 | static void send_scan_event(void *data) | 2093 | static void send_scan_event(void *data) |
@@ -2123,7 +2124,7 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) | |||
2123 | if (!delayed_work_pending(&priv->scan_event_later)) | 2124 | if (!delayed_work_pending(&priv->scan_event_later)) |
2124 | queue_delayed_work(priv->workqueue, | 2125 | queue_delayed_work(priv->workqueue, |
2125 | &priv->scan_event_later, | 2126 | &priv->scan_event_later, |
2126 | round_jiffies(msecs_to_jiffies(4000))); | 2127 | round_jiffies_relative(msecs_to_jiffies(4000))); |
2127 | } else { | 2128 | } else { |
2128 | priv->user_requested_scan = 0; | 2129 | priv->user_requested_scan = 0; |
2129 | cancel_delayed_work(&priv->scan_event_later); | 2130 | cancel_delayed_work(&priv->scan_event_later); |
@@ -4242,7 +4243,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) | |||
4242 | priv->stop_rf_kill = 0; | 4243 | priv->stop_rf_kill = 0; |
4243 | cancel_delayed_work(&priv->rf_kill); | 4244 | cancel_delayed_work(&priv->rf_kill); |
4244 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 4245 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
4245 | round_jiffies(HZ)); | 4246 | round_jiffies_relative(HZ)); |
4246 | } else | 4247 | } else |
4247 | schedule_reset(priv); | 4248 | schedule_reset(priv); |
4248 | } | 4249 | } |
@@ -5981,7 +5982,7 @@ static void ipw2100_rf_kill(struct work_struct *work) | |||
5981 | IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); | 5982 | IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); |
5982 | if (!priv->stop_rf_kill) | 5983 | if (!priv->stop_rf_kill) |
5983 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 5984 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
5984 | round_jiffies(HZ)); | 5985 | round_jiffies_relative(HZ)); |
5985 | goto exit_unlock; | 5986 | goto exit_unlock; |
5986 | } | 5987 | } |
5987 | 5988 | ||
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index e3c828401b9a..54f44e5473c0 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -1753,7 +1753,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio) | |||
1753 | /* Make sure the RF_KILL check timer is running */ | 1753 | /* Make sure the RF_KILL check timer is running */ |
1754 | cancel_delayed_work(&priv->rf_kill); | 1754 | cancel_delayed_work(&priv->rf_kill); |
1755 | queue_delayed_work(priv->workqueue, &priv->rf_kill, | 1755 | queue_delayed_work(priv->workqueue, &priv->rf_kill, |
1756 | round_jiffies(2 * HZ)); | 1756 | round_jiffies_relative(2 * HZ)); |
1757 | } else | 1757 | } else |
1758 | queue_work(priv->workqueue, &priv->up); | 1758 | queue_work(priv->workqueue, &priv->up); |
1759 | } | 1759 | } |
@@ -4364,7 +4364,7 @@ static void handle_scan_event(struct ipw_priv *priv) | |||
4364 | if (!priv->user_requested_scan) { | 4364 | if (!priv->user_requested_scan) { |
4365 | if (!delayed_work_pending(&priv->scan_event)) | 4365 | if (!delayed_work_pending(&priv->scan_event)) |
4366 | queue_delayed_work(priv->workqueue, &priv->scan_event, | 4366 | queue_delayed_work(priv->workqueue, &priv->scan_event, |
4367 | round_jiffies(msecs_to_jiffies(4000))); | 4367 | round_jiffies_relative(msecs_to_jiffies(4000))); |
4368 | } else { | 4368 | } else { |
4369 | union iwreq_data wrqu; | 4369 | union iwreq_data wrqu; |
4370 | 4370 | ||
@@ -4728,7 +4728,7 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4728 | && priv->status & STATUS_ASSOCIATED) | 4728 | && priv->status & STATUS_ASSOCIATED) |
4729 | queue_delayed_work(priv->workqueue, | 4729 | queue_delayed_work(priv->workqueue, |
4730 | &priv->request_scan, | 4730 | &priv->request_scan, |
4731 | round_jiffies(HZ)); | 4731 | round_jiffies_relative(HZ)); |
4732 | 4732 | ||
4733 | /* Send an empty event to user space. | 4733 | /* Send an empty event to user space. |
4734 | * We don't send the received data on the event because | 4734 | * We don't send the received data on the event because |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c index 557deebca1b9..891f90d2f019 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | |||
@@ -3232,9 +3232,7 @@ int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd, | |||
3232 | tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, | 3232 | tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp, |
3233 | rate_flags); | 3233 | rate_flags); |
3234 | 3234 | ||
3235 | if (ieee80211_is_probe_request(fc)) | 3235 | if (ieee80211_is_back_request(fc)) |
3236 | tx->tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
3237 | else if (ieee80211_is_back_request(fc)) | ||
3238 | tx->tx_flags |= TX_CMD_FLG_ACK_MSK | | 3236 | tx->tx_flags |= TX_CMD_FLG_ACK_MSK | |
3239 | TX_CMD_FLG_IMM_BA_RSP_MASK; | 3237 | TX_CMD_FLG_IMM_BA_RSP_MASK; |
3240 | #ifdef CONFIG_IWLWIFI_HT | 3238 | #ifdef CONFIG_IWLWIFI_HT |
@@ -3872,7 +3870,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv, | |||
3872 | */ | 3870 | */ |
3873 | case IEEE80211_STYPE_ASSOC_RESP: | 3871 | case IEEE80211_STYPE_ASSOC_RESP: |
3874 | case IEEE80211_STYPE_REASSOC_RESP: | 3872 | case IEEE80211_STYPE_REASSOC_RESP: |
3875 | if (network_packet && iwl_is_associated(priv)) { | 3873 | if (network_packet) { |
3876 | #ifdef CONFIG_IWLWIFI_HT | 3874 | #ifdef CONFIG_IWLWIFI_HT |
3877 | u8 *pos = NULL; | 3875 | u8 *pos = NULL; |
3878 | struct ieee802_11_elems elems; | 3876 | struct ieee802_11_elems elems; |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 83019d1d7ccc..4f22a7174caf 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -6478,8 +6478,9 @@ static void iwl_bg_scan_check(struct work_struct *data) | |||
6478 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | 6478 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, |
6479 | "Scan completion watchdog resetting adapter (%dms)\n", | 6479 | "Scan completion watchdog resetting adapter (%dms)\n", |
6480 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | 6480 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); |
6481 | |||
6481 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | 6482 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) |
6482 | queue_work(priv->workqueue, &priv->restart); | 6483 | iwl_send_scan_abort(priv); |
6483 | } | 6484 | } |
6484 | mutex_unlock(&priv->mutex); | 6485 | mutex_unlock(&priv->mutex); |
6485 | } | 6486 | } |
@@ -6575,7 +6576,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6575 | spin_unlock_irqrestore(&priv->lock, flags); | 6576 | spin_unlock_irqrestore(&priv->lock, flags); |
6576 | 6577 | ||
6577 | scan->suspend_time = 0; | 6578 | scan->suspend_time = 0; |
6578 | scan->max_out_time = cpu_to_le32(600 * 1024); | 6579 | scan->max_out_time = cpu_to_le32(200 * 1024); |
6579 | if (!interval) | 6580 | if (!interval) |
6580 | interval = suspend_time; | 6581 | interval = suspend_time; |
6581 | /* | 6582 | /* |
@@ -6605,7 +6606,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6605 | memcpy(scan->direct_scan[0].ssid, | 6606 | memcpy(scan->direct_scan[0].ssid, |
6606 | priv->direct_ssid, priv->direct_ssid_len); | 6607 | priv->direct_ssid, priv->direct_ssid_len); |
6607 | direct_mask = 1; | 6608 | direct_mask = 1; |
6608 | } else if (!iwl_is_associated(priv)) { | 6609 | } else if (!iwl_is_associated(priv) && priv->essid_len) { |
6609 | scan->direct_scan[0].id = WLAN_EID_SSID; | 6610 | scan->direct_scan[0].id = WLAN_EID_SSID; |
6610 | scan->direct_scan[0].len = priv->essid_len; | 6611 | scan->direct_scan[0].len = priv->essid_len; |
6611 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | 6612 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); |
@@ -6744,6 +6745,12 @@ static void iwl_bg_post_associate(struct work_struct *data) | |||
6744 | 6745 | ||
6745 | mutex_lock(&priv->mutex); | 6746 | mutex_lock(&priv->mutex); |
6746 | 6747 | ||
6748 | if (!priv->interface_id || !priv->is_open) { | ||
6749 | mutex_unlock(&priv->mutex); | ||
6750 | return; | ||
6751 | } | ||
6752 | iwl_scan_cancel_timeout(priv, 200); | ||
6753 | |||
6747 | conf = ieee80211_get_hw_conf(priv->hw); | 6754 | conf = ieee80211_get_hw_conf(priv->hw); |
6748 | 6755 | ||
6749 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 6756 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
@@ -6882,9 +6889,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) | |||
6882 | struct iwl_priv *priv = hw->priv; | 6889 | struct iwl_priv *priv = hw->priv; |
6883 | 6890 | ||
6884 | IWL_DEBUG_MAC80211("enter\n"); | 6891 | IWL_DEBUG_MAC80211("enter\n"); |
6892 | |||
6893 | |||
6894 | mutex_lock(&priv->mutex); | ||
6895 | /* stop mac, cancel any scan request and clear | ||
6896 | * RXON_FILTER_ASSOC_MSK BIT | ||
6897 | */ | ||
6885 | priv->is_open = 0; | 6898 | priv->is_open = 0; |
6886 | /*netif_stop_queue(dev); */ | 6899 | iwl_scan_cancel_timeout(priv, 100); |
6887 | flush_workqueue(priv->workqueue); | 6900 | cancel_delayed_work(&priv->post_associate); |
6901 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
6902 | iwl_commit_rxon(priv); | ||
6903 | mutex_unlock(&priv->mutex); | ||
6904 | |||
6888 | IWL_DEBUG_MAC80211("leave\n"); | 6905 | IWL_DEBUG_MAC80211("leave\n"); |
6889 | } | 6906 | } |
6890 | 6907 | ||
@@ -7169,8 +7186,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7169 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 7186 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) |
7170 | iwl_config_ap(priv); | 7187 | iwl_config_ap(priv); |
7171 | else { | 7188 | else { |
7172 | priv->staging_rxon.filter_flags |= | ||
7173 | RXON_FILTER_ASSOC_MSK; | ||
7174 | rc = iwl_commit_rxon(priv); | 7189 | rc = iwl_commit_rxon(priv); |
7175 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | 7190 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) |
7176 | iwl_add_station(priv, | 7191 | iwl_add_station(priv, |
@@ -7178,6 +7193,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7178 | } | 7193 | } |
7179 | 7194 | ||
7180 | } else { | 7195 | } else { |
7196 | iwl_scan_cancel_timeout(priv, 100); | ||
7181 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7197 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
7182 | iwl_commit_rxon(priv); | 7198 | iwl_commit_rxon(priv); |
7183 | } | 7199 | } |
@@ -7217,6 +7233,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | |||
7217 | IWL_DEBUG_MAC80211("enter\n"); | 7233 | IWL_DEBUG_MAC80211("enter\n"); |
7218 | 7234 | ||
7219 | mutex_lock(&priv->mutex); | 7235 | mutex_lock(&priv->mutex); |
7236 | |||
7237 | iwl_scan_cancel_timeout(priv, 100); | ||
7238 | cancel_delayed_work(&priv->post_associate); | ||
7239 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7240 | iwl_commit_rxon(priv); | ||
7241 | |||
7220 | if (priv->interface_id == conf->if_id) { | 7242 | if (priv->interface_id == conf->if_id) { |
7221 | priv->interface_id = 0; | 7243 | priv->interface_id = 0; |
7222 | memset(priv->bssid, 0, ETH_ALEN); | 7244 | memset(priv->bssid, 0, ETH_ALEN); |
@@ -7238,6 +7260,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7238 | 7260 | ||
7239 | IWL_DEBUG_MAC80211("enter\n"); | 7261 | IWL_DEBUG_MAC80211("enter\n"); |
7240 | 7262 | ||
7263 | mutex_lock(&priv->mutex); | ||
7241 | spin_lock_irqsave(&priv->lock, flags); | 7264 | spin_lock_irqsave(&priv->lock, flags); |
7242 | 7265 | ||
7243 | if (!iwl_is_ready_rf(priv)) { | 7266 | if (!iwl_is_ready_rf(priv)) { |
@@ -7268,7 +7291,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7268 | priv->direct_ssid_len = (u8) | 7291 | priv->direct_ssid_len = (u8) |
7269 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | 7292 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); |
7270 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | 7293 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); |
7271 | } | 7294 | } else |
7295 | priv->one_direct_scan = 0; | ||
7272 | 7296 | ||
7273 | rc = iwl_scan_initiate(priv); | 7297 | rc = iwl_scan_initiate(priv); |
7274 | 7298 | ||
@@ -7276,6 +7300,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7276 | 7300 | ||
7277 | out_unlock: | 7301 | out_unlock: |
7278 | spin_unlock_irqrestore(&priv->lock, flags); | 7302 | spin_unlock_irqrestore(&priv->lock, flags); |
7303 | mutex_unlock(&priv->mutex); | ||
7279 | 7304 | ||
7280 | return rc; | 7305 | return rc; |
7281 | } | 7306 | } |
@@ -7310,6 +7335,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
7310 | 7335 | ||
7311 | mutex_lock(&priv->mutex); | 7336 | mutex_lock(&priv->mutex); |
7312 | 7337 | ||
7338 | iwl_scan_cancel_timeout(priv, 100); | ||
7339 | |||
7313 | switch (cmd) { | 7340 | switch (cmd) { |
7314 | case SET_KEY: | 7341 | case SET_KEY: |
7315 | rc = iwl_update_sta_key_info(priv, key, sta_id); | 7342 | rc = iwl_update_sta_key_info(priv, key, sta_id); |
@@ -7479,8 +7506,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | |||
7479 | 7506 | ||
7480 | spin_unlock_irqrestore(&priv->lock, flags); | 7507 | spin_unlock_irqrestore(&priv->lock, flags); |
7481 | 7508 | ||
7509 | /* we are restarting association process | ||
7510 | * clear RXON_FILTER_ASSOC_MSK bit | ||
7511 | */ | ||
7512 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
7513 | iwl_scan_cancel_timeout(priv, 100); | ||
7514 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7515 | iwl_commit_rxon(priv); | ||
7516 | } | ||
7517 | |||
7482 | /* Per mac80211.h: This is only used in IBSS mode... */ | 7518 | /* Per mac80211.h: This is only used in IBSS mode... */ |
7483 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | 7519 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { |
7520 | |||
7484 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | 7521 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); |
7485 | mutex_unlock(&priv->mutex); | 7522 | mutex_unlock(&priv->mutex); |
7486 | return; | 7523 | return; |
@@ -8558,6 +8595,9 @@ static void iwl_pci_remove(struct pci_dev *pdev) | |||
8558 | iwl_rate_control_unregister(priv->hw); | 8595 | iwl_rate_control_unregister(priv->hw); |
8559 | } | 8596 | } |
8560 | 8597 | ||
8598 | /*netif_stop_queue(dev); */ | ||
8599 | flush_workqueue(priv->workqueue); | ||
8600 | |||
8561 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | 8601 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes |
8562 | * priv->workqueue... so we can't take down the workqueue | 8602 | * priv->workqueue... so we can't take down the workqueue |
8563 | * until now... */ | 8603 | * until now... */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c index 5e1279263b22..d60adcb9bd4a 100644 --- a/drivers/net/wireless/iwlwifi/iwl4965-base.c +++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c | |||
@@ -6845,8 +6845,9 @@ static void iwl_bg_scan_check(struct work_struct *data) | |||
6845 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | 6845 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, |
6846 | "Scan completion watchdog resetting adapter (%dms)\n", | 6846 | "Scan completion watchdog resetting adapter (%dms)\n", |
6847 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | 6847 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); |
6848 | |||
6848 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | 6849 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) |
6849 | queue_work(priv->workqueue, &priv->restart); | 6850 | iwl_send_scan_abort(priv); |
6850 | } | 6851 | } |
6851 | mutex_unlock(&priv->mutex); | 6852 | mutex_unlock(&priv->mutex); |
6852 | } | 6853 | } |
@@ -6942,7 +6943,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6942 | spin_unlock_irqrestore(&priv->lock, flags); | 6943 | spin_unlock_irqrestore(&priv->lock, flags); |
6943 | 6944 | ||
6944 | scan->suspend_time = 0; | 6945 | scan->suspend_time = 0; |
6945 | scan->max_out_time = cpu_to_le32(600 * 1024); | 6946 | scan->max_out_time = cpu_to_le32(200 * 1024); |
6946 | if (!interval) | 6947 | if (!interval) |
6947 | interval = suspend_time; | 6948 | interval = suspend_time; |
6948 | 6949 | ||
@@ -6965,7 +6966,7 @@ static void iwl_bg_request_scan(struct work_struct *data) | |||
6965 | memcpy(scan->direct_scan[0].ssid, | 6966 | memcpy(scan->direct_scan[0].ssid, |
6966 | priv->direct_ssid, priv->direct_ssid_len); | 6967 | priv->direct_ssid, priv->direct_ssid_len); |
6967 | direct_mask = 1; | 6968 | direct_mask = 1; |
6968 | } else if (!iwl_is_associated(priv)) { | 6969 | } else if (!iwl_is_associated(priv) && priv->essid_len) { |
6969 | scan->direct_scan[0].id = WLAN_EID_SSID; | 6970 | scan->direct_scan[0].id = WLAN_EID_SSID; |
6970 | scan->direct_scan[0].len = priv->essid_len; | 6971 | scan->direct_scan[0].len = priv->essid_len; |
6971 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | 6972 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); |
@@ -7118,6 +7119,12 @@ static void iwl_bg_post_associate(struct work_struct *data) | |||
7118 | 7119 | ||
7119 | mutex_lock(&priv->mutex); | 7120 | mutex_lock(&priv->mutex); |
7120 | 7121 | ||
7122 | if (!priv->interface_id || !priv->is_open) { | ||
7123 | mutex_unlock(&priv->mutex); | ||
7124 | return; | ||
7125 | } | ||
7126 | iwl_scan_cancel_timeout(priv, 200); | ||
7127 | |||
7121 | conf = ieee80211_get_hw_conf(priv->hw); | 7128 | conf = ieee80211_get_hw_conf(priv->hw); |
7122 | 7129 | ||
7123 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7130 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
@@ -7271,9 +7278,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) | |||
7271 | struct iwl_priv *priv = hw->priv; | 7278 | struct iwl_priv *priv = hw->priv; |
7272 | 7279 | ||
7273 | IWL_DEBUG_MAC80211("enter\n"); | 7280 | IWL_DEBUG_MAC80211("enter\n"); |
7281 | |||
7282 | |||
7283 | mutex_lock(&priv->mutex); | ||
7284 | /* stop mac, cancel any scan request and clear | ||
7285 | * RXON_FILTER_ASSOC_MSK BIT | ||
7286 | */ | ||
7274 | priv->is_open = 0; | 7287 | priv->is_open = 0; |
7275 | /*netif_stop_queue(dev); */ | 7288 | iwl_scan_cancel_timeout(priv, 100); |
7276 | flush_workqueue(priv->workqueue); | 7289 | cancel_delayed_work(&priv->post_associate); |
7290 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7291 | iwl_commit_rxon(priv); | ||
7292 | mutex_unlock(&priv->mutex); | ||
7293 | |||
7277 | IWL_DEBUG_MAC80211("leave\n"); | 7294 | IWL_DEBUG_MAC80211("leave\n"); |
7278 | } | 7295 | } |
7279 | 7296 | ||
@@ -7573,8 +7590,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7573 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | 7590 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) |
7574 | iwl_config_ap(priv); | 7591 | iwl_config_ap(priv); |
7575 | else { | 7592 | else { |
7576 | priv->staging_rxon.filter_flags |= | ||
7577 | RXON_FILTER_ASSOC_MSK; | ||
7578 | rc = iwl_commit_rxon(priv); | 7593 | rc = iwl_commit_rxon(priv); |
7579 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | 7594 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) |
7580 | iwl_rxon_add_station( | 7595 | iwl_rxon_add_station( |
@@ -7582,6 +7597,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | |||
7582 | } | 7597 | } |
7583 | 7598 | ||
7584 | } else { | 7599 | } else { |
7600 | iwl_scan_cancel_timeout(priv, 100); | ||
7585 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 7601 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
7586 | iwl_commit_rxon(priv); | 7602 | iwl_commit_rxon(priv); |
7587 | } | 7603 | } |
@@ -7621,6 +7637,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | |||
7621 | IWL_DEBUG_MAC80211("enter\n"); | 7637 | IWL_DEBUG_MAC80211("enter\n"); |
7622 | 7638 | ||
7623 | mutex_lock(&priv->mutex); | 7639 | mutex_lock(&priv->mutex); |
7640 | |||
7641 | iwl_scan_cancel_timeout(priv, 100); | ||
7642 | cancel_delayed_work(&priv->post_associate); | ||
7643 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7644 | iwl_commit_rxon(priv); | ||
7645 | |||
7624 | if (priv->interface_id == conf->if_id) { | 7646 | if (priv->interface_id == conf->if_id) { |
7625 | priv->interface_id = 0; | 7647 | priv->interface_id = 0; |
7626 | memset(priv->bssid, 0, ETH_ALEN); | 7648 | memset(priv->bssid, 0, ETH_ALEN); |
@@ -7642,6 +7664,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7642 | 7664 | ||
7643 | IWL_DEBUG_MAC80211("enter\n"); | 7665 | IWL_DEBUG_MAC80211("enter\n"); |
7644 | 7666 | ||
7667 | mutex_lock(&priv->mutex); | ||
7645 | spin_lock_irqsave(&priv->lock, flags); | 7668 | spin_lock_irqsave(&priv->lock, flags); |
7646 | 7669 | ||
7647 | if (!iwl_is_ready_rf(priv)) { | 7670 | if (!iwl_is_ready_rf(priv)) { |
@@ -7672,7 +7695,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7672 | priv->direct_ssid_len = (u8) | 7695 | priv->direct_ssid_len = (u8) |
7673 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | 7696 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); |
7674 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | 7697 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); |
7675 | } | 7698 | } else |
7699 | priv->one_direct_scan = 0; | ||
7676 | 7700 | ||
7677 | rc = iwl_scan_initiate(priv); | 7701 | rc = iwl_scan_initiate(priv); |
7678 | 7702 | ||
@@ -7680,6 +7704,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | |||
7680 | 7704 | ||
7681 | out_unlock: | 7705 | out_unlock: |
7682 | spin_unlock_irqrestore(&priv->lock, flags); | 7706 | spin_unlock_irqrestore(&priv->lock, flags); |
7707 | mutex_unlock(&priv->mutex); | ||
7683 | 7708 | ||
7684 | return rc; | 7709 | return rc; |
7685 | } | 7710 | } |
@@ -7713,6 +7738,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | |||
7713 | 7738 | ||
7714 | mutex_lock(&priv->mutex); | 7739 | mutex_lock(&priv->mutex); |
7715 | 7740 | ||
7741 | iwl_scan_cancel_timeout(priv, 100); | ||
7742 | |||
7716 | switch (cmd) { | 7743 | switch (cmd) { |
7717 | case SET_KEY: | 7744 | case SET_KEY: |
7718 | rc = iwl_update_sta_key_info(priv, key, sta_id); | 7745 | rc = iwl_update_sta_key_info(priv, key, sta_id); |
@@ -7903,8 +7930,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | |||
7903 | 7930 | ||
7904 | spin_unlock_irqrestore(&priv->lock, flags); | 7931 | spin_unlock_irqrestore(&priv->lock, flags); |
7905 | 7932 | ||
7933 | /* we are restarting association process | ||
7934 | * clear RXON_FILTER_ASSOC_MSK bit | ||
7935 | */ | ||
7936 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
7937 | iwl_scan_cancel_timeout(priv, 100); | ||
7938 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7939 | iwl_commit_rxon(priv); | ||
7940 | } | ||
7941 | |||
7906 | /* Per mac80211.h: This is only used in IBSS mode... */ | 7942 | /* Per mac80211.h: This is only used in IBSS mode... */ |
7907 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | 7943 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { |
7944 | |||
7908 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | 7945 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); |
7909 | mutex_unlock(&priv->mutex); | 7946 | mutex_unlock(&priv->mutex); |
7910 | return; | 7947 | return; |
@@ -9152,6 +9189,9 @@ static void iwl_pci_remove(struct pci_dev *pdev) | |||
9152 | iwl_rate_control_unregister(priv->hw); | 9189 | iwl_rate_control_unregister(priv->hw); |
9153 | } | 9190 | } |
9154 | 9191 | ||
9192 | /*netif_stop_queue(dev); */ | ||
9193 | flush_workqueue(priv->workqueue); | ||
9194 | |||
9155 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | 9195 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes |
9156 | * priv->workqueue... so we can't take down the workqueue | 9196 | * priv->workqueue... so we can't take down the workqueue |
9157 | * until now... */ | 9197 | * until now... */ |
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 298faa9d3f61..06d9bc0015c0 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h | |||
@@ -30,7 +30,7 @@ | |||
30 | * Interval defines | 30 | * Interval defines |
31 | * Both the link tuner as the rfkill will be called once per second. | 31 | * Both the link tuner as the rfkill will be called once per second. |
32 | */ | 32 | */ |
33 | #define LINK_TUNE_INTERVAL ( round_jiffies(HZ) ) | 33 | #define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) ) |
34 | #define RFKILL_POLL_INTERVAL ( 1000 ) | 34 | #define RFKILL_POLL_INTERVAL ( 1000 ) |
35 | 35 | ||
36 | /* | 36 | /* |
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c index de61c8fe6492..e454ae83e97a 100644 --- a/drivers/net/wireless/rtl8187_dev.c +++ b/drivers/net/wireless/rtl8187_dev.c | |||
@@ -433,6 +433,9 @@ static int rtl8187_start(struct ieee80211_hw *dev) | |||
433 | 433 | ||
434 | rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); | 434 | rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); |
435 | 435 | ||
436 | rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); | ||
437 | rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); | ||
438 | |||
436 | rtl8187_init_urbs(dev); | 439 | rtl8187_init_urbs(dev); |
437 | 440 | ||
438 | reg = RTL818X_RX_CONF_ONLYERLPKT | | 441 | reg = RTL818X_RX_CONF_ONLYERLPKT | |
@@ -582,32 +585,31 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id, | |||
582 | static void rtl8187_configure_filter(struct ieee80211_hw *dev, | 585 | static void rtl8187_configure_filter(struct ieee80211_hw *dev, |
583 | unsigned int changed_flags, | 586 | unsigned int changed_flags, |
584 | unsigned int *total_flags, | 587 | unsigned int *total_flags, |
585 | int mc_count, struct dev_addr_list *mc_list) | 588 | int mc_count, struct dev_addr_list *mclist) |
586 | { | 589 | { |
587 | struct rtl8187_priv *priv = dev->priv; | 590 | struct rtl8187_priv *priv = dev->priv; |
588 | 591 | ||
589 | *total_flags = 0; | ||
590 | |||
591 | if (changed_flags & FIF_ALLMULTI) | ||
592 | priv->rx_conf ^= RTL818X_RX_CONF_MULTICAST; | ||
593 | if (changed_flags & FIF_FCSFAIL) | 592 | if (changed_flags & FIF_FCSFAIL) |
594 | priv->rx_conf ^= RTL818X_RX_CONF_FCS; | 593 | priv->rx_conf ^= RTL818X_RX_CONF_FCS; |
595 | if (changed_flags & FIF_CONTROL) | 594 | if (changed_flags & FIF_CONTROL) |
596 | priv->rx_conf ^= RTL818X_RX_CONF_CTRL; | 595 | priv->rx_conf ^= RTL818X_RX_CONF_CTRL; |
597 | if (changed_flags & FIF_OTHER_BSS) | 596 | if (changed_flags & FIF_OTHER_BSS) |
598 | priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; | 597 | priv->rx_conf ^= RTL818X_RX_CONF_MONITOR; |
599 | 598 | if (*total_flags & FIF_ALLMULTI || mc_count > 0) | |
600 | if (mc_count > 0) | ||
601 | priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; | 599 | priv->rx_conf |= RTL818X_RX_CONF_MULTICAST; |
600 | else | ||
601 | priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST; | ||
602 | |||
603 | *total_flags = 0; | ||
602 | 604 | ||
603 | if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) | ||
604 | *total_flags |= FIF_ALLMULTI; | ||
605 | if (priv->rx_conf & RTL818X_RX_CONF_FCS) | 605 | if (priv->rx_conf & RTL818X_RX_CONF_FCS) |
606 | *total_flags |= FIF_FCSFAIL; | 606 | *total_flags |= FIF_FCSFAIL; |
607 | if (priv->rx_conf & RTL818X_RX_CONF_CTRL) | 607 | if (priv->rx_conf & RTL818X_RX_CONF_CTRL) |
608 | *total_flags |= FIF_CONTROL; | 608 | *total_flags |= FIF_CONTROL; |
609 | if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) | 609 | if (priv->rx_conf & RTL818X_RX_CONF_MONITOR) |
610 | *total_flags |= FIF_OTHER_BSS; | 610 | *total_flags |= FIF_OTHER_BSS; |
611 | if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST) | ||
612 | *total_flags |= FIF_ALLMULTI; | ||
611 | 613 | ||
612 | rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); | 614 | rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); |
613 | } | 615 | } |
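Editorial note on the rtl8187 rework: it follows the mac80211 configure_filter contract of toggling the hardware bits for whichever flags changed and then rewriting *total_flags so it reports only what the hardware actually honours. A hedged sketch of that report-back idiom with hypothetical names (priv->rx_conf layout and MY_RX_ACCEPT_MCAST are illustrative):

if (*total_flags & FIF_ALLMULTI || mc_count > 0)
	priv->rx_conf |= MY_RX_ACCEPT_MCAST;	/* accept all multicast */
else
	priv->rx_conf &= ~MY_RX_ACCEPT_MCAST;

*total_flags = 0;				/* report back what is really enabled */
if (priv->rx_conf & MY_RX_ACCEPT_MCAST)
	*total_flags |= FIF_ALLMULTI;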
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 0c4ab3b07274..9b35259eecfa 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -745,7 +745,7 @@ static char *fault_reason_strings[] = | |||
745 | "non-zero reserved fields in PTE", | 745 | "non-zero reserved fields in PTE", |
746 | "Unknown" | 746 | "Unknown" |
747 | }; | 747 | }; |
748 | #define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) | 748 | #define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) - 1 |
749 | 749 | ||
750 | char *dmar_get_fault_reason(u8 fault_reason) | 750 | char *dmar_get_fault_reason(u8 fault_reason) |
751 | { | 751 | { |
@@ -995,7 +995,6 @@ static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd) | |||
995 | return iommu; | 995 | return iommu; |
996 | error_unmap: | 996 | error_unmap: |
997 | iounmap(iommu->reg); | 997 | iounmap(iommu->reg); |
998 | iommu->reg = 0; | ||
999 | error: | 998 | error: |
1000 | kfree(iommu); | 999 | kfree(iommu); |
1001 | return NULL; | 1000 | return NULL; |
@@ -1808,7 +1807,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1808 | if (!domain) { | 1807 | if (!domain) { |
1809 | printk(KERN_ERR | 1808 | printk(KERN_ERR |
1810 | "Allocating domain for %s failed", pci_name(pdev)); | 1809 | "Allocating domain for %s failed", pci_name(pdev)); |
1811 | return 0; | 1810 | return NULL; |
1812 | } | 1811 | } |
1813 | 1812 | ||
1814 | /* make sure context mapping is ok */ | 1813 | /* make sure context mapping is ok */ |
@@ -1818,7 +1817,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev) | |||
1818 | printk(KERN_ERR | 1817 | printk(KERN_ERR |
1819 | "Domain context map for %s failed", | 1818 | "Domain context map for %s failed", |
1820 | pci_name(pdev)); | 1819 | pci_name(pdev)); |
1821 | return 0; | 1820 | return NULL; |
1822 | } | 1821 | } |
1823 | } | 1822 | } |
1824 | 1823 | ||
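Editorial note on the intel-iommu fixes above: two of them share a shape. ARRAY_SIZE() yields the element count, so the last valid index is ARRAY_SIZE() - 1, and functions that return pointers should return NULL rather than 0. A small hedged sketch of the bounded lookup (the table and helper are hypothetical):

static const char *reason_strings[] = { "ok", "bad PTE", "Unknown" };
#define MAX_REASON_IDX	(ARRAY_SIZE(reason_strings) - 1)

static const char *lookup_reason(unsigned int idx)
{
	/* Clamp out-of-range fault codes to the trailing "Unknown" entry
	 * instead of reading one element past the end of the array. */
	if (idx > MAX_REASON_IDX)
		idx = MAX_REASON_IDX;
	return reason_strings[idx];
}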
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h index ee88dd2400cb..459ad1f9dc54 100644 --- a/drivers/pci/intel-iommu.h +++ b/drivers/pci/intel-iommu.h | |||
@@ -58,7 +58,7 @@ | |||
58 | hi = readl(dmar + reg + 4); \ | 58 | hi = readl(dmar + reg + 4); \ |
59 | (((u64) hi) << 32) + lo; }) | 59 | (((u64) hi) << 32) + lo; }) |
60 | */ | 60 | */ |
61 | static inline u64 dmar_readq(void *addr) | 61 | static inline u64 dmar_readq(void __iomem *addr) |
62 | { | 62 | { |
63 | u32 lo, hi; | 63 | u32 lo, hi; |
64 | lo = readl(addr); | 64 | lo = readl(addr); |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 0754542978b6..e268f79bdbd2 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -70,11 +70,12 @@ zfcp_sg_to_address(struct scatterlist *list) | |||
70 | * zfcp_address_to_sg - set up struct scatterlist from kernel address | 70 | * zfcp_address_to_sg - set up struct scatterlist from kernel address |
71 | * @address: kernel address | 71 | * @address: kernel address |
72 | * @list: struct scatterlist | 72 | * @list: struct scatterlist |
73 | * @size: buffer size | ||
73 | */ | 74 | */ |
74 | static inline void | 75 | static inline void |
75 | zfcp_address_to_sg(void *address, struct scatterlist *list) | 76 | zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size) |
76 | { | 77 | { |
77 | sg_set_buf(list, address, 0); | 78 | sg_set_buf(list, address, size); |
78 | } | 79 | } |
79 | 80 | ||
80 | #define REQUEST_LIST_SIZE 128 | 81 | #define REQUEST_LIST_SIZE 128 |
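Editorial note on the zfcp helper: it now takes the buffer size and passes it to sg_set_buf() instead of hard-coding 0, so the scatterlist entry describes the real length. A hedged usage sketch (sg, rsp and the sizeof expression are illustrative):

struct scatterlist sg;

sg_init_table(&sg, 1);
zfcp_address_to_sg(rsp, &sg, sizeof(*rsp));	/* length recorded, no longer 0 */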
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index 72b0393b4596..1e6d7a9c75bf 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -391,7 +391,7 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) | |||
391 | /* | 391 | /* |
392 | * Extract the fibctx from the input parameters | 392 | * Extract the fibctx from the input parameters |
393 | */ | 393 | */ |
394 | if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */ | 394 | if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ |
395 | break; | 395 | break; |
396 | entry = entry->next; | 396 | entry = entry->next; |
397 | fibctx = NULL; | 397 | fibctx = NULL; |
@@ -590,7 +590,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
590 | } | 590 | } |
591 | addr = (u64)upsg->sg[i].addr[0]; | 591 | addr = (u64)upsg->sg[i].addr[0]; |
592 | addr += ((u64)upsg->sg[i].addr[1]) << 32; | 592 | addr += ((u64)upsg->sg[i].addr[1]) << 32; |
593 | sg_user[i] = (void __user *)(ptrdiff_t)addr; | 593 | sg_user[i] = (void __user *)(uintptr_t)addr; |
594 | sg_list[i] = p; // save so we can clean up later | 594 | sg_list[i] = p; // save so we can clean up later |
595 | sg_indx = i; | 595 | sg_indx = i; |
596 | 596 | ||
@@ -633,7 +633,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
633 | rcode = -ENOMEM; | 633 | rcode = -ENOMEM; |
634 | goto cleanup; | 634 | goto cleanup; |
635 | } | 635 | } |
636 | sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr; | 636 | sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; |
637 | sg_list[i] = p; // save so we can clean up later | 637 | sg_list[i] = p; // save so we can clean up later |
638 | sg_indx = i; | 638 | sg_indx = i; |
639 | 639 | ||
@@ -664,7 +664,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
664 | if (actual_fibsize64 == fibsize) { | 664 | if (actual_fibsize64 == fibsize) { |
665 | struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; | 665 | struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; |
666 | for (i = 0; i < upsg->count; i++) { | 666 | for (i = 0; i < upsg->count; i++) { |
667 | u64 addr; | 667 | uintptr_t addr; |
668 | void* p; | 668 | void* p; |
669 | /* Does this really need to be GFP_DMA? */ | 669 | /* Does this really need to be GFP_DMA? */ |
670 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); | 670 | p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); |
@@ -676,7 +676,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
676 | } | 676 | } |
677 | addr = (u64)usg->sg[i].addr[0]; | 677 | addr = (u64)usg->sg[i].addr[0]; |
678 | addr += ((u64)usg->sg[i].addr[1]) << 32; | 678 | addr += ((u64)usg->sg[i].addr[1]) << 32; |
679 | sg_user[i] = (void __user *)(ptrdiff_t)addr; | 679 | sg_user[i] = (void __user *)addr; |
680 | sg_list[i] = p; // save so we can clean up later | 680 | sg_list[i] = p; // save so we can clean up later |
681 | sg_indx = i; | 681 | sg_indx = i; |
682 | 682 | ||
@@ -704,7 +704,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) | |||
704 | rcode = -ENOMEM; | 704 | rcode = -ENOMEM; |
705 | goto cleanup; | 705 | goto cleanup; |
706 | } | 706 | } |
707 | sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr; | 707 | sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; |
708 | sg_list[i] = p; // save so we can clean up later | 708 | sg_list[i] = p; // save so we can clean up later |
709 | sg_indx = i; | 709 | sg_indx = i; |
710 | 710 | ||
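Editorial note on the aacraid conversions (and the matching spidev ones further down): they replace ptrdiff_t with uintptr_t when turning an integer back into a pointer. ptrdiff_t is only defined for the difference of two pointers, whereas uintptr_t is the unsigned integer type specified to round-trip a pointer value, so it is the correct intermediate cast. A minimal hedged sketch (the helper name is hypothetical):

static void __user *example_u64_to_user_ptr(u64 addr)
{
	/* Cast through uintptr_t so the integer-to-pointer conversion is
	 * well defined on both 32-bit and 64-bit kernels. */
	return (void __user *)(uintptr_t)addr;
}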
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 3009ad8c4073..8736813a0296 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co | |||
110 | /* | 110 | /* |
111 | * Align the beginning of Headers to commalign | 111 | * Align the beginning of Headers to commalign |
112 | */ | 112 | */ |
113 | align = (commalign - ((ptrdiff_t)(base) & (commalign - 1))); | 113 | align = (commalign - ((uintptr_t)(base) & (commalign - 1))); |
114 | base = base + align; | 114 | base = base + align; |
115 | phys = phys + align; | 115 | phys = phys + align; |
116 | /* | 116 | /* |
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index fcd25f7d0bc6..e6032ffc66a6 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -254,7 +254,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index) | |||
254 | kfree (fib); | 254 | kfree (fib); |
255 | return 1; | 255 | return 1; |
256 | } | 256 | } |
257 | memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) + | 257 | memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) + |
258 | (index & ~0x00000002L)), sizeof(struct hw_fib)); | 258 | (index & ~0x00000002L)), sizeof(struct hw_fib)); |
259 | INIT_LIST_HEAD(&fib->fiblink); | 259 | INIT_LIST_HEAD(&fib->fiblink); |
260 | fib->type = FSAFS_NTC_FIB_CONTEXT; | 260 | fib->type = FSAFS_NTC_FIB_CONTEXT; |
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index ace7a15b413e..a67e29f83ae5 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -141,14 +141,14 @@ struct CMD_MESSAGE_FIELD | |||
141 | #define IS_SG64_ADDR 0x01000000 /* bit24 */ | 141 | #define IS_SG64_ADDR 0x01000000 /* bit24 */ |
142 | struct SG32ENTRY | 142 | struct SG32ENTRY |
143 | { | 143 | { |
144 | uint32_t length; | 144 | __le32 length; |
145 | uint32_t address; | 145 | __le32 address; |
146 | }; | 146 | }; |
147 | struct SG64ENTRY | 147 | struct SG64ENTRY |
148 | { | 148 | { |
149 | uint32_t length; | 149 | __le32 length; |
150 | uint32_t address; | 150 | __le32 address; |
151 | uint32_t addresshigh; | 151 | __le32 addresshigh; |
152 | }; | 152 | }; |
153 | struct SGENTRY_UNION | 153 | struct SGENTRY_UNION |
154 | { | 154 | { |
@@ -339,23 +339,15 @@ struct MessageUnit_B | |||
339 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; | 339 | uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE]; |
340 | uint32_t postq_index; | 340 | uint32_t postq_index; |
341 | uint32_t doneq_index; | 341 | uint32_t doneq_index; |
342 | uint32_t *drv2iop_doorbell_reg; | 342 | uint32_t __iomem *drv2iop_doorbell_reg; |
343 | uint32_t *drv2iop_doorbell_mask_reg; | 343 | uint32_t __iomem *drv2iop_doorbell_mask_reg; |
344 | uint32_t *iop2drv_doorbell_reg; | 344 | uint32_t __iomem *iop2drv_doorbell_reg; |
345 | uint32_t *iop2drv_doorbell_mask_reg; | 345 | uint32_t __iomem *iop2drv_doorbell_mask_reg; |
346 | uint32_t *msgcode_rwbuffer_reg; | 346 | uint32_t __iomem *msgcode_rwbuffer_reg; |
347 | uint32_t *ioctl_wbuffer_reg; | 347 | uint32_t __iomem *ioctl_wbuffer_reg; |
348 | uint32_t *ioctl_rbuffer_reg; | 348 | uint32_t __iomem *ioctl_rbuffer_reg; |
349 | }; | 349 | }; |
350 | 350 | ||
351 | struct MessageUnit | ||
352 | { | ||
353 | union | ||
354 | { | ||
355 | struct MessageUnit_A pmu_A; | ||
356 | struct MessageUnit_B pmu_B; | ||
357 | } u; | ||
358 | }; | ||
359 | /* | 351 | /* |
360 | ******************************************************************************* | 352 | ******************************************************************************* |
361 | ** Adapter Control Block | 353 | ** Adapter Control Block |
@@ -374,7 +366,10 @@ struct AdapterControlBlock | |||
374 | /* Offset is used in making arc cdb physical to virtual calculations */ | 366 | /* Offset is used in making arc cdb physical to virtual calculations */ |
375 | uint32_t outbound_int_enable; | 367 | uint32_t outbound_int_enable; |
376 | 368 | ||
377 | struct MessageUnit * pmu; | 369 | union { |
370 | struct MessageUnit_A __iomem * pmuA; | ||
371 | struct MessageUnit_B * pmuB; | ||
372 | }; | ||
378 | /* message unit ATU inbound base address0 */ | 373 | /* message unit ATU inbound base address0 */ |
379 | 374 | ||
380 | uint32_t acb_flags; | 375 | uint32_t acb_flags; |
@@ -558,7 +553,7 @@ struct SENSE_DATA | |||
558 | 553 | ||
559 | extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *); | 554 | extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *); |
560 | extern void arcmsr_iop_message_read(struct AdapterControlBlock *); | 555 | extern void arcmsr_iop_message_read(struct AdapterControlBlock *); |
561 | extern struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); | 556 | extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *); |
562 | extern struct class_device_attribute *arcmsr_host_attrs[]; | 557 | extern struct class_device_attribute *arcmsr_host_attrs[]; |
563 | extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); | 558 | extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *); |
564 | void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); | 559 | void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); |
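Editorial note on the arcmsr header changes: adapter-visible fields become __le32 and MMIO pointers become __iomem, letting sparse flag missing endian conversions and direct dereferences of device memory. A hedged sketch of filling one SG64ENTRY with the annotations respected (example_fill_sg is an illustrative helper, not driver code):

static void example_fill_sg(struct SG64ENTRY *sg, dma_addr_t addr, u32 len)
{
	sg->address     = cpu_to_le32((u32)addr);
	sg->addresshigh = cpu_to_le32((u32)((u64)addr >> 32));
	sg->length      = cpu_to_le32(len | IS_SG64_ADDR);
}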
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c index d04d1aa28fa4..7d7b0a554276 100644 --- a/drivers/scsi/arcmsr/arcmsr_attr.c +++ b/drivers/scsi/arcmsr/arcmsr_attr.c | |||
@@ -85,13 +85,13 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj, | |||
85 | allxfer_len++; | 85 | allxfer_len++; |
86 | } | 86 | } |
87 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 87 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
88 | struct QBUFFER *prbuffer; | 88 | struct QBUFFER __iomem *prbuffer; |
89 | uint8_t *iop_data; | 89 | uint8_t __iomem *iop_data; |
90 | int32_t iop_len; | 90 | int32_t iop_len; |
91 | 91 | ||
92 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 92 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
93 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 93 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
94 | iop_data = (uint8_t *)prbuffer->data; | 94 | iop_data = prbuffer->data; |
95 | iop_len = readl(&prbuffer->data_len); | 95 | iop_len = readl(&prbuffer->data_len); |
96 | while (iop_len > 0) { | 96 | while (iop_len > 0) { |
97 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); | 97 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index f7a252885a5c..d466a2dac1db 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -236,18 +236,22 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
236 | uint32_t intmask_org; | 236 | uint32_t intmask_org; |
237 | int i, j; | 237 | int i, j; |
238 | 238 | ||
239 | acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); | 239 | acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); |
240 | if (!acb->pmu) { | 240 | if (!acb->pmuA) { |
241 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", | 241 | printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", |
242 | acb->host->host_no); | 242 | acb->host->host_no); |
243 | return -ENOMEM; | ||
243 | } | 244 | } |
244 | 245 | ||
245 | dma_coherent = dma_alloc_coherent(&pdev->dev, | 246 | dma_coherent = dma_alloc_coherent(&pdev->dev, |
246 | ARCMSR_MAX_FREECCB_NUM * | 247 | ARCMSR_MAX_FREECCB_NUM * |
247 | sizeof (struct CommandControlBlock) + 0x20, | 248 | sizeof (struct CommandControlBlock) + 0x20, |
248 | &dma_coherent_handle, GFP_KERNEL); | 249 | &dma_coherent_handle, GFP_KERNEL); |
249 | if (!dma_coherent) | 250 | |
251 | if (!dma_coherent) { | ||
252 | iounmap(acb->pmuA); | ||
250 | return -ENOMEM; | 253 | return -ENOMEM; |
254 | } | ||
251 | 255 | ||
252 | acb->dma_coherent = dma_coherent; | 256 | acb->dma_coherent = dma_coherent; |
253 | acb->dma_coherent_handle = dma_coherent_handle; | 257 | acb->dma_coherent_handle = dma_coherent_handle; |
@@ -287,7 +291,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
287 | 291 | ||
288 | struct pci_dev *pdev = acb->pdev; | 292 | struct pci_dev *pdev = acb->pdev; |
289 | struct MessageUnit_B *reg; | 293 | struct MessageUnit_B *reg; |
290 | void *mem_base0, *mem_base1; | 294 | void __iomem *mem_base0, *mem_base1; |
291 | void *dma_coherent; | 295 | void *dma_coherent; |
292 | dma_addr_t dma_coherent_handle, dma_addr; | 296 | dma_addr_t dma_coherent_handle, dma_addr; |
293 | uint32_t intmask_org; | 297 | uint32_t intmask_org; |
@@ -328,25 +332,28 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
328 | 332 | ||
329 | reg = (struct MessageUnit_B *)(dma_coherent + | 333 | reg = (struct MessageUnit_B *)(dma_coherent + |
330 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); | 334 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock)); |
331 | acb->pmu = (struct MessageUnit *)reg; | 335 | acb->pmuB = reg; |
332 | mem_base0 = ioremap(pci_resource_start(pdev, 0), | 336 | mem_base0 = ioremap(pci_resource_start(pdev, 0), |
333 | pci_resource_len(pdev, 0)); | 337 | pci_resource_len(pdev, 0)); |
338 | if (!mem_base0) | ||
339 | goto out; | ||
340 | |||
334 | mem_base1 = ioremap(pci_resource_start(pdev, 2), | 341 | mem_base1 = ioremap(pci_resource_start(pdev, 2), |
335 | pci_resource_len(pdev, 2)); | 342 | pci_resource_len(pdev, 2)); |
336 | reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 + | 343 | if (!mem_base1) { |
337 | ARCMSR_DRV2IOP_DOORBELL); | 344 | iounmap(mem_base0); |
338 | reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + | 345 | goto out; |
339 | ARCMSR_DRV2IOP_DOORBELL_MASK); | 346 | } |
340 | reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 + | 347 | |
341 | ARCMSR_IOP2DRV_DOORBELL); | 348 | reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL; |
342 | reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 + | 349 | reg->drv2iop_doorbell_mask_reg = mem_base0 + |
343 | ARCMSR_IOP2DRV_DOORBELL_MASK); | 350 | ARCMSR_DRV2IOP_DOORBELL_MASK; |
344 | reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 + | 351 | reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL; |
345 | ARCMSR_IOCTL_WBUFFER); | 352 | reg->iop2drv_doorbell_mask_reg = mem_base0 + |
346 | reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 + | 353 | ARCMSR_IOP2DRV_DOORBELL_MASK; |
347 | ARCMSR_IOCTL_RBUFFER); | 354 | reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER; |
348 | reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 + | 355 | reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER; |
349 | ARCMSR_MSGCODE_RWBUFFER); | 356 | reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER; |
350 | 357 | ||
351 | acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; | 358 | acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr; |
352 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) | 359 | for (i = 0; i < ARCMSR_MAX_TARGETID; i++) |
@@ -362,6 +369,12 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) | |||
362 | break; | 369 | break; |
363 | } | 370 | } |
364 | return 0; | 371 | return 0; |
372 | |||
373 | out: | ||
374 | dma_free_coherent(&acb->pdev->dev, | ||
375 | ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20, | ||
376 | acb->dma_coherent, acb->dma_coherent_handle); | ||
377 | return -ENOMEM; | ||
365 | } | 378 | } |
366 | 379 | ||
367 | static int arcmsr_probe(struct pci_dev *pdev, | 380 | static int arcmsr_probe(struct pci_dev *pdev, |
@@ -454,7 +467,6 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
454 | free_irq(pdev->irq, acb); | 467 | free_irq(pdev->irq, acb); |
455 | out_free_ccb_pool: | 468 | out_free_ccb_pool: |
456 | arcmsr_free_ccb_pool(acb); | 469 | arcmsr_free_ccb_pool(acb); |
457 | iounmap(acb->pmu); | ||
458 | out_release_regions: | 470 | out_release_regions: |
459 | pci_release_regions(pdev); | 471 | pci_release_regions(pdev); |
460 | out_host_put: | 472 | out_host_put: |
@@ -467,7 +479,7 @@ static int arcmsr_probe(struct pci_dev *pdev, | |||
467 | 479 | ||
468 | static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) | 480 | static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) |
469 | { | 481 | { |
470 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 482 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
471 | uint32_t Index; | 483 | uint32_t Index; |
472 | uint8_t Retries = 0x00; | 484 | uint8_t Retries = 0x00; |
473 | 485 | ||
@@ -488,7 +500,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
488 | 500 | ||
489 | static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | 501 | static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) |
490 | { | 502 | { |
491 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 503 | struct MessageUnit_B *reg = acb->pmuB; |
492 | uint32_t Index; | 504 | uint32_t Index; |
493 | uint8_t Retries = 0x00; | 505 | uint8_t Retries = 0x00; |
494 | 506 | ||
@@ -509,7 +521,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb) | |||
509 | 521 | ||
510 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | 522 | static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) |
511 | { | 523 | { |
512 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 524 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
513 | 525 | ||
514 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); | 526 | writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); |
515 | if (arcmsr_hba_wait_msgint_ready(acb)) | 527 | if (arcmsr_hba_wait_msgint_ready(acb)) |
@@ -520,7 +532,7 @@ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) | |||
520 | 532 | ||
521 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) | 533 | static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) |
522 | { | 534 | { |
523 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 535 | struct MessageUnit_B *reg = acb->pmuB; |
524 | 536 | ||
525 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); | 537 | writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg); |
526 | if (arcmsr_hbb_wait_msgint_ready(acb)) | 538 | if (arcmsr_hbb_wait_msgint_ready(acb)) |
@@ -566,7 +578,7 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) | |||
566 | 578 | ||
567 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) | 579 | static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) |
568 | { | 580 | { |
569 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 581 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
570 | int retry_count = 30; | 582 | int retry_count = 30; |
571 | 583 | ||
572 | writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); | 584 | writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); |
@@ -583,7 +595,7 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) | |||
583 | 595 | ||
584 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) | 596 | static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) |
585 | { | 597 | { |
586 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 598 | struct MessageUnit_B *reg = acb->pmuB; |
587 | int retry_count = 30; | 599 | int retry_count = 30; |
588 | 600 | ||
589 | writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg); | 601 | writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg); |
@@ -637,7 +649,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
637 | switch (acb->adapter_type) { | 649 | switch (acb->adapter_type) { |
638 | 650 | ||
639 | case ACB_ADAPTER_TYPE_A : { | 651 | case ACB_ADAPTER_TYPE_A : { |
640 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 652 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
641 | orig_mask = readl(&reg->outbound_intmask)|\ | 653 | orig_mask = readl(&reg->outbound_intmask)|\ |
642 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; | 654 | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE; |
643 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ | 655 | writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ |
@@ -646,7 +658,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) | |||
646 | break; | 658 | break; |
647 | 659 | ||
648 | case ACB_ADAPTER_TYPE_B : { | 660 | case ACB_ADAPTER_TYPE_B : { |
649 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 661 | struct MessageUnit_B *reg = acb->pmuB; |
650 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ | 662 | orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \ |
651 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); | 663 | (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); |
652 | writel(0, reg->iop2drv_doorbell_mask_reg); | 664 | writel(0, reg->iop2drv_doorbell_mask_reg); |
@@ -748,14 +760,13 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) | |||
748 | switch (acb->adapter_type) { | 760 | switch (acb->adapter_type) { |
749 | 761 | ||
750 | case ACB_ADAPTER_TYPE_A: { | 762 | case ACB_ADAPTER_TYPE_A: { |
751 | struct MessageUnit_A __iomem *reg = \ | 763 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
752 | (struct MessageUnit_A *)acb->pmu; | ||
753 | uint32_t outbound_intstatus; | 764 | uint32_t outbound_intstatus; |
754 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ | 765 | outbound_intstatus = readl(&reg->outbound_intstatus) &
755 | acb->outbound_int_enable; | 766 | acb->outbound_int_enable; |
756 | /*clear and abort all outbound posted Q*/ | 767 | /*clear and abort all outbound posted Q*/ |
757 | writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ | 768 | writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ |
758 | while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) \ | 769 | while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
759 | && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { | 770 | && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { |
760 | arcmsr_drain_donequeue(acb, flag_ccb); | 771 | arcmsr_drain_donequeue(acb, flag_ccb); |
761 | } | 772 | } |
@@ -763,7 +774,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) | |||
763 | break; | 774 | break; |
764 | 775 | ||
765 | case ACB_ADAPTER_TYPE_B: { | 776 | case ACB_ADAPTER_TYPE_B: { |
766 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 777 | struct MessageUnit_B *reg = acb->pmuB; |
767 | /*clear all outbound posted Q*/ | 778 | /*clear all outbound posted Q*/ |
768 | for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { | 779 | for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { |
769 | if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { | 780 | if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { |
@@ -816,7 +827,6 @@ static void arcmsr_remove(struct pci_dev *pdev) | |||
816 | } | 827 | } |
817 | 828 | ||
818 | free_irq(pdev->irq, acb); | 829 | free_irq(pdev->irq, acb); |
819 | iounmap(acb->pmu); | ||
820 | arcmsr_free_ccb_pool(acb); | 830 | arcmsr_free_ccb_pool(acb); |
821 | pci_release_regions(pdev); | 831 | pci_release_regions(pdev); |
822 | 832 | ||
@@ -859,7 +869,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
859 | switch (acb->adapter_type) { | 869 | switch (acb->adapter_type) { |
860 | 870 | ||
861 | case ACB_ADAPTER_TYPE_A : { | 871 | case ACB_ADAPTER_TYPE_A : { |
862 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 872 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
863 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | | 873 | mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE | |
864 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); | 874 | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE); |
865 | writel(mask, &reg->outbound_intmask); | 875 | writel(mask, &reg->outbound_intmask); |
@@ -868,7 +878,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \ | |||
868 | break; | 878 | break; |
869 | 879 | ||
870 | case ACB_ADAPTER_TYPE_B : { | 880 | case ACB_ADAPTER_TYPE_B : { |
871 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 881 | struct MessageUnit_B *reg = acb->pmuB; |
872 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ | 882 | mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \ |
873 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); | 883 | ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE); |
874 | writel(mask, reg->iop2drv_doorbell_mask_reg); | 884 | writel(mask, reg->iop2drv_doorbell_mask_reg); |
@@ -882,7 +892,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
882 | { | 892 | { |
883 | struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; | 893 | struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; |
884 | int8_t *psge = (int8_t *)&arcmsr_cdb->u; | 894 | int8_t *psge = (int8_t *)&arcmsr_cdb->u; |
885 | uint32_t address_lo, address_hi; | 895 | __le32 address_lo, address_hi; |
886 | int arccdbsize = 0x30; | 896 | int arccdbsize = 0x30; |
887 | int nseg; | 897 | int nseg; |
888 | 898 | ||
@@ -900,7 +910,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
900 | BUG_ON(nseg < 0); | 910 | BUG_ON(nseg < 0); |
901 | 911 | ||
902 | if (nseg) { | 912 | if (nseg) { |
903 | int length, i, cdb_sgcount = 0; | 913 | __le32 length; |
914 | int i, cdb_sgcount = 0; | ||
904 | struct scatterlist *sg; | 915 | struct scatterlist *sg; |
905 | 916 | ||
906 | /* map stor port SG list to our iop SG List. */ | 917 | /* map stor port SG list to our iop SG List. */ |
@@ -921,7 +932,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
921 | 932 | ||
922 | pdma_sg->addresshigh = address_hi; | 933 | pdma_sg->addresshigh = address_hi; |
923 | pdma_sg->address = address_lo; | 934 | pdma_sg->address = address_lo; |
924 | pdma_sg->length = length|IS_SG64_ADDR; | 935 | pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR); |
925 | psge += sizeof (struct SG64ENTRY); | 936 | psge += sizeof (struct SG64ENTRY); |
926 | arccdbsize += sizeof (struct SG64ENTRY); | 937 | arccdbsize += sizeof (struct SG64ENTRY); |
927 | } | 938 | } |
@@ -947,7 +958,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
947 | 958 | ||
948 | switch (acb->adapter_type) { | 959 | switch (acb->adapter_type) { |
949 | case ACB_ADAPTER_TYPE_A: { | 960 | case ACB_ADAPTER_TYPE_A: { |
950 | struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu; | 961 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
951 | 962 | ||
952 | if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) | 963 | if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) |
953 | writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, | 964 | writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, |
@@ -959,7 +970,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
959 | break; | 970 | break; |
960 | 971 | ||
961 | case ACB_ADAPTER_TYPE_B: { | 972 | case ACB_ADAPTER_TYPE_B: { |
962 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 973 | struct MessageUnit_B *reg = acb->pmuB; |
963 | uint32_t ending_index, index = reg->postq_index; | 974 | uint32_t ending_index, index = reg->postq_index; |
964 | 975 | ||
965 | ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); | 976 | ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); |
@@ -982,7 +993,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr | |||
982 | 993 | ||
983 | static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) | 994 | static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) |
984 | { | 995 | { |
985 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 996 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
986 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; | 997 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; |
987 | writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); | 998 | writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, ®->inbound_msgaddr0); |
988 | 999 | ||
@@ -995,7 +1006,7 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) | |||
995 | 1006 | ||
996 | static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) | 1007 | static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) |
997 | { | 1008 | { |
998 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1009 | struct MessageUnit_B *reg = acb->pmuB; |
999 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; | 1010 | acb->acb_flags &= ~ACB_F_MSG_START_BGRB; |
1000 | writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg); | 1011 | writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg); |
1001 | 1012 | ||
@@ -1023,6 +1034,17 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) | |||
1023 | 1034 | ||
1024 | static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) | 1035 | static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb) |
1025 | { | 1036 | { |
1037 | switch (acb->adapter_type) { | ||
1038 | case ACB_ADAPTER_TYPE_A: { | ||
1039 | iounmap(acb->pmuA); | ||
1040 | break; | ||
1041 | } | ||
1042 | case ACB_ADAPTER_TYPE_B: { | ||
1043 | struct MessageUnit_B *reg = acb->pmuB; | ||
1044 | iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL); | ||
1045 | iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER); | ||
1046 | } | ||
1047 | } | ||
1026 | dma_free_coherent(&acb->pdev->dev, | 1048 | dma_free_coherent(&acb->pdev->dev, |
1027 | ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, | 1049 | ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20, |
1028 | acb->dma_coherent, | 1050 | acb->dma_coherent, |
@@ -1033,13 +1055,13 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb) | |||
1033 | { | 1055 | { |
1034 | switch (acb->adapter_type) { | 1056 | switch (acb->adapter_type) { |
1035 | case ACB_ADAPTER_TYPE_A: { | 1057 | case ACB_ADAPTER_TYPE_A: { |
1036 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1058 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1037 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); | 1059 | writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); |
1038 | } | 1060 | } |
1039 | break; | 1061 | break; |
1040 | 1062 | ||
1041 | case ACB_ADAPTER_TYPE_B: { | 1063 | case ACB_ADAPTER_TYPE_B: { |
1042 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1064 | struct MessageUnit_B *reg = acb->pmuB; |
1043 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); | 1065 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); |
1044 | } | 1066 | } |
1045 | break; | 1067 | break; |
@@ -1050,7 +1072,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1050 | { | 1072 | { |
1051 | switch (acb->adapter_type) { | 1073 | switch (acb->adapter_type) { |
1052 | case ACB_ADAPTER_TYPE_A: { | 1074 | case ACB_ADAPTER_TYPE_A: { |
1053 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1075 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1054 | /* | 1076 | /* |
1055 | ** push inbound doorbell tell iop, driver data write ok | 1077 | ** push inbound doorbell tell iop, driver data write ok |
1056 | ** and wait reply on next hwinterrupt for next Qbuffer post | 1078 | ** and wait reply on next hwinterrupt for next Qbuffer post |
@@ -1060,7 +1082,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1060 | break; | 1082 | break; |
1061 | 1083 | ||
1062 | case ACB_ADAPTER_TYPE_B: { | 1084 | case ACB_ADAPTER_TYPE_B: { |
1063 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1085 | struct MessageUnit_B *reg = acb->pmuB; |
1064 | /* | 1086 | /* |
1065 | ** push inbound doorbell tell iop, driver data write ok | 1087 | ** push inbound doorbell tell iop, driver data write ok |
1066 | ** and wait reply on next hwinterrupt for next Qbuffer post | 1088 | ** and wait reply on next hwinterrupt for next Qbuffer post |
@@ -1071,41 +1093,41 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb) | |||
1071 | } | 1093 | } |
1072 | } | 1094 | } |
1073 | 1095 | ||
1074 | struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) | 1096 | struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb) |
1075 | { | 1097 | { |
1076 | static struct QBUFFER *qbuffer; | 1098 | struct QBUFFER __iomem *qbuffer = NULL; |
1077 | 1099 | ||
1078 | switch (acb->adapter_type) { | 1100 | switch (acb->adapter_type) { |
1079 | 1101 | ||
1080 | case ACB_ADAPTER_TYPE_A: { | 1102 | case ACB_ADAPTER_TYPE_A: { |
1081 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1103 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1082 | qbuffer = (struct QBUFFER __iomem *) &reg->message_rbuffer; | 1104 | qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer; |
1083 | } | 1105 | } |
1084 | break; | 1106 | break; |
1085 | 1107 | ||
1086 | case ACB_ADAPTER_TYPE_B: { | 1108 | case ACB_ADAPTER_TYPE_B: { |
1087 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1109 | struct MessageUnit_B *reg = acb->pmuB; |
1088 | qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg; | 1110 | qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg; |
1089 | } | 1111 | } |
1090 | break; | 1112 | break; |
1091 | } | 1113 | } |
1092 | return qbuffer; | 1114 | return qbuffer; |
1093 | } | 1115 | } |
1094 | 1116 | ||
1095 | static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) | 1117 | static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) |
1096 | { | 1118 | { |
1097 | static struct QBUFFER *pqbuffer; | 1119 | struct QBUFFER __iomem *pqbuffer = NULL; |
1098 | 1120 | ||
1099 | switch (acb->adapter_type) { | 1121 | switch (acb->adapter_type) { |
1100 | 1122 | ||
1101 | case ACB_ADAPTER_TYPE_A: { | 1123 | case ACB_ADAPTER_TYPE_A: { |
1102 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1124 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1103 | pqbuffer = (struct QBUFFER *) &reg->message_wbuffer; | 1125 | pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer; |
1104 | } | 1126 | } |
1105 | break; | 1127 | break; |
1106 | 1128 | ||
1107 | case ACB_ADAPTER_TYPE_B: { | 1129 | case ACB_ADAPTER_TYPE_B: { |
1108 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1130 | struct MessageUnit_B *reg = acb->pmuB; |
1109 | pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; | 1131 | pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg; |
1110 | } | 1132 | } |
1111 | break; | 1133 | break; |
@@ -1115,15 +1137,15 @@ static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb) | |||
1115 | 1137 | ||
1116 | static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) | 1138 | static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) |
1117 | { | 1139 | { |
1118 | struct QBUFFER *prbuffer; | 1140 | struct QBUFFER __iomem *prbuffer; |
1119 | struct QBUFFER *pQbuffer; | 1141 | struct QBUFFER *pQbuffer; |
1120 | uint8_t *iop_data; | 1142 | uint8_t __iomem *iop_data; |
1121 | int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; | 1143 | int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; |
1122 | 1144 | ||
1123 | rqbuf_lastindex = acb->rqbuf_lastindex; | 1145 | rqbuf_lastindex = acb->rqbuf_lastindex; |
1124 | rqbuf_firstindex = acb->rqbuf_firstindex; | 1146 | rqbuf_firstindex = acb->rqbuf_firstindex; |
1125 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 1147 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
1126 | iop_data = (uint8_t *)prbuffer->data; | 1148 | iop_data = (uint8_t __iomem *)prbuffer->data; |
1127 | iop_len = prbuffer->data_len; | 1149 | iop_len = prbuffer->data_len; |
1128 | my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); | 1150 | my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1); |
1129 | 1151 | ||
@@ -1151,8 +1173,8 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) | |||
1151 | acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; | 1173 | acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; |
1152 | if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { | 1174 | if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { |
1153 | uint8_t *pQbuffer; | 1175 | uint8_t *pQbuffer; |
1154 | struct QBUFFER *pwbuffer; | 1176 | struct QBUFFER __iomem *pwbuffer; |
1155 | uint8_t *iop_data; | 1177 | uint8_t __iomem *iop_data; |
1156 | int32_t allxfer_len = 0; | 1178 | int32_t allxfer_len = 0; |
1157 | 1179 | ||
1158 | acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); | 1180 | acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); |
@@ -1181,7 +1203,7 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) | |||
1181 | static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) | 1203 | static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) |
1182 | { | 1204 | { |
1183 | uint32_t outbound_doorbell; | 1205 | uint32_t outbound_doorbell; |
1184 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1206 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1185 | 1207 | ||
1186 | outbound_doorbell = readl(&reg->outbound_doorbell); | 1208 | outbound_doorbell = readl(&reg->outbound_doorbell); |
1187 | writel(outbound_doorbell, &reg->outbound_doorbell); | 1209 | writel(outbound_doorbell, &reg->outbound_doorbell); |
@@ -1197,7 +1219,7 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) | |||
1197 | static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) | 1219 | static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) |
1198 | { | 1220 | { |
1199 | uint32_t flag_ccb; | 1221 | uint32_t flag_ccb; |
1200 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1222 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1201 | 1223 | ||
1202 | while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { | 1224 | while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { |
1203 | arcmsr_drain_donequeue(acb, flag_ccb); | 1225 | arcmsr_drain_donequeue(acb, flag_ccb); |
@@ -1208,7 +1230,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
1208 | { | 1230 | { |
1209 | uint32_t index; | 1231 | uint32_t index; |
1210 | uint32_t flag_ccb; | 1232 | uint32_t flag_ccb; |
1211 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1233 | struct MessageUnit_B *reg = acb->pmuB; |
1212 | 1234 | ||
1213 | index = reg->doneq_index; | 1235 | index = reg->doneq_index; |
1214 | 1236 | ||
@@ -1224,7 +1246,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) | |||
1224 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | 1246 | static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) |
1225 | { | 1247 | { |
1226 | uint32_t outbound_intstatus; | 1248 | uint32_t outbound_intstatus; |
1227 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1249 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1228 | 1250 | ||
1229 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ | 1251 | outbound_intstatus = readl(&reg->outbound_intstatus) & \ |
1230 | acb->outbound_int_enable; | 1252 | acb->outbound_int_enable; |
@@ -1244,7 +1266,7 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb) | |||
1244 | static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) | 1266 | static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb) |
1245 | { | 1267 | { |
1246 | uint32_t outbound_doorbell; | 1268 | uint32_t outbound_doorbell; |
1247 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1269 | struct MessageUnit_B *reg = acb->pmuB; |
1248 | 1270 | ||
1249 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ | 1271 | outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \ |
1250 | acb->outbound_int_enable; | 1272 | acb->outbound_int_enable; |
@@ -1305,8 +1327,8 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb) | |||
1305 | { | 1327 | { |
1306 | int32_t wqbuf_firstindex, wqbuf_lastindex; | 1328 | int32_t wqbuf_firstindex, wqbuf_lastindex; |
1307 | uint8_t *pQbuffer; | 1329 | uint8_t *pQbuffer; |
1308 | struct QBUFFER *pwbuffer; | 1330 | struct QBUFFER __iomem *pwbuffer; |
1309 | uint8_t *iop_data; | 1331 | uint8_t __iomem *iop_data; |
1310 | int32_t allxfer_len = 0; | 1332 | int32_t allxfer_len = 0; |
1311 | 1333 | ||
1312 | pwbuffer = arcmsr_get_iop_wqbuffer(acb); | 1334 | pwbuffer = arcmsr_get_iop_wqbuffer(acb); |
@@ -1380,13 +1402,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \ | |||
1380 | } | 1402 | } |
1381 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { | 1403 | if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { |
1382 | 1404 | ||
1383 | struct QBUFFER *prbuffer; | 1405 | struct QBUFFER __iomem *prbuffer; |
1384 | uint8_t *iop_data; | 1406 | uint8_t __iomem *iop_data; |
1385 | int32_t iop_len; | 1407 | int32_t iop_len; |
1386 | 1408 | ||
1387 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; | 1409 | acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; |
1388 | prbuffer = arcmsr_get_iop_rqbuffer(acb); | 1410 | prbuffer = arcmsr_get_iop_rqbuffer(acb); |
1389 | iop_data = (uint8_t *)prbuffer->data; | 1411 | iop_data = prbuffer->data; |
1390 | iop_len = readl(&prbuffer->data_len); | 1412 | iop_len = readl(&prbuffer->data_len); |
1391 | while (iop_len > 0) { | 1413 | while (iop_len > 0) { |
1392 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); | 1414 | acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data); |
@@ -1669,11 +1691,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, | |||
1669 | 1691 | ||
1670 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | 1692 | static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) |
1671 | { | 1693 | { |
1672 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1694 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1673 | char *acb_firm_model = acb->firm_model; | 1695 | char *acb_firm_model = acb->firm_model; |
1674 | char *acb_firm_version = acb->firm_version; | 1696 | char *acb_firm_version = acb->firm_version; |
1675 | char *iop_firm_model = (char *) (&reg->message_rwbuffer[15]); | 1697 | char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); |
1676 | char *iop_firm_version = (char *) (&reg->message_rwbuffer[17]); | 1698 | char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); |
1677 | int count; | 1699 | int count; |
1678 | 1700 | ||
1679 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); | 1701 | writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); |
@@ -1710,13 +1732,13 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) | |||
1710 | 1732 | ||
1711 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) | 1733 | static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) |
1712 | { | 1734 | { |
1713 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1735 | struct MessageUnit_B *reg = acb->pmuB; |
1714 | uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg; | 1736 | uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg; |
1715 | char *acb_firm_model = acb->firm_model; | 1737 | char *acb_firm_model = acb->firm_model; |
1716 | char *acb_firm_version = acb->firm_version; | 1738 | char *acb_firm_version = acb->firm_version; |
1717 | char *iop_firm_model = (char *) (&lrwbuffer[15]); | 1739 | char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]); |
1718 | /*firm_model,15,60-67*/ | 1740 | /*firm_model,15,60-67*/ |
1719 | char *iop_firm_version = (char *) (&lrwbuffer[17]); | 1741 | char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]); |
1720 | /*firm_version,17,68-83*/ | 1742 | /*firm_version,17,68-83*/ |
1721 | int count; | 1743 | int count; |
1722 | 1744 | ||
@@ -1777,7 +1799,7 @@ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) | |||
1777 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | 1799 | static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, |
1778 | struct CommandControlBlock *poll_ccb) | 1800 | struct CommandControlBlock *poll_ccb) |
1779 | { | 1801 | { |
1780 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 1802 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1781 | struct CommandControlBlock *ccb; | 1803 | struct CommandControlBlock *ccb; |
1782 | uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; | 1804 | uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; |
1783 | 1805 | ||
@@ -1826,7 +1848,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb, | |||
1826 | static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ | 1848 | static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \ |
1827 | struct CommandControlBlock *poll_ccb) | 1849 | struct CommandControlBlock *poll_ccb) |
1828 | { | 1850 | { |
1829 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1851 | struct MessageUnit_B *reg = acb->pmuB; |
1830 | struct CommandControlBlock *ccb; | 1852 | struct CommandControlBlock *ccb; |
1831 | uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; | 1853 | uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; |
1832 | int index; | 1854 | int index; |
@@ -1918,8 +1940,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) | |||
1918 | 1940 | ||
1919 | case ACB_ADAPTER_TYPE_A: { | 1941 | case ACB_ADAPTER_TYPE_A: { |
1920 | if (ccb_phyaddr_hi32 != 0) { | 1942 | if (ccb_phyaddr_hi32 != 0) { |
1921 | struct MessageUnit_A __iomem *reg = \ | 1943 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1922 | (struct MessageUnit_A *)acb->pmu; | ||
1923 | uint32_t intmask_org; | 1944 | uint32_t intmask_org; |
1924 | intmask_org = arcmsr_disable_outbound_ints(acb); | 1945 | intmask_org = arcmsr_disable_outbound_ints(acb); |
1925 | writel(ARCMSR_SIGNATURE_SET_CONFIG, \ | 1946 | writel(ARCMSR_SIGNATURE_SET_CONFIG, \ |
@@ -1940,9 +1961,9 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) | |||
1940 | 1961 | ||
1941 | case ACB_ADAPTER_TYPE_B: { | 1962 | case ACB_ADAPTER_TYPE_B: { |
1942 | unsigned long post_queue_phyaddr; | 1963 | unsigned long post_queue_phyaddr; |
1943 | uint32_t *rwbuffer; | 1964 | uint32_t __iomem *rwbuffer; |
1944 | 1965 | ||
1945 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 1966 | struct MessageUnit_B *reg = acb->pmuB; |
1946 | uint32_t intmask_org; | 1967 | uint32_t intmask_org; |
1947 | intmask_org = arcmsr_disable_outbound_ints(acb); | 1968 | intmask_org = arcmsr_disable_outbound_ints(acb); |
1948 | reg->postq_index = 0; | 1969 | reg->postq_index = 0; |
@@ -1994,7 +2015,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
1994 | switch (acb->adapter_type) { | 2015 | switch (acb->adapter_type) { |
1995 | 2016 | ||
1996 | case ACB_ADAPTER_TYPE_A: { | 2017 | case ACB_ADAPTER_TYPE_A: { |
1997 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 2018 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
1998 | do { | 2019 | do { |
1999 | firmware_state = readl(&reg->outbound_msgaddr1); | 2020 | firmware_state = readl(&reg->outbound_msgaddr1); |
2000 | } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); | 2021 | } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); |
@@ -2002,7 +2023,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
2002 | break; | 2023 | break; |
2003 | 2024 | ||
2004 | case ACB_ADAPTER_TYPE_B: { | 2025 | case ACB_ADAPTER_TYPE_B: { |
2005 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2026 | struct MessageUnit_B *reg = acb->pmuB; |
2006 | do { | 2027 | do { |
2007 | firmware_state = readl(reg->iop2drv_doorbell_reg); | 2028 | firmware_state = readl(reg->iop2drv_doorbell_reg); |
2008 | } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); | 2029 | } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); |
@@ -2013,7 +2034,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) | |||
2013 | 2034 | ||
2014 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | 2035 | static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) |
2015 | { | 2036 | { |
2016 | struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu; | 2037 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
2017 | acb->acb_flags |= ACB_F_MSG_START_BGRB; | 2038 | acb->acb_flags |= ACB_F_MSG_START_BGRB; |
2018 | writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); | 2039 | writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); |
2019 | if (arcmsr_hba_wait_msgint_ready(acb)) { | 2040 | if (arcmsr_hba_wait_msgint_ready(acb)) { |
@@ -2024,7 +2045,7 @@ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) | |||
2024 | 2045 | ||
2025 | static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) | 2046 | static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) |
2026 | { | 2047 | { |
2027 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2048 | struct MessageUnit_B *reg = acb->pmuB; |
2028 | acb->acb_flags |= ACB_F_MSG_START_BGRB; | 2049 | acb->acb_flags |= ACB_F_MSG_START_BGRB; |
2029 | writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); | 2050 | writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg); |
2030 | if (arcmsr_hbb_wait_msgint_ready(acb)) { | 2051 | if (arcmsr_hbb_wait_msgint_ready(acb)) { |
@@ -2049,7 +2070,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) | |||
2049 | { | 2070 | { |
2050 | switch (acb->adapter_type) { | 2071 | switch (acb->adapter_type) { |
2051 | case ACB_ADAPTER_TYPE_A: { | 2072 | case ACB_ADAPTER_TYPE_A: { |
2052 | struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu; | 2073 | struct MessageUnit_A __iomem *reg = acb->pmuA; |
2053 | uint32_t outbound_doorbell; | 2074 | uint32_t outbound_doorbell; |
2054 | /* empty doorbell Qbuffer if door bell ringed */ | 2075 | /* empty doorbell Qbuffer if door bell ringed */ |
2055 | outbound_doorbell = readl(&reg->outbound_doorbell); | 2076 | outbound_doorbell = readl(&reg->outbound_doorbell); |
@@ -2060,7 +2081,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) | |||
2060 | break; | 2081 | break; |
2061 | 2082 | ||
2062 | case ACB_ADAPTER_TYPE_B: { | 2083 | case ACB_ADAPTER_TYPE_B: { |
2063 | struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu; | 2084 | struct MessageUnit_B *reg = acb->pmuB; |
2064 | /*clear interrupt and message state*/ | 2085 | /*clear interrupt and message state*/ |
2065 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); | 2086 | writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg); |
2066 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); | 2087 | writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg); |
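Editorial note on the arcmsr_hba.c churn above: much of it is error-path plumbing. ioremap() and dma_alloc_coherent() failures now unwind whatever was already set up, and the iounmap moved into arcmsr_free_ccb_pool() so each adapter type releases exactly what it mapped. A generic hedged sketch of the acquire-in-order / release-in-reverse idiom (example_dev and its fields are hypothetical):

static int example_setup(struct example_dev *dev)
{
	dev->regs = ioremap(dev->phys_base, dev->regs_len);
	if (!dev->regs)
		return -ENOMEM;

	dev->buf = dma_alloc_coherent(dev->dma, dev->buf_len,
				      &dev->buf_dma, GFP_KERNEL);
	if (!dev->buf)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(dev->regs);	/* undo exactly what succeeded before the jump */
	return -ENOMEM;
}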
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index b5fa4f091387..f1871ea04045 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1652,6 +1652,7 @@ sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) | |||
1652 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); | 1652 | schp->buffer = kzalloc(sg_bufflen, gfp_flags); |
1653 | if (!schp->buffer) | 1653 | if (!schp->buffer) |
1654 | return -ENOMEM; | 1654 | return -ENOMEM; |
1655 | sg_init_table(schp->buffer, tablesize); | ||
1655 | schp->sglist_len = sg_bufflen; | 1656 | schp->sglist_len = sg_bufflen; |
1656 | return tablesize; /* number of scat_gath elements allocated */ | 1657 | return tablesize; /* number of scat_gath elements allocated */ |
1657 | } | 1658 | } |
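Editorial note on the sg.c one-liner: it reflects the scatterlist chaining rework in this series. A newly allocated scatterlist array must be initialised with sg_init_table(), which also sets the end marker, before it is filled or mapped; zeroed memory alone no longer suffices. A hedged usage sketch (nents, buf and len are illustrative):

struct scatterlist *sgl = kcalloc(nents, sizeof(*sgl), GFP_KERNEL);

if (sgl) {
	sg_init_table(sgl, nents);	/* marks sgl[nents - 1] as the end */
	sg_set_buf(&sgl[0], buf, len);
}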
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 5afcb2fa7cd3..d8b660061c13 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c | |||
@@ -345,7 +345,7 @@ static int serial_probe(struct pcmcia_device *link) | |||
345 | 345 | ||
346 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; | 346 | link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; |
347 | link->io.NumPorts1 = 8; | 347 | link->io.NumPorts1 = 8; |
348 | link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; | 348 | link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; |
349 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; | 349 | link->irq.IRQInfo1 = IRQ_LEVEL_ID; |
350 | link->conf.Attributes = CONF_ENABLE_IRQ; | 350 | link->conf.Attributes = CONF_ENABLE_IRQ; |
351 | if (do_sound) { | 351 | if (do_sound) { |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index c55459c592b8..b3518ca9f04e 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -184,14 +184,14 @@ static int spidev_message(struct spidev_data *spidev, | |||
184 | if (u_tmp->rx_buf) { | 184 | if (u_tmp->rx_buf) { |
185 | k_tmp->rx_buf = buf; | 185 | k_tmp->rx_buf = buf; |
186 | if (!access_ok(VERIFY_WRITE, (u8 __user *) | 186 | if (!access_ok(VERIFY_WRITE, (u8 __user *) |
187 | (ptrdiff_t) u_tmp->rx_buf, | 187 | (uintptr_t) u_tmp->rx_buf, |
188 | u_tmp->len)) | 188 | u_tmp->len)) |
189 | goto done; | 189 | goto done; |
190 | } | 190 | } |
191 | if (u_tmp->tx_buf) { | 191 | if (u_tmp->tx_buf) { |
192 | k_tmp->tx_buf = buf; | 192 | k_tmp->tx_buf = buf; |
193 | if (copy_from_user(buf, (const u8 __user *) | 193 | if (copy_from_user(buf, (const u8 __user *) |
194 | (ptrdiff_t) u_tmp->tx_buf, | 194 | (uintptr_t) u_tmp->tx_buf, |
195 | u_tmp->len)) | 195 | u_tmp->len)) |
196 | goto done; | 196 | goto done; |
197 | } | 197 | } |
@@ -224,7 +224,7 @@ static int spidev_message(struct spidev_data *spidev, | |||
224 | for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { | 224 | for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { |
225 | if (u_tmp->rx_buf) { | 225 | if (u_tmp->rx_buf) { |
226 | if (__copy_to_user((u8 __user *) | 226 | if (__copy_to_user((u8 __user *) |
227 | (ptrdiff_t) u_tmp->rx_buf, buf, | 227 | (uintptr_t) u_tmp->rx_buf, buf, |
228 | u_tmp->len)) { | 228 | u_tmp->len)) { |
229 | status = -EFAULT; | 229 | status = -EFAULT; |
230 | goto done; | 230 | goto done; |
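In the spidev ioctl path the user buffer addresses arrive as 64-bit integers and have to be turned back into pointers; uintptr_t (added to linux/types.h further down in this series) is defined to hold a pointer value, whereas ptrdiff_t only describes the difference between two pointers and is not guaranteed to round-trip an address. A hedged sketch of the pattern, with a made-up helper name:

#include <linux/uaccess.h>
#include <linux/types.h>

/* Sketch: a user address stored in a __u64 ioctl field is narrowed to a
 * pointer via uintptr_t, which is wide enough for any pointer value. */
static int demo_copy_in(void *dst, __u64 user_addr, size_t len)
{
	const void __user *src = (const void __user *)(uintptr_t)user_addr;

	if (!access_ok(VERIFY_READ, src, len))
		return -EFAULT;
	return copy_from_user(dst, src, len) ? -EFAULT : 0;
}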
diff --git a/include/asm-um/unistd.h b/include/asm-um/unistd.h index 732c83f04c3d..38bd9d94ee46 100644 --- a/include/asm-um/unistd.h +++ b/include/asm-um/unistd.h | |||
@@ -14,7 +14,6 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]); | |||
14 | 14 | ||
15 | #ifdef __KERNEL__ | 15 | #ifdef __KERNEL__ |
16 | /* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */ | 16 | /* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */ |
17 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
18 | #define __ARCH_WANT_OLD_READDIR | 17 | #define __ARCH_WANT_OLD_READDIR |
19 | #define __ARCH_WANT_SYS_ALARM | 18 | #define __ARCH_WANT_SYS_ALARM |
20 | #define __ARCH_WANT_SYS_GETHOSTNAME | 19 | #define __ARCH_WANT_SYS_GETHOSTNAME |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index bbf906a0b419..8396db24d019 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -341,7 +341,6 @@ enum blk_queue_state { | |||
341 | struct blk_queue_tag { | 341 | struct blk_queue_tag { |
342 | struct request **tag_index; /* map of busy tags */ | 342 | struct request **tag_index; /* map of busy tags */ |
343 | unsigned long *tag_map; /* bit map of free/busy tags */ | 343 | unsigned long *tag_map; /* bit map of free/busy tags */ |
344 | struct list_head busy_list; /* fifo list of busy tags */ | ||
345 | int busy; /* current depth */ | 344 | int busy; /* current depth */ |
346 | int max_depth; /* what we will send to device */ | 345 | int max_depth; /* what we will send to device */ |
347 | int real_max_depth; /* what the array can hold */ | 346 | int real_max_depth; /* what the array can hold */ |
@@ -435,6 +434,7 @@ struct request_queue | |||
435 | unsigned int dma_alignment; | 434 | unsigned int dma_alignment; |
436 | 435 | ||
437 | struct blk_queue_tag *queue_tags; | 436 | struct blk_queue_tag *queue_tags; |
437 | struct list_head tag_busy_list; | ||
438 | 438 | ||
439 | unsigned int nr_sorted; | 439 | unsigned int nr_sorted; |
440 | unsigned int in_flight; | 440 | unsigned int in_flight; |
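The blkdev.h hunks move the busy-request list out of struct blk_queue_tag and into the request_queue as tag_busy_list: a tag structure can be shared by several queues, but each queue needs its own list of in-flight tagged requests. Below is a simplified sketch of that ownership split; the demo_* structures and helper are made up and only mirror the shape of the change.

#include <linux/list.h>

struct demo_tag_map {			/* may be shared between queues */
	unsigned long *bitmap;
	int max_depth;
};

struct demo_queue {
	struct demo_tag_map *tags;	/* possibly shared */
	struct list_head tag_busy_list;	/* per-queue, like the new field */
};

static void demo_queue_init_tags(struct demo_queue *q, struct demo_tag_map *t)
{
	q->tags = t;
	INIT_LIST_HEAD(&q->tag_busy_list);	/* each queue owns its list */
}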
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index d2a96cbf4f0e..cf79853967ff 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h | |||
@@ -32,18 +32,13 @@ | |||
32 | * On x86-64 make the 64bit structure have the same alignment as the | 32 | * On x86-64 make the 64bit structure have the same alignment as the |
33 | * 32bit structure. This makes 32bit emulation easier. | 33 | * 32bit structure. This makes 32bit emulation easier. |
34 | * | 34 | * |
35 | * UML/x86_64 needs the same packing as x86_64 - UML + UML_X86 + | 35 | * UML/x86_64 needs the same packing as x86_64 |
36 | * 64_BIT adds up to UML/x86_64. | ||
37 | */ | 36 | */ |
38 | #ifdef __x86_64__ | 37 | #ifdef __x86_64__ |
39 | #define EPOLL_PACKED __attribute__((packed)) | 38 | #define EPOLL_PACKED __attribute__((packed)) |
40 | #else | 39 | #else |
41 | #if defined(CONFIG_UML) && defined(CONFIG_UML_X86) && defined(CONFIG_64BIT) | ||
42 | #define EPOLL_PACKED __attribute__((packed)) | ||
43 | #else | ||
44 | #define EPOLL_PACKED | 40 | #define EPOLL_PACKED |
45 | #endif | 41 | #endif |
46 | #endif | ||
47 | 42 | ||
48 | struct epoll_event { | 43 | struct epoll_event { |
49 | __u32 events; | 44 | __u32 events; |
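Now that UML defines __x86_64__ itself, the single #ifdef is sufficient. The packed attribute matters because struct epoll_event pairs a 32-bit field with a 64-bit one: without it, x86-64 pads the 64-bit member to an 8-byte boundary and the layout no longer matches 32-bit userspace. A small userspace sketch (assuming a GCC-compatible compiler) that makes the difference visible:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Same shape as struct epoll_event: a 32-bit field then a 64-bit one. */
struct padded   { uint32_t events; uint64_t data; };
struct packed_s { uint32_t events; uint64_t data; } __attribute__((packed));

int main(void)
{
	/* On x86-64 the unpacked layout is typically 16 bytes with data at
	 * offset 8; the packed one is 12 bytes with data at offset 4,
	 * which is what 32-bit userspace expects. */
	printf("unpacked: size %zu, data at %zu\n",
	       sizeof(struct padded), offsetof(struct padded, data));
	printf("packed:   size %zu, data at %zu\n",
	       sizeof(struct packed_s), offsetof(struct packed_s, data));
	return 0;
}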
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 457123171389..32326c293d7b 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -150,7 +150,7 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl, | |||
150 | struct scatterlist *ret = &sgl[nents - 1]; | 150 | struct scatterlist *ret = &sgl[nents - 1]; |
151 | #else | 151 | #else |
152 | struct scatterlist *sg, *ret = NULL; | 152 | struct scatterlist *sg, *ret = NULL; |
153 | int i; | 153 | unsigned int i; |
154 | 154 | ||
155 | for_each_sg(sgl, sg, nents, i) | 155 | for_each_sg(sgl, sg, nents, i) |
156 | ret = sg; | 156 | ret = sg; |
@@ -179,7 +179,11 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, | |||
179 | #ifndef ARCH_HAS_SG_CHAIN | 179 | #ifndef ARCH_HAS_SG_CHAIN |
180 | BUG(); | 180 | BUG(); |
181 | #endif | 181 | #endif |
182 | prv[prv_nents - 1].page_link = (unsigned long) sgl | 0x01; | 182 | /* |
183 | * Set lowest bit to indicate a link pointer, and make sure to clear | ||
184 | * the termination bit if it happens to be set. | ||
185 | */ | ||
186 | prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02; | ||
183 | } | 187 | } |
184 | 188 | ||
185 | /** | 189 | /** |
@@ -239,7 +243,7 @@ static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents) | |||
239 | sg_mark_end(sgl, nents); | 243 | sg_mark_end(sgl, nents); |
240 | #ifdef CONFIG_DEBUG_SG | 244 | #ifdef CONFIG_DEBUG_SG |
241 | { | 245 | { |
242 | int i; | 246 | unsigned int i; |
243 | for (i = 0; i < nents; i++) | 247 | for (i = 0; i < nents; i++) |
244 | sgl[i].sg_magic = SG_MAGIC; | 248 | sgl[i].sg_magic = SG_MAGIC; |
245 | } | 249 | } |
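The sg_chain() fix spells out the page_link encoding: bit 0 marks an entry as a link whose remaining bits point to the next scatterlist array, and bit 1 marks the terminating entry, so creating a link must set bit 0 and clear any stale bit 1. A standalone sketch of that encoding, using plain integers and hypothetical demo_* helpers rather than the real struct scatterlist:

#include <assert.h>
#include <stdint.h>

#define DEMO_SG_CHAIN	0x01UL	/* entry is a link to another sg array */
#define DEMO_SG_END	0x02UL	/* entry terminates the list */

static uintptr_t demo_make_link(void *next_sgl)
{
	/* Set the chain bit and, as in the fix, clear any stale end bit. */
	return ((uintptr_t)next_sgl | DEMO_SG_CHAIN) & ~DEMO_SG_END;
}

static void *demo_link_target(uintptr_t page_link)
{
	assert(page_link & DEMO_SG_CHAIN);
	return (void *)(page_link & ~(DEMO_SG_CHAIN | DEMO_SG_END));
}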
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 0013a0d8dc6b..87b895d5c786 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h | |||
@@ -41,17 +41,17 @@ | |||
41 | #define _LINUX_SUNRPC_RPC_RDMA_H | 41 | #define _LINUX_SUNRPC_RPC_RDMA_H |
42 | 42 | ||
43 | struct rpcrdma_segment { | 43 | struct rpcrdma_segment { |
44 | uint32_t rs_handle; /* Registered memory handle */ | 44 | __be32 rs_handle; /* Registered memory handle */ |
45 | uint32_t rs_length; /* Length of the chunk in bytes */ | 45 | __be32 rs_length; /* Length of the chunk in bytes */ |
46 | uint64_t rs_offset; /* Chunk virtual address or offset */ | 46 | __be64 rs_offset; /* Chunk virtual address or offset */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * read chunk(s), encoded as a linked list. | 50 | * read chunk(s), encoded as a linked list. |
51 | */ | 51 | */ |
52 | struct rpcrdma_read_chunk { | 52 | struct rpcrdma_read_chunk { |
53 | uint32_t rc_discrim; /* 1 indicates presence */ | 53 | __be32 rc_discrim; /* 1 indicates presence */ |
54 | uint32_t rc_position; /* Position in XDR stream */ | 54 | __be32 rc_position; /* Position in XDR stream */ |
55 | struct rpcrdma_segment rc_target; | 55 | struct rpcrdma_segment rc_target; |
56 | }; | 56 | }; |
57 | 57 | ||
@@ -66,29 +66,29 @@ struct rpcrdma_write_chunk { | |||
66 | * write chunk(s), encoded as a counted array. | 66 | * write chunk(s), encoded as a counted array. |
67 | */ | 67 | */ |
68 | struct rpcrdma_write_array { | 68 | struct rpcrdma_write_array { |
69 | uint32_t wc_discrim; /* 1 indicates presence */ | 69 | __be32 wc_discrim; /* 1 indicates presence */ |
70 | uint32_t wc_nchunks; /* Array count */ | 70 | __be32 wc_nchunks; /* Array count */ |
71 | struct rpcrdma_write_chunk wc_array[0]; | 71 | struct rpcrdma_write_chunk wc_array[0]; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct rpcrdma_msg { | 74 | struct rpcrdma_msg { |
75 | uint32_t rm_xid; /* Mirrors the RPC header xid */ | 75 | __be32 rm_xid; /* Mirrors the RPC header xid */ |
76 | uint32_t rm_vers; /* Version of this protocol */ | 76 | __be32 rm_vers; /* Version of this protocol */ |
77 | uint32_t rm_credit; /* Buffers requested/granted */ | 77 | __be32 rm_credit; /* Buffers requested/granted */ |
78 | uint32_t rm_type; /* Type of message (enum rpcrdma_proc) */ | 78 | __be32 rm_type; /* Type of message (enum rpcrdma_proc) */ |
79 | union { | 79 | union { |
80 | 80 | ||
81 | struct { /* no chunks */ | 81 | struct { /* no chunks */ |
82 | uint32_t rm_empty[3]; /* 3 empty chunk lists */ | 82 | __be32 rm_empty[3]; /* 3 empty chunk lists */ |
83 | } rm_nochunks; | 83 | } rm_nochunks; |
84 | 84 | ||
85 | struct { /* no chunks and padded */ | 85 | struct { /* no chunks and padded */ |
86 | uint32_t rm_align; /* Padding alignment */ | 86 | __be32 rm_align; /* Padding alignment */ |
87 | uint32_t rm_thresh; /* Padding threshold */ | 87 | __be32 rm_thresh; /* Padding threshold */ |
88 | uint32_t rm_pempty[3]; /* 3 empty chunk lists */ | 88 | __be32 rm_pempty[3]; /* 3 empty chunk lists */ |
89 | } rm_padded; | 89 | } rm_padded; |
90 | 90 | ||
91 | uint32_t rm_chunks[0]; /* read, write and reply chunks */ | 91 | __be32 rm_chunks[0]; /* read, write and reply chunks */ |
92 | 92 | ||
93 | } rm_body; | 93 | } rm_body; |
94 | }; | 94 | }; |
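Changing the wire-format fields from uint32_t to __be32/__be64 does not alter the layout; it marks them as big-endian for sparse, so a CPU-order value assigned without cpu_to_be32()/htonl() gets flagged. A kernel-style sketch with a cut-down segment structure and an invented helper:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_segment {
	__be32 handle;		/* big-endian on the wire */
	__be32 length;
};

/* Sketch: CPU-order values are converted explicitly before landing in
 * __be32 fields; a bare "seg->length = len" would now be caught. */
static void demo_fill_segment(struct demo_segment *seg, u32 rkey, u32 len)
{
	seg->handle = cpu_to_be32(rkey);
	seg->length = cpu_to_be32(len);
}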
diff --git a/include/linux/types.h b/include/linux/types.h index 4f0dad21c917..f4f8d19158e4 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -37,6 +37,8 @@ typedef __kernel_gid32_t gid_t; | |||
37 | typedef __kernel_uid16_t uid16_t; | 37 | typedef __kernel_uid16_t uid16_t; |
38 | typedef __kernel_gid16_t gid16_t; | 38 | typedef __kernel_gid16_t gid16_t; |
39 | 39 | ||
40 | typedef unsigned long uintptr_t; | ||
41 | |||
40 | #ifdef CONFIG_UID16 | 42 | #ifdef CONFIG_UID16 |
41 | /* This is defined by include/asm-{arch}/posix_types.h */ | 43 | /* This is defined by include/asm-{arch}/posix_types.h */ |
42 | typedef __kernel_old_uid_t old_uid_t; | 44 | typedef __kernel_old_uid_t old_uid_t; |
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h index 9e8f13b7da5a..5db261a1e85e 100644 --- a/include/net/sctp/auth.h +++ b/include/net/sctp/auth.h | |||
@@ -103,7 +103,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc); | |||
103 | void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, | 103 | void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, |
104 | struct sctp_hmac_algo_param *hmacs); | 104 | struct sctp_hmac_algo_param *hmacs); |
105 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, | 105 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, |
106 | __u16 hmac_id); | 106 | __be16 hmac_id); |
107 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc); | 107 | int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc); |
108 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc); | 108 | int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc); |
109 | void sctp_auth_calculate_hmac(const struct sctp_association *asoc, | 109 | void sctp_auth_calculate_hmac(const struct sctp_association *asoc, |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b6d2ff7e37ee..22a25142e4cf 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -602,7 +602,7 @@ static int hrtimer_switch_to_hres(void) | |||
602 | /* "Retrigger" the interrupt to get things going */ | 602 | /* "Retrigger" the interrupt to get things going */ |
603 | retrigger_next_event(NULL); | 603 | retrigger_next_event(NULL); |
604 | local_irq_restore(flags); | 604 | local_irq_restore(flags); |
605 | printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", | 605 | printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n", |
606 | smp_processor_id()); | 606 | smp_processor_id()); |
607 | return 1; | 607 | return 1; |
608 | } | 608 | } |
diff --git a/kernel/signal.c b/kernel/signal.c index 12006308c7eb..4537bdda1ebf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -732,7 +732,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) | |||
732 | printk("%s/%d: potentially unexpected fatal signal %d.\n", | 732 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
733 | current->comm, task_pid_nr(current), signr); | 733 | current->comm, task_pid_nr(current), signr); |
734 | 734 | ||
735 | #ifdef __i386__ | 735 | #if defined(__i386__) && !defined(__arch_um__) |
736 | printk("code at %08lx: ", regs->eip); | 736 | printk("code at %08lx: ", regs->eip); |
737 | { | 737 | { |
738 | int i; | 738 | int i; |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 10a1347597fd..5997456ebbc9 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -320,8 +320,6 @@ ktime_t tick_nohz_get_sleep_length(void) | |||
320 | return ts->sleep_length; | 320 | return ts->sleep_length; |
321 | } | 321 | } |
322 | 322 | ||
323 | EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); | ||
324 | |||
325 | /** | 323 | /** |
326 | * nohz_restart_sched_tick - restart the idle tick from the idle task | 324 | * nohz_restart_sched_tick - restart the idle tick from the idle task |
327 | * | 325 | * |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index fdb2e03d4fe0..12c5f4cb6b8c 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -129,7 +129,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now) | |||
129 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 129 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
130 | int i; | 130 | int i; |
131 | 131 | ||
132 | SEQ_printf(m, "\ncpu: %d\n", cpu); | 132 | SEQ_printf(m, "\n"); |
133 | SEQ_printf(m, "cpu: %d\n", cpu); | ||
133 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 134 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
134 | SEQ_printf(m, " clock %d:\n", i); | 135 | SEQ_printf(m, " clock %d:\n", i); |
135 | print_base(m, cpu_base->clock_base + i, now); | 136 | print_base(m, cpu_base->clock_base + i, now); |
@@ -184,7 +185,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td) | |||
184 | { | 185 | { |
185 | struct clock_event_device *dev = td->evtdev; | 186 | struct clock_event_device *dev = td->evtdev; |
186 | 187 | ||
187 | SEQ_printf(m, "\nTick Device: mode: %d\n", td->mode); | 188 | SEQ_printf(m, "\n"); |
189 | SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); | ||
188 | 190 | ||
189 | SEQ_printf(m, "Clock Event Device: "); | 191 | SEQ_printf(m, "Clock Event Device: "); |
190 | if (!dev) { | 192 | if (!dev) { |
diff --git a/mm/filemap.c b/mm/filemap.c index 5209e47b7fe3..7c8643630023 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/backing-dev.h> | 28 | #include <linux/backing-dev.h> |
29 | #include <linux/pagevec.h> | 29 | #include <linux/pagevec.h> |
30 | #include <linux/blkdev.h> | 30 | #include <linux/blkdev.h> |
31 | #include <linux/backing-dev.h> | ||
31 | #include <linux/security.h> | 32 | #include <linux/security.h> |
32 | #include <linux/syscalls.h> | 33 | #include <linux/syscalls.h> |
33 | #include <linux/cpuset.h> | 34 | #include <linux/cpuset.h> |
diff --git a/mm/nommu.c b/mm/nommu.c index 8f09333f78e1..35622c590925 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> | 12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/module.h> | ||
15 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
16 | #include <linux/mman.h> | 17 | #include <linux/mman.h> |
17 | #include <linux/swap.h> | 18 | #include <linux/swap.h> |
diff --git a/mm/slub.c b/mm/slub.c | |||
@@ -2734,7 +2734,7 @@ static void slab_mem_offline_callback(void *arg) | |||
2734 | * and offline_pages() function shoudn't call this | 2734 | * and offline_pages() function shoudn't call this |
2735 | * callback. So, we must fail. | 2735 | * callback. So, we must fail. |
2736 | */ | 2736 | */ |
2737 | BUG_ON(atomic_read(&n->nr_slabs)); | 2737 | BUG_ON(atomic_long_read(&n->nr_slabs)); |
2738 | 2738 | ||
2739 | s->node[offline_node] = NULL; | 2739 | s->node[offline_node] = NULL; |
2740 | kmem_cache_free(kmalloc_caches, n); | 2740 | kmem_cache_free(kmalloc_caches, n); |
diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 8af1004abefe..6d5fa6bb371b 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c | |||
@@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) | |||
556 | return &sctp_hmac_list[id]; | 556 | return &sctp_hmac_list[id]; |
557 | } | 557 | } |
558 | 558 | ||
559 | static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | 559 | static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id) |
560 | { | 560 | { |
561 | int found = 0; | 561 | int found = 0; |
562 | int i; | 562 | int i; |
@@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id) | |||
573 | 573 | ||
574 | /* See if the HMAC_ID is one that we claim as supported */ | 574 | /* See if the HMAC_ID is one that we claim as supported */ |
575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, | 575 | int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, |
576 | __u16 hmac_id) | 576 | __be16 hmac_id) |
577 | { | 577 | { |
578 | struct sctp_hmac_algo_param *hmacs; | 578 | struct sctp_hmac_algo_param *hmacs; |
579 | __u16 n_elt; | 579 | __u16 n_elt; |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 12db63580427..f877b88091ce 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; | 181 | struct rpcrdma_read_chunk *cur_rchunk = NULL; |
182 | struct rpcrdma_write_array *warray = NULL; | 182 | struct rpcrdma_write_array *warray = NULL; |
183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; | 183 | struct rpcrdma_write_chunk *cur_wchunk = NULL; |
184 | u32 *iptr = headerp->rm_body.rm_chunks; | 184 | __be32 *iptr = headerp->rm_body.rm_chunks; |
185 | 185 | ||
186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { | 186 | if (type == rpcrdma_readch || type == rpcrdma_areadch) { |
187 | /* a read chunk - server will RDMA Read our memory */ | 187 | /* a read chunk - server will RDMA Read our memory */ |
@@ -217,7 +217,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); | 217 | cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey); |
218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); | 218 | cur_rchunk->rc_target.rs_length = htonl(seg->mr_len); |
219 | xdr_encode_hyper( | 219 | xdr_encode_hyper( |
220 | (u32 *)&cur_rchunk->rc_target.rs_offset, | 220 | (__be32 *)&cur_rchunk->rc_target.rs_offset, |
221 | seg->mr_base); | 221 | seg->mr_base); |
222 | dprintk("RPC: %s: read chunk " | 222 | dprintk("RPC: %s: read chunk " |
223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, | 223 | "elem %d@0x%llx:0x%x pos %d (%s)\n", __func__, |
@@ -229,7 +229,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); | 229 | cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey); |
230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); | 230 | cur_wchunk->wc_target.rs_length = htonl(seg->mr_len); |
231 | xdr_encode_hyper( | 231 | xdr_encode_hyper( |
232 | (u32 *)&cur_wchunk->wc_target.rs_offset, | 232 | (__be32 *)&cur_wchunk->wc_target.rs_offset, |
233 | seg->mr_base); | 233 | seg->mr_base); |
234 | dprintk("RPC: %s: %s chunk " | 234 | dprintk("RPC: %s: %s chunk " |
235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, | 235 | "elem %d@0x%llx:0x%x (%s)\n", __func__, |
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, | |||
257 | * finish off header. If write, marshal discrim and nchunks. | 257 | * finish off header. If write, marshal discrim and nchunks. |
258 | */ | 258 | */ |
259 | if (cur_rchunk) { | 259 | if (cur_rchunk) { |
260 | iptr = (u32 *) cur_rchunk; | 260 | iptr = (__be32 *) cur_rchunk; |
261 | *iptr++ = xdr_zero; /* finish the read chunk list */ | 261 | *iptr++ = xdr_zero; /* finish the read chunk list */ |
262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ | 262 | *iptr++ = xdr_zero; /* encode a NULL write chunk list */ |
263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 263 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
264 | } else { | 264 | } else { |
265 | warray->wc_discrim = xdr_one; | 265 | warray->wc_discrim = xdr_one; |
266 | warray->wc_nchunks = htonl(nchunks); | 266 | warray->wc_nchunks = htonl(nchunks); |
267 | iptr = (u32 *) cur_wchunk; | 267 | iptr = (__be32 *) cur_wchunk; |
268 | if (type == rpcrdma_writech) { | 268 | if (type == rpcrdma_writech) { |
269 | *iptr++ = xdr_zero; /* finish the write chunk list */ | 269 | *iptr++ = xdr_zero; /* finish the write chunk list */ |
270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ | 270 | *iptr++ = xdr_zero; /* encode a NULL reply chunk */ |
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst) | |||
559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) | 559 | * RDMA'd by server. See map at rpcrdma_create_chunks()! :-) |
560 | */ | 560 | */ |
561 | static int | 561 | static int |
562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | 562 | rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp) |
563 | { | 563 | { |
564 | unsigned int i, total_len; | 564 | unsigned int i, total_len; |
565 | struct rpcrdma_write_chunk *cur_wchunk; | 565 | struct rpcrdma_write_chunk *cur_wchunk; |
@@ -573,7 +573,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; | 573 | struct rpcrdma_segment *seg = &cur_wchunk->wc_target; |
574 | ifdebug(FACILITY) { | 574 | ifdebug(FACILITY) { |
575 | u64 off; | 575 | u64 off; |
576 | xdr_decode_hyper((u32 *)&seg->rs_offset, &off); | 576 | xdr_decode_hyper((__be32 *)&seg->rs_offset, &off); |
577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", | 577 | dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n", |
578 | __func__, | 578 | __func__, |
579 | ntohl(seg->rs_length), | 579 | ntohl(seg->rs_length), |
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
585 | } | 585 | } |
586 | /* check and adjust for properly terminated write chunk */ | 586 | /* check and adjust for properly terminated write chunk */ |
587 | if (wrchunk) { | 587 | if (wrchunk) { |
588 | u32 *w = (u32 *) cur_wchunk; | 588 | __be32 *w = (__be32 *) cur_wchunk; |
589 | if (*w++ != xdr_zero) | 589 | if (*w++ != xdr_zero) |
590 | return -1; | 590 | return -1; |
591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; | 591 | cur_wchunk = (struct rpcrdma_write_chunk *) w; |
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp) | |||
593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) | 593 | if ((char *) cur_wchunk > rep->rr_base + rep->rr_len) |
594 | return -1; | 594 | return -1; |
595 | 595 | ||
596 | *iptrp = (u32 *) cur_wchunk; | 596 | *iptrp = (__be32 *) cur_wchunk; |
597 | return total_len; | 597 | return total_len; |
598 | } | 598 | } |
599 | 599 | ||
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
721 | struct rpc_rqst *rqst; | 721 | struct rpc_rqst *rqst; |
722 | struct rpc_xprt *xprt = rep->rr_xprt; | 722 | struct rpc_xprt *xprt = rep->rr_xprt; |
723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); | 723 | struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); |
724 | u32 *iptr; | 724 | __be32 *iptr; |
725 | int i, rdmalen, status; | 725 | int i, rdmalen, status; |
726 | 726 | ||
727 | /* Check status. If bad, signal disconnect and return rep to pool */ | 727 | /* Check status. If bad, signal disconnect and return rep to pool */ |
@@ -801,7 +801,7 @@ repost: | |||
801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; | 801 | r_xprt->rx_stats.total_rdma_reply += rdmalen; |
802 | } else { | 802 | } else { |
803 | /* else ordinary inline */ | 803 | /* else ordinary inline */ |
804 | iptr = (u32 *)((unsigned char *)headerp + 28); | 804 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
805 | rep->rr_len -= 28; /*sizeof *headerp;*/ | 805 | rep->rr_len -= 28; /*sizeof *headerp;*/ |
806 | status = rep->rr_len; | 806 | status = rep->rr_len; |
807 | } | 807 | } |
@@ -816,7 +816,7 @@ repost: | |||
816 | headerp->rm_body.rm_chunks[2] != xdr_one || | 816 | headerp->rm_body.rm_chunks[2] != xdr_one || |
817 | req->rl_nchunks == 0) | 817 | req->rl_nchunks == 0) |
818 | goto badheader; | 818 | goto badheader; |
819 | iptr = (u32 *)((unsigned char *)headerp + 28); | 819 | iptr = (__be32 *)((unsigned char *)headerp + 28); |
820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); | 820 | rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr); |
821 | if (rdmalen < 0) | 821 | if (rdmalen < 0) |
822 | goto badheader; | 822 | goto badheader; |
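The rpc_rdma.c conversions walk the chunk lists through a __be32 cursor and use xdr_encode_hyper()/xdr_decode_hyper() for the 64-bit offsets, which XDR represents as two big-endian 32-bit words, most significant word first. A minimal userspace sketch of that "hyper" encoding (plain uint32_t stands in for __be32, and the helper name is made up):

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() */

/* Sketch of XDR "hyper" encoding: write the high 32 bits, then the low
 * 32 bits, each in network byte order, and return the advanced cursor. */
static uint32_t *demo_encode_hyper(uint32_t *p, uint64_t val)
{
	*p++ = htonl((uint32_t)(val >> 32));	/* high word */
	*p++ = htonl((uint32_t)val);		/* low word */
	return p;
}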