path: root/drivers/ieee1394
Diffstat (limited to 'drivers/ieee1394')
 drivers/ieee1394/Kconfig                  |   23
 drivers/ieee1394/Makefile                 |    2
 drivers/ieee1394/csr1212.c                |   21
 drivers/ieee1394/csr1212.h                |    2
 drivers/ieee1394/dma.c                    |   73
 drivers/ieee1394/dv1394.c                 |   13
 drivers/ieee1394/eth1394.c                |   20
 drivers/ieee1394/highlevel.c              |   18
 drivers/ieee1394/hosts.c                  |   30
 drivers/ieee1394/hosts.h                  |  162
 drivers/ieee1394/ieee1394-ioctl.h         |    8
 drivers/ieee1394/ieee1394.h               |   19
 drivers/ieee1394/ieee1394_core.c          |  823
 drivers/ieee1394/ieee1394_core.h          |  100
 drivers/ieee1394/ieee1394_transactions.c  |  389
 drivers/ieee1394/iso.c                    |  102
 drivers/ieee1394/nodemgr.c                |   50
 drivers/ieee1394/nodemgr.h                |   18
 drivers/ieee1394/ohci1394.c               |   43
 drivers/ieee1394/ohci1394.h               |    4
 drivers/ieee1394/pcilynx.c                |    2
 drivers/ieee1394/raw1394.c                |   79
 drivers/ieee1394/sbp2.c                   | 1040
 drivers/ieee1394/sbp2.h                   |   70
 drivers/ieee1394/video1394.c              |  106
 25 files changed, 1416 insertions(+), 1801 deletions(-)
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 25103a0ef9b3..39142e2f804b 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -169,27 +169,4 @@ config IEEE1394_RAWIO
 	  To compile this driver as a module, say M here: the
 	  module will be called raw1394.
 
-config IEEE1394_CMP
-	tristate "IEC61883-1 Plug support"
-	depends on IEEE1394
-	help
-	  This option enables the Connection Management Procedures
-	  (IEC61883-1) driver, which implements input and output plugs.
-
-	  To compile this driver as a module, say M here: the
-	  module will be called cmp.
-
-config IEEE1394_AMDTP
-	tristate "IEC61883-6 (Audio transmission) support"
-	depends on IEEE1394 && IEEE1394_OHCI1394 && IEEE1394_CMP
-	help
-	  This option enables the Audio & Music Data Transmission Protocol
-	  (IEC61883-6) driver, which implements audio transmission over
-	  IEEE1394.
-
-	  The userspace interface is documented in amdtp.h.
-
-	  To compile this driver as a module, say M here: the
-	  module will be called amdtp.
-
 endmenu
diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile
index e8b4d48d376e..6f53611fe255 100644
--- a/drivers/ieee1394/Makefile
+++ b/drivers/ieee1394/Makefile
@@ -14,8 +14,6 @@ obj-$(CONFIG_IEEE1394_RAWIO) += raw1394.o
 obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o
 obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o
 obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o
-obj-$(CONFIG_IEEE1394_AMDTP) += amdtp.o
-obj-$(CONFIG_IEEE1394_CMP) += cmp.o
 
 quiet_cmd_oui2c = OUI2C $@
       cmd_oui2c = $(CONFIG_SHELL) $(srctree)/$(src)/oui2c.sh < $< > $@
diff --git a/drivers/ieee1394/csr1212.c b/drivers/ieee1394/csr1212.c
index 61ddd5d37eff..15773544234b 100644
--- a/drivers/ieee1394/csr1212.c
+++ b/drivers/ieee1394/csr1212.c
@@ -1261,7 +1261,7 @@ static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
 		return CSR1212_EINVAL;
 #endif
 
-	cr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+	cr = CSR1212_MALLOC(sizeof(*cr));
 	if (!cr)
 		return CSR1212_ENOMEM;
 
@@ -1393,8 +1393,7 @@ int csr1212_parse_keyval(struct csr1212_keyval *kv,
 	case CSR1212_KV_TYPE_LEAF:
 		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
 			kv->value.leaf.data = CSR1212_MALLOC(quads_to_bytes(kvi_len));
-			if (!kv->value.leaf.data)
-			{
+			if (!kv->value.leaf.data) {
 				ret = CSR1212_ENOMEM;
 				goto fail;
 			}
@@ -1462,7 +1461,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
 		cache->next = NULL;
 		csr->cache_tail = cache;
 		cache->filled_head =
-			CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+			CSR1212_MALLOC(sizeof(*cache->filled_head));
 		if (!cache->filled_head) {
 			return CSR1212_ENOMEM;
 		}
@@ -1484,7 +1483,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
 	/* Now seach read portions of the cache to see if it is there. */
 	for (cr = cache->filled_head; cr; cr = cr->next) {
 		if (cache_index < cr->offset_start) {
-			newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+			newcr = CSR1212_MALLOC(sizeof(*newcr));
 			if (!newcr)
 				return CSR1212_ENOMEM;
 
@@ -1508,7 +1507,7 @@ int _csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
 
 	if (!cr) {
 		cr = cache->filled_tail;
-		newcr = CSR1212_MALLOC(sizeof(struct csr1212_cache_region));
+		newcr = CSR1212_MALLOC(sizeof(*newcr));
 		if (!newcr)
 			return CSR1212_ENOMEM;
 
@@ -1611,15 +1610,17 @@ int csr1212_parse_csr(struct csr1212_csr *csr)
 	csr->root_kv->valid = 0;
 	csr->root_kv->next = csr->root_kv;
 	csr->root_kv->prev = csr->root_kv;
-	csr1212_get_keyval(csr, csr->root_kv);
+	ret = _csr1212_read_keyval(csr, csr->root_kv);
+	if (ret != CSR1212_SUCCESS)
+		return ret;
 
 	/* Scan through the Root directory finding all extended ROM regions
 	 * and make cache regions for them */
 	for (dentry = csr->root_kv->value.directory.dentries_head;
 	     dentry; dentry = dentry->next) {
-		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
-			csr1212_get_keyval(csr, dentry->kv);
-
+		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
+		    !dentry->kv->valid) {
+			ret = _csr1212_read_keyval(csr, dentry->kv);
 			if (ret != CSR1212_SUCCESS)
 				return ret;
 		}
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index 28c5f4b726e2..cecd5871f2de 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -646,7 +646,7 @@ static inline struct csr1212_csr_rom_cache *csr1212_rom_cache_malloc(u_int32_t o
 {
 	struct csr1212_csr_rom_cache *cache;
 
-	cache = CSR1212_MALLOC(sizeof(struct csr1212_csr_rom_cache) + size);
+	cache = CSR1212_MALLOC(sizeof(*cache) + size);
 	if (!cache)
 		return NULL;
 
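The csr1212 hunks above switch allocation sizes from sizeof(struct ...) to sizeof(*ptr). A minimal userspace sketch of that idiom, using an illustrative cache_region type rather than the kernel's struct csr1212_cache_region:

#include <stdlib.h>
#include <string.h>

struct cache_region {
	unsigned int offset_start;	/* inclusive start of the cached range */
	unsigned int offset_end;	/* exclusive end of the cached range */
	struct cache_region *next;
};

/* sizeof(*cr) follows the type of 'cr', so the allocation stays correct
 * even if the struct is renamed or the pointer's type changes later. */
static struct cache_region *region_alloc(void)
{
	struct cache_region *cr = malloc(sizeof(*cr));

	if (!cr)
		return NULL;
	memset(cr, 0, sizeof(*cr));
	return cr;
}

int main(void)
{
	struct cache_region *cr = region_alloc();

	free(cr);
	return 0;
}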
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index b79ddb43e746..9fb2769d9abc 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -23,7 +23,8 @@ void dma_prog_region_init(struct dma_prog_region *prog)
 	prog->bus_addr = 0;
 }
 
-int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev)
+int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
+			  struct pci_dev *dev)
 {
 	/* round up to page size */
 	n_bytes = PAGE_ALIGN(n_bytes);
@@ -32,7 +33,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 
 	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
 	if (!prog->kvirt) {
-		printk(KERN_ERR "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
+		printk(KERN_ERR
+		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
 		dma_prog_region_free(prog);
 		return -ENOMEM;
 	}
@@ -45,7 +47,8 @@ int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
 void dma_prog_region_free(struct dma_prog_region *prog)
 {
 	if (prog->kvirt) {
-		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT, prog->kvirt, prog->bus_addr);
+		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
+				    prog->kvirt, prog->bus_addr);
 	}
 
 	prog->kvirt = NULL;
@@ -65,7 +68,8 @@ void dma_region_init(struct dma_region *dma)
 	dma->sglist = NULL;
 }
 
-int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction)
+int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
+		     struct pci_dev *dev, int direction)
 {
 	unsigned int i;
 
@@ -95,14 +99,16 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
 	/* fill scatter/gather list with pages */
 	for (i = 0; i < dma->n_pages; i++) {
-		unsigned long va = (unsigned long) dma->kvirt + (i << PAGE_SHIFT);
+		unsigned long va =
+		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);
 
 		dma->sglist[i].page = vmalloc_to_page((void *)va);
 		dma->sglist[i].length = PAGE_SIZE;
 	}
 
 	/* map sglist to the IOMMU */
-	dma->n_dma_pages = pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
+	dma->n_dma_pages =
+	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);
 
 	if (dma->n_dma_pages == 0) {
 		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
@@ -114,7 +120,7 @@ int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_d
 
 	return 0;
 
-err:
+ err:
 	dma_region_free(dma);
 	return -ENOMEM;
 }
@@ -122,7 +128,8 @@ err:
 void dma_region_free(struct dma_region *dma)
 {
 	if (dma->n_dma_pages) {
-		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages, dma->direction);
+		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
+			     dma->direction);
 		dma->n_dma_pages = 0;
 		dma->dev = NULL;
 	}
@@ -137,7 +144,8 @@ void dma_region_free(struct dma_region *dma)
 
 /* find the scatterlist index and remaining offset corresponding to a
    given offset from the beginning of the buffer */
-static inline int dma_region_find(struct dma_region *dma, unsigned long offset, unsigned long *rem)
+static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
+				  unsigned long *rem)
 {
 	int i;
 	unsigned long off = offset;
@@ -156,15 +164,18 @@ static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
 	return i;
 }
 
-dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset)
+dma_addr_t dma_region_offset_to_bus(struct dma_region * dma,
+				    unsigned long offset)
 {
 	unsigned long rem = 0;
 
-	struct scatterlist *sg = &dma->sglist[dma_region_find(dma, offset, &rem)];
+	struct scatterlist *sg =
+	    &dma->sglist[dma_region_find(dma, offset, &rem)];
 	return sg_dma_address(sg) + rem;
 }
 
-void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
+			     unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
@@ -175,10 +186,12 @@ void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsig
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
+				dma->direction);
 }
 
-void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len)
+void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
+				unsigned long len)
 {
 	int first, last;
 	unsigned long rem;
@@ -189,44 +202,47 @@ void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, un
 	first = dma_region_find(dma, offset, &rem);
 	last = dma_region_find(dma, offset + len - 1, &rem);
 
-	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first], last - first + 1, dma->direction);
+	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
+				   last - first + 1, dma->direction);
 }
 
 #ifdef CONFIG_MMU
 
 /* nopage() handler for mmap access */
 
-static struct page*
-dma_region_pagefault(struct vm_area_struct *area, unsigned long address, int *type)
+static struct page *dma_region_pagefault(struct vm_area_struct *area,
+					 unsigned long address, int *type)
 {
 	unsigned long offset;
 	unsigned long kernel_virt_addr;
 	struct page *ret = NOPAGE_SIGBUS;
 
-	struct dma_region *dma = (struct dma_region*) area->vm_private_data;
+	struct dma_region *dma = (struct dma_region *)area->vm_private_data;
 
 	if (!dma->kvirt)
 		goto out;
 
-	if ( (address < (unsigned long) area->vm_start) ||
-	     (address > (unsigned long) area->vm_start + (dma->n_pages << PAGE_SHIFT)) )
+	if ((address < (unsigned long)area->vm_start) ||
+	    (address >
+	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
 		goto out;
 
 	if (type)
 		*type = VM_FAULT_MINOR;
 	offset = address - area->vm_start;
-	kernel_virt_addr = (unsigned long) dma->kvirt + offset;
-	ret = vmalloc_to_page((void*) kernel_virt_addr);
+	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
+	ret = vmalloc_to_page((void *)kernel_virt_addr);
 	get_page(ret);
-out:
+ out:
 	return ret;
 }
 
 static struct vm_operations_struct dma_region_vm_ops = {
 	.nopage = dma_region_pagefault,
 };
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	unsigned long size;
 
@@ -250,11 +266,12 @@ int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_st
 	return 0;
 }
 
 #else /* CONFIG_MMU */
 
-int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma)
+int dma_region_mmap(struct dma_region *dma, struct file *file,
+		    struct vm_area_struct *vma)
 {
 	return -EINVAL;
 }
 
 #endif /* CONFIG_MMU */
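Most of the dma.c changes are line wrapping, but they pass through dma_region_find() and dma_region_offset_to_bus(), which translate a byte offset in the buffer into a scatterlist entry plus a remainder. A small userspace sketch of that lookup, with an illustrative segment table instead of the kernel's struct scatterlist:

#include <stdio.h>

struct segment {
	unsigned long dma_addr;	/* bus address of this segment */
	unsigned long length;	/* bytes in this segment */
};

/* Walk the table until 'offset' falls inside a segment; return its
 * index and leave the remaining byte offset in *rem. */
static int segment_find(const struct segment *sg, int n,
			unsigned long offset, unsigned long *rem)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		if (offset < sg[i].length)
			break;
		offset -= sg[i].length;
	}
	*rem = offset;
	return i;
}

int main(void)
{
	struct segment sg[] = { { 0x1000, 4096 }, { 0x8000, 4096 } };
	unsigned long rem;
	int i = segment_find(sg, 2, 5000, &rem);

	/* prints "segment 1, bus address 0x8388" (0x8000 + 904) */
	printf("segment %d, bus address 0x%lx\n", i, sg[i].dma_addr + rem);
	return 0;
}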
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index cbbbe14b8849..196db7439272 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -123,15 +123,6 @@
 
 #include "ohci1394.h"
 
-#ifndef virt_to_page
-#define virt_to_page(x) MAP_NR(x)
-#endif
-
-#ifndef vmalloc_32
-#define vmalloc_32(x) vmalloc(x)
-#endif
-
-
 /* DEBUG LEVELS:
    0 - no debugging messages
    1 - some debugging messages, but none during DMA frame transmission
@@ -2218,14 +2209,12 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
 	unsigned long flags;
 	int i;
 
-	video = kmalloc(sizeof(struct video_card), GFP_KERNEL);
+	video = kzalloc(sizeof(*video), GFP_KERNEL);
 	if (!video) {
 		printk(KERN_ERR "dv1394: cannot allocate video_card\n");
 		goto err;
 	}
 
-	memset(video, 0, sizeof(struct video_card));
-
 	video->ohci = ohci;
 	/* lower 2 bits of id indicate which of four "plugs"
 	   per host */
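The dv1394.c hunk above folds a kmalloc() followed by memset() into a single zeroing allocation (kzalloc). A hedged userspace sketch of the same conversion; my_card and the two helpers are illustrative names, not dv1394 symbols:

#include <stdlib.h>
#include <string.h>

struct my_card {
	int id;
	void *dma_buf;
};

/* Before: allocate, then zero in a second step. */
static struct my_card *my_card_alloc_old(void)
{
	struct my_card *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	memset(c, 0, sizeof(*c));
	return c;
}

/* After: one call that returns zeroed memory (calloc stands in for kzalloc). */
static struct my_card *my_card_alloc_new(void)
{
	return calloc(1, sizeof(struct my_card));
}

int main(void)
{
	struct my_card *a = my_card_alloc_old();
	struct my_card *b = my_card_alloc_new();

	free(a);
	free(b);
	return 0;
}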
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index c9e92d85c893..30fa0d43a43a 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -88,9 +88,6 @@
 	printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
 #define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
 
-static char version[] __devinitdata =
-	"$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
-
 struct fragment_info {
 	struct list_head list;
 	int offset;
@@ -355,12 +352,12 @@ static int eth1394_probe(struct device *dev)
 	if (!hi)
 		return -ENOENT;
 
-	new_node = kmalloc(sizeof(struct eth1394_node_ref),
+	new_node = kmalloc(sizeof(*new_node),
 			   in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 	if (!new_node)
 		return -ENOMEM;
 
-	node_info = kmalloc(sizeof(struct eth1394_node_info),
+	node_info = kmalloc(sizeof(*node_info),
 			    in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 	if (!node_info) {
 		kfree(new_node);
@@ -436,12 +433,12 @@ static int eth1394_update(struct unit_directory *ud)
 	node = eth1394_find_node(&priv->ip_node_list, ud);
 
 	if (!node) {
-		node = kmalloc(sizeof(struct eth1394_node_ref),
+		node = kmalloc(sizeof(*node),
 			       in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 		if (!node)
 			return -ENOMEM;
 
-		node_info = kmalloc(sizeof(struct eth1394_node_info),
+		node_info = kmalloc(sizeof(*node_info),
 				    in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 		if (!node_info) {
 			kfree(node);
@@ -566,7 +563,6 @@ static void ether1394_add_host (struct hpsb_host *host)
 	struct eth1394_host_info *hi = NULL;
 	struct net_device *dev = NULL;
 	struct eth1394_priv *priv;
-	static int version_printed = 0;
 	u64 fifo_addr;
 
 	if (!(host->config_roms & HPSB_CONFIG_ROM_ENTRY_IP1394))
@@ -581,9 +577,6 @@ static void ether1394_add_host (struct hpsb_host *host)
 	if (fifo_addr == ~0ULL)
 		goto out;
 
-	if (version_printed++ == 0)
-		ETH1394_PRINT_G (KERN_INFO, "%s\n", version);
-
 	/* We should really have our own alloc_hpsbdev() function in
 	 * net_init.c instead of calling the one for ethernet then hijacking
 	 * it for ourselves. That way we'd be a real networking device. */
@@ -1021,7 +1014,7 @@ static inline int new_fragment(struct list_head *frag_info, int offset, int len)
 		}
 	}
 
-	new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+	new = kmalloc(sizeof(*new), GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
 
@@ -1040,7 +1033,7 @@ static inline int new_partial_datagram(struct net_device *dev,
 {
 	struct partial_datagram *new;
 
-	new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+	new = kmalloc(sizeof(*new), GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
 
@@ -1768,7 +1761,6 @@ fail:
 static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	strcpy (info->driver, driver_name);
-	strcpy (info->version, "$Rev: 1312 $");
 	/* FIXME XXX provide sane businfo */
 	strcpy (info->bus_info, "ieee1394");
 }
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 997e1bf6297f..734b121a0554 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -101,12 +101,10 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host,
 		return NULL;
 	}
 
-	hi = kmalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
+	hi = kzalloc(sizeof(*hi) + data_size, GFP_ATOMIC);
 	if (!hi)
 		return NULL;
 
-	memset(hi, 0, sizeof(*hi) + data_size);
-
 	if (data_size) {
 		data = hi->data = hi + 1;
 		hi->size = data_size;
@@ -326,11 +324,9 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
 		return retval;
 	}
 
-	as = (struct hpsb_address_serve *)
-		kmalloc(sizeof(struct hpsb_address_serve), GFP_KERNEL);
-	if (as == NULL) {
+	as = kmalloc(sizeof(*as), GFP_KERNEL);
+	if (!as)
 		return retval;
-	}
 
 	INIT_LIST_HEAD(&as->host_list);
 	INIT_LIST_HEAD(&as->hl_list);
@@ -383,11 +379,9 @@ int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
 		return 0;
 	}
 
-	as = (struct hpsb_address_serve *)
-		kmalloc(sizeof(struct hpsb_address_serve), GFP_ATOMIC);
-	if (as == NULL) {
-		return 0;
-	}
+	as = kmalloc(sizeof(*as), GFP_ATOMIC);
+	if (!as)
+		return 0;
 
 	INIT_LIST_HEAD(&as->host_list);
 	INIT_LIST_HEAD(&as->hl_list);
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index aeeaeb670d03..ba09741fc826 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -61,12 +61,12 @@ static void delayed_reset_bus(void * __reset_info)
 
 static int dummy_transmit_packet(struct hpsb_host *h, struct hpsb_packet *p)
 {
 	return 0;
 }
 
 static int dummy_devctl(struct hpsb_host *h, enum devctl_cmd c, int arg)
 {
 	return -1;
 }
 
 static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg)
@@ -75,9 +75,9 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command, unsigned
 }
 
 static struct hpsb_host_driver dummy_driver = {
 	.transmit_packet = dummy_transmit_packet,
 	.devctl = dummy_devctl,
 	.isoctl = dummy_isoctl
 };
 
 static int alloc_hostnum_cb(struct hpsb_host *host, void *__data)
@@ -110,13 +110,13 @@ static DECLARE_MUTEX(host_num_alloc);
 struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 				  struct device *dev)
 {
 	struct hpsb_host *h;
 	int i;
 	int hostnum = 0;
 
-	h = kmalloc(sizeof(struct hpsb_host) + extra, SLAB_KERNEL);
-	if (!h) return NULL;
-	memset(h, 0, sizeof(struct hpsb_host) + extra);
+	h = kzalloc(sizeof(*h) + extra, SLAB_KERNEL);
+	if (!h)
+		return NULL;
 
 	h->csr.rom = csr1212_create_csr(&csr_bus_ops, CSR_BUS_INFO_SIZE, h);
 	if (!h->csr.rom) {
@@ -125,7 +125,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	}
 
 	h->hostdata = h + 1;
 	h->driver = drv;
 
 	skb_queue_head_init(&h->pending_packet_queue);
 	INIT_LIST_HEAD(&h->addr_space);
@@ -145,8 +145,8 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra,
 	h->timeout.function = abort_timedouts;
 	h->timeout_interval = HZ / 20; // 50ms by default
 
 	h->topology_map = h->csr.topology_map + 3;
 	h->speed_map = (u8 *)(h->csr.speed_map + 2);
 
 	down(&host_num_alloc);
 
@@ -186,14 +186,14 @@ int hpsb_add_host(struct hpsb_host *host)
 
 void hpsb_remove_host(struct hpsb_host *host)
 {
 	host->is_shutdown = 1;
 
 	cancel_delayed_work(&host->delayed_reset);
 	flush_scheduled_work();
 
 	host->driver = &dummy_driver;
 
 	highlevel_remove_host(host);
 
 	hpsb_remove_extra_config_roms(host);
 
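hpsb_alloc_host() above allocates the host structure plus caller-requested extra bytes in one kzalloc(sizeof(*h) + extra, ...) call and points h->hostdata at h + 1. A userspace sketch of that trailing private-data layout, with an illustrative struct host rather than struct hpsb_host:

#include <stdio.h>
#include <stdlib.h>

struct host {
	int id;
	void *hostdata;	/* driver-private area that follows the struct */
};

static struct host *host_alloc(size_t extra)
{
	struct host *h = calloc(1, sizeof(*h) + extra);

	if (!h)
		return NULL;
	h->hostdata = h + 1;	/* extra bytes live right after 'h' */
	return h;
}

int main(void)
{
	struct host *h = host_alloc(64);

	if (h)
		printf("hostdata starts %zu bytes past h: %p\n",
		       sizeof(*h), h->hostdata);
	free(h);
	return 0;
}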
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index ae9b02cc013f..07d188ca8495 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -17,47 +17,47 @@ struct hpsb_packet;
 struct hpsb_iso;
 
 struct hpsb_host {
 	struct list_head host_list;
 
 	void *hostdata;
 
 	atomic_t generation;
 
 	struct sk_buff_head pending_packet_queue;
 
 	struct timer_list timeout;
 	unsigned long timeout_interval;
 
 	unsigned char iso_listen_count[64];
 
 	int node_count;      /* number of identified nodes on this bus */
 	int selfid_count;    /* total number of SelfIDs received */
 	int nodes_active;    /* number of nodes that are actually active */
 
 	nodeid_t node_id;    /* node ID of this host */
 	nodeid_t irm_id;     /* ID of this bus' isochronous resource manager */
 	nodeid_t busmgr_id;  /* ID of this bus' bus manager */
 
 	/* this nodes state */
 	unsigned in_bus_reset:1;
 	unsigned is_shutdown:1;
 	unsigned resume_packet_sent:1;
 
 	/* this nodes' duties on the bus */
 	unsigned is_root:1;
 	unsigned is_cycmst:1;
 	unsigned is_irm:1;
 	unsigned is_busmgr:1;
 
 	int reset_retries;
 	quadlet_t *topology_map;
 	u8 *speed_map;
 	struct csr_control csr;
 
 	/* Per node tlabel pool allocation */
 	struct hpsb_tlabel_pool tpool[64];
 
 	struct hpsb_host_driver *driver;
 
 	struct pci_dev *pdev;
 
@@ -77,34 +77,34 @@ struct hpsb_host {
 
 
 enum devctl_cmd {
 	/* Host is requested to reset its bus and cancel all outstanding async
 	 * requests. If arg == 1, it shall also attempt to become root on the
 	 * bus. Return void. */
 	RESET_BUS,
 
 	/* Arg is void, return value is the hardware cycle counter value. */
 	GET_CYCLE_COUNTER,
 
 	/* Set the hardware cycle counter to the value in arg, return void.
 	 * FIXME - setting is probably not required. */
 	SET_CYCLE_COUNTER,
 
 	/* Configure hardware for new bus ID in arg, return void. */
 	SET_BUS_ID,
 
 	/* If arg true, start sending cycle start packets, stop if arg == 0.
 	 * Return void. */
 	ACT_CYCLE_MASTER,
 
 	/* Cancel all outstanding async requests without resetting the bus.
 	 * Return void. */
 	CANCEL_REQUESTS,
 
 	/* Start or stop receiving isochronous channel in arg. Return void.
 	 * This acts as an optimization hint, hosts are not required not to
 	 * listen on unrequested channels. */
 	ISO_LISTEN_CHANNEL,
 	ISO_UNLISTEN_CHANNEL
 };
 
 enum isoctl_cmd {
@@ -135,13 +135,13 @@ enum isoctl_cmd {
 };
 
 enum reset_types {
 	/* 166 microsecond reset -- only type of reset available on
 	   non-1394a capable controllers */
 	LONG_RESET,
 
 	/* Short (arbitrated) reset -- only available on 1394a capable
 	   controllers */
 	SHORT_RESET,
 
 	/* Variants that set force_root before issueing the bus reset */
 	LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
@@ -159,22 +159,22 @@ struct hpsb_host_driver {
 	 * reads to the ConfigROM on its own. */
 	void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom);
 
 	/* This function shall implement packet transmission based on
 	 * packet->type. It shall CRC both parts of the packet (unless
 	 * packet->type == raw) and do byte-swapping as necessary or instruct
 	 * the hardware to do so. It can return immediately after the packet
 	 * was queued for sending. After sending, hpsb_sent_packet() has to be
 	 * called. Return 0 on success, negative errno on failure.
 	 * NOTE: The function must be callable in interrupt context.
 	 */
 	int (*transmit_packet) (struct hpsb_host *host,
 				struct hpsb_packet *packet);
 
 	/* This function requests miscellanous services from the driver, see
 	 * above for command codes and expected actions. Return -1 for unknown
 	 * command, though that should never happen.
 	 */
 	int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg);
 
 	/* ISO transmission/reception functions. Return 0 on success, -1
 	 * (or -EXXX errno code) on failure. If the low-level driver does not
@@ -182,15 +182,15 @@ struct hpsb_host_driver {
 	 */
 	int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg);
 
 	/* This function is mainly to redirect local CSR reads/locks to the iso
 	 * management registers (bus manager id, bandwidth available, channels
 	 * available) to the hardware registers in OHCI. reg is 0,1,2,3 for bus
 	 * mgr, bwdth avail, ch avail hi, ch avail lo respectively (the same ids
 	 * as OHCI uses). data and compare are the new data and expected data
 	 * respectively, return value is the old value.
 	 */
 	quadlet_t (*hw_csr_reg) (struct hpsb_host *host, int reg,
 				 quadlet_t data, quadlet_t compare);
 };
 
 
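hosts.h declares struct hpsb_host_driver as a table of callbacks, and hosts.c above swaps in dummy_driver when a host is removed so that later calls through the table stay safe. A compact userspace sketch of that ops-table-with-fallbacks pattern, using illustrative types rather than the ieee1394 ones:

#include <stdio.h>

struct packet { int len; };
struct host;

struct host_driver {
	int (*transmit_packet)(struct host *h, struct packet *p);
	int (*devctl)(struct host *h, int cmd, int arg);
};

struct host {
	const struct host_driver *driver;
};

/* Fallbacks: silently accept or reject everything once the real driver is gone. */
static int dummy_transmit_packet(struct host *h, struct packet *p) { return 0; }
static int dummy_devctl(struct host *h, int cmd, int arg) { return -1; }

static const struct host_driver dummy_driver = {
	.transmit_packet = dummy_transmit_packet,
	.devctl = dummy_devctl,
};

static void host_remove(struct host *h)
{
	h->driver = &dummy_driver;	/* callers keep working without NULL checks */
}

int main(void)
{
	struct host h = { .driver = &dummy_driver };
	struct packet p = { .len = 8 };

	printf("transmit -> %d\n", h.driver->transmit_packet(&h, &p));
	host_remove(&h);
	printf("devctl -> %d\n", h.driver->devctl(&h, 0, 0));
	return 0;
}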
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h
index f92b566363d5..156703986348 100644
--- a/drivers/ieee1394/ieee1394-ioctl.h
+++ b/drivers/ieee1394/ieee1394-ioctl.h
@@ -7,14 +7,6 @@
 #include <linux/ioctl.h>
 #include <linux/types.h>
 
-
-/* AMDTP Gets 6 */
-#define AMDTP_IOC_CHANNEL _IOW('#', 0x00, struct amdtp_ioctl)
-#define AMDTP_IOC_PLUG _IOW('#', 0x01, struct amdtp_ioctl)
-#define AMDTP_IOC_PING _IOW('#', 0x02, struct amdtp_ioctl)
-#define AMDTP_IOC_ZAP _IO ('#', 0x03)
-
-
 /* DV1394 Gets 10 */
 
 /* Get the driver ready to transmit video. pass a struct dv1394_init* as
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h
index b634a9bb365c..936d776de00a 100644
--- a/drivers/ieee1394/ieee1394.h
+++ b/drivers/ieee1394/ieee1394.h
@@ -62,6 +62,7 @@
 extern const char *hpsb_speedto_str[];
 
 
+/* 1394a cable PHY packets */
 #define SELFID_PWRCL_NO_POWER 0x0
 #define SELFID_PWRCL_PROVIDE_15W 0x1
 #define SELFID_PWRCL_PROVIDE_30W 0x2
@@ -76,8 +77,24 @@ extern const char *hpsb_speedto_str[];
 #define SELFID_PORT_NCONN 0x1
 #define SELFID_PORT_NONE 0x0
 
+#define PHYPACKET_LINKON 0x40000000
+#define PHYPACKET_PHYCONFIG_R 0x00800000
+#define PHYPACKET_PHYCONFIG_T 0x00400000
+#define EXTPHYPACKET_TYPE_PING 0x00000000
+#define EXTPHYPACKET_TYPE_REMOTEACCESS_BASE 0x00040000
+#define EXTPHYPACKET_TYPE_REMOTEACCESS_PAGED 0x00140000
+#define EXTPHYPACKET_TYPE_REMOTEREPLY_BASE 0x000C0000
+#define EXTPHYPACKET_TYPE_REMOTEREPLY_PAGED 0x001C0000
+#define EXTPHYPACKET_TYPE_REMOTECOMMAND 0x00200000
+#define EXTPHYPACKET_TYPE_REMOTECONFIRMATION 0x00280000
+#define EXTPHYPACKET_TYPE_RESUME 0x003C0000
 
-/* 1394a PHY bitmasks */
+#define EXTPHYPACKET_TYPEMASK 0xC0FC0000
+
+#define PHYPACKET_PORT_SHIFT 24
+#define PHYPACKET_GAPCOUNT_SHIFT 16
+
+/* 1394a PHY register map bitmasks */
 #define PHY_00_PHYSICAL_ID 0xFC
 #define PHY_00_R 0x02 /* Root */
 #define PHY_00_PS 0x01 /* Power Status*/
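The PHYPACKET_* constants added above are used later in this patch by hpsb_send_phy_config() to compose the PHY configuration quadlet instead of open-coding 1 << 23 and 1 << 22. A standalone sketch of that composition; the constant values are copied from the hunk above, while phy_config_quadlet() is an illustrative name:

#include <stdio.h>
#include <stdint.h>

#define PHYPACKET_PHYCONFIG_R		0x00800000	/* R bit: root ID field is valid */
#define PHYPACKET_PHYCONFIG_T		0x00400000	/* T bit: gap count field is valid */
#define PHYPACKET_PORT_SHIFT		24
#define PHYPACKET_GAPCOUNT_SHIFT	16

/* Build the first quadlet of a PHY configuration packet; pass -1 to
 * leave the root ID or the gap count unchanged. */
static uint32_t phy_config_quadlet(int rootid, int gapcnt)
{
	uint32_t d = 0;

	if (rootid != -1)
		d |= PHYPACKET_PHYCONFIG_R | (uint32_t)rootid << PHYPACKET_PORT_SHIFT;
	if (gapcnt != -1)
		d |= PHYPACKET_PHYCONFIG_T | (uint32_t)gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
	return d;
}

int main(void)
{
	/* root node 2, gap count 63 -> prints 0x02ff0000 */
	printf("0x%08x\n", phy_config_quadlet(2, 63));
	return 0;
}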
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 32a1e016c85e..64fbbb01d52a 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -179,34 +179,34 @@ void hpsb_free_packet(struct hpsb_packet *packet)
 
 int hpsb_reset_bus(struct hpsb_host *host, int type)
 {
 	if (!host->in_bus_reset) {
 		host->driver->devctl(host, RESET_BUS, type);
 		return 0;
 	} else {
 		return 1;
 	}
 }
 
 
 int hpsb_bus_reset(struct hpsb_host *host)
 {
 	if (host->in_bus_reset) {
 		HPSB_NOTICE("%s called while bus reset already in progress",
 			    __FUNCTION__);
 		return 1;
 	}
 
 	abort_requests(host);
 	host->in_bus_reset = 1;
 	host->irm_id = -1;
 	host->is_irm = 0;
 	host->busmgr_id = -1;
 	host->is_busmgr = 0;
 	host->is_cycmst = 0;
 	host->node_count = 0;
 	host->selfid_count = 0;
 
 	return 0;
 }
 
 
@@ -216,150 +216,156 @@ int hpsb_bus_reset(struct hpsb_host *host)
  */
 static int check_selfids(struct hpsb_host *host)
 {
 	int nodeid = -1;
 	int rest_of_selfids = host->selfid_count;
 	struct selfid *sid = (struct selfid *)host->topology_map;
 	struct ext_selfid *esid;
 	int esid_seq = 23;
 
 	host->nodes_active = 0;
 
 	while (rest_of_selfids--) {
 		if (!sid->extended) {
 			nodeid++;
 			esid_seq = 0;
 
 			if (sid->phy_id != nodeid) {
 				HPSB_INFO("SelfIDs failed monotony check with "
 					  "%d", sid->phy_id);
 				return 0;
 			}
 
 			if (sid->link_active) {
 				host->nodes_active++;
 				if (sid->contender)
 					host->irm_id = LOCAL_BUS | sid->phy_id;
 			}
 		} else {
 			esid = (struct ext_selfid *)sid;
 
 			if ((esid->phy_id != nodeid)
 			    || (esid->seq_nr != esid_seq)) {
 				HPSB_INFO("SelfIDs failed monotony check with "
 					  "%d/%d", esid->phy_id, esid->seq_nr);
 				return 0;
 			}
 			esid_seq++;
 		}
 		sid++;
 	}
 
 	esid = (struct ext_selfid *)(sid - 1);
 	while (esid->extended) {
-		if ((esid->porta == 0x2) || (esid->portb == 0x2)
-		    || (esid->portc == 0x2) || (esid->portd == 0x2)
-		    || (esid->porte == 0x2) || (esid->portf == 0x2)
-		    || (esid->portg == 0x2) || (esid->porth == 0x2)) {
+		if ((esid->porta == SELFID_PORT_PARENT) ||
+		    (esid->portb == SELFID_PORT_PARENT) ||
+		    (esid->portc == SELFID_PORT_PARENT) ||
+		    (esid->portd == SELFID_PORT_PARENT) ||
+		    (esid->porte == SELFID_PORT_PARENT) ||
+		    (esid->portf == SELFID_PORT_PARENT) ||
+		    (esid->portg == SELFID_PORT_PARENT) ||
+		    (esid->porth == SELFID_PORT_PARENT)) {
 			HPSB_INFO("SelfIDs failed root check on "
 				  "extended SelfID");
 			return 0;
 		}
 		esid--;
 	}
 
 	sid = (struct selfid *)esid;
-	if ((sid->port0 == 0x2) || (sid->port1 == 0x2) || (sid->port2 == 0x2)) {
+	if ((sid->port0 == SELFID_PORT_PARENT) ||
+	    (sid->port1 == SELFID_PORT_PARENT) ||
+	    (sid->port2 == SELFID_PORT_PARENT)) {
 		HPSB_INFO("SelfIDs failed root check");
 		return 0;
 	}
 
 	host->node_count = nodeid + 1;
 	return 1;
 }
 
 static void build_speed_map(struct hpsb_host *host, int nodecount)
 {
 	u8 speedcap[nodecount];
 	u8 cldcnt[nodecount];
 	u8 *map = host->speed_map;
 	struct selfid *sid;
 	struct ext_selfid *esid;
 	int i, j, n;
 
 	for (i = 0; i < (nodecount * 64); i += 64) {
 		for (j = 0; j < nodecount; j++) {
 			map[i+j] = IEEE1394_SPEED_MAX;
 		}
 	}
 
 	for (i = 0; i < nodecount; i++) {
 		cldcnt[i] = 0;
 	}
 
 	/* find direct children count and speed */
 	for (sid = (struct selfid *)&host->topology_map[host->selfid_count-1],
 	     n = nodecount - 1;
 	     (void *)sid >= (void *)host->topology_map; sid--) {
 		if (sid->extended) {
 			esid = (struct ext_selfid *)sid;
 
-			if (esid->porta == 0x3) cldcnt[n]++;
-			if (esid->portb == 0x3) cldcnt[n]++;
-			if (esid->portc == 0x3) cldcnt[n]++;
-			if (esid->portd == 0x3) cldcnt[n]++;
-			if (esid->porte == 0x3) cldcnt[n]++;
-			if (esid->portf == 0x3) cldcnt[n]++;
-			if (esid->portg == 0x3) cldcnt[n]++;
-			if (esid->porth == 0x3) cldcnt[n]++;
+			if (esid->porta == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->portb == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->portc == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->portd == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->porte == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->portf == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->portg == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (esid->porth == SELFID_PORT_CHILD) cldcnt[n]++;
 		} else {
-			if (sid->port0 == 0x3) cldcnt[n]++;
-			if (sid->port1 == 0x3) cldcnt[n]++;
-			if (sid->port2 == 0x3) cldcnt[n]++;
+			if (sid->port0 == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (sid->port1 == SELFID_PORT_CHILD) cldcnt[n]++;
+			if (sid->port2 == SELFID_PORT_CHILD) cldcnt[n]++;
 
 			speedcap[n] = sid->speed;
 			n--;
 		}
 	}
 
 	/* set self mapping */
 	for (i = 0; i < nodecount; i++) {
 		map[64*i + i] = speedcap[i];
 	}
 
 	/* fix up direct children count to total children count;
 	 * also fix up speedcaps for sibling and parent communication */
 	for (i = 1; i < nodecount; i++) {
 		for (j = cldcnt[i], n = i - 1; j > 0; j--) {
 			cldcnt[i] += cldcnt[n];
 			speedcap[n] = min(speedcap[n], speedcap[i]);
 			n -= cldcnt[n] + 1;
 		}
 	}
 
 	for (n = 0; n < nodecount; n++) {
 		for (i = n - cldcnt[n]; i <= n; i++) {
 			for (j = 0; j < (n - cldcnt[n]); j++) {
 				map[j*64 + i] = map[i*64 + j] =
 					min(map[i*64 + j], speedcap[n]);
 			}
 			for (j = n + 1; j < nodecount; j++) {
 				map[j*64 + i] = map[i*64 + j] =
 					min(map[i*64 + j], speedcap[n]);
 			}
 		}
 	}
 }
 
 
 void hpsb_selfid_received(struct hpsb_host *host, quadlet_t sid)
 {
 	if (host->in_bus_reset) {
 		HPSB_VERBOSE("Including SelfID 0x%x", sid);
 		host->topology_map[host->selfid_count++] = sid;
 	} else {
 		HPSB_NOTICE("Spurious SelfID packet (0x%08x) received from bus %d",
 			    sid, NODEID_TO_BUS(host->node_id));
 	}
 }
 
 void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
@@ -367,50 +373,50 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot)
 	if (!host->in_bus_reset)
 		HPSB_NOTICE("SelfID completion called outside of bus reset!");
 
 	host->node_id = LOCAL_BUS | phyid;
 	host->is_root = isroot;
 
 	if (!check_selfids(host)) {
 		if (host->reset_retries++ < 20) {
 			/* selfid stage did not complete without error */
 			HPSB_NOTICE("Error in SelfID stage, resetting");
 			host->in_bus_reset = 0;
 			/* this should work from ohci1394 now... */
 			hpsb_reset_bus(host, LONG_RESET);
 			return;
 		} else {
 			HPSB_NOTICE("Stopping out-of-control reset loop");
 			HPSB_NOTICE("Warning - topology map and speed map will not be valid");
 			host->reset_retries = 0;
 		}
 	} else {
 		host->reset_retries = 0;
 		build_speed_map(host, host->node_count);
 	}
 
 	HPSB_VERBOSE("selfid_complete called with successful SelfID stage "
 		     "... irm_id: 0x%X node_id: 0x%X",host->irm_id,host->node_id);
 
 	/* irm_id is kept up to date by check_selfids() */
 	if (host->irm_id == host->node_id) {
 		host->is_irm = 1;
 	} else {
 		host->is_busmgr = 0;
 		host->is_irm = 0;
 	}
 
 	if (isroot) {
 		host->driver->devctl(host, ACT_CYCLE_MASTER, 1);
 		host->is_cycmst = 1;
 	}
 	atomic_inc(&host->generation);
 	host->in_bus_reset = 0;
 	highlevel_host_reset(host);
 }
 
 
 void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
 		      int ackcode)
 {
 	unsigned long flags;
 
@@ -457,6 +463,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
 int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
 {
 	struct hpsb_packet *packet;
+	quadlet_t d = 0;
 	int retval = 0;
 
 	if (rootid >= ALL_NODES || rootid < -1 || gapcnt > 0x3f || gapcnt < -1 ||
@@ -466,26 +473,16 @@ int hpsb_send_phy_config(struct hpsb_host *host, int rootid, int gapcnt)
 		return -EINVAL;
 	}
 
-	packet = hpsb_alloc_packet(0);
-	if (!packet)
-		return -ENOMEM;
-
-	packet->host = host;
-	packet->header_size = 8;
-	packet->data_size = 0;
-	packet->expect_response = 0;
-	packet->no_waiter = 0;
-	packet->type = hpsb_raw;
-	packet->header[0] = 0;
 	if (rootid != -1)
-		packet->header[0] |= rootid << 24 | 1 << 23;
+		d |= PHYPACKET_PHYCONFIG_R | rootid << PHYPACKET_PORT_SHIFT;
 	if (gapcnt != -1)
-		packet->header[0] |= gapcnt << 16 | 1 << 22;
+		d |= PHYPACKET_PHYCONFIG_T | gapcnt << PHYPACKET_GAPCOUNT_SHIFT;
 
-	packet->header[1] = ~packet->header[0];
+	packet = hpsb_make_phypacket(host, d);
+	if (!packet)
+		return -ENOMEM;
 
 	packet->generation = get_hpsb_generation(host);
-
 	retval = hpsb_send_packet_and_wait(packet);
 	hpsb_free_packet(packet);
 
@@ -510,13 +507,13 @@ int hpsb_send_packet(struct hpsb_packet *packet)
 {
 	struct hpsb_host *host = packet->host;
 
 	if (host->is_shutdown)
 		return -EINVAL;
 	if (host->in_bus_reset ||
 	    (packet->generation != get_hpsb_generation(host)))
 		return -EAGAIN;
 
 	packet->state = hpsb_queued;
 
 	/* This just seems silly to me */
 	WARN_ON(packet->no_waiter && packet->expect_response);
@@ -530,42 +527,42 @@ int hpsb_send_packet(struct hpsb_packet *packet)
530 skb_queue_tail(&host->pending_packet_queue, packet->skb); 527 skb_queue_tail(&host->pending_packet_queue, packet->skb);
531 } 528 }
532 529
533 if (packet->node_id == host->node_id) { 530 if (packet->node_id == host->node_id) {
534 /* it is a local request, so handle it locally */ 531 /* it is a local request, so handle it locally */
535 532
536 quadlet_t *data; 533 quadlet_t *data;
537 size_t size = packet->data_size + packet->header_size; 534 size_t size = packet->data_size + packet->header_size;
538 535
539 data = kmalloc(size, GFP_ATOMIC); 536 data = kmalloc(size, GFP_ATOMIC);
540 if (!data) { 537 if (!data) {
541 HPSB_ERR("unable to allocate memory for concatenating header and data"); 538 HPSB_ERR("unable to allocate memory for concatenating header and data");
542 return -ENOMEM; 539 return -ENOMEM;
543 } 540 }
544 541
545 memcpy(data, packet->header, packet->header_size); 542 memcpy(data, packet->header, packet->header_size);
546 543
547 if (packet->data_size) 544 if (packet->data_size)
548 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size); 545 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
549 546
550 dump_packet("send packet local", packet->header, packet->header_size, -1); 547 dump_packet("send packet local", packet->header, packet->header_size, -1);
551 548
552 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); 549 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
553 hpsb_packet_received(host, data, size, 0); 550 hpsb_packet_received(host, data, size, 0);
554 551
555 kfree(data); 552 kfree(data);
556 553
557 return 0; 554 return 0;
558 } 555 }
559 556
560 if (packet->type == hpsb_async && packet->node_id != ALL_NODES) { 557 if (packet->type == hpsb_async && packet->node_id != ALL_NODES) {
561 packet->speed_code = 558 packet->speed_code =
562 host->speed_map[NODEID_TO_NODE(host->node_id) * 64 559 host->speed_map[NODEID_TO_NODE(host->node_id) * 64
563 + NODEID_TO_NODE(packet->node_id)]; 560 + NODEID_TO_NODE(packet->node_id)];
564 } 561 }
565 562
566 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code); 563 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
567 564
568 return host->driver->transmit_packet(host, packet); 565 return host->driver->transmit_packet(host, packet);
569} 566}
570 567
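hpsb_send_packet() refuses packets built for a stale bus generation with -EAGAIN, loops requests addressed to the local node straight back through hpsb_packet_received() (after concatenating header and data into one buffer), and otherwise looks up the speed code in the host speed map before handing the packet to the driver's transmit_packet hook. A possible caller-side retry wrapper is sketched below; it only illustrates the -EAGAIN contract, is not code from this driver, and relies on the fact that the generation check above happens before the packet is queued:

    /* Hypothetical helper: re-stamp the generation and retry a few
     * times when a bus reset invalidates the packet. */
    static int example_send_retry(struct hpsb_packet *packet, int tries)
    {
            int ret = -EAGAIN;

            while (tries-- && ret == -EAGAIN) {
                    packet->generation = get_hpsb_generation(packet->host);
                    ret = hpsb_send_packet(packet);
            }
            return ret;
    }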
571/* We could just use complete() directly as the packet complete 568/* We could just use complete() directly as the packet complete
@@ -593,81 +590,81 @@ int hpsb_send_packet_and_wait(struct hpsb_packet *packet)
593 590
594static void send_packet_nocare(struct hpsb_packet *packet) 591static void send_packet_nocare(struct hpsb_packet *packet)
595{ 592{
596 if (hpsb_send_packet(packet) < 0) { 593 if (hpsb_send_packet(packet) < 0) {
597 hpsb_free_packet(packet); 594 hpsb_free_packet(packet);
598 } 595 }
599} 596}
600 597
601 598
602static void handle_packet_response(struct hpsb_host *host, int tcode, 599static void handle_packet_response(struct hpsb_host *host, int tcode,
603 quadlet_t *data, size_t size) 600 quadlet_t *data, size_t size)
604{ 601{
605 struct hpsb_packet *packet = NULL; 602 struct hpsb_packet *packet = NULL;
606 struct sk_buff *skb; 603 struct sk_buff *skb;
607 int tcode_match = 0; 604 int tcode_match = 0;
608 int tlabel; 605 int tlabel;
609 unsigned long flags; 606 unsigned long flags;
610 607
611 tlabel = (data[0] >> 10) & 0x3f; 608 tlabel = (data[0] >> 10) & 0x3f;
612 609
613 spin_lock_irqsave(&host->pending_packet_queue.lock, flags); 610 spin_lock_irqsave(&host->pending_packet_queue.lock, flags);
614 611
615 skb_queue_walk(&host->pending_packet_queue, skb) { 612 skb_queue_walk(&host->pending_packet_queue, skb) {
616 packet = (struct hpsb_packet *)skb->data; 613 packet = (struct hpsb_packet *)skb->data;
617 if ((packet->tlabel == tlabel) 614 if ((packet->tlabel == tlabel)
618 && (packet->node_id == (data[1] >> 16))){ 615 && (packet->node_id == (data[1] >> 16))){
619 break; 616 break;
620 } 617 }
621 618
622 packet = NULL; 619 packet = NULL;
623 } 620 }
624 621
625 if (packet == NULL) { 622 if (packet == NULL) {
626 HPSB_DEBUG("unsolicited response packet received - no tlabel match"); 623 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
627 dump_packet("contents", data, 16, -1); 624 dump_packet("contents", data, 16, -1);
628 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 625 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
629 return; 626 return;
630 } 627 }
631 628
632 switch (packet->tcode) { 629 switch (packet->tcode) {
633 case TCODE_WRITEQ: 630 case TCODE_WRITEQ:
634 case TCODE_WRITEB: 631 case TCODE_WRITEB:
635 if (tcode != TCODE_WRITE_RESPONSE) 632 if (tcode != TCODE_WRITE_RESPONSE)
636 break; 633 break;
637 tcode_match = 1; 634 tcode_match = 1;
638 memcpy(packet->header, data, 12); 635 memcpy(packet->header, data, 12);
639 break; 636 break;
640 case TCODE_READQ: 637 case TCODE_READQ:
641 if (tcode != TCODE_READQ_RESPONSE) 638 if (tcode != TCODE_READQ_RESPONSE)
642 break; 639 break;
643 tcode_match = 1; 640 tcode_match = 1;
644 memcpy(packet->header, data, 16); 641 memcpy(packet->header, data, 16);
645 break; 642 break;
646 case TCODE_READB: 643 case TCODE_READB:
647 if (tcode != TCODE_READB_RESPONSE) 644 if (tcode != TCODE_READB_RESPONSE)
648 break; 645 break;
649 tcode_match = 1; 646 tcode_match = 1;
650 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16); 647 BUG_ON(packet->skb->len - sizeof(*packet) < size - 16);
651 memcpy(packet->header, data, 16); 648 memcpy(packet->header, data, 16);
652 memcpy(packet->data, data + 4, size - 16); 649 memcpy(packet->data, data + 4, size - 16);
653 break; 650 break;
654 case TCODE_LOCK_REQUEST: 651 case TCODE_LOCK_REQUEST:
655 if (tcode != TCODE_LOCK_RESPONSE) 652 if (tcode != TCODE_LOCK_RESPONSE)
656 break; 653 break;
657 tcode_match = 1; 654 tcode_match = 1;
658 size = min((size - 16), (size_t)8); 655 size = min((size - 16), (size_t)8);
659 BUG_ON(packet->skb->len - sizeof(*packet) < size); 656 BUG_ON(packet->skb->len - sizeof(*packet) < size);
660 memcpy(packet->header, data, 16); 657 memcpy(packet->header, data, 16);
661 memcpy(packet->data, data + 4, size); 658 memcpy(packet->data, data + 4, size);
662 break; 659 break;
663 } 660 }
664 661
665 if (!tcode_match) { 662 if (!tcode_match) {
666 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 663 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
667 HPSB_INFO("unsolicited response packet received - tcode mismatch"); 664 HPSB_INFO("unsolicited response packet received - tcode mismatch");
668 dump_packet("contents", data, 16, -1); 665 dump_packet("contents", data, 16, -1);
669 return; 666 return;
670 } 667 }
671 668
672 __skb_unlink(skb, &host->pending_packet_queue); 669 __skb_unlink(skb, &host->pending_packet_queue);
673 670
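handle_packet_response() walks host->pending_packet_queue under the queue lock, matches on transaction label and source node, and then insists that the response tcode pairs with the outstanding request tcode before copying the response header (and, for block reads and locks, the payload) into the waiting packet. Purely as an illustration of the pairing that switch implements, not code from the driver:

    /* Request tcode -> expected response tcode, mirroring the switch
     * in handle_packet_response(). */
    static int expected_response_tcode(int req_tcode)
    {
            switch (req_tcode) {
            case TCODE_WRITEQ:
            case TCODE_WRITEB:
                    return TCODE_WRITE_RESPONSE;
            case TCODE_READQ:
                    return TCODE_READQ_RESPONSE;
            case TCODE_READB:
                    return TCODE_READB_RESPONSE;
            case TCODE_LOCK_REQUEST:
                    return TCODE_LOCK_RESPONSE;
            default:
                    return -1;      /* no response expected */
            }
    }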
@@ -686,27 +683,27 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
686static struct hpsb_packet *create_reply_packet(struct hpsb_host *host, 683static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
687 quadlet_t *data, size_t dsize) 684 quadlet_t *data, size_t dsize)
688{ 685{
689 struct hpsb_packet *p; 686 struct hpsb_packet *p;
690 687
691 p = hpsb_alloc_packet(dsize); 688 p = hpsb_alloc_packet(dsize);
692 if (unlikely(p == NULL)) { 689 if (unlikely(p == NULL)) {
693 /* FIXME - send data_error response */ 690 /* FIXME - send data_error response */
694 return NULL; 691 return NULL;
695 } 692 }
696 693
697 p->type = hpsb_async; 694 p->type = hpsb_async;
698 p->state = hpsb_unused; 695 p->state = hpsb_unused;
699 p->host = host; 696 p->host = host;
700 p->node_id = data[1] >> 16; 697 p->node_id = data[1] >> 16;
701 p->tlabel = (data[0] >> 10) & 0x3f; 698 p->tlabel = (data[0] >> 10) & 0x3f;
702 p->no_waiter = 1; 699 p->no_waiter = 1;
703 700
704 p->generation = get_hpsb_generation(host); 701 p->generation = get_hpsb_generation(host);
705 702
706 if (dsize % 4) 703 if (dsize % 4)
707 p->data[dsize / 4] = 0; 704 p->data[dsize / 4] = 0;
708 705
709 return p; 706 return p;
710} 707}
711 708
712#define PREP_ASYNC_HEAD_RCODE(tc) \ 709#define PREP_ASYNC_HEAD_RCODE(tc) \
@@ -717,7 +714,7 @@ static struct hpsb_packet *create_reply_packet(struct hpsb_host *host,
717 packet->header[2] = 0 714 packet->header[2] = 0
718 715
719static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode, 716static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
720 quadlet_t data) 717 quadlet_t data)
721{ 718{
722 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE); 719 PREP_ASYNC_HEAD_RCODE(TCODE_READQ_RESPONSE);
723 packet->header[3] = data; 720 packet->header[3] = data;
@@ -726,7 +723,7 @@ static void fill_async_readquad_resp(struct hpsb_packet *packet, int rcode,
726} 723}
727 724
728static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode, 725static void fill_async_readblock_resp(struct hpsb_packet *packet, int rcode,
729 int length) 726 int length)
730{ 727{
731 if (rcode != RCODE_COMPLETE) 728 if (rcode != RCODE_COMPLETE)
732 length = 0; 729 length = 0;
@@ -746,7 +743,7 @@ static void fill_async_write_resp(struct hpsb_packet *packet, int rcode)
746} 743}
747 744
748static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode, 745static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extcode,
749 int length) 746 int length)
750{ 747{
751 if (rcode != RCODE_COMPLETE) 748 if (rcode != RCODE_COMPLETE)
752 length = 0; 749 length = 0;
@@ -758,184 +755,184 @@ static void fill_async_lock_resp(struct hpsb_packet *packet, int rcode, int extc
758} 755}
759 756
760#define PREP_REPLY_PACKET(length) \ 757#define PREP_REPLY_PACKET(length) \
761 packet = create_reply_packet(host, data, length); \ 758 packet = create_reply_packet(host, data, length); \
762 if (packet == NULL) break 759 if (packet == NULL) break
763 760
764static void handle_incoming_packet(struct hpsb_host *host, int tcode, 761static void handle_incoming_packet(struct hpsb_host *host, int tcode,
765 quadlet_t *data, size_t size, int write_acked) 762 quadlet_t *data, size_t size, int write_acked)
766{ 763{
767 struct hpsb_packet *packet; 764 struct hpsb_packet *packet;
768 int length, rcode, extcode; 765 int length, rcode, extcode;
769 quadlet_t buffer; 766 quadlet_t buffer;
770 nodeid_t source = data[1] >> 16; 767 nodeid_t source = data[1] >> 16;
771 nodeid_t dest = data[0] >> 16; 768 nodeid_t dest = data[0] >> 16;
772 u16 flags = (u16) data[0]; 769 u16 flags = (u16) data[0];
773 u64 addr; 770 u64 addr;
774 771
775 /* big FIXME - no error checking is done for an out of bounds length */ 772 /* big FIXME - no error checking is done for an out of bounds length */
776 773
777 switch (tcode) { 774 switch (tcode) {
778 case TCODE_WRITEQ: 775 case TCODE_WRITEQ:
779 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 776 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
780 rcode = highlevel_write(host, source, dest, data+3, 777 rcode = highlevel_write(host, source, dest, data+3,
781 addr, 4, flags); 778 addr, 4, flags);
782 779
783 if (!write_acked 780 if (!write_acked
784 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) 781 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
785 && (rcode >= 0)) { 782 && (rcode >= 0)) {
786 /* not a broadcast write, reply */ 783 /* not a broadcast write, reply */
787 PREP_REPLY_PACKET(0); 784 PREP_REPLY_PACKET(0);
788 fill_async_write_resp(packet, rcode); 785 fill_async_write_resp(packet, rcode);
789 send_packet_nocare(packet); 786 send_packet_nocare(packet);
790 } 787 }
791 break; 788 break;
792 789
793 case TCODE_WRITEB: 790 case TCODE_WRITEB:
794 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 791 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
795 rcode = highlevel_write(host, source, dest, data+4, 792 rcode = highlevel_write(host, source, dest, data+4,
796 addr, data[3]>>16, flags); 793 addr, data[3]>>16, flags);
797 794
798 if (!write_acked 795 if (!write_acked
799 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK) 796 && (NODEID_TO_NODE(data[0] >> 16) != NODE_MASK)
800 && (rcode >= 0)) { 797 && (rcode >= 0)) {
801 /* not a broadcast write, reply */ 798 /* not a broadcast write, reply */
802 PREP_REPLY_PACKET(0); 799 PREP_REPLY_PACKET(0);
803 fill_async_write_resp(packet, rcode); 800 fill_async_write_resp(packet, rcode);
804 send_packet_nocare(packet); 801 send_packet_nocare(packet);
805 } 802 }
806 break; 803 break;
807 804
808 case TCODE_READQ: 805 case TCODE_READQ:
809 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 806 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
810 rcode = highlevel_read(host, source, &buffer, addr, 4, flags); 807 rcode = highlevel_read(host, source, &buffer, addr, 4, flags);
811 808
812 if (rcode >= 0) { 809 if (rcode >= 0) {
813 PREP_REPLY_PACKET(0); 810 PREP_REPLY_PACKET(0);
814 fill_async_readquad_resp(packet, rcode, buffer); 811 fill_async_readquad_resp(packet, rcode, buffer);
815 send_packet_nocare(packet); 812 send_packet_nocare(packet);
816 } 813 }
817 break; 814 break;
818 815
819 case TCODE_READB: 816 case TCODE_READB:
820 length = data[3] >> 16; 817 length = data[3] >> 16;
821 PREP_REPLY_PACKET(length); 818 PREP_REPLY_PACKET(length);
822 819
823 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 820 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
824 rcode = highlevel_read(host, source, packet->data, addr, 821 rcode = highlevel_read(host, source, packet->data, addr,
825 length, flags); 822 length, flags);
826 823
827 if (rcode >= 0) { 824 if (rcode >= 0) {
828 fill_async_readblock_resp(packet, rcode, length); 825 fill_async_readblock_resp(packet, rcode, length);
829 send_packet_nocare(packet); 826 send_packet_nocare(packet);
830 } else { 827 } else {
831 hpsb_free_packet(packet); 828 hpsb_free_packet(packet);
832 } 829 }
833 break; 830 break;
834 831
835 case TCODE_LOCK_REQUEST: 832 case TCODE_LOCK_REQUEST:
836 length = data[3] >> 16; 833 length = data[3] >> 16;
837 extcode = data[3] & 0xffff; 834 extcode = data[3] & 0xffff;
838 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2]; 835 addr = (((u64)(data[1] & 0xffff)) << 32) | data[2];
839 836
840 PREP_REPLY_PACKET(8); 837 PREP_REPLY_PACKET(8);
841 838
842 if ((extcode == 0) || (extcode >= 7)) { 839 if ((extcode == 0) || (extcode >= 7)) {
843 /* let switch default handle error */ 840 /* let switch default handle error */
844 length = 0; 841 length = 0;
845 } 842 }
846 843
847 switch (length) { 844 switch (length) {
848 case 4: 845 case 4:
849 rcode = highlevel_lock(host, source, packet->data, addr, 846 rcode = highlevel_lock(host, source, packet->data, addr,
850 data[4], 0, extcode,flags); 847 data[4], 0, extcode,flags);
851 fill_async_lock_resp(packet, rcode, extcode, 4); 848 fill_async_lock_resp(packet, rcode, extcode, 4);
852 break; 849 break;
853 case 8: 850 case 8:
854 if ((extcode != EXTCODE_FETCH_ADD) 851 if ((extcode != EXTCODE_FETCH_ADD)
855 && (extcode != EXTCODE_LITTLE_ADD)) { 852 && (extcode != EXTCODE_LITTLE_ADD)) {
856 rcode = highlevel_lock(host, source, 853 rcode = highlevel_lock(host, source,
857 packet->data, addr, 854 packet->data, addr,
858 data[5], data[4], 855 data[5], data[4],
859 extcode, flags); 856 extcode, flags);
860 fill_async_lock_resp(packet, rcode, extcode, 4); 857 fill_async_lock_resp(packet, rcode, extcode, 4);
861 } else { 858 } else {
862 rcode = highlevel_lock64(host, source, 859 rcode = highlevel_lock64(host, source,
863 (octlet_t *)packet->data, addr, 860 (octlet_t *)packet->data, addr,
864 *(octlet_t *)(data + 4), 0ULL, 861 *(octlet_t *)(data + 4), 0ULL,
865 extcode, flags); 862 extcode, flags);
866 fill_async_lock_resp(packet, rcode, extcode, 8); 863 fill_async_lock_resp(packet, rcode, extcode, 8);
867 } 864 }
868 break; 865 break;
869 case 16: 866 case 16:
870 rcode = highlevel_lock64(host, source, 867 rcode = highlevel_lock64(host, source,
871 (octlet_t *)packet->data, addr, 868 (octlet_t *)packet->data, addr,
872 *(octlet_t *)(data + 6), 869 *(octlet_t *)(data + 6),
873 *(octlet_t *)(data + 4), 870 *(octlet_t *)(data + 4),
874 extcode, flags); 871 extcode, flags);
875 fill_async_lock_resp(packet, rcode, extcode, 8); 872 fill_async_lock_resp(packet, rcode, extcode, 8);
876 break; 873 break;
877 default: 874 default:
878 rcode = RCODE_TYPE_ERROR; 875 rcode = RCODE_TYPE_ERROR;
879 fill_async_lock_resp(packet, rcode, 876 fill_async_lock_resp(packet, rcode,
880 extcode, 0); 877 extcode, 0);
881 } 878 }
882 879
883 if (rcode >= 0) { 880 if (rcode >= 0) {
884 send_packet_nocare(packet); 881 send_packet_nocare(packet);
885 } else { 882 } else {
886 hpsb_free_packet(packet); 883 hpsb_free_packet(packet);
887 } 884 }
888 break; 885 break;
889 } 886 }
890 887
891} 888}
892#undef PREP_REPLY_PACKET 889#undef PREP_REPLY_PACKET
893 890
894 891
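handle_incoming_packet() pulls everything it needs out of the first three header quadlets: destination node in the top half of data[0], source node in the top half of data[1], tcode in bits 7:4 and tlabel in bits 15:10 of data[0], and the 48-bit destination offset split between the low 16 bits of data[1] and all of data[2]. The same decode collected into one sketch; the struct and function names are invented for illustration:

    struct example_async_hdr {
            nodeid_t dest, src;
            u64 addr;
            int tcode, tlabel;
    };

    static void example_decode_header(const quadlet_t *data,
                                      struct example_async_hdr *h)
    {
            h->dest   = data[0] >> 16;
            h->src    = data[1] >> 16;
            h->tcode  = (data[0] >> 4) & 0xf;
            h->tlabel = (data[0] >> 10) & 0x3f;
            h->addr   = (((u64)(data[1] & 0xffff)) << 32) | data[2];
    }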
895void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, 892void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
896 int write_acked) 893 int write_acked)
897{ 894{
898 int tcode; 895 int tcode;
899 896
900 if (host->in_bus_reset) { 897 if (host->in_bus_reset) {
901 HPSB_INFO("received packet during reset; ignoring"); 898 HPSB_INFO("received packet during reset; ignoring");
902 return; 899 return;
903 } 900 }
904 901
905 dump_packet("received packet", data, size, -1); 902 dump_packet("received packet", data, size, -1);
906 903
907 tcode = (data[0] >> 4) & 0xf; 904 tcode = (data[0] >> 4) & 0xf;
908 905
909 switch (tcode) { 906 switch (tcode) {
910 case TCODE_WRITE_RESPONSE: 907 case TCODE_WRITE_RESPONSE:
911 case TCODE_READQ_RESPONSE: 908 case TCODE_READQ_RESPONSE:
912 case TCODE_READB_RESPONSE: 909 case TCODE_READB_RESPONSE:
913 case TCODE_LOCK_RESPONSE: 910 case TCODE_LOCK_RESPONSE:
914 handle_packet_response(host, tcode, data, size); 911 handle_packet_response(host, tcode, data, size);
915 break; 912 break;
916 913
917 case TCODE_WRITEQ: 914 case TCODE_WRITEQ:
918 case TCODE_WRITEB: 915 case TCODE_WRITEB:
919 case TCODE_READQ: 916 case TCODE_READQ:
920 case TCODE_READB: 917 case TCODE_READB:
921 case TCODE_LOCK_REQUEST: 918 case TCODE_LOCK_REQUEST:
922 handle_incoming_packet(host, tcode, data, size, write_acked); 919 handle_incoming_packet(host, tcode, data, size, write_acked);
923 break; 920 break;
924 921
925 922
926 case TCODE_ISO_DATA: 923 case TCODE_ISO_DATA:
927 highlevel_iso_receive(host, data, size); 924 highlevel_iso_receive(host, data, size);
928 break; 925 break;
929 926
930 case TCODE_CYCLE_START: 927 case TCODE_CYCLE_START:
931 /* simply ignore this packet if it is passed on */ 928 /* simply ignore this packet if it is passed on */
932 break; 929 break;
933 930
934 default: 931 default:
935 HPSB_NOTICE("received packet with bogus transaction code %d", 932 HPSB_NOTICE("received packet with bogus transaction code %d",
936 tcode); 933 tcode);
937 break; 934 break;
938 } 935 }
939} 936}
940 937
941 938
@@ -1129,7 +1126,7 @@ static int __init ieee1394_init(void)
1129 nodemgr implements functionality required of ieee1394a-2000 1126 nodemgr implements functionality required of ieee1394a-2000
1130 IRMs */ 1127 IRMs */
1131 hpsb_disable_irm = 1; 1128 hpsb_disable_irm = 1;
1132 1129
1133 return 0; 1130 return 0;
1134 } 1131 }
1135 1132
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index 0b31429d0a68..b35466023f00 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -10,8 +10,8 @@
10 10
11 11
12struct hpsb_packet { 12struct hpsb_packet {
13 /* This struct is basically read-only for hosts with the exception of 13 /* This struct is basically read-only for hosts with the exception of
14 * the data buffer contents and xnext - see below. */ 14 * the data buffer contents and xnext - see below. */
15 15
16 /* This can be used for host driver internal linking. 16 /* This can be used for host driver internal linking.
17 * 17 *
@@ -21,47 +21,47 @@ struct hpsb_packet {
21 * driver_list when free'ing it. */ 21 * driver_list when free'ing it. */
22 struct list_head driver_list; 22 struct list_head driver_list;
23 23
24 nodeid_t node_id; 24 nodeid_t node_id;
25 25
26 /* Async and Iso types should be clear, raw means send-as-is, do not 26 /* Async and Iso types should be clear, raw means send-as-is, do not
27 * CRC! Byte swapping shall still be done in this case. */ 27 * CRC! Byte swapping shall still be done in this case. */
28 enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type; 28 enum { hpsb_async, hpsb_iso, hpsb_raw } __attribute__((packed)) type;
29 29
30 /* Okay, this is core internal and a no care for hosts. 30 /* Okay, this is core internal and a no care for hosts.
31 * queued = queued for sending 31 * queued = queued for sending
32 * pending = sent, waiting for response 32 * pending = sent, waiting for response
33 * complete = processing completed, successful or not 33 * complete = processing completed, successful or not
34 */ 34 */
35 enum { 35 enum {
36 hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete 36 hpsb_unused, hpsb_queued, hpsb_pending, hpsb_complete
37 } __attribute__((packed)) state; 37 } __attribute__((packed)) state;
38 38
39 /* These are core internal. */ 39 /* These are core internal. */
40 signed char tlabel; 40 signed char tlabel;
41 signed char ack_code; 41 signed char ack_code;
42 unsigned char tcode; 42 unsigned char tcode;
43 43
44 unsigned expect_response:1; 44 unsigned expect_response:1;
45 unsigned no_waiter:1; 45 unsigned no_waiter:1;
46 46
47 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */ 47 /* Speed to transmit with: 0 = 100Mbps, 1 = 200Mbps, 2 = 400Mbps */
48 unsigned speed_code:2; 48 unsigned speed_code:2;
49 49
50 /* 50 /*
51 * *header and *data are guaranteed to be 32-bit DMAable and may be 51 * *header and *data are guaranteed to be 32-bit DMAable and may be
52 * overwritten to allow in-place byte swapping. Neither of these is 52 * overwritten to allow in-place byte swapping. Neither of these is
53 * CRCed (the sizes also don't include CRC), but contain space for at 53 * CRCed (the sizes also don't include CRC), but contain space for at
54 * least one additional quadlet to allow in-place CRCing. The memory is 54 * least one additional quadlet to allow in-place CRCing. The memory is
55 * also guaranteed to be DMA mappable. 55 * also guaranteed to be DMA mappable.
56 */ 56 */
57 quadlet_t *header; 57 quadlet_t *header;
58 quadlet_t *data; 58 quadlet_t *data;
59 size_t header_size; 59 size_t header_size;
60 size_t data_size; 60 size_t data_size;
61 61
62 62
63 struct hpsb_host *host; 63 struct hpsb_host *host;
64 unsigned int generation; 64 unsigned int generation;
65 65
66 atomic_t refcnt; 66 atomic_t refcnt;
67 67
@@ -73,10 +73,10 @@ struct hpsb_packet {
73 /* XXX This is just a hack at the moment */ 73 /* XXX This is just a hack at the moment */
74 struct sk_buff *skb; 74 struct sk_buff *skb;
75 75
76 /* Store jiffies for implementing bus timeouts. */ 76 /* Store jiffies for implementing bus timeouts. */
77 unsigned long sendtime; 77 unsigned long sendtime;
78 78
79 quadlet_t embedded_header[5]; 79 quadlet_t embedded_header[5];
80}; 80};
81 81
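The struct above is the single currency between the core and the host drivers: DMA-able header/data buffers with their sizes, tlabel/tcode, the expect_response and no_waiter flags, a 2-bit speed code and the bus generation the packet was built for. A minimal sketch of filling one by hand, mirroring the old hpsb_send_phy_config() body removed earlier in this diff; real users normally go through the hpsb_make_*packet() helpers, and the function name here is illustrative only:

    static int example_send_raw_quadlet(struct hpsb_host *host, quadlet_t q)
    {
            struct hpsb_packet *pkt;
            int err;

            pkt = hpsb_alloc_packet(0);
            if (!pkt)
                    return -ENOMEM;

            pkt->host = host;
            pkt->type = hpsb_raw;           /* send as-is, no CRC */
            pkt->header[0] = q;
            pkt->header[1] = ~q;            /* PHY-style inverse quadlet */
            pkt->header_size = 8;
            pkt->data_size = 0;
            pkt->expect_response = 0;
            pkt->no_waiter = 0;
            pkt->generation = get_hpsb_generation(host);

            err = hpsb_send_packet_and_wait(pkt);
            hpsb_free_packet(pkt);
            return err;
    }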
82/* Set a task for when a packet completes */ 82/* Set a task for when a packet completes */
@@ -102,7 +102,7 @@ void hpsb_free_packet(struct hpsb_packet *packet);
102 */ 102 */
103static inline unsigned int get_hpsb_generation(struct hpsb_host *host) 103static inline unsigned int get_hpsb_generation(struct hpsb_host *host)
104{ 104{
105 return atomic_read(&host->generation); 105 return atomic_read(&host->generation);
106} 106}
107 107
108/* 108/*
@@ -157,7 +157,7 @@ void hpsb_selfid_complete(struct hpsb_host *host, int phyid, int isroot);
157 * from within a transmit packet routine. 157 * from within a transmit packet routine.
158 */ 158 */
159void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet, 159void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
160 int ackcode); 160 int ackcode);
161 161
162/* 162/*
163 * Hand over received packet to the core. The contents of data are expected to 163 * Hand over received packet to the core. The contents of data are expected to
@@ -171,7 +171,7 @@ void hpsb_packet_sent(struct hpsb_host *host, struct hpsb_packet *packet,
171 * packet type. 171 * packet type.
172 */ 172 */
173void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, 173void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
174 int write_acked); 174 int write_acked);
175 175
176 176
177/* 177/*
@@ -197,20 +197,20 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
197 * Block 15 (240-255) reserved for drivers under development, etc. 197 * Block 15 (240-255) reserved for drivers under development, etc.
198 */ 198 */
199 199
200#define IEEE1394_MAJOR 171 200#define IEEE1394_MAJOR 171
201 201
202#define IEEE1394_MINOR_BLOCK_RAW1394 0 202#define IEEE1394_MINOR_BLOCK_RAW1394 0
203#define IEEE1394_MINOR_BLOCK_VIDEO1394 1 203#define IEEE1394_MINOR_BLOCK_VIDEO1394 1
204#define IEEE1394_MINOR_BLOCK_DV1394 2 204#define IEEE1394_MINOR_BLOCK_DV1394 2
205#define IEEE1394_MINOR_BLOCK_AMDTP 3 205#define IEEE1394_MINOR_BLOCK_AMDTP 3
206#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15 206#define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
207 207
208#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) 208#define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0)
209#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16) 209#define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
210#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) 210#define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
211#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16) 211#define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
212#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16) 212#define IEEE1394_AMDTP_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
213#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) 213#define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
214 214
215/* return the index (within a minor number block) of a file */ 215/* return the index (within a minor number block) of a file */
216static inline unsigned char ieee1394_file_to_instance(struct file *file) 216static inline unsigned char ieee1394_file_to_instance(struct file *file)
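Minor numbers under major 171 are handed out in blocks of sixteen, one block per protocol driver (raw1394 = 0, video1394 = 1, dv1394 = 2, amdtp = 3, experimental = 15), and ieee1394_file_to_instance() recovers the index within a block. The arithmetic written out; the helper name and the modulo assumption are mine:

    #define EXAMPLE_BLOCK_SIZE 16

    static inline dev_t example_devt(int block, int instance)
    {
            return MKDEV(IEEE1394_MAJOR,
                         block * EXAMPLE_BLOCK_SIZE + instance);
    }

    /* e.g. the third dv1394 instance:
     *   example_devt(IEEE1394_MINOR_BLOCK_DV1394, 2) == MKDEV(171, 34)
     * and the instance is presumably recovered as minor % 16. */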
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c
index 0aa876360f9b..3fe2f6c4a253 100644
--- a/drivers/ieee1394/ieee1394_transactions.c
+++ b/drivers/ieee1394/ieee1394_transactions.c
@@ -22,7 +22,7 @@
22#include "ieee1394_core.h" 22#include "ieee1394_core.h"
23#include "highlevel.h" 23#include "highlevel.h"
24#include "nodemgr.h" 24#include "nodemgr.h"
25 25#include "ieee1394_transactions.h"
26 26
27#define PREP_ASYNC_HEAD_ADDRESS(tc) \ 27#define PREP_ASYNC_HEAD_ADDRESS(tc) \
28 packet->tcode = tc; \ 28 packet->tcode = tc; \
@@ -31,80 +31,82 @@
31 packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \ 31 packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \
32 packet->header[2] = addr & 0xffffffff 32 packet->header[2] = addr & 0xffffffff
33 33
34
35static void fill_async_readquad(struct hpsb_packet *packet, u64 addr) 34static void fill_async_readquad(struct hpsb_packet *packet, u64 addr)
36{ 35{
37 PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); 36 PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ);
38 packet->header_size = 12; 37 packet->header_size = 12;
39 packet->data_size = 0; 38 packet->data_size = 0;
40 packet->expect_response = 1; 39 packet->expect_response = 1;
41} 40}
42 41
43static void fill_async_readblock(struct hpsb_packet *packet, u64 addr, int length) 42static void fill_async_readblock(struct hpsb_packet *packet, u64 addr,
43 int length)
44{ 44{
45 PREP_ASYNC_HEAD_ADDRESS(TCODE_READB); 45 PREP_ASYNC_HEAD_ADDRESS(TCODE_READB);
46 packet->header[3] = length << 16; 46 packet->header[3] = length << 16;
47 packet->header_size = 16; 47 packet->header_size = 16;
48 packet->data_size = 0; 48 packet->data_size = 0;
49 packet->expect_response = 1; 49 packet->expect_response = 1;
50} 50}
51 51
52static void fill_async_writequad(struct hpsb_packet *packet, u64 addr, quadlet_t data) 52static void fill_async_writequad(struct hpsb_packet *packet, u64 addr,
53 quadlet_t data)
53{ 54{
54 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ); 55 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEQ);
55 packet->header[3] = data; 56 packet->header[3] = data;
56 packet->header_size = 16; 57 packet->header_size = 16;
57 packet->data_size = 0; 58 packet->data_size = 0;
58 packet->expect_response = 1; 59 packet->expect_response = 1;
59} 60}
60 61
61static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr, int length) 62static void fill_async_writeblock(struct hpsb_packet *packet, u64 addr,
63 int length)
62{ 64{
63 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB); 65 PREP_ASYNC_HEAD_ADDRESS(TCODE_WRITEB);
64 packet->header[3] = length << 16; 66 packet->header[3] = length << 16;
65 packet->header_size = 16; 67 packet->header_size = 16;
66 packet->expect_response = 1; 68 packet->expect_response = 1;
67 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0); 69 packet->data_size = length + (length % 4 ? 4 - (length % 4) : 0);
68} 70}
69 71
70static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode, 72static void fill_async_lock(struct hpsb_packet *packet, u64 addr, int extcode,
71 int length) 73 int length)
72{ 74{
73 PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST); 75 PREP_ASYNC_HEAD_ADDRESS(TCODE_LOCK_REQUEST);
74 packet->header[3] = (length << 16) | extcode; 76 packet->header[3] = (length << 16) | extcode;
75 packet->header_size = 16; 77 packet->header_size = 16;
76 packet->data_size = length; 78 packet->data_size = length;
77 packet->expect_response = 1; 79 packet->expect_response = 1;
78} 80}
79 81
80static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel, 82static void fill_iso_packet(struct hpsb_packet *packet, int length, int channel,
81 int tag, int sync) 83 int tag, int sync)
82{ 84{
83 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) 85 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
84 | (TCODE_ISO_DATA << 4) | sync; 86 | (TCODE_ISO_DATA << 4) | sync;
85 87
86 packet->header_size = 4; 88 packet->header_size = 4;
87 packet->data_size = length; 89 packet->data_size = length;
88 packet->type = hpsb_iso; 90 packet->type = hpsb_iso;
89 packet->tcode = TCODE_ISO_DATA; 91 packet->tcode = TCODE_ISO_DATA;
90} 92}
91 93
92static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data) 94static void fill_phy_packet(struct hpsb_packet *packet, quadlet_t data)
93{ 95{
94 packet->header[0] = data; 96 packet->header[0] = data;
95 packet->header[1] = ~data; 97 packet->header[1] = ~data;
96 packet->header_size = 8; 98 packet->header_size = 8;
97 packet->data_size = 0; 99 packet->data_size = 0;
98 packet->expect_response = 0; 100 packet->expect_response = 0;
99 packet->type = hpsb_raw; /* No CRC added */ 101 packet->type = hpsb_raw; /* No CRC added */
100 packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */ 102 packet->speed_code = IEEE1394_SPEED_100; /* Force speed to be 100Mbps */
101} 103}
102 104
103static void fill_async_stream_packet(struct hpsb_packet *packet, int length, 105static void fill_async_stream_packet(struct hpsb_packet *packet, int length,
104 int channel, int tag, int sync) 106 int channel, int tag, int sync)
105{ 107{
106 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8) 108 packet->header[0] = (length << 16) | (tag << 14) | (channel << 8)
107 | (TCODE_STREAM_DATA << 4) | sync; 109 | (TCODE_STREAM_DATA << 4) | sync;
108 110
109 packet->header_size = 4; 111 packet->header_size = 4;
110 packet->data_size = length; 112 packet->data_size = length;
@@ -171,99 +173,96 @@ int hpsb_get_tlabel(struct hpsb_packet *packet)
171 */ 173 */
172void hpsb_free_tlabel(struct hpsb_packet *packet) 174void hpsb_free_tlabel(struct hpsb_packet *packet)
173{ 175{
174 unsigned long flags; 176 unsigned long flags;
175 struct hpsb_tlabel_pool *tp; 177 struct hpsb_tlabel_pool *tp;
176 178
177 tp = &packet->host->tpool[packet->node_id & NODE_MASK]; 179 tp = &packet->host->tpool[packet->node_id & NODE_MASK];
178 180
179 BUG_ON(packet->tlabel > 63 || packet->tlabel < 0); 181 BUG_ON(packet->tlabel > 63 || packet->tlabel < 0);
180 182
181 spin_lock_irqsave(&tp->lock, flags); 183 spin_lock_irqsave(&tp->lock, flags);
182 BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool)); 184 BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool));
183 spin_unlock_irqrestore(&tp->lock, flags); 185 spin_unlock_irqrestore(&tp->lock, flags);
184 186
185 up(&tp->count); 187 up(&tp->count);
186} 188}
187 189
188
189
190int hpsb_packet_success(struct hpsb_packet *packet) 190int hpsb_packet_success(struct hpsb_packet *packet)
191{ 191{
192 switch (packet->ack_code) { 192 switch (packet->ack_code) {
193 case ACK_PENDING: 193 case ACK_PENDING:
194 switch ((packet->header[1] >> 12) & 0xf) { 194 switch ((packet->header[1] >> 12) & 0xf) {
195 case RCODE_COMPLETE: 195 case RCODE_COMPLETE:
196 return 0; 196 return 0;
197 case RCODE_CONFLICT_ERROR: 197 case RCODE_CONFLICT_ERROR:
198 return -EAGAIN; 198 return -EAGAIN;
199 case RCODE_DATA_ERROR: 199 case RCODE_DATA_ERROR:
200 return -EREMOTEIO; 200 return -EREMOTEIO;
201 case RCODE_TYPE_ERROR: 201 case RCODE_TYPE_ERROR:
202 return -EACCES; 202 return -EACCES;
203 case RCODE_ADDRESS_ERROR: 203 case RCODE_ADDRESS_ERROR:
204 return -EINVAL; 204 return -EINVAL;
205 default: 205 default:
206 HPSB_ERR("received reserved rcode %d from node %d", 206 HPSB_ERR("received reserved rcode %d from node %d",
207 (packet->header[1] >> 12) & 0xf, 207 (packet->header[1] >> 12) & 0xf,
208 packet->node_id); 208 packet->node_id);
209 return -EAGAIN; 209 return -EAGAIN;
210 } 210 }
211 HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__); 211 HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__);
212 212
213 case ACK_BUSY_X: 213 case ACK_BUSY_X:
214 case ACK_BUSY_A: 214 case ACK_BUSY_A:
215 case ACK_BUSY_B: 215 case ACK_BUSY_B:
216 return -EBUSY; 216 return -EBUSY;
217 217
218 case ACK_TYPE_ERROR: 218 case ACK_TYPE_ERROR:
219 return -EACCES; 219 return -EACCES;
220 220
221 case ACK_COMPLETE: 221 case ACK_COMPLETE:
222 if (packet->tcode == TCODE_WRITEQ 222 if (packet->tcode == TCODE_WRITEQ
223 || packet->tcode == TCODE_WRITEB) { 223 || packet->tcode == TCODE_WRITEB) {
224 return 0; 224 return 0;
225 } else { 225 } else {
226 HPSB_ERR("impossible ack_complete from node %d " 226 HPSB_ERR("impossible ack_complete from node %d "
227 "(tcode %d)", packet->node_id, packet->tcode); 227 "(tcode %d)", packet->node_id, packet->tcode);
228 return -EAGAIN; 228 return -EAGAIN;
229 } 229 }
230 230
231 231 case ACK_DATA_ERROR:
232 case ACK_DATA_ERROR: 232 if (packet->tcode == TCODE_WRITEB
233 if (packet->tcode == TCODE_WRITEB 233 || packet->tcode == TCODE_LOCK_REQUEST) {
234 || packet->tcode == TCODE_LOCK_REQUEST) { 234 return -EAGAIN;
235 return -EAGAIN; 235 } else {
236 } else { 236 HPSB_ERR("impossible ack_data_error from node %d "
237 HPSB_ERR("impossible ack_data_error from node %d " 237 "(tcode %d)", packet->node_id, packet->tcode);
238 "(tcode %d)", packet->node_id, packet->tcode); 238 return -EAGAIN;
239 return -EAGAIN; 239 }
240 } 240
241 241 case ACK_ADDRESS_ERROR:
242 case ACK_ADDRESS_ERROR: 242 return -EINVAL;
243 return -EINVAL; 243
244 244 case ACK_TARDY:
245 case ACK_TARDY: 245 case ACK_CONFLICT_ERROR:
246 case ACK_CONFLICT_ERROR: 246 case ACKX_NONE:
247 case ACKX_NONE: 247 case ACKX_SEND_ERROR:
248 case ACKX_SEND_ERROR: 248 case ACKX_ABORTED:
249 case ACKX_ABORTED: 249 case ACKX_TIMEOUT:
250 case ACKX_TIMEOUT: 250 /* error while sending */
251 /* error while sending */ 251 return -EAGAIN;
252 return -EAGAIN; 252
253 253 default:
254 default: 254 HPSB_ERR("got invalid ack %d from node %d (tcode %d)",
255 HPSB_ERR("got invalid ack %d from node %d (tcode %d)", 255 packet->ack_code, packet->node_id, packet->tcode);
256 packet->ack_code, packet->node_id, packet->tcode); 256 return -EAGAIN;
257 return -EAGAIN; 257 }
258 } 258
259 259 HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
260 HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__);
261} 260}
262 261
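hpsb_packet_success() folds the ack code (and, for ack_pending, the rcode carried in the response header) into an ordinary errno: 0 on success, -EAGAIN for retryable conditions, -EBUSY for the explicit busy acks, and -EREMOTEIO/-EACCES/-EINVAL for data, type and address errors. A hypothetical caller policy on top of that mapping, not taken from the driver:

    static int example_classify(struct hpsb_packet *pkt)
    {
            int err = hpsb_packet_success(pkt);

            switch (err) {
            case 0:
                    return 0;       /* transaction completed */
            case -EAGAIN:
            case -EBUSY:
                    return 1;       /* caller may retry later */
            default:
                    return err;     /* hard error: -EREMOTEIO, -EACCES, -EINVAL */
            }
    }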
263struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, 262struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
264 u64 addr, size_t length) 263 u64 addr, size_t length)
265{ 264{
266 struct hpsb_packet *packet; 265 struct hpsb_packet *packet;
267 266
268 if (length == 0) 267 if (length == 0)
269 return NULL; 268 return NULL;
@@ -288,8 +287,9 @@ struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node,
288 return packet; 287 return packet;
289} 288}
290 289
291struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node, 290struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, nodeid_t node,
292 u64 addr, quadlet_t *buffer, size_t length) 291 u64 addr, quadlet_t * buffer,
292 size_t length)
293{ 293{
294 struct hpsb_packet *packet; 294 struct hpsb_packet *packet;
295 295
@@ -300,7 +300,7 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
300 if (!packet) 300 if (!packet)
301 return NULL; 301 return NULL;
302 302
303 if (length % 4) { /* zero padding bytes */ 303 if (length % 4) { /* zero padding bytes */
304 packet->data[length >> 2] = 0; 304 packet->data[length >> 2] = 0;
305 } 305 }
306 packet->host = host; 306 packet->host = host;
@@ -322,8 +322,9 @@ struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node
322 return packet; 322 return packet;
323} 323}
324 324
325struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, int length, 325struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 * buffer,
326 int channel, int tag, int sync) 326 int length, int channel, int tag,
327 int sync)
327{ 328{
328 struct hpsb_packet *packet; 329 struct hpsb_packet *packet;
329 330
@@ -334,7 +335,7 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
334 if (!packet) 335 if (!packet)
335 return NULL; 336 return NULL;
336 337
337 if (length % 4) { /* zero padding bytes */ 338 if (length % 4) { /* zero padding bytes */
338 packet->data[length >> 2] = 0; 339 packet->data[length >> 2] = 0;
339 } 340 }
340 packet->host = host; 341 packet->host = host;
@@ -352,14 +353,15 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, i
352} 353}
353 354
354struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, 355struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
355 u64 addr, int extcode, quadlet_t *data, 356 u64 addr, int extcode,
356 quadlet_t arg) 357 quadlet_t * data, quadlet_t arg)
357{ 358{
358 struct hpsb_packet *p; 359 struct hpsb_packet *p;
359 u32 length; 360 u32 length;
360 361
361 p = hpsb_alloc_packet(8); 362 p = hpsb_alloc_packet(8);
362 if (!p) return NULL; 363 if (!p)
364 return NULL;
363 365
364 p->host = host; 366 p->host = host;
365 p->node_id = node; 367 p->node_id = node;
@@ -388,15 +390,16 @@ struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node,
388 return p; 390 return p;
389} 391}
390 392
391struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node, 393struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host,
392 u64 addr, int extcode, octlet_t *data, 394 nodeid_t node, u64 addr, int extcode,
393 octlet_t arg) 395 octlet_t * data, octlet_t arg)
394{ 396{
395 struct hpsb_packet *p; 397 struct hpsb_packet *p;
396 u32 length; 398 u32 length;
397 399
398 p = hpsb_alloc_packet(16); 400 p = hpsb_alloc_packet(16);
399 if (!p) return NULL; 401 if (!p)
402 return NULL;
400 403
401 p->host = host; 404 p->host = host;
402 p->node_id = node; 405 p->node_id = node;
@@ -429,18 +432,18 @@ struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node
429 return p; 432 return p;
430} 433}
431 434
432struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, 435struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data)
433 quadlet_t data)
434{ 436{
435 struct hpsb_packet *p; 437 struct hpsb_packet *p;
436 438
437 p = hpsb_alloc_packet(0); 439 p = hpsb_alloc_packet(0);
438 if (!p) return NULL; 440 if (!p)
441 return NULL;
439 442
440 p->host = host; 443 p->host = host;
441 fill_phy_packet(p, data); 444 fill_phy_packet(p, data);
442 445
443 return p; 446 return p;
444} 447}
445 448
446struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, 449struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
@@ -450,7 +453,8 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
450 struct hpsb_packet *p; 453 struct hpsb_packet *p;
451 454
452 p = hpsb_alloc_packet(length); 455 p = hpsb_alloc_packet(length);
453 if (!p) return NULL; 456 if (!p)
457 return NULL;
454 458
455 p->host = host; 459 p->host = host;
456 fill_iso_packet(p, length, channel, tag, sync); 460 fill_iso_packet(p, length, channel, tag, sync);
@@ -466,47 +470,46 @@ struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host,
466 */ 470 */
467 471
468int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation, 472int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation,
469 u64 addr, quadlet_t *buffer, size_t length) 473 u64 addr, quadlet_t * buffer, size_t length)
470{ 474{
471 struct hpsb_packet *packet; 475 struct hpsb_packet *packet;
472 int retval = 0; 476 int retval = 0;
473 477
474 if (length == 0) 478 if (length == 0)
475 return -EINVAL; 479 return -EINVAL;
476 480
477 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet 481 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
478 482
479 packet = hpsb_make_readpacket(host, node, addr, length); 483 packet = hpsb_make_readpacket(host, node, addr, length);
480 484
481 if (!packet) { 485 if (!packet) {
482 return -ENOMEM; 486 return -ENOMEM;
483 } 487 }
484 488
485 packet->generation = generation; 489 packet->generation = generation;
486 retval = hpsb_send_packet_and_wait(packet); 490 retval = hpsb_send_packet_and_wait(packet);
487 if (retval < 0) 491 if (retval < 0)
488 goto hpsb_read_fail; 492 goto hpsb_read_fail;
489 493
490 retval = hpsb_packet_success(packet); 494 retval = hpsb_packet_success(packet);
491 495
492 if (retval == 0) { 496 if (retval == 0) {
493 if (length == 4) { 497 if (length == 4) {
494 *buffer = packet->header[3]; 498 *buffer = packet->header[3];
495 } else { 499 } else {
496 memcpy(buffer, packet->data, length); 500 memcpy(buffer, packet->data, length);
497 } 501 }
498 } 502 }
499 503
500hpsb_read_fail: 504 hpsb_read_fail:
501 hpsb_free_tlabel(packet); 505 hpsb_free_tlabel(packet);
502 hpsb_free_packet(packet); 506 hpsb_free_packet(packet);
503 507
504 return retval; 508 return retval;
505} 509}
506 510
507
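hpsb_read() above wraps the whole transaction: build the read packet, stamp it with the caller's generation, send and wait, convert the result with hpsb_packet_success(), copy a quadlet out of header[3] (or a block out of packet->data), then drop the tlabel and the packet. hpsb_write() below follows the same shape via hpsb_make_writepacket(). A usage sketch; the wrapper name is invented, the address is caller-supplied, and the call must not be made from interrupt context:

    static int example_read_quadlet(struct hpsb_host *host, nodeid_t node,
                                    u64 addr, quadlet_t *value)
    {
            unsigned int gen = get_hpsb_generation(host);

            return hpsb_read(host, node, gen, addr, value, 4);
    }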
508int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, 511int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
509 u64 addr, quadlet_t *buffer, size_t length) 512 u64 addr, quadlet_t * buffer, size_t length)
510{ 513{
511 struct hpsb_packet *packet; 514 struct hpsb_packet *packet;
512 int retval; 515 int retval;
@@ -514,62 +517,61 @@ int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation,
514 if (length == 0) 517 if (length == 0)
515 return -EINVAL; 518 return -EINVAL;
516 519
517 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet 520 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
518 521
519 packet = hpsb_make_writepacket (host, node, addr, buffer, length); 522 packet = hpsb_make_writepacket(host, node, addr, buffer, length);
520 523
521 if (!packet) 524 if (!packet)
522 return -ENOMEM; 525 return -ENOMEM;
523 526
524 packet->generation = generation; 527 packet->generation = generation;
525 retval = hpsb_send_packet_and_wait(packet); 528 retval = hpsb_send_packet_and_wait(packet);
526 if (retval < 0) 529 if (retval < 0)
527 goto hpsb_write_fail; 530 goto hpsb_write_fail;
528 531
529 retval = hpsb_packet_success(packet); 532 retval = hpsb_packet_success(packet);
530 533
531hpsb_write_fail: 534 hpsb_write_fail:
532 hpsb_free_tlabel(packet); 535 hpsb_free_tlabel(packet);
533 hpsb_free_packet(packet); 536 hpsb_free_packet(packet);
534 537
535 return retval; 538 return retval;
536} 539}
537 540
538#if 0 541#if 0
539 542
540int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation, 543int hpsb_lock(struct hpsb_host *host, nodeid_t node, unsigned int generation,
541 u64 addr, int extcode, quadlet_t *data, quadlet_t arg) 544 u64 addr, int extcode, quadlet_t * data, quadlet_t arg)
542{ 545{
543 struct hpsb_packet *packet; 546 struct hpsb_packet *packet;
544 int retval = 0; 547 int retval = 0;
545 548
546 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet 549 BUG_ON(in_interrupt()); // We can't be called in an interrupt, yet
547 550
548 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg); 551 packet = hpsb_make_lockpacket(host, node, addr, extcode, data, arg);
549 if (!packet) 552 if (!packet)
550 return -ENOMEM; 553 return -ENOMEM;
551 554
552 packet->generation = generation; 555 packet->generation = generation;
553 retval = hpsb_send_packet_and_wait(packet); 556 retval = hpsb_send_packet_and_wait(packet);
554 if (retval < 0) 557 if (retval < 0)
555 goto hpsb_lock_fail; 558 goto hpsb_lock_fail;
556 559
557 retval = hpsb_packet_success(packet); 560 retval = hpsb_packet_success(packet);
558 561
559 if (retval == 0) { 562 if (retval == 0) {
560 *data = packet->data[0]; 563 *data = packet->data[0];
561 } 564 }
562 565
563hpsb_lock_fail: 566 hpsb_lock_fail:
564 hpsb_free_tlabel(packet); 567 hpsb_free_tlabel(packet);
565 hpsb_free_packet(packet); 568 hpsb_free_packet(packet);
566 569
567 return retval; 570 return retval;
568} 571}
569 572
570
571int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation, 573int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
572 quadlet_t *buffer, size_t length, u32 specifier_id, 574 quadlet_t * buffer, size_t length, u32 specifier_id,
573 unsigned int version) 575 unsigned int version)
574{ 576{
575 struct hpsb_packet *packet; 577 struct hpsb_packet *packet;
@@ -586,7 +588,8 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
586 return -ENOMEM; 588 return -ENOMEM;
587 589
588 packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi); 590 packet->data[0] = cpu_to_be32((host->node_id << 16) | specifier_id_hi);
589 packet->data[1] = cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff)); 591 packet->data[1] =
592 cpu_to_be32((specifier_id_lo << 24) | (version & 0x00ffffff));
590 593
591 memcpy(&(packet->data[2]), buffer, length - 8); 594 memcpy(&(packet->data[2]), buffer, length - 8);
592 595
@@ -601,4 +604,4 @@ int hpsb_send_gasp(struct hpsb_host *host, int channel, unsigned int generation,
601 return retval; 604 return retval;
602} 605}
603 606
604#endif /* 0 */ 607#endif /* 0 */
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c
index 615541b8b90f..f26680ebef7c 100644
--- a/drivers/ieee1394/iso.c
+++ b/drivers/ieee1394/iso.c
@@ -36,20 +36,22 @@ void hpsb_iso_shutdown(struct hpsb_iso *iso)
36 kfree(iso); 36 kfree(iso);
37} 37}
38 38
39static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_iso_type type, 39static struct hpsb_iso *hpsb_iso_common_init(struct hpsb_host *host,
40 enum hpsb_iso_type type,
40 unsigned int data_buf_size, 41 unsigned int data_buf_size,
41 unsigned int buf_packets, 42 unsigned int buf_packets,
42 int channel, 43 int channel, int dma_mode,
43 int dma_mode,
44 int irq_interval, 44 int irq_interval,
45 void (*callback)(struct hpsb_iso*)) 45 void (*callback) (struct hpsb_iso
46 *))
46{ 47{
47 struct hpsb_iso *iso; 48 struct hpsb_iso *iso;
48 int dma_direction; 49 int dma_direction;
49 50
50 /* make sure driver supports the ISO API */ 51 /* make sure driver supports the ISO API */
51 if (!host->driver->isoctl) { 52 if (!host->driver->isoctl) {
52 printk(KERN_INFO "ieee1394: host driver '%s' does not support the rawiso API\n", 53 printk(KERN_INFO
54 "ieee1394: host driver '%s' does not support the rawiso API\n",
53 host->driver->name); 55 host->driver->name);
54 return NULL; 56 return NULL;
55 } 57 }
@@ -59,12 +61,13 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
59 if (buf_packets < 2) 61 if (buf_packets < 2)
60 buf_packets = 2; 62 buf_packets = 2;
61 63
62 if ((dma_mode < HPSB_ISO_DMA_DEFAULT) || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER)) 64 if ((dma_mode < HPSB_ISO_DMA_DEFAULT)
63 dma_mode=HPSB_ISO_DMA_DEFAULT; 65 || (dma_mode > HPSB_ISO_DMA_PACKET_PER_BUFFER))
66 dma_mode = HPSB_ISO_DMA_DEFAULT;
64 67
65 if ((irq_interval < 0) || (irq_interval > buf_packets / 4)) 68 if ((irq_interval < 0) || (irq_interval > buf_packets / 4))
66 irq_interval = buf_packets / 4; 69 irq_interval = buf_packets / 4;
67 if (irq_interval == 0) /* really interrupt for each packet*/ 70 if (irq_interval == 0) /* really interrupt for each packet */
68 irq_interval = 1; 71 irq_interval = 1;
69 72
70 if (channel < -1 || channel >= 64) 73 if (channel < -1 || channel >= 64)
@@ -76,7 +79,10 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
76 79
77 /* allocate and write the struct hpsb_iso */ 80 /* allocate and write the struct hpsb_iso */
78 81
79 iso = kmalloc(sizeof(*iso) + buf_packets * sizeof(struct hpsb_iso_packet_info), GFP_KERNEL); 82 iso =
83 kmalloc(sizeof(*iso) +
84 buf_packets * sizeof(struct hpsb_iso_packet_info),
85 GFP_KERNEL);
80 if (!iso) 86 if (!iso)
81 return NULL; 87 return NULL;
82 88
@@ -111,17 +117,18 @@ static struct hpsb_iso* hpsb_iso_common_init(struct hpsb_host *host, enum hpsb_i
111 iso->prebuffer = 0; 117 iso->prebuffer = 0;
112 118
113 /* allocate the packet buffer */ 119 /* allocate the packet buffer */
114 if (dma_region_alloc(&iso->data_buf, iso->buf_size, host->pdev, dma_direction)) 120 if (dma_region_alloc
121 (&iso->data_buf, iso->buf_size, host->pdev, dma_direction))
115 goto err; 122 goto err;
116 123
117 return iso; 124 return iso;
118 125
119err: 126 err:
120 hpsb_iso_shutdown(iso); 127 hpsb_iso_shutdown(iso);
121 return NULL; 128 return NULL;
122} 129}
123 130
124int hpsb_iso_n_ready(struct hpsb_iso* iso) 131int hpsb_iso_n_ready(struct hpsb_iso *iso)
125{ 132{
126 unsigned long flags; 133 unsigned long flags;
127 int val; 134 int val;
@@ -133,18 +140,19 @@ int hpsb_iso_n_ready(struct hpsb_iso* iso)
133 return val; 140 return val;
134} 141}
135 142
136 143struct hpsb_iso *hpsb_iso_xmit_init(struct hpsb_host *host,
137struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
138 unsigned int data_buf_size, 144 unsigned int data_buf_size,
139 unsigned int buf_packets, 145 unsigned int buf_packets,
140 int channel, 146 int channel,
141 int speed, 147 int speed,
142 int irq_interval, 148 int irq_interval,
143 void (*callback)(struct hpsb_iso*)) 149 void (*callback) (struct hpsb_iso *))
144{ 150{
145 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT, 151 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_XMIT,
146 data_buf_size, buf_packets, 152 data_buf_size, buf_packets,
147 channel, HPSB_ISO_DMA_DEFAULT, irq_interval, callback); 153 channel,
154 HPSB_ISO_DMA_DEFAULT,
155 irq_interval, callback);
148 if (!iso) 156 if (!iso)
149 return NULL; 157 return NULL;
150 158
@@ -157,22 +165,23 @@ struct hpsb_iso* hpsb_iso_xmit_init(struct hpsb_host *host,
157 iso->flags |= HPSB_ISO_DRIVER_INIT; 165 iso->flags |= HPSB_ISO_DRIVER_INIT;
158 return iso; 166 return iso;
159 167
160err: 168 err:
161 hpsb_iso_shutdown(iso); 169 hpsb_iso_shutdown(iso);
162 return NULL; 170 return NULL;
163} 171}
164 172
165struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host, 173struct hpsb_iso *hpsb_iso_recv_init(struct hpsb_host *host,
166 unsigned int data_buf_size, 174 unsigned int data_buf_size,
167 unsigned int buf_packets, 175 unsigned int buf_packets,
168 int channel, 176 int channel,
169 int dma_mode, 177 int dma_mode,
170 int irq_interval, 178 int irq_interval,
171 void (*callback)(struct hpsb_iso*)) 179 void (*callback) (struct hpsb_iso *))
172{ 180{
173 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV, 181 struct hpsb_iso *iso = hpsb_iso_common_init(host, HPSB_ISO_RECV,
174 data_buf_size, buf_packets, 182 data_buf_size, buf_packets,
175 channel, dma_mode, irq_interval, callback); 183 channel, dma_mode,
184 irq_interval, callback);
176 if (!iso) 185 if (!iso)
177 return NULL; 186 return NULL;
178 187
@@ -183,7 +192,7 @@ struct hpsb_iso* hpsb_iso_recv_init(struct hpsb_host *host,
183 iso->flags |= HPSB_ISO_DRIVER_INIT; 192 iso->flags |= HPSB_ISO_DRIVER_INIT;
184 return iso; 193 return iso;
185 194
186err: 195 err:
187 hpsb_iso_shutdown(iso); 196 hpsb_iso_shutdown(iso);
188 return NULL; 197 return NULL;
189} 198}
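hpsb_iso_recv_init() layers a receive context on hpsb_iso_common_init() and the host driver's isoctl hook; together with hpsb_iso_recv_start() in the following hunks that is a complete single-channel receive setup. A sketch under stated assumptions: the function names, buffer sizes and channel are placeholders, and the -1/-1/0 arguments to hpsb_iso_recv_start() are assumed to mean "no cycle constraint, any tag, sync 0":

    static void example_rx_callback(struct hpsb_iso *iso)
    {
            /* consume ready packets here; see the release sketch below */
    }

    static struct hpsb_iso *example_recv_setup(struct hpsb_host *host)
    {
            struct hpsb_iso *iso;

            iso = hpsb_iso_recv_init(host,
                                     256 * 1024,           /* data_buf_size */
                                     64,                    /* buf_packets */
                                     13,                    /* channel (0..63) */
                                     HPSB_ISO_DMA_DEFAULT,
                                     -1,                    /* irq_interval: default */
                                     example_rx_callback);
            if (!iso)
                    return NULL;

            if (hpsb_iso_recv_start(iso, -1, -1, 0) < 0) {
                    hpsb_iso_shutdown(iso);
                    return NULL;
            }
            return iso;
    }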
@@ -197,16 +206,17 @@ int hpsb_iso_recv_listen_channel(struct hpsb_iso *iso, unsigned char channel)
197 206
198int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel) 207int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel)
199{ 208{
200 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64) 209 if (iso->type != HPSB_ISO_RECV || iso->channel != -1 || channel >= 64)
201 return -EINVAL; 210 return -EINVAL;
202 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel); 211 return iso->host->driver->isoctl(iso, RECV_UNLISTEN_CHANNEL, channel);
203} 212}
204 213
205int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask) 214int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
206{ 215{
207 if (iso->type != HPSB_ISO_RECV || iso->channel != -1) 216 if (iso->type != HPSB_ISO_RECV || iso->channel != -1)
208 return -EINVAL; 217 return -EINVAL;
209 return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK, (unsigned long) &mask); 218 return iso->host->driver->isoctl(iso, RECV_SET_CHANNEL_MASK,
219 (unsigned long)&mask);
210} 220}
211 221
212int hpsb_iso_recv_flush(struct hpsb_iso *iso) 222int hpsb_iso_recv_flush(struct hpsb_iso *iso)
@@ -283,7 +293,9 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
283 293
284 isoctl_args[2] = sync; 294 isoctl_args[2] = sync;
285 295
286 retval = iso->host->driver->isoctl(iso, RECV_START, (unsigned long) &isoctl_args[0]); 296 retval =
297 iso->host->driver->isoctl(iso, RECV_START,
298 (unsigned long)&isoctl_args[0]);
287 if (retval) 299 if (retval)
288 return retval; 300 return retval;
289 301
@@ -296,7 +308,8 @@ int hpsb_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
296 308
297static int hpsb_iso_check_offset_len(struct hpsb_iso *iso, 309static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
298 unsigned int offset, unsigned short len, 310 unsigned int offset, unsigned short len,
299 unsigned int *out_offset, unsigned short *out_len) 311 unsigned int *out_offset,
312 unsigned short *out_len)
300{ 313{
301 if (offset >= iso->buf_size) 314 if (offset >= iso->buf_size)
302 return -EFAULT; 315 return -EFAULT;
@@ -316,8 +329,8 @@ static int hpsb_iso_check_offset_len(struct hpsb_iso *iso,
316 return 0; 329 return 0;
317} 330}
318 331
319 332int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len,
320int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy) 333 u8 tag, u8 sy)
321{ 334{
322 struct hpsb_iso_packet_info *info; 335 struct hpsb_iso_packet_info *info;
323 unsigned long flags; 336 unsigned long flags;
@@ -334,7 +347,8 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
334 info = &iso->infos[iso->first_packet]; 347 info = &iso->infos[iso->first_packet];
335 348
336 /* check for bogus offset/length */ 349 /* check for bogus offset/length */
337 if (hpsb_iso_check_offset_len(iso, offset, len, &info->offset, &info->len)) 350 if (hpsb_iso_check_offset_len
351 (iso, offset, len, &info->offset, &info->len))
338 return -EFAULT; 352 return -EFAULT;
339 353
340 info->tag = tag; 354 info->tag = tag;
@@ -342,13 +356,13 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
342 356
343 spin_lock_irqsave(&iso->lock, flags); 357 spin_lock_irqsave(&iso->lock, flags);
344 358
345 rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long) info); 359 rv = iso->host->driver->isoctl(iso, XMIT_QUEUE, (unsigned long)info);
346 if (rv) 360 if (rv)
347 goto out; 361 goto out;
348 362
349 /* increment cursors */ 363 /* increment cursors */
350 iso->first_packet = (iso->first_packet+1) % iso->buf_packets; 364 iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
351 iso->xmit_cycle = (iso->xmit_cycle+1) % 8000; 365 iso->xmit_cycle = (iso->xmit_cycle + 1) % 8000;
352 iso->n_ready_packets--; 366 iso->n_ready_packets--;
353 367
354 if (iso->prebuffer != 0) { 368 if (iso->prebuffer != 0) {
@@ -359,7 +373,7 @@ int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag
359 } 373 }
360 } 374 }
361 375
362out: 376 out:
363 spin_unlock_irqrestore(&iso->lock, flags); 377 spin_unlock_irqrestore(&iso->lock, flags);
364 return rv; 378 return rv;
365} 379}
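hpsb_iso_xmit_queue_packet() checks the offset/length against the DMA buffer, records tag and sy, hands the descriptor to the driver through XMIT_QUEUE and advances the first_packet and xmit_cycle cursors. A transmit-side sketch: it assumes the payloads were already written into the iso buffer and that the transmit context has been started elsewhere, and the function name, packet length and offsets are placeholders:

    static int example_xmit_burst(struct hpsb_iso *iso,
                                  unsigned int pkt_len, unsigned int n)
    {
            unsigned int i;
            int err;

            for (i = 0; i < n; i++) {
                    err = hpsb_iso_xmit_queue_packet(iso, i * pkt_len,
                                                     pkt_len,
                                                     0 /* tag */, 0 /* sy */);
                    if (err)
                            return err;
            }

            /* block until every queued packet has been sent */
            return hpsb_iso_xmit_sync(iso);
    }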
@@ -369,7 +383,9 @@ int hpsb_iso_xmit_sync(struct hpsb_iso *iso)
369 if (iso->type != HPSB_ISO_XMIT) 383 if (iso->type != HPSB_ISO_XMIT)
370 return -EINVAL; 384 return -EINVAL;
371 385
372 return wait_event_interruptible(iso->waitq, hpsb_iso_n_ready(iso) == iso->buf_packets); 386 return wait_event_interruptible(iso->waitq,
387 hpsb_iso_n_ready(iso) ==
388 iso->buf_packets);
373} 389}
374 390
375void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error) 391void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
@@ -396,7 +412,8 @@ void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error)
396} 412}
397 413
398void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, 414void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
399 u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy) 415 u16 total_len, u16 cycle, u8 channel, u8 tag,
416 u8 sy)
400{ 417{
401 unsigned long flags; 418 unsigned long flags;
402 spin_lock_irqsave(&iso->lock, flags); 419 spin_lock_irqsave(&iso->lock, flags);
@@ -416,7 +433,7 @@ void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len,
416 info->tag = tag; 433 info->tag = tag;
417 info->sy = sy; 434 info->sy = sy;
418 435
419 iso->pkt_dma = (iso->pkt_dma+1) % iso->buf_packets; 436 iso->pkt_dma = (iso->pkt_dma + 1) % iso->buf_packets;
420 iso->n_ready_packets++; 437 iso->n_ready_packets++;
421 } 438 }
422 439
@@ -435,20 +452,21 @@ int hpsb_iso_recv_release_packets(struct hpsb_iso *iso, unsigned int n_packets)
435 spin_lock_irqsave(&iso->lock, flags); 452 spin_lock_irqsave(&iso->lock, flags);
436 for (i = 0; i < n_packets; i++) { 453 for (i = 0; i < n_packets; i++) {
437 rv = iso->host->driver->isoctl(iso, RECV_RELEASE, 454 rv = iso->host->driver->isoctl(iso, RECV_RELEASE,
438 (unsigned long) &iso->infos[iso->first_packet]); 455 (unsigned long)&iso->infos[iso->
456 first_packet]);
439 if (rv) 457 if (rv)
440 break; 458 break;
441 459
442 iso->first_packet = (iso->first_packet+1) % iso->buf_packets; 460 iso->first_packet = (iso->first_packet + 1) % iso->buf_packets;
443 iso->n_ready_packets--; 461 iso->n_ready_packets--;
444 462
445 /* release memory from packets discarded when queue was full */ 463 /* release memory from packets discarded when queue was full */
446 if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */ 464 if (iso->n_ready_packets == 0) { /* Release only after all prior packets handled */
447 if (iso->bytes_discarded != 0) { 465 if (iso->bytes_discarded != 0) {
448 struct hpsb_iso_packet_info inf; 466 struct hpsb_iso_packet_info inf;
449 inf.total_len = iso->bytes_discarded; 467 inf.total_len = iso->bytes_discarded;
450 iso->host->driver->isoctl(iso, RECV_RELEASE, 468 iso->host->driver->isoctl(iso, RECV_RELEASE,
451 (unsigned long) &inf); 469 (unsigned long)&inf);
452 iso->bytes_discarded = 0; 470 iso->bytes_discarded = 0;
453 } 471 }
454 } 472 }
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index f2453668acf5..082c7fd239f5 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -743,21 +743,20 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
743 unsigned int generation) 743 unsigned int generation)
744{ 744{
745 struct hpsb_host *host = hi->host; 745 struct hpsb_host *host = hi->host;
746 struct node_entry *ne; 746 struct node_entry *ne;
747
748 ne = kmalloc(sizeof(struct node_entry), GFP_KERNEL);
749 if (!ne) return NULL;
750 747
751 memset(ne, 0, sizeof(struct node_entry)); 748 ne = kzalloc(sizeof(*ne), GFP_KERNEL);
749 if (!ne)
750 return NULL;
752 751
753 ne->tpool = &host->tpool[nodeid & NODE_MASK]; 752 ne->tpool = &host->tpool[nodeid & NODE_MASK];
754 753
755 ne->host = host; 754 ne->host = host;
756 ne->nodeid = nodeid; 755 ne->nodeid = nodeid;
757 ne->generation = generation; 756 ne->generation = generation;
758 ne->needs_probe = 1; 757 ne->needs_probe = 1;
759 758
760 ne->guid = guid; 759 ne->guid = guid;
761 ne->guid_vendor_id = (guid >> 40) & 0xffffff; 760 ne->guid_vendor_id = (guid >> 40) & 0xffffff;
762 ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id); 761 ne->guid_vendor_oui = nodemgr_find_oui_name(ne->guid_vendor_id);
763 ne->csr = csr; 762 ne->csr = csr;
@@ -787,7 +786,7 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr
787 (host->node_id == nodeid) ? "Host" : "Node", 786 (host->node_id == nodeid) ? "Host" : "Node",
788 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid); 787 NODE_BUS_ARGS(host, nodeid), (unsigned long long)guid);
789 788
790 return ne; 789 return ne;
791} 790}
792 791
793 792
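
[Editor's aside, not part of the patch] This hunk is one of several in the commit that folds a kmalloc()+memset() pair into a single kzalloc() call and switches the size expression to sizeof(*ptr), so it keeps tracking the pointer's type. A before/after sketch with a hypothetical struct widget:

#include <linux/slab.h>
#include <linux/string.h>

struct widget {		/* hypothetical payload type */
	int id;
	void *priv;
};

static struct widget *widget_alloc_old(void)
{
	struct widget *w = kmalloc(sizeof(struct widget), GFP_KERNEL);

	if (!w)
		return NULL;
	memset(w, 0, sizeof(struct widget));	/* separate zeroing step */
	return w;
}

static struct widget *widget_alloc_new(void)
{
	struct widget *w;

	/* One call allocates and zeroes; sizeof(*w) follows w's type. */
	w = kzalloc(sizeof(*w), GFP_KERNEL);
	return w;
}
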
@@ -872,12 +871,10 @@ static struct unit_directory *nodemgr_process_unit_directory
872 struct csr1212_keyval *kv; 871 struct csr1212_keyval *kv;
873 u8 last_key_id = 0; 872 u8 last_key_id = 0;
874 873
875 ud = kmalloc(sizeof(struct unit_directory), GFP_KERNEL); 874 ud = kzalloc(sizeof(*ud), GFP_KERNEL);
876 if (!ud) 875 if (!ud)
877 goto unit_directory_error; 876 goto unit_directory_error;
878 877
879 memset (ud, 0, sizeof(struct unit_directory));
880
881 ud->ne = ne; 878 ud->ne = ne;
882 ud->ignore_driver = ignore_drivers; 879 ud->ignore_driver = ignore_drivers;
883 ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE; 880 ud->address = ud_kv->offset + CSR1212_CONFIG_ROM_SPACE_BASE;
@@ -937,10 +934,10 @@ static struct unit_directory *nodemgr_process_unit_directory
937 /* Logical Unit Number */ 934 /* Logical Unit Number */
938 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { 935 if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
939 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) { 936 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) {
940 ud_child = kmalloc(sizeof(struct unit_directory), GFP_KERNEL); 937 ud_child = kmalloc(sizeof(*ud_child), GFP_KERNEL);
941 if (!ud_child) 938 if (!ud_child)
942 goto unit_directory_error; 939 goto unit_directory_error;
943 memcpy(ud_child, ud, sizeof(struct unit_directory)); 940 memcpy(ud_child, ud, sizeof(*ud_child));
944 nodemgr_register_device(ne, ud_child, &ne->device); 941 nodemgr_register_device(ne, ud_child, &ne->device);
945 ud_child = NULL; 942 ud_child = NULL;
946 943
@@ -1200,7 +1197,7 @@ static void nodemgr_node_scan_one(struct host_info *hi,
1200 struct csr1212_csr *csr; 1197 struct csr1212_csr *csr;
1201 struct nodemgr_csr_info *ci; 1198 struct nodemgr_csr_info *ci;
1202 1199
1203 ci = kmalloc(sizeof(struct nodemgr_csr_info), GFP_KERNEL); 1200 ci = kmalloc(sizeof(*ci), GFP_KERNEL);
1204 if (!ci) 1201 if (!ci)
1205 return; 1202 return;
1206 1203
@@ -1410,14 +1407,28 @@ static void nodemgr_node_probe(struct host_info *hi, int generation)
1410 struct hpsb_host *host = hi->host; 1407 struct hpsb_host *host = hi->host;
1411 struct class *class = &nodemgr_ne_class; 1408 struct class *class = &nodemgr_ne_class;
1412 struct class_device *cdev; 1409 struct class_device *cdev;
1410 struct node_entry *ne;
1413 1411
1414 /* Do some processing of the nodes we've probed. This pulls them 1412 /* Do some processing of the nodes we've probed. This pulls them
1415 * into the sysfs layer if needed, and can result in processing of 1413 * into the sysfs layer if needed, and can result in processing of
1416 * unit-directories, or just updating the node and it's 1414 * unit-directories, or just updating the node and it's
1417 * unit-directories. */ 1415 * unit-directories.
1416 *
1417 * Run updates before probes. Usually, updates are time-critical
1418 * while probes are time-consuming. (Well, those probes need some
1419 * improvement...) */
1420
1418 down_read(&class->subsys.rwsem); 1421 down_read(&class->subsys.rwsem);
1419 list_for_each_entry(cdev, &class->children, node) 1422 list_for_each_entry(cdev, &class->children, node) {
1420 nodemgr_probe_ne(hi, container_of(cdev, struct node_entry, class_dev), generation); 1423 ne = container_of(cdev, struct node_entry, class_dev);
1424 if (!ne->needs_probe)
1425 nodemgr_probe_ne(hi, ne, generation);
1426 }
1427 list_for_each_entry(cdev, &class->children, node) {
1428 ne = container_of(cdev, struct node_entry, class_dev);
1429 if (ne->needs_probe)
1430 nodemgr_probe_ne(hi, ne, generation);
1431 }
1421 up_read(&class->subsys.rwsem); 1432 up_read(&class->subsys.rwsem);
1422 1433
1423 1434
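
[Editor's aside, not part of the patch] The hunk above splits the single walk over the class children into two passes so that quick updates (nodes with !ne->needs_probe) run before the slower probes, as the new comment explains. The same shape in isolation, over a plain list with a hypothetical struct node; the real code iterates nodemgr_ne_class under class->subsys.rwsem:

#include <linux/list.h>

/* Hypothetical element type; the driver uses struct node_entry. */
struct node {
	struct list_head link;
	int needs_probe;
};

static void update_node(struct node *n) { /* cheap, time-critical work */ }
static void probe_node(struct node *n)  { /* slow, time-consuming work */ }

/* Updates first, probes second, mirroring nodemgr_node_probe(). */
static void process_nodes(struct list_head *nodes)
{
	struct node *n;

	list_for_each_entry(n, nodes, link)
		if (!n->needs_probe)
			update_node(n);

	list_for_each_entry(n, nodes, link)
		if (n->needs_probe)
			probe_node(n);
}
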
@@ -1448,7 +1459,8 @@ static int nodemgr_send_resume_packet(struct hpsb_host *host)
1448 int ret = 1; 1459 int ret = 1;
1449 1460
1450 packet = hpsb_make_phypacket(host, 1461 packet = hpsb_make_phypacket(host,
1451 0x003c0000 | NODEID_TO_NODE(host->node_id) << 24); 1462 EXTPHYPACKET_TYPE_RESUME |
1463 NODEID_TO_NODE(host->node_id) << PHYPACKET_PORT_SHIFT);
1452 if (packet) { 1464 if (packet) {
1453 packet->no_waiter = 1; 1465 packet->no_waiter = 1;
1454 packet->generation = get_hpsb_generation(host); 1466 packet->generation = get_hpsb_generation(host);
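
[Editor's aside, not part of the patch] The resume-packet hunk replaces the literal 0x003c0000 and the bare shift by 24 with named constants. The equivalence can be read straight off the old and new lines; the sketch below restates it with local defines whose values are inferred from the removed line (the real macros live in the ieee1394 headers):

#include <linux/types.h>

/* Inferred from "0x003c0000 | NODEID_TO_NODE(id) << 24" in the old code. */
#define EXTPHYPACKET_TYPE_RESUME	0x003c0000
#define PHYPACKET_PORT_SHIFT		24

static inline u32 make_resume_phypacket(u32 phys_id)
{
	/* Same quadlet as before, but the intent is now readable. */
	return EXTPHYPACKET_TYPE_RESUME | (phys_id << PHYPACKET_PORT_SHIFT);
}
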
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h
index 3a2f0c02fd08..0b26616e16c3 100644
--- a/drivers/ieee1394/nodemgr.h
+++ b/drivers/ieee1394/nodemgr.h
@@ -151,24 +151,6 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne)
151} 151}
152 152
153/* 153/*
154 * Returns a node entry (which has its reference count incremented) or NULL if
155 * the GUID in question is not known. Getting a valid entry does not mean that
156 * the node with this GUID is currently accessible (might be powered down).
157 */
158struct node_entry *hpsb_guid_get_entry(u64 guid);
159
160/* Same as above, but use the nodeid to get an node entry. This is not
161 * fool-proof by itself, since the nodeid can change. */
162struct node_entry *hpsb_nodeid_get_entry(struct hpsb_host *host, nodeid_t nodeid);
163
164/*
165 * If the entry refers to a local host, this function will return the pointer
166 * to the hpsb_host structure. It will return NULL otherwise. Once you have
167 * established it is a local host, you can use that knowledge from then on (the
168 * GUID won't wander to an external node). */
169struct hpsb_host *hpsb_get_host_by_ne(struct node_entry *ne);
170
171/*
172 * This will fill in the given, pre-initialised hpsb_packet with the current 154 * This will fill in the given, pre-initialised hpsb_packet with the current
173 * information from the node entry (host, node ID, generation number). It will 155 * information from the node entry (host, node ID, generation number). It will
174 * return false if the node owning the GUID is not accessible (and not modify the 156 * return false if the node owning the GUID is not accessible (and not modify the
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 4cf9b8f3e336..b6b96fa04d62 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -161,9 +161,6 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
161#define PRINT(level, fmt, args...) \ 161#define PRINT(level, fmt, args...) \
162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) 162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163 163
164static char version[] __devinitdata =
165 "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
166
167/* Module Parameters */ 164/* Module Parameters */
168static int phys_dma = 1; 165static int phys_dma = 1;
169module_param(phys_dma, int, 0644); 166module_param(phys_dma, int, 0644);
@@ -587,12 +584,13 @@ static void ohci_initialize(struct ti_ohci *ohci)
587 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq)); 584 sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
588#endif 585#endif
589 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] " 586 PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
590 "MMIO=[%lx-%lx] Max Packet=[%d]", 587 "MMIO=[%lx-%lx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
591 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10), 588 ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
592 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf, 589 ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
593 pci_resource_start(ohci->dev, 0), 590 pci_resource_start(ohci->dev, 0),
594 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1, 591 pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
595 ohci->max_packet_size); 592 ohci->max_packet_size,
593 ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);
596 594
597 /* Check all of our ports to make sure that if anything is 595 /* Check all of our ports to make sure that if anything is
598 * connected, we enable that port. */ 596 * connected, we enable that port. */
@@ -2960,28 +2958,23 @@ alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2960 d->ctrlClear = 0; 2958 d->ctrlClear = 0;
2961 d->cmdPtr = 0; 2959 d->cmdPtr = 0;
2962 2960
2963 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC); 2961 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2964 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC); 2962 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2965 2963
2966 if (d->buf_cpu == NULL || d->buf_bus == NULL) { 2964 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2967 PRINT(KERN_ERR, "Failed to allocate dma buffer"); 2965 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2968 free_dma_rcv_ctx(d); 2966 free_dma_rcv_ctx(d);
2969 return -ENOMEM; 2967 return -ENOMEM;
2970 } 2968 }
2971 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2972 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2973 2969
2974 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*), 2970 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2975 GFP_ATOMIC); 2971 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2976 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2977 2972
2978 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 2973 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2979 PRINT(KERN_ERR, "Failed to allocate dma prg"); 2974 PRINT(KERN_ERR, "Failed to allocate dma prg");
2980 free_dma_rcv_ctx(d); 2975 free_dma_rcv_ctx(d);
2981 return -ENOMEM; 2976 return -ENOMEM;
2982 } 2977 }
2983 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2984 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2985 2978
2986 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC); 2979 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2987 2980
@@ -3093,17 +3086,14 @@ alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3093 d->ctrlClear = 0; 3086 d->ctrlClear = 0;
3094 d->cmdPtr = 0; 3087 d->cmdPtr = 0;
3095 3088
3096 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*), 3089 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
3097 GFP_KERNEL); 3090 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
3098 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3099 3091
3100 if (d->prg_cpu == NULL || d->prg_bus == NULL) { 3092 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3101 PRINT(KERN_ERR, "Failed to allocate at dma prg"); 3093 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3102 free_dma_trm_ctx(d); 3094 free_dma_trm_ctx(d);
3103 return -ENOMEM; 3095 return -ENOMEM;
3104 } 3096 }
3105 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3106 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3107 3097
3108 len = sprintf(pool_name, "ohci1394_trm_prg"); 3098 len = sprintf(pool_name, "ohci1394_trm_prg");
3109 sprintf(pool_name+len, "%d", num_allocs); 3099 sprintf(pool_name+len, "%d", num_allocs);
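
[Editor's aside, not part of the patch] The two DMA-context hunks above allocate arrays of per-descriptor CPU pointers and bus addresses; the kmalloc()+memset() pairs become single kzalloc() calls sized as num_desc * sizeof(*array). A standalone sketch of the pattern with a hypothetical context struct; kcalloc() would express the same allocation and also checks the multiplication for overflow:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>

struct dma_ctx {		/* hypothetical, loosely modeled on dma_rcv_ctx */
	unsigned int num_desc;
	void **buf_cpu;		/* one CPU pointer per descriptor */
	dma_addr_t *buf_bus;	/* one bus address per descriptor */
};

static int dma_ctx_alloc_arrays(struct dma_ctx *d, gfp_t gfp)
{
	/* Zeroed arrays in one call each; sizes track the element types. */
	d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), gfp);
	d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), gfp);
	if (!d->buf_cpu || !d->buf_bus) {
		kfree(d->buf_cpu);	/* kfree(NULL) is a no-op */
		kfree(d->buf_bus);
		d->buf_cpu = NULL;
		d->buf_bus = NULL;
		return -ENOMEM;
	}
	return 0;
}
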
@@ -3201,8 +3191,6 @@ static struct hpsb_host_driver ohci1394_driver = {
3201 .hw_csr_reg = ohci_hw_csr_reg, 3191 .hw_csr_reg = ohci_hw_csr_reg,
3202}; 3192};
3203 3193
3204
3205
3206/*********************************** 3194/***********************************
3207 * PCI Driver Interface functions * 3195 * PCI Driver Interface functions *
3208 ***********************************/ 3196 ***********************************/
@@ -3217,15 +3205,10 @@ do { \
3217static int __devinit ohci1394_pci_probe(struct pci_dev *dev, 3205static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3218 const struct pci_device_id *ent) 3206 const struct pci_device_id *ent)
3219{ 3207{
3220 static int version_printed = 0;
3221
3222 struct hpsb_host *host; 3208 struct hpsb_host *host;
3223 struct ti_ohci *ohci; /* shortcut to currently handled device */ 3209 struct ti_ohci *ohci; /* shortcut to currently handled device */
3224 unsigned long ohci_base; 3210 unsigned long ohci_base;
3225 3211
3226 if (version_printed++ == 0)
3227 PRINT_G(KERN_INFO, "%s", version);
3228
3229 if (pci_enable_device(dev)) 3212 if (pci_enable_device(dev))
3230 FAIL(-ENXIO, "Failed to enable OHCI hardware"); 3213 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3231 pci_set_master(dev); 3214 pci_set_master(dev);
@@ -3369,13 +3352,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3369 /* Determine the number of available IR and IT contexts. */ 3352 /* Determine the number of available IR and IT contexts. */
3370 ohci->nb_iso_rcv_ctx = 3353 ohci->nb_iso_rcv_ctx =
3371 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet); 3354 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3372 DBGMSG("%d iso receive contexts available",
3373 ohci->nb_iso_rcv_ctx);
3374
3375 ohci->nb_iso_xmit_ctx = 3355 ohci->nb_iso_xmit_ctx =
3376 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet); 3356 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3377 DBGMSG("%d iso transmit contexts available",
3378 ohci->nb_iso_xmit_ctx);
3379 3357
3380 /* Set the usage bits for non-existent contexts so they can't 3358 /* Set the usage bits for non-existent contexts so they can't
3381 * be allocated */ 3359 * be allocated */
@@ -3606,8 +3584,6 @@ static struct pci_driver ohci1394_pci_driver = {
3606 .suspend = ohci1394_pci_suspend, 3584 .suspend = ohci1394_pci_suspend,
3607}; 3585};
3608 3586
3609
3610
3611/*********************************** 3587/***********************************
3612 * OHCI1394 Video Interface * 3588 * OHCI1394 Video Interface *
3613 ***********************************/ 3589 ***********************************/
@@ -3714,7 +3690,6 @@ EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3714EXPORT_SYMBOL(ohci1394_register_iso_tasklet); 3690EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3715EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet); 3691EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3716 3692
3717
3718/*********************************** 3693/***********************************
3719 * General module initialization * 3694 * General module initialization *
3720 ***********************************/ 3695 ***********************************/
diff --git a/drivers/ieee1394/ohci1394.h b/drivers/ieee1394/ohci1394.h
index cc66c1cae250..7df0962144e3 100644
--- a/drivers/ieee1394/ohci1394.h
+++ b/drivers/ieee1394/ohci1394.h
@@ -219,8 +219,8 @@ struct ti_ohci {
219 219
220 int self_id_errors; 220 int self_id_errors;
221 221
222 /* Tasklets for iso receive and transmit, used by video1394, 222 /* Tasklets for iso receive and transmit, used by video1394
223 * amdtp and dv1394 */ 223 * and dv1394 */
224 224
225 struct list_head iso_tasklet_list; 225 struct list_head iso_tasklet_list;
226 spinlock_t iso_tasklet_list_lock; 226 spinlock_t iso_tasklet_list_lock;
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 6b1ab875333b..e2edc41e1b6f 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1435,7 +1435,7 @@ static int __devinit add_card(struct pci_dev *dev,
1435 struct i2c_algo_bit_data i2c_adapter_data; 1435 struct i2c_algo_bit_data i2c_adapter_data;
1436 1436
1437 error = -ENOMEM; 1437 error = -ENOMEM;
1438 i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL); 1438 i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
1439 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1439 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1440 1440
1441 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter)); 1441 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 24411e666b21..b05235639918 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -102,12 +102,9 @@ static struct pending_request *__alloc_pending_request(gfp_t flags)
102{ 102{
103 struct pending_request *req; 103 struct pending_request *req;
104 104
105 req = (struct pending_request *)kmalloc(sizeof(struct pending_request), 105 req = kzalloc(sizeof(*req), flags);
106 flags); 106 if (req)
107 if (req != NULL) {
108 memset(req, 0, sizeof(struct pending_request));
109 INIT_LIST_HEAD(&req->list); 107 INIT_LIST_HEAD(&req->list);
110 }
111 108
112 return req; 109 return req;
113} 110}
@@ -192,9 +189,9 @@ static void add_host(struct hpsb_host *host)
192 struct host_info *hi; 189 struct host_info *hi;
193 unsigned long flags; 190 unsigned long flags;
194 191
195 hi = (struct host_info *)kmalloc(sizeof(struct host_info), GFP_KERNEL); 192 hi = kmalloc(sizeof(*hi), GFP_KERNEL);
196 193
197 if (hi != NULL) { 194 if (hi) {
198 INIT_LIST_HEAD(&hi->list); 195 INIT_LIST_HEAD(&hi->list);
199 hi->host = host; 196 hi->host = host;
200 INIT_LIST_HEAD(&hi->file_info_list); 197 INIT_LIST_HEAD(&hi->file_info_list);
@@ -315,8 +312,8 @@ static void iso_receive(struct hpsb_host *host, int channel, quadlet_t * data,
315 break; 312 break;
316 313
317 if (!ibs) { 314 if (!ibs) {
318 ibs = kmalloc(sizeof(struct iso_block_store) 315 ibs = kmalloc(sizeof(*ibs) + length,
319 + length, SLAB_ATOMIC); 316 SLAB_ATOMIC);
320 if (!ibs) { 317 if (!ibs) {
321 kfree(req); 318 kfree(req);
322 break; 319 break;
@@ -376,8 +373,8 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
376 break; 373 break;
377 374
378 if (!ibs) { 375 if (!ibs) {
379 ibs = kmalloc(sizeof(struct iso_block_store) 376 ibs = kmalloc(sizeof(*ibs) + length,
380 + length, SLAB_ATOMIC); 377 SLAB_ATOMIC);
381 if (!ibs) { 378 if (!ibs) {
382 kfree(req); 379 kfree(req);
383 break; 380 break;
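
[Editor's aside, not part of the patch] Both hunks above keep the kmalloc(sizeof(*ibs) + length, ...) form: one allocation covers a header struct plus its variable-length payload. A minimal sketch with a hypothetical block type; the real struct iso_block_store is declared inside raw1394:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct block_store {		/* hypothetical stand-in for iso_block_store */
	size_t data_size;
	unsigned char data[];	/* payload bytes follow the header */
};

static struct block_store *block_store_new(const void *payload, size_t length,
					   gfp_t gfp)
{
	/* Header and `length` payload bytes come from a single allocation. */
	struct block_store *b = kmalloc(sizeof(*b) + length, gfp);

	if (!b)
		return NULL;
	b->data_size = length;
	memcpy(b->data, payload, length);
	return b;
}
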
@@ -502,10 +499,9 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
502 switch (req->req.type) { 499 switch (req->req.type) {
503 case RAW1394_REQ_LIST_CARDS: 500 case RAW1394_REQ_LIST_CARDS:
504 spin_lock_irqsave(&host_info_lock, flags); 501 spin_lock_irqsave(&host_info_lock, flags);
505 khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count, 502 khl = kmalloc(sizeof(*khl) * host_count, SLAB_ATOMIC);
506 SLAB_ATOMIC);
507 503
508 if (khl != NULL) { 504 if (khl) {
509 req->req.misc = host_count; 505 req->req.misc = host_count;
510 req->data = (quadlet_t *) khl; 506 req->data = (quadlet_t *) khl;
511 507
@@ -517,7 +513,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req)
517 } 513 }
518 spin_unlock_irqrestore(&host_info_lock, flags); 514 spin_unlock_irqrestore(&host_info_lock, flags);
519 515
520 if (khl != NULL) { 516 if (khl) {
521 req->req.error = RAW1394_ERROR_NONE; 517 req->req.error = RAW1394_ERROR_NONE;
522 req->req.length = min(req->req.length, 518 req->req.length = min(req->req.length,
523 (u32) (sizeof 519 (u32) (sizeof
@@ -1647,13 +1643,13 @@ static int arm_register(struct file_info *fi, struct pending_request *req)
1647 return (-EINVAL); 1643 return (-EINVAL);
1648 } 1644 }
1649 /* addr-list-entry for fileinfo */ 1645 /* addr-list-entry for fileinfo */
1650 addr = (struct arm_addr *)kmalloc(sizeof(struct arm_addr), SLAB_KERNEL); 1646 addr = kmalloc(sizeof(*addr), SLAB_KERNEL);
1651 if (!addr) { 1647 if (!addr) {
1652 req->req.length = 0; 1648 req->req.length = 0;
1653 return (-ENOMEM); 1649 return (-ENOMEM);
1654 } 1650 }
1655 /* allocation of addr_space_buffer */ 1651 /* allocation of addr_space_buffer */
1656 addr->addr_space_buffer = (u8 *) vmalloc(req->req.length); 1652 addr->addr_space_buffer = vmalloc(req->req.length);
1657 if (!(addr->addr_space_buffer)) { 1653 if (!(addr->addr_space_buffer)) {
1658 kfree(addr); 1654 kfree(addr);
1659 req->req.length = 0; 1655 req->req.length = 0;
@@ -2122,8 +2118,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2122 return -ENOMEM; 2118 return -ENOMEM;
2123 } 2119 }
2124 2120
2125 cache->filled_head = 2121 cache->filled_head = kmalloc(sizeof(*cache->filled_head), GFP_KERNEL);
2126 kmalloc(sizeof(struct csr1212_cache_region), GFP_KERNEL);
2127 if (!cache->filled_head) { 2122 if (!cache->filled_head) {
2128 csr1212_release_keyval(fi->csr1212_dirs[dr]); 2123 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2129 fi->csr1212_dirs[dr] = NULL; 2124 fi->csr1212_dirs[dr] = NULL;
@@ -2136,7 +2131,6 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2136 req->req.length)) { 2131 req->req.length)) {
2137 csr1212_release_keyval(fi->csr1212_dirs[dr]); 2132 csr1212_release_keyval(fi->csr1212_dirs[dr]);
2138 fi->csr1212_dirs[dr] = NULL; 2133 fi->csr1212_dirs[dr] = NULL;
2139 CSR1212_FREE(cache);
2140 ret = -EFAULT; 2134 ret = -EFAULT;
2141 } else { 2135 } else {
2142 cache->len = req->req.length; 2136 cache->len = req->req.length;
@@ -2172,7 +2166,7 @@ static int modify_config_rom(struct file_info *fi, struct pending_request *req)
2172 } 2166 }
2173 } 2167 }
2174 kfree(cache->filled_head); 2168 kfree(cache->filled_head);
2175 kfree(cache); 2169 CSR1212_FREE(cache);
2176 2170
2177 if (ret >= 0) { 2171 if (ret >= 0) {
2178 /* we have to free the request, because we queue no response, 2172 /* we have to free the request, because we queue no response,
@@ -2488,8 +2482,8 @@ static int raw1394_iso_recv_packets(struct file_info *fi, void __user * uaddr)
2488 2482
2489 /* ensure user-supplied buffer is accessible and big enough */ 2483 /* ensure user-supplied buffer is accessible and big enough */
2490 if (!access_ok(VERIFY_WRITE, upackets.infos, 2484 if (!access_ok(VERIFY_WRITE, upackets.infos,
2491 upackets.n_packets * 2485 upackets.n_packets *
2492 sizeof(struct raw1394_iso_packet_info))) 2486 sizeof(struct raw1394_iso_packet_info)))
2493 return -EFAULT; 2487 return -EFAULT;
2494 2488
2495 /* copy the packet_infos out */ 2489 /* copy the packet_infos out */
@@ -2522,8 +2516,8 @@ static int raw1394_iso_send_packets(struct file_info *fi, void __user * uaddr)
2522 2516
2523 /* ensure user-supplied buffer is accessible and big enough */ 2517 /* ensure user-supplied buffer is accessible and big enough */
2524 if (!access_ok(VERIFY_READ, upackets.infos, 2518 if (!access_ok(VERIFY_READ, upackets.infos,
2525 upackets.n_packets * 2519 upackets.n_packets *
2526 sizeof(struct raw1394_iso_packet_info))) 2520 sizeof(struct raw1394_iso_packet_info)))
2527 return -EFAULT; 2521 return -EFAULT;
2528 2522
2529 /* copy the infos structs in and queue the packets */ 2523 /* copy the infos structs in and queue the packets */
@@ -2684,11 +2678,10 @@ static int raw1394_open(struct inode *inode, struct file *file)
2684{ 2678{
2685 struct file_info *fi; 2679 struct file_info *fi;
2686 2680
2687 fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL); 2681 fi = kzalloc(sizeof(*fi), SLAB_KERNEL);
2688 if (fi == NULL) 2682 if (!fi)
2689 return -ENOMEM; 2683 return -ENOMEM;
2690 2684
2691 memset(fi, 0, sizeof(struct file_info));
2692 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */ 2685 fi->notification = (u8) RAW1394_NOTIFY_ON; /* busreset notification */
2693 2686
2694 INIT_LIST_HEAD(&fi->list); 2687 INIT_LIST_HEAD(&fi->list);
@@ -2748,8 +2741,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
2748 list) { 2741 list) {
2749 entry = fi_hlp->addr_list.next; 2742 entry = fi_hlp->addr_list.next;
2750 while (entry != &(fi_hlp->addr_list)) { 2743 while (entry != &(fi_hlp->addr_list)) {
2751 arm_addr = list_entry(entry, 2744 arm_addr = list_entry(entry, struct
2752 struct
2753 arm_addr, 2745 arm_addr,
2754 addr_list); 2746 addr_list);
2755 if (arm_addr->start == 2747 if (arm_addr->start ==
@@ -2912,16 +2904,17 @@ static int __init init_raw1394(void)
2912 2904
2913 hpsb_register_highlevel(&raw1394_highlevel); 2905 hpsb_register_highlevel(&raw1394_highlevel);
2914 2906
2915 if (IS_ERR(class_device_create(hpsb_protocol_class, NULL, MKDEV( 2907 if (IS_ERR
2916 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), 2908 (class_device_create
2917 NULL, RAW1394_DEVICE_NAME))) { 2909 (hpsb_protocol_class, NULL,
2910 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), NULL,
2911 RAW1394_DEVICE_NAME))) {
2918 ret = -EFAULT; 2912 ret = -EFAULT;
2919 goto out_unreg; 2913 goto out_unreg;
2920 } 2914 }
2921 2915
2922 devfs_mk_cdev(MKDEV( 2916 devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
2923 IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16), 2917 S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
2924 S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
2925 2918
2926 cdev_init(&raw1394_cdev, &raw1394_fops); 2919 cdev_init(&raw1394_cdev, &raw1394_fops);
2927 raw1394_cdev.owner = THIS_MODULE; 2920 raw1394_cdev.owner = THIS_MODULE;
@@ -2943,20 +2936,22 @@ static int __init init_raw1394(void)
2943 2936
2944 goto out; 2937 goto out;
2945 2938
2946out_dev: 2939 out_dev:
2947 devfs_remove(RAW1394_DEVICE_NAME); 2940 devfs_remove(RAW1394_DEVICE_NAME);
2948 class_device_destroy(hpsb_protocol_class, 2941 class_device_destroy(hpsb_protocol_class,
2949 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); 2942 MKDEV(IEEE1394_MAJOR,
2950out_unreg: 2943 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2944 out_unreg:
2951 hpsb_unregister_highlevel(&raw1394_highlevel); 2945 hpsb_unregister_highlevel(&raw1394_highlevel);
2952out: 2946 out:
2953 return ret; 2947 return ret;
2954} 2948}
2955 2949
2956static void __exit cleanup_raw1394(void) 2950static void __exit cleanup_raw1394(void)
2957{ 2951{
2958 class_device_destroy(hpsb_protocol_class, 2952 class_device_destroy(hpsb_protocol_class,
2959 MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)); 2953 MKDEV(IEEE1394_MAJOR,
2954 IEEE1394_MINOR_BLOCK_RAW1394 * 16));
2960 cdev_del(&raw1394_cdev); 2955 cdev_del(&raw1394_cdev);
2961 devfs_remove(RAW1394_DEVICE_NAME); 2956 devfs_remove(RAW1394_DEVICE_NAME);
2962 hpsb_unregister_highlevel(&raw1394_highlevel); 2957 hpsb_unregister_highlevel(&raw1394_highlevel);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index f7e18ccc5c0a..18d7eda38851 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -80,9 +80,6 @@
80#include "ieee1394_transactions.h" 80#include "ieee1394_transactions.h"
81#include "sbp2.h" 81#include "sbp2.h"
82 82
83static char version[] __devinitdata =
84 "$Rev: 1306 $ Ben Collins <bcollins@debian.org>";
85
86/* 83/*
87 * Module load parameter definitions 84 * Module load parameter definitions
88 */ 85 */
@@ -151,18 +148,15 @@ static int force_inquiry_hack;
151module_param(force_inquiry_hack, int, 0444); 148module_param(force_inquiry_hack, int, 0444);
152MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); 149MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
153 150
154
155/* 151/*
156 * Export information about protocols/devices supported by this driver. 152 * Export information about protocols/devices supported by this driver.
157 */ 153 */
158static struct ieee1394_device_id sbp2_id_table[] = { 154static struct ieee1394_device_id sbp2_id_table[] = {
159 { 155 {
160 .match_flags =IEEE1394_MATCH_SPECIFIER_ID | 156 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
161 IEEE1394_MATCH_VERSION, 157 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
162 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, 158 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
163 .version = SBP2_SW_VERSION_ENTRY & 0xffffff 159 {}
164 },
165 { }
166}; 160};
167 161
168MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); 162MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
@@ -221,7 +215,6 @@ static u32 global_outstanding_dmas = 0;
221 215
222#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args) 216#define SBP2_ERR(fmt, args...) HPSB_ERR("sbp2: "fmt, ## args)
223 217
224
225/* 218/*
226 * Globals 219 * Globals
227 */ 220 */
@@ -254,8 +247,8 @@ static struct hpsb_address_ops sbp2_ops = {
254 247
255#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA 248#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
256static struct hpsb_address_ops sbp2_physdma_ops = { 249static struct hpsb_address_ops sbp2_physdma_ops = {
257 .read = sbp2_handle_physdma_read, 250 .read = sbp2_handle_physdma_read,
258 .write = sbp2_handle_physdma_write, 251 .write = sbp2_handle_physdma_write,
259}; 252};
260#endif 253#endif
261 254
@@ -287,7 +280,6 @@ static u32 sbp2_broken_inquiry_list[] = {
287 * General utility functions 280 * General utility functions
288 **************************************/ 281 **************************************/
289 282
290
291#ifndef __BIG_ENDIAN 283#ifndef __BIG_ENDIAN
292/* 284/*
293 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes. 285 * Converts a buffer from be32 to cpu byte ordering. Length is in bytes.
@@ -324,7 +316,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length)
324/* 316/*
325 * Debug packet dump routine. Length is in bytes. 317 * Debug packet dump routine. Length is in bytes.
326 */ 318 */
327static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32 dump_phys_addr) 319static void sbp2util_packet_dump(void *buffer, int length, char *dump_name,
320 u32 dump_phys_addr)
328{ 321{
329 int i; 322 int i;
330 unsigned char *dump = buffer; 323 unsigned char *dump = buffer;
@@ -345,7 +338,7 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, u32
345 printk(" "); 338 printk(" ");
346 if ((i & 0xf) == 0) 339 if ((i & 0xf) == 0)
347 printk("\n "); 340 printk("\n ");
348 printk("%02x ", (int) dump[i]); 341 printk("%02x ", (int)dump[i]);
349 } 342 }
350 printk("\n"); 343 printk("\n");
351 344
@@ -364,9 +357,9 @@ static int sbp2util_down_timeout(atomic_t *done, int timeout)
364 357
365 for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) { 358 for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) {
366 if (msleep_interruptible(100)) /* 100ms */ 359 if (msleep_interruptible(100)) /* 100ms */
367 return(1); 360 return 1;
368 } 361 }
369 return ((i > 0) ? 0:1); 362 return (i > 0) ? 0 : 1;
370} 363}
371 364
372/* Free's an allocated packet */ 365/* Free's an allocated packet */
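
[Editor's aside, not part of the patch] The hunk above only drops the redundant parentheses around the return values; the underlying wait loop polls an atomic completion flag in 100 ms steps and treats an interrupted sleep as failure. Its shape, restated in isolation against a caller-supplied atomic_t (a sketch, not the driver's exact code):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

/*
 * Wait up to `timeout` jiffies for *done to become non-zero, sleeping
 * 100 ms per step.  Returns 0 on success, 1 on timeout or signal,
 * mirroring sbp2util_down_timeout().
 */
static int poll_done_timeout(atomic_t *done, int timeout)
{
	int i;

	for (i = timeout; i > 0 && atomic_read(done) == 0; i -= HZ / 10) {
		if (msleep_interruptible(100))	/* a signal woke us early */
			return 1;
	}
	return (i > 0) ? 0 : 1;
}
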
@@ -380,21 +373,22 @@ static void sbp2_free_packet(struct hpsb_packet *packet)
380 * subaction and returns immediately. Can be used from interrupts. 373 * subaction and returns immediately. Can be used from interrupts.
381 */ 374 */
382static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, 375static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr,
383 quadlet_t *buffer, size_t length) 376 quadlet_t *buffer, size_t length)
384{ 377{
385 struct hpsb_packet *packet; 378 struct hpsb_packet *packet;
386 379
387 packet = hpsb_make_writepacket(ne->host, ne->nodeid, 380 packet = hpsb_make_writepacket(ne->host, ne->nodeid,
388 addr, buffer, length); 381 addr, buffer, length);
389 if (!packet) 382 if (!packet)
390 return -ENOMEM; 383 return -ENOMEM;
391 384
392 hpsb_set_packet_complete_task(packet, (void (*)(void*))sbp2_free_packet, 385 hpsb_set_packet_complete_task(packet,
386 (void (*)(void *))sbp2_free_packet,
393 packet); 387 packet);
394 388
395 hpsb_node_fill_packet(ne, packet); 389 hpsb_node_fill_packet(ne, packet);
396 390
397 if (hpsb_send_packet(packet) < 0) { 391 if (hpsb_send_packet(packet) < 0) {
398 sbp2_free_packet(packet); 392 sbp2_free_packet(packet);
399 return -EIO; 393 return -EIO;
400 } 394 }
@@ -417,22 +411,22 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i
417 411
418 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); 412 spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
419 for (i = 0; i < orbs; i++) { 413 for (i = 0; i < orbs; i++) {
420 command = (struct sbp2_command_info *) 414 command = kzalloc(sizeof(*command), GFP_ATOMIC);
421 kmalloc(sizeof(struct sbp2_command_info), GFP_ATOMIC);
422 if (!command) { 415 if (!command) {
423 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 416 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock,
424 return(-ENOMEM); 417 flags);
418 return -ENOMEM;
425 } 419 }
426 memset(command, '\0', sizeof(struct sbp2_command_info));
427 command->command_orb_dma = 420 command->command_orb_dma =
428 pci_map_single (hi->host->pdev, &command->command_orb, 421 pci_map_single(hi->host->pdev, &command->command_orb,
429 sizeof(struct sbp2_command_orb), 422 sizeof(struct sbp2_command_orb),
430 PCI_DMA_BIDIRECTIONAL); 423 PCI_DMA_BIDIRECTIONAL);
431 SBP2_DMA_ALLOC("single command orb DMA"); 424 SBP2_DMA_ALLOC("single command orb DMA");
432 command->sge_dma = 425 command->sge_dma =
433 pci_map_single (hi->host->pdev, &command->scatter_gather_element, 426 pci_map_single(hi->host->pdev,
434 sizeof(command->scatter_gather_element), 427 &command->scatter_gather_element,
435 PCI_DMA_BIDIRECTIONAL); 428 sizeof(command->scatter_gather_element),
429 PCI_DMA_BIDIRECTIONAL);
436 SBP2_DMA_ALLOC("scatter_gather_element"); 430 SBP2_DMA_ALLOC("scatter_gather_element");
437 INIT_LIST_HEAD(&command->list); 431 INIT_LIST_HEAD(&command->list);
438 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed); 432 list_add_tail(&command->list, &scsi_id->sbp2_command_orb_completed);
@@ -488,7 +482,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
488 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 482 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
489 if (command->command_orb_dma == orb) { 483 if (command->command_orb_dma == orb) {
490 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 484 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
491 return (command); 485 return command;
492 } 486 }
493 } 487 }
494 } 488 }
@@ -496,7 +490,7 @@ static struct sbp2_command_info *sbp2util_find_command_for_orb(
496 490
497 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb); 491 SBP2_ORB_DEBUG("could not match command orb %x", (unsigned int)orb);
498 492
499 return(NULL); 493 return NULL;
500} 494}
501 495
502/* 496/*
@@ -513,12 +507,12 @@ static struct sbp2_command_info *sbp2util_find_command_for_SCpnt(struct scsi_id_
513 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) { 507 list_for_each_entry(command, &scsi_id->sbp2_command_orb_inuse, list) {
514 if (command->Current_SCpnt == SCpnt) { 508 if (command->Current_SCpnt == SCpnt) {
515 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 509 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
516 return (command); 510 return command;
517 } 511 }
518 } 512 }
519 } 513 }
520 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 514 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
521 return(NULL); 515 return NULL;
522} 516}
523 517
524/* 518/*
@@ -545,7 +539,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
545 SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!"); 539 SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
546 } 540 }
547 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 541 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
548 return (command); 542 return command;
549} 543}
550 544
551/* Free our DMA's */ 545/* Free our DMA's */
@@ -587,7 +581,8 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
587/* 581/*
588 * This function moves a command to the completed orb list. 582 * This function moves a command to the completed orb list.
589 */ 583 */
590static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id, struct sbp2_command_info *command) 584static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_id,
585 struct sbp2_command_info *command)
591{ 586{
592 unsigned long flags; 587 unsigned long flags;
593 588
@@ -606,8 +601,6 @@ static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_
606 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; 601 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
607} 602}
608 603
609
610
611/********************************************* 604/*********************************************
612 * IEEE-1394 core driver stack related section 605 * IEEE-1394 core driver stack related section
613 *********************************************/ 606 *********************************************/
@@ -627,14 +620,14 @@ static int sbp2_probe(struct device *dev)
627 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY) 620 if (ud->flags & UNIT_DIRECTORY_HAS_LUN_DIRECTORY)
628 return -ENODEV; 621 return -ENODEV;
629 622
630 scsi_id = sbp2_alloc_device(ud); 623 scsi_id = sbp2_alloc_device(ud);
631 624
632 if (!scsi_id) 625 if (!scsi_id)
633 return -ENOMEM; 626 return -ENOMEM;
634 627
635 sbp2_parse_unit_directory(scsi_id, ud); 628 sbp2_parse_unit_directory(scsi_id, ud);
636 629
637 return sbp2_start_device(scsi_id); 630 return sbp2_start_device(scsi_id);
638} 631}
639 632
640static int sbp2_remove(struct device *dev) 633static int sbp2_remove(struct device *dev)
@@ -719,12 +712,11 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
719 712
720 SBP2_DEBUG("sbp2_alloc_device"); 713 SBP2_DEBUG("sbp2_alloc_device");
721 714
722 scsi_id = kmalloc(sizeof(*scsi_id), GFP_KERNEL); 715 scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
723 if (!scsi_id) { 716 if (!scsi_id) {
724 SBP2_ERR("failed to create scsi_id"); 717 SBP2_ERR("failed to create scsi_id");
725 goto failed_alloc; 718 goto failed_alloc;
726 } 719 }
727 memset(scsi_id, 0, sizeof(*scsi_id));
728 720
729 scsi_id->ne = ud->ne; 721 scsi_id->ne = ud->ne;
730 scsi_id->ud = ud; 722 scsi_id->ud = ud;
@@ -735,7 +727,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
735 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); 727 INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed);
736 INIT_LIST_HEAD(&scsi_id->scsi_list); 728 INIT_LIST_HEAD(&scsi_id->scsi_list);
737 spin_lock_init(&scsi_id->sbp2_command_orb_lock); 729 spin_lock_init(&scsi_id->sbp2_command_orb_lock);
738 scsi_id->sbp2_device_type_and_lun = SBP2_DEVICE_TYPE_LUN_UNINITIALIZED; 730 scsi_id->sbp2_lun = 0;
739 731
740 ud->device.driver_data = scsi_id; 732 ud->device.driver_data = scsi_id;
741 733
@@ -769,7 +761,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
769 761
770 /* Register our host with the SCSI stack. */ 762 /* Register our host with the SCSI stack. */
771 scsi_host = scsi_host_alloc(&scsi_driver_template, 763 scsi_host = scsi_host_alloc(&scsi_driver_template,
772 sizeof (unsigned long)); 764 sizeof(unsigned long));
773 if (!scsi_host) { 765 if (!scsi_host) {
774 SBP2_ERR("failed to register scsi host"); 766 SBP2_ERR("failed to register scsi host");
775 goto failed_alloc; 767 goto failed_alloc;
@@ -790,7 +782,6 @@ failed_alloc:
790 return NULL; 782 return NULL;
791} 783}
792 784
793
794static void sbp2_host_reset(struct hpsb_host *host) 785static void sbp2_host_reset(struct hpsb_host *host)
795{ 786{
796 struct sbp2scsi_host_info *hi; 787 struct sbp2scsi_host_info *hi;
@@ -804,7 +795,6 @@ static void sbp2_host_reset(struct hpsb_host *host)
804 } 795 }
805} 796}
806 797
807
808/* 798/*
809 * This function is where we first pull the node unique ids, and then 799 * This function is where we first pull the node unique ids, and then
810 * allocate memory and register a SBP-2 device. 800 * allocate memory and register a SBP-2 device.
@@ -818,7 +808,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
818 808
819 /* Login FIFO DMA */ 809 /* Login FIFO DMA */
820 scsi_id->login_response = 810 scsi_id->login_response =
821 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_response), 811 pci_alloc_consistent(hi->host->pdev,
812 sizeof(struct sbp2_login_response),
822 &scsi_id->login_response_dma); 813 &scsi_id->login_response_dma);
823 if (!scsi_id->login_response) 814 if (!scsi_id->login_response)
824 goto alloc_fail; 815 goto alloc_fail;
@@ -826,7 +817,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
826 817
827 /* Query logins ORB DMA */ 818 /* Query logins ORB DMA */
828 scsi_id->query_logins_orb = 819 scsi_id->query_logins_orb =
829 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_orb), 820 pci_alloc_consistent(hi->host->pdev,
821 sizeof(struct sbp2_query_logins_orb),
830 &scsi_id->query_logins_orb_dma); 822 &scsi_id->query_logins_orb_dma);
831 if (!scsi_id->query_logins_orb) 823 if (!scsi_id->query_logins_orb)
832 goto alloc_fail; 824 goto alloc_fail;
@@ -834,7 +826,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
834 826
835 /* Query logins response DMA */ 827 /* Query logins response DMA */
836 scsi_id->query_logins_response = 828 scsi_id->query_logins_response =
837 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_query_logins_response), 829 pci_alloc_consistent(hi->host->pdev,
830 sizeof(struct sbp2_query_logins_response),
838 &scsi_id->query_logins_response_dma); 831 &scsi_id->query_logins_response_dma);
839 if (!scsi_id->query_logins_response) 832 if (!scsi_id->query_logins_response)
840 goto alloc_fail; 833 goto alloc_fail;
@@ -842,7 +835,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
842 835
843 /* Reconnect ORB DMA */ 836 /* Reconnect ORB DMA */
844 scsi_id->reconnect_orb = 837 scsi_id->reconnect_orb =
845 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_reconnect_orb), 838 pci_alloc_consistent(hi->host->pdev,
839 sizeof(struct sbp2_reconnect_orb),
846 &scsi_id->reconnect_orb_dma); 840 &scsi_id->reconnect_orb_dma);
847 if (!scsi_id->reconnect_orb) 841 if (!scsi_id->reconnect_orb)
848 goto alloc_fail; 842 goto alloc_fail;
@@ -850,7 +844,8 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
850 844
851 /* Logout ORB DMA */ 845 /* Logout ORB DMA */
852 scsi_id->logout_orb = 846 scsi_id->logout_orb =
853 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_logout_orb), 847 pci_alloc_consistent(hi->host->pdev,
848 sizeof(struct sbp2_logout_orb),
854 &scsi_id->logout_orb_dma); 849 &scsi_id->logout_orb_dma);
855 if (!scsi_id->logout_orb) 850 if (!scsi_id->logout_orb)
856 goto alloc_fail; 851 goto alloc_fail;
@@ -858,58 +853,11 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
858 853
859 /* Login ORB DMA */ 854 /* Login ORB DMA */
860 scsi_id->login_orb = 855 scsi_id->login_orb =
861 pci_alloc_consistent(hi->host->pdev, sizeof(struct sbp2_login_orb), 856 pci_alloc_consistent(hi->host->pdev,
857 sizeof(struct sbp2_login_orb),
862 &scsi_id->login_orb_dma); 858 &scsi_id->login_orb_dma);
863 if (!scsi_id->login_orb) { 859 if (!scsi_id->login_orb)
864alloc_fail: 860 goto alloc_fail;
865 if (scsi_id->query_logins_response) {
866 pci_free_consistent(hi->host->pdev,
867 sizeof(struct sbp2_query_logins_response),
868 scsi_id->query_logins_response,
869 scsi_id->query_logins_response_dma);
870 SBP2_DMA_FREE("query logins response DMA");
871 }
872
873 if (scsi_id->query_logins_orb) {
874 pci_free_consistent(hi->host->pdev,
875 sizeof(struct sbp2_query_logins_orb),
876 scsi_id->query_logins_orb,
877 scsi_id->query_logins_orb_dma);
878 SBP2_DMA_FREE("query logins ORB DMA");
879 }
880
881 if (scsi_id->logout_orb) {
882 pci_free_consistent(hi->host->pdev,
883 sizeof(struct sbp2_logout_orb),
884 scsi_id->logout_orb,
885 scsi_id->logout_orb_dma);
886 SBP2_DMA_FREE("logout ORB DMA");
887 }
888
889 if (scsi_id->reconnect_orb) {
890 pci_free_consistent(hi->host->pdev,
891 sizeof(struct sbp2_reconnect_orb),
892 scsi_id->reconnect_orb,
893 scsi_id->reconnect_orb_dma);
894 SBP2_DMA_FREE("reconnect ORB DMA");
895 }
896
897 if (scsi_id->login_response) {
898 pci_free_consistent(hi->host->pdev,
899 sizeof(struct sbp2_login_response),
900 scsi_id->login_response,
901 scsi_id->login_response_dma);
902 SBP2_DMA_FREE("login FIFO DMA");
903 }
904
905 list_del(&scsi_id->scsi_list);
906
907 kfree(scsi_id);
908
909 SBP2_ERR ("Could not allocate memory for scsi_id");
910
911 return -ENOMEM;
912 }
913 SBP2_DMA_ALLOC("consistent DMA region for login ORB"); 861 SBP2_DMA_ALLOC("consistent DMA region for login ORB");
914 862
915 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id); 863 SBP2_DEBUG("New SBP-2 device inserted, SCSI ID = %x", scsi_id->ud->id);
@@ -935,7 +883,7 @@ alloc_fail:
935 sbp2_remove_device(scsi_id); 883 sbp2_remove_device(scsi_id);
936 return -EINTR; 884 return -EINTR;
937 } 885 }
938 886
939 /* 887 /*
940 * Login to the sbp-2 device 888 * Login to the sbp-2 device
941 */ 889 */
@@ -964,10 +912,17 @@ alloc_fail:
964 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0); 912 error = scsi_add_device(scsi_id->scsi_host, 0, scsi_id->ud->id, 0);
965 if (error) { 913 if (error) {
966 SBP2_ERR("scsi_add_device failed"); 914 SBP2_ERR("scsi_add_device failed");
915 sbp2_logout_device(scsi_id);
916 sbp2_remove_device(scsi_id);
967 return error; 917 return error;
968 } 918 }
969 919
970 return 0; 920 return 0;
921
922alloc_fail:
923 SBP2_ERR("Could not allocate memory for scsi_id");
924 sbp2_remove_device(scsi_id);
925 return -ENOMEM;
971} 926}
972 927
973/* 928/*
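
[Editor's aside, not part of the patch] The two sbp2_start_device() hunks above replace the long inline unwind that used to sit under the old alloc_fail label with one consolidated exit: allocation failures jump to the new alloc_fail at the end of the function, and both that path and the scsi_add_device() failure path hand cleanup to sbp2_remove_device(). The general shape of that refactor, with hypothetical resources and a shared teardown helper:

#include <linux/slab.h>

struct dev_ctx {		/* hypothetical device state */
	void *ring;
	void *status;
};

static void dev_ctx_destroy(struct dev_ctx *c)
{
	/* Tears down whatever exists; kfree(NULL) is harmless. */
	kfree(c->ring);
	kfree(c->status);
	kfree(c);
}

static struct dev_ctx *dev_ctx_create(void)
{
	struct dev_ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->ring = kzalloc(64, GFP_KERNEL);
	if (!c->ring)
		goto fail;
	c->status = kzalloc(32, GFP_KERNEL);
	if (!c->status)
		goto fail;
	return c;

fail:
	/* One exit path; the teardown helper copes with partial setup. */
	dev_ctx_destroy(c);
	return NULL;
}
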
@@ -1054,51 +1009,44 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
1054 * This function deals with physical dma write requests (for adapters that do not support 1009 * This function deals with physical dma write requests (for adapters that do not support
1055 * physical dma in hardware). Mostly just here for debugging... 1010 * physical dma in hardware). Mostly just here for debugging...
1056 */ 1011 */
1057static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t *data, 1012static int sbp2_handle_physdma_write(struct hpsb_host *host, int nodeid,
1058 u64 addr, size_t length, u16 flags) 1013 int destid, quadlet_t *data, u64 addr,
1014 size_t length, u16 flags)
1059{ 1015{
1060 1016
1061 /* 1017 /*
1062 * Manually put the data in the right place. 1018 * Manually put the data in the right place.
1063 */ 1019 */
1064 memcpy(bus_to_virt((u32)addr), data, length); 1020 memcpy(bus_to_virt((u32) addr), data, length);
1065 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device", (u32)addr); 1021 sbp2util_packet_dump(data, length, "sbp2 phys dma write by device",
1066 return(RCODE_COMPLETE); 1022 (u32) addr);
1023 return RCODE_COMPLETE;
1067} 1024}
1068 1025
1069/* 1026/*
1070 * This function deals with physical dma read requests (for adapters that do not support 1027 * This function deals with physical dma read requests (for adapters that do not support
1071 * physical dma in hardware). Mostly just here for debugging... 1028 * physical dma in hardware). Mostly just here for debugging...
1072 */ 1029 */
1073static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid, quadlet_t *data, 1030static int sbp2_handle_physdma_read(struct hpsb_host *host, int nodeid,
1074 u64 addr, size_t length, u16 flags) 1031 quadlet_t *data, u64 addr, size_t length,
1032 u16 flags)
1075{ 1033{
1076 1034
1077 /* 1035 /*
1078 * Grab data from memory and send a read response. 1036 * Grab data from memory and send a read response.
1079 */ 1037 */
1080 memcpy(data, bus_to_virt((u32)addr), length); 1038 memcpy(data, bus_to_virt((u32) addr), length);
1081 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device", (u32)addr); 1039 sbp2util_packet_dump(data, length, "sbp2 phys dma read by device",
1082 return(RCODE_COMPLETE); 1040 (u32) addr);
1041 return RCODE_COMPLETE;
1083} 1042}
1084#endif 1043#endif
1085 1044
1086
1087/************************************** 1045/**************************************
1088 * SBP-2 protocol related section 1046 * SBP-2 protocol related section
1089 **************************************/ 1047 **************************************/
1090 1048
1091/* 1049/*
1092 * This function determines if we should convert scsi commands for a particular sbp2 device type
1093 */
1094static __inline__ int sbp2_command_conversion_device_type(u8 device_type)
1095{
1096 return (((device_type == TYPE_DISK) ||
1097 (device_type == TYPE_RBC) ||
1098 (device_type == TYPE_ROM)) ? 1:0);
1099}
1100
1101/*
1102 * This function queries the device for the maximum concurrent logins it 1050 * This function queries the device for the maximum concurrent logins it
1103 * supports. 1051 * supports.
1104 */ 1052 */
@@ -1120,11 +1068,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1120 1068
1121 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST); 1069 scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
1122 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1); 1070 scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
1123 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { 1071 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1124 scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1125 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1126 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1127 }
1128 SBP2_DEBUG("sbp2_query_logins: lun_misc initialized"); 1072 SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
1129 1073
1130 scsi_id->query_logins_orb->reserved_resp_length = 1074 scsi_id->query_logins_orb->reserved_resp_length =
@@ -1161,12 +1105,12 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1161 1105
1162 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) { 1106 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
1163 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1107 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1164 return(-EIO); 1108 return -EIO;
1165 } 1109 }
1166 1110
1167 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) { 1111 if (scsi_id->status_block.ORB_offset_lo != scsi_id->query_logins_orb_dma) {
1168 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1112 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1169 return(-EIO); 1113 return -EIO;
1170 } 1114 }
1171 1115
1172 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || 1116 if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) ||
@@ -1174,7 +1118,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1174 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1118 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1175 1119
1176 SBP2_INFO("Error querying logins to SBP-2 device - timed out"); 1120 SBP2_INFO("Error querying logins to SBP-2 device - timed out");
1177 return(-EIO); 1121 return -EIO;
1178 } 1122 }
1179 1123
1180 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response)); 1124 sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_response, sizeof(struct sbp2_query_logins_response));
@@ -1191,7 +1135,7 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
1191 SBP2_DEBUG("Number of active logins: %d", active_logins); 1135 SBP2_DEBUG("Number of active logins: %d", active_logins);
1192 1136
1193 if (active_logins >= max_logins) { 1137 if (active_logins >= max_logins) {
1194 return(-EIO); 1138 return -EIO;
1195 } 1139 }
1196 1140
1197 return 0; 1141 return 0;
@@ -1210,13 +1154,13 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1210 1154
1211 if (!scsi_id->login_orb) { 1155 if (!scsi_id->login_orb) {
1212 SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!"); 1156 SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
1213 return(-EIO); 1157 return -EIO;
1214 } 1158 }
1215 1159
1216 if (!exclusive_login) { 1160 if (!exclusive_login) {
1217 if (sbp2_query_logins(scsi_id)) { 1161 if (sbp2_query_logins(scsi_id)) {
1218 SBP2_INFO("Device does not support any more concurrent logins"); 1162 SBP2_INFO("Device does not support any more concurrent logins");
1219 return(-EIO); 1163 return -EIO;
1220 } 1164 }
1221 } 1165 }
1222 1166
@@ -1233,12 +1177,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1233 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */ 1177 scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0); /* One second reconnect time */
1234 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */ 1178 scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login); /* Exclusive access to device */
1235 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */ 1179 scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1); /* Notify us of login complete */
1236 /* Set the lun if we were able to pull it from the device's unit directory */ 1180 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
1237 if (scsi_id->sbp2_device_type_and_lun != SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) {
1238 scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
1239 SBP2_DEBUG("sbp2_query_logins: set lun to %d",
1240 ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun));
1241 }
1242 SBP2_DEBUG("sbp2_login_device: lun_misc initialized"); 1181 SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
1243 1182
1244 scsi_id->login_orb->passwd_resp_lengths = 1183 scsi_id->login_orb->passwd_resp_lengths =
@@ -1288,7 +1227,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1288 */ 1227 */
1289 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) { 1228 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) {
1290 SBP2_ERR("Error logging into SBP-2 device - login timed-out"); 1229 SBP2_ERR("Error logging into SBP-2 device - login timed-out");
1291 return(-EIO); 1230 return -EIO;
1292 } 1231 }
1293 1232
1294 /* 1233 /*
@@ -1296,7 +1235,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1296 */ 1235 */
1297 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { 1236 if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) {
1298 SBP2_ERR("Error logging into SBP-2 device - login timed-out"); 1237 SBP2_ERR("Error logging into SBP-2 device - login timed-out");
1299 return(-EIO); 1238 return -EIO;
1300 } 1239 }
1301 1240
1302 /* 1241 /*
@@ -1307,7 +1246,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1307 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1246 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1308 1247
1309 SBP2_ERR("Error logging into SBP-2 device - login failed"); 1248 SBP2_ERR("Error logging into SBP-2 device - login failed");
1310 return(-EIO); 1249 return -EIO;
1311 } 1250 }
1312 1251
1313 /* 1252 /*
@@ -1331,7 +1270,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
1331 1270
1332 SBP2_INFO("Logged into SBP-2 device"); 1271 SBP2_INFO("Logged into SBP-2 device");
1333 1272
1334 return(0); 1273 return 0;
1335 1274
1336} 1275}
1337 1276
@@ -1385,8 +1324,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1385 atomic_set(&scsi_id->sbp2_login_complete, 0); 1324 atomic_set(&scsi_id->sbp2_login_complete, 0);
1386 1325
1387 error = hpsb_node_write(scsi_id->ne, 1326 error = hpsb_node_write(scsi_id->ne,
1388 scsi_id->sbp2_management_agent_addr, 1327 scsi_id->sbp2_management_agent_addr, data, 8);
1389 data, 8);
1390 if (error) 1328 if (error)
1391 return error; 1329 return error;
1392 1330
@@ -1396,7 +1334,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
1396 1334
1397 SBP2_INFO("Logged out of SBP-2 device"); 1335 SBP2_INFO("Logged out of SBP-2 device");
1398 1336
1399 return(0); 1337 return 0;
1400 1338
1401} 1339}
1402 1340
@@ -1456,8 +1394,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1456 atomic_set(&scsi_id->sbp2_login_complete, 0); 1394 atomic_set(&scsi_id->sbp2_login_complete, 0);
1457 1395
1458 error = hpsb_node_write(scsi_id->ne, 1396 error = hpsb_node_write(scsi_id->ne,
1459 scsi_id->sbp2_management_agent_addr, 1397 scsi_id->sbp2_management_agent_addr, data, 8);
1460 data, 8);
1461 if (error) 1398 if (error)
1462 return error; 1399 return error;
1463 1400
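
Logout and reconnect submit their management ORBs the same way: clear the completion semaphore, write the ORB's address to the target's management agent register with hpsb_node_write(), then sleep until the status write arrives. A condensed sketch of that sequence, assuming data[] already holds the ORB address exactly as the lines elided above this hunk build it (reconnect shown; login follows the same shape but waits 20*HZ):

    atomic_set(&scsi_id->sbp2_login_complete, 0);

    error = hpsb_node_write(scsi_id->ne,
                            scsi_id->sbp2_management_agent_addr, data, 8);
    if (error)
            return error;

    /* one second is enough for a reconnect */
    if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ))
            return -EIO;
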
@@ -1466,7 +1403,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1466 */ 1403 */
1467 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) { 1404 if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) {
1468 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); 1405 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
1469 return(-EIO); 1406 return -EIO;
1470 } 1407 }
1471 1408
1472 /* 1409 /*
@@ -1474,7 +1411,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1474 */ 1411 */
1475 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { 1412 if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) {
1476 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); 1413 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out");
1477 return(-EIO); 1414 return -EIO;
1478 } 1415 }
1479 1416
1480 /* 1417 /*
@@ -1485,12 +1422,12 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
1485 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { 1422 STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) {
1486 1423
1487 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed"); 1424 SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed");
1488 return(-EIO); 1425 return -EIO;
1489 } 1426 }
1490 1427
1491 HPSB_DEBUG("Reconnected to SBP-2 device"); 1428 HPSB_DEBUG("Reconnected to SBP-2 device");
1492 1429
1493 return(0); 1430 return 0;
1494 1431
1495} 1432}
1496 1433
@@ -1513,10 +1450,9 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
1513 SBP2_ERR("sbp2_set_busy_timeout error"); 1450 SBP2_ERR("sbp2_set_busy_timeout error");
1514 } 1451 }
1515 1452
1516 return(0); 1453 return 0;
1517} 1454}
1518 1455
1519
1520/* 1456/*
1521 * This function is called to parse sbp2 device's config rom unit 1457 * This function is called to parse sbp2 device's config rom unit
1522 * directory. Used to determine things like sbp2 management agent offset, 1458 * directory. Used to determine things like sbp2 management agent offset,
@@ -1529,7 +1465,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1529 struct csr1212_dentry *dentry; 1465 struct csr1212_dentry *dentry;
1530 u64 management_agent_addr; 1466 u64 management_agent_addr;
1531 u32 command_set_spec_id, command_set, unit_characteristics, 1467 u32 command_set_spec_id, command_set, unit_characteristics,
1532 firmware_revision, workarounds; 1468 firmware_revision, workarounds;
1533 int i; 1469 int i;
1534 1470
1535 SBP2_DEBUG("sbp2_parse_unit_directory"); 1471 SBP2_DEBUG("sbp2_parse_unit_directory");
@@ -1547,13 +1483,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1547 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) { 1483 if (kv->key.type == CSR1212_KV_TYPE_CSR_OFFSET) {
1548 /* Save off the management agent address */ 1484 /* Save off the management agent address */
1549 management_agent_addr = 1485 management_agent_addr =
1550 CSR1212_REGISTER_SPACE_BASE + 1486 CSR1212_REGISTER_SPACE_BASE +
1551 (kv->value.csr_offset << 2); 1487 (kv->value.csr_offset << 2);
1552 1488
1553 SBP2_DEBUG("sbp2_management_agent_addr = %x", 1489 SBP2_DEBUG("sbp2_management_agent_addr = %x",
1554 (unsigned int) management_agent_addr); 1490 (unsigned int)management_agent_addr);
1555 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) { 1491 } else if (kv->key.type == CSR1212_KV_TYPE_IMMEDIATE) {
1556 scsi_id->sbp2_device_type_and_lun = kv->value.immediate; 1492 scsi_id->sbp2_lun =
1493 ORB_SET_LUN(kv->value.immediate);
1557 } 1494 }
1558 break; 1495 break;
1559 1496
@@ -1561,14 +1498,14 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1561 /* Command spec organization */ 1498 /* Command spec organization */
1562 command_set_spec_id = kv->value.immediate; 1499 command_set_spec_id = kv->value.immediate;
1563 SBP2_DEBUG("sbp2_command_set_spec_id = %x", 1500 SBP2_DEBUG("sbp2_command_set_spec_id = %x",
1564 (unsigned int) command_set_spec_id); 1501 (unsigned int)command_set_spec_id);
1565 break; 1502 break;
1566 1503
1567 case SBP2_COMMAND_SET_KEY: 1504 case SBP2_COMMAND_SET_KEY:
1568 /* Command set used by sbp2 device */ 1505 /* Command set used by sbp2 device */
1569 command_set = kv->value.immediate; 1506 command_set = kv->value.immediate;
1570 SBP2_DEBUG("sbp2_command_set = %x", 1507 SBP2_DEBUG("sbp2_command_set = %x",
1571 (unsigned int) command_set); 1508 (unsigned int)command_set);
1572 break; 1509 break;
1573 1510
1574 case SBP2_UNIT_CHARACTERISTICS_KEY: 1511 case SBP2_UNIT_CHARACTERISTICS_KEY:
@@ -1578,7 +1515,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1578 */ 1515 */
1579 unit_characteristics = kv->value.immediate; 1516 unit_characteristics = kv->value.immediate;
1580 SBP2_DEBUG("sbp2_unit_characteristics = %x", 1517 SBP2_DEBUG("sbp2_unit_characteristics = %x",
1581 (unsigned int) unit_characteristics); 1518 (unsigned int)unit_characteristics);
1582 break; 1519 break;
1583 1520
1584 case SBP2_FIRMWARE_REVISION_KEY: 1521 case SBP2_FIRMWARE_REVISION_KEY:
@@ -1586,9 +1523,10 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1586 firmware_revision = kv->value.immediate; 1523 firmware_revision = kv->value.immediate;
1587 if (force_inquiry_hack) 1524 if (force_inquiry_hack)
1588 SBP2_INFO("sbp2_firmware_revision = %x", 1525 SBP2_INFO("sbp2_firmware_revision = %x",
1589 (unsigned int) firmware_revision); 1526 (unsigned int)firmware_revision);
1590 else SBP2_DEBUG("sbp2_firmware_revision = %x", 1527 else
1591 (unsigned int) firmware_revision); 1528 SBP2_DEBUG("sbp2_firmware_revision = %x",
1529 (unsigned int)firmware_revision);
1592 break; 1530 break;
1593 1531
1594 default: 1532 default:
@@ -1646,7 +1584,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
1646 scsi_id->sbp2_firmware_revision = firmware_revision; 1584 scsi_id->sbp2_firmware_revision = firmware_revision;
1647 scsi_id->workarounds = workarounds; 1585 scsi_id->workarounds = workarounds;
1648 if (ud->flags & UNIT_DIRECTORY_HAS_LUN) 1586 if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
1649 scsi_id->sbp2_device_type_and_lun = ud->lun; 1587 scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);
1650 } 1588 }
1651} 1589}
1652 1590
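
With this change the LUN is stored pre-packed in ORB form (scsi_id->sbp2_lun) instead of in the combined sbp2_device_type_and_lun word, so the login path and the sysfs attribute no longer need the SBP2_DEVICE_TYPE_LUN_UNINITIALIZED sentinel. The resulting data flow, collected from the hunks of this patch (ORB_SET_LUN is defined in sbp2.h; it is assumed here to be a plain mask, which is what makes re-applying it harmless):

    /* parse: remember the unit directory LUN, already ORB-encoded */
    if (ud->flags & UNIT_DIRECTORY_HAS_LUN)
            scsi_id->sbp2_lun = ORB_SET_LUN(ud->lun);

    /* login: OR it straight into the login ORB, no sentinel check */
    scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);

    /* sysfs: report it back as a plain LUN number */
    lun = ORB_SET_LUN(scsi_id->sbp2_lun);
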
@@ -1666,8 +1604,9 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1666 SBP2_DEBUG("sbp2_max_speed_and_size"); 1604 SBP2_DEBUG("sbp2_max_speed_and_size");
1667 1605
1668 /* Initial setting comes from the hosts speed map */ 1606 /* Initial setting comes from the hosts speed map */
1669 scsi_id->speed_code = hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 1607 scsi_id->speed_code =
1670 + NODEID_TO_NODE(scsi_id->ne->nodeid)]; 1608 hi->host->speed_map[NODEID_TO_NODE(hi->host->node_id) * 64 +
1609 NODEID_TO_NODE(scsi_id->ne->nodeid)];
1671 1610
1672 /* Bump down our speed if the user requested it */ 1611 /* Bump down our speed if the user requested it */
1673 if (scsi_id->speed_code > max_speed) { 1612 if (scsi_id->speed_code > max_speed) {
@@ -1678,15 +1617,16 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
1678 1617
1679 /* Payload size is the lesser of what our speed supports and what 1618 /* Payload size is the lesser of what our speed supports and what
1680 * our host supports. */ 1619 * our host supports. */
1681 scsi_id->max_payload_size = min(sbp2_speedto_max_payload[scsi_id->speed_code], 1620 scsi_id->max_payload_size =
1682 (u8)(hi->host->csr.max_rec - 1)); 1621 min(sbp2_speedto_max_payload[scsi_id->speed_code],
1622 (u8) (hi->host->csr.max_rec - 1));
1683 1623
1684 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]", 1624 HPSB_DEBUG("Node " NODE_BUS_FMT ": Max speed [%s] - Max payload [%u]",
1685 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid), 1625 NODE_BUS_ARGS(hi->host, scsi_id->ne->nodeid),
1686 hpsb_speedto_str[scsi_id->speed_code], 1626 hpsb_speedto_str[scsi_id->speed_code],
1687 1 << ((u32)scsi_id->max_payload_size + 2)); 1627 1 << ((u32) scsi_id->max_payload_size + 2));
1688 1628
1689 return(0); 1629 return 0;
1690} 1630}
1691 1631
1692/* 1632/*
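
A worked example of the clamp above, with assumed numbers: the ORB payload field is a power-of-two code where bytes = 1 << (code + 2), and a host whose csr.max_rec is 10 accepts 2^(10+1) = 2048 bytes per block request, so max_rec - 1 = 9 is the equivalent payload code. The speed table entry may allow more, in which case the host limit wins:

    u8 speed_limit = sbp2_speedto_max_payload[scsi_id->speed_code];
    u8 host_limit  = (u8)(hi->host->csr.max_rec - 1);   /* e.g. 10 - 1 = 9 */
    u8 code        = min(speed_limit, host_limit);

    /* same formula the HPSB_DEBUG line uses to print the size */
    u32 max_bytes  = 1 << ((u32)code + 2);              /* code 9 -> 2048 bytes */
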
@@ -1721,30 +1661,187 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
1721 */ 1661 */
1722 scsi_id->last_orb = NULL; 1662 scsi_id->last_orb = NULL;
1723 1663
1724 return(0); 1664 return 0;
1665}
1666
1667static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1668 struct sbp2scsi_host_info *hi,
1669 struct sbp2_command_info *command,
1670 unsigned int scsi_use_sg,
1671 struct scatterlist *sgpnt,
1672 u32 orb_direction,
1673 enum dma_data_direction dma_dir)
1674{
1675 command->dma_dir = dma_dir;
1676 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1677 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1678
1679 /* Special case if only one element (and less than 64KB in size) */
1680 if ((scsi_use_sg == 1) &&
1681 (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1682
1683 SBP2_DEBUG("Only one s/g element");
1684 command->dma_size = sgpnt[0].length;
1685 command->dma_type = CMD_DMA_PAGE;
1686 command->cmd_dma = pci_map_page(hi->host->pdev,
1687 sgpnt[0].page,
1688 sgpnt[0].offset,
1689 command->dma_size,
1690 command->dma_dir);
1691 SBP2_DMA_ALLOC("single page scatter element");
1692
1693 orb->data_descriptor_lo = command->cmd_dma;
1694 orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1695
1696 } else {
1697 struct sbp2_unrestricted_page_table *sg_element =
1698 &command->scatter_gather_element[0];
1699 u32 sg_count, sg_len;
1700 dma_addr_t sg_addr;
1701 int i, count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg,
1702 dma_dir);
1703
1704 SBP2_DMA_ALLOC("scatter list");
1705
1706 command->dma_size = scsi_use_sg;
1707 command->sge_buffer = sgpnt;
1708
1709 /* use page tables (s/g) */
1710 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1711 orb->data_descriptor_lo = command->sge_dma;
1712
1713 /*
1714 * Loop through and fill out our sbp-2 page tables
1715 * (and split up anything too large)
1716 */
1717 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1718 sg_len = sg_dma_len(sgpnt);
1719 sg_addr = sg_dma_address(sgpnt);
1720 while (sg_len) {
1721 sg_element[sg_count].segment_base_lo = sg_addr;
1722 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1723 sg_element[sg_count].length_segment_base_hi =
1724 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1725 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1726 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1727 } else {
1728 sg_element[sg_count].length_segment_base_hi =
1729 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1730 sg_len = 0;
1731 }
1732 sg_count++;
1733 }
1734 }
1735
1736 /* Number of page table (s/g) elements */
1737 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1738
1739 sbp2util_packet_dump(sg_element,
1740 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1741 "sbp2 s/g list", command->sge_dma);
1742
1743 /* Byte swap page tables if necessary */
1744 sbp2util_cpu_to_be32_buffer(sg_element,
1745 (sizeof(struct sbp2_unrestricted_page_table)) *
1746 sg_count);
1747 }
1748}
1749
1750static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1751 struct sbp2scsi_host_info *hi,
1752 struct sbp2_command_info *command,
1753 struct scatterlist *sgpnt,
1754 u32 orb_direction,
1755 unsigned int scsi_request_bufflen,
1756 void *scsi_request_buffer,
1757 enum dma_data_direction dma_dir)
1758{
1759 command->dma_dir = dma_dir;
1760 command->dma_size = scsi_request_bufflen;
1761 command->dma_type = CMD_DMA_SINGLE;
1762 command->cmd_dma = pci_map_single(hi->host->pdev, scsi_request_buffer,
1763 command->dma_size, command->dma_dir);
1764 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1765 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1766
1767 SBP2_DMA_ALLOC("single bulk");
1768
1769 /*
1770 * Handle case where we get a command w/o s/g enabled (but
1771 * check for transfers larger than 64K)
1772 */
1773 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1774
1775 orb->data_descriptor_lo = command->cmd_dma;
1776 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1777
1778 } else {
1779 struct sbp2_unrestricted_page_table *sg_element =
1780 &command->scatter_gather_element[0];
1781 u32 sg_count, sg_len;
1782 dma_addr_t sg_addr;
1783
1784 /*
1785 * Need to turn this into page tables, since the
1786 * buffer is too large.
1787 */
1788 orb->data_descriptor_lo = command->sge_dma;
1789
1790 /* Use page tables (s/g) */
1791 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1792
1793 /*
1794 * fill out our sbp-2 page tables (and split up
1795 * the large buffer)
1796 */
1797 sg_count = 0;
1798 sg_len = scsi_request_bufflen;
1799 sg_addr = command->cmd_dma;
1800 while (sg_len) {
1801 sg_element[sg_count].segment_base_lo = sg_addr;
1802 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1803 sg_element[sg_count].length_segment_base_hi =
1804 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1805 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1806 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1807 } else {
1808 sg_element[sg_count].length_segment_base_hi =
1809 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1810 sg_len = 0;
1811 }
1812 sg_count++;
1813 }
1814
1815 /* Number of page table (s/g) elements */
1816 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1817
1818 sbp2util_packet_dump(sg_element,
1819 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1820 "sbp2 s/g list", command->sge_dma);
1821
1822 /* Byte swap page tables if necessary */
1823 sbp2util_cpu_to_be32_buffer(sg_element,
1824 (sizeof(struct sbp2_unrestricted_page_table)) *
1825 sg_count);
1826 }
1725} 1827}
1726 1828
1727/* 1829/*
1728 * This function is called to create the actual command orb and s/g list 1830 * This function is called to create the actual command orb and s/g list
1729 * out of the scsi command itself. 1831 * out of the scsi command itself.
1730 */ 1832 */
1731static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, 1833static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1732 struct sbp2_command_info *command, 1834 struct sbp2_command_info *command,
1733 unchar *scsi_cmd, 1835 unchar *scsi_cmd,
1734 unsigned int scsi_use_sg, 1836 unsigned int scsi_use_sg,
1735 unsigned int scsi_request_bufflen, 1837 unsigned int scsi_request_bufflen,
1736 void *scsi_request_buffer, 1838 void *scsi_request_buffer,
1737 enum dma_data_direction dma_dir) 1839 enum dma_data_direction dma_dir)
1738
1739{ 1840{
1740 struct sbp2scsi_host_info *hi = scsi_id->hi; 1841 struct sbp2scsi_host_info *hi = scsi_id->hi;
1741 struct scatterlist *sgpnt = (struct scatterlist *) scsi_request_buffer; 1842 struct scatterlist *sgpnt = (struct scatterlist *)scsi_request_buffer;
1742 struct sbp2_command_orb *command_orb = &command->command_orb; 1843 struct sbp2_command_orb *command_orb = &command->command_orb;
1743 struct sbp2_unrestricted_page_table *scatter_gather_element = 1844 u32 orb_direction;
1744 &command->scatter_gather_element[0];
1745 u32 sg_count, sg_len, orb_direction;
1746 dma_addr_t sg_addr;
1747 int i;
1748 1845
1749 /* 1846 /*
1750 * Set-up our command ORB.. 1847 * Set-up our command ORB..
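
Both new helpers share one inner loop: any contiguous DMA region longer than SBP2_MAX_SG_ELEMENT_LENGTH (0xf000 bytes per sbp2.h) is split across several page-table entries. The rule in isolation, as a hypothetical helper using the same struct fields and macros as the code above:

    static unsigned int sbp2_fill_page_table(struct sbp2_unrestricted_page_table *pt,
                                             dma_addr_t addr, u32 len)
    {
            unsigned int n = 0;

            while (len) {
                    u32 chunk = min_t(u32, len, SBP2_MAX_SG_ELEMENT_LENGTH);

                    pt[n].segment_base_lo = addr;
                    pt[n].length_segment_base_hi =
                            PAGE_TABLE_SET_SEGMENT_LENGTH(chunk);
                    addr += chunk;
                    len  -= chunk;
                    n++;
            }
            /* the caller stores n via ORB_SET_DATA_SIZE() and byte-swaps the
             * table with sbp2util_cpu_to_be32_buffer(), as both helpers do */
            return n;
    }
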
@@ -1758,222 +1855,42 @@ static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
1758 command_orb->next_ORB_lo = 0x0; 1855 command_orb->next_ORB_lo = 0x0;
1759 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size); 1856 command_orb->misc = ORB_SET_MAX_PAYLOAD(scsi_id->max_payload_size);
1760 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code); 1857 command_orb->misc |= ORB_SET_SPEED(scsi_id->speed_code);
1761 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */ 1858 command_orb->misc |= ORB_SET_NOTIFY(1); /* Notify us when complete */
1762 1859
1763 /* 1860 if (dma_dir == DMA_NONE)
1764 * Get the direction of the transfer. If the direction is unknown, then use our 1861 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1765 * goofy table as a back-up. 1862 else if (dma_dir == DMA_TO_DEVICE && scsi_request_bufflen)
1766 */ 1863 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1767 switch (dma_dir) { 1864 else if (dma_dir == DMA_FROM_DEVICE && scsi_request_bufflen)
1768 case DMA_NONE: 1865 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1769 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER; 1866 else {
1770 break; 1867 SBP2_WARN("Falling back to DMA_NONE");
1771 case DMA_TO_DEVICE: 1868 orb_direction = ORB_DIRECTION_NO_DATA_TRANSFER;
1772 orb_direction = ORB_DIRECTION_WRITE_TO_MEDIA;
1773 break;
1774 case DMA_FROM_DEVICE:
1775 orb_direction = ORB_DIRECTION_READ_FROM_MEDIA;
1776 break;
1777 case DMA_BIDIRECTIONAL:
1778 default:
1779 SBP2_ERR("SCSI data transfer direction not specified. "
1780 "Update the SBP2 direction table in sbp2.h if "
1781 "necessary for your application");
1782 __scsi_print_command(scsi_cmd);
1783 orb_direction = sbp2scsi_direction_table[*scsi_cmd];
1784 break;
1785 } 1869 }
1786 1870
1787 /* 1871 /* Set-up our pagetable stuff */
1788 * Set-up our pagetable stuff... unfortunately, this has become
1789 * messier than I'd like. Need to clean this up a bit. ;-)
1790 */
1791 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) { 1872 if (orb_direction == ORB_DIRECTION_NO_DATA_TRANSFER) {
1792
1793 SBP2_DEBUG("No data transfer"); 1873 SBP2_DEBUG("No data transfer");
1794
1795 /*
1796 * Handle no data transfer
1797 */
1798 command_orb->data_descriptor_hi = 0x0; 1874 command_orb->data_descriptor_hi = 0x0;
1799 command_orb->data_descriptor_lo = 0x0; 1875 command_orb->data_descriptor_lo = 0x0;
1800 command_orb->misc |= ORB_SET_DIRECTION(1); 1876 command_orb->misc |= ORB_SET_DIRECTION(1);
1801
1802 } else if (scsi_use_sg) { 1877 } else if (scsi_use_sg) {
1803
1804 SBP2_DEBUG("Use scatter/gather"); 1878 SBP2_DEBUG("Use scatter/gather");
1805 1879 sbp2_prep_command_orb_sg(command_orb, hi, command, scsi_use_sg,
1806 /* 1880 sgpnt, orb_direction, dma_dir);
1807 * Special case if only one element (and less than 64KB in size)
1808 */
1809 if ((scsi_use_sg == 1) && (sgpnt[0].length <= SBP2_MAX_SG_ELEMENT_LENGTH)) {
1810
1811 SBP2_DEBUG("Only one s/g element");
1812 command->dma_dir = dma_dir;
1813 command->dma_size = sgpnt[0].length;
1814 command->dma_type = CMD_DMA_PAGE;
1815 command->cmd_dma = pci_map_page(hi->host->pdev,
1816 sgpnt[0].page,
1817 sgpnt[0].offset,
1818 command->dma_size,
1819 command->dma_dir);
1820 SBP2_DMA_ALLOC("single page scatter element");
1821
1822 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1823 command_orb->data_descriptor_lo = command->cmd_dma;
1824 command_orb->misc |= ORB_SET_DATA_SIZE(command->dma_size);
1825 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1826
1827 } else {
1828 int count = pci_map_sg(hi->host->pdev, sgpnt, scsi_use_sg, dma_dir);
1829 SBP2_DMA_ALLOC("scatter list");
1830
1831 command->dma_size = scsi_use_sg;
1832 command->dma_dir = dma_dir;
1833 command->sge_buffer = sgpnt;
1834
1835 /* use page tables (s/g) */
1836 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1837 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1838 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1839 command_orb->data_descriptor_lo = command->sge_dma;
1840
1841 /*
1842 * Loop through and fill out our sbp-2 page tables
1843 * (and split up anything too large)
1844 */
1845 for (i = 0, sg_count = 0 ; i < count; i++, sgpnt++) {
1846 sg_len = sg_dma_len(sgpnt);
1847 sg_addr = sg_dma_address(sgpnt);
1848 while (sg_len) {
1849 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1850 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1851 scatter_gather_element[sg_count].length_segment_base_hi =
1852 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1853 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1854 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1855 } else {
1856 scatter_gather_element[sg_count].length_segment_base_hi =
1857 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1858 sg_len = 0;
1859 }
1860 sg_count++;
1861 }
1862 }
1863
1864 /* Number of page table (s/g) elements */
1865 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1866
1867 sbp2util_packet_dump(scatter_gather_element,
1868 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1869 "sbp2 s/g list", command->sge_dma);
1870
1871 /*
1872 * Byte swap page tables if necessary
1873 */
1874 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1875 (sizeof(struct sbp2_unrestricted_page_table)) *
1876 sg_count);
1877
1878 }
1879
1880 } else { 1881 } else {
1881
1882 SBP2_DEBUG("No scatter/gather"); 1882 SBP2_DEBUG("No scatter/gather");
1883 1883 sbp2_prep_command_orb_no_sg(command_orb, hi, command, sgpnt,
1884 command->dma_dir = dma_dir; 1884 orb_direction, scsi_request_bufflen,
1885 command->dma_size = scsi_request_bufflen; 1885 scsi_request_buffer, dma_dir);
1886 command->dma_type = CMD_DMA_SINGLE;
1887 command->cmd_dma = pci_map_single (hi->host->pdev, scsi_request_buffer,
1888 command->dma_size,
1889 command->dma_dir);
1890 SBP2_DMA_ALLOC("single bulk");
1891
1892 /*
1893 * Handle case where we get a command w/o s/g enabled (but
1894 * check for transfers larger than 64K)
1895 */
1896 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1897
1898 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1899 command_orb->data_descriptor_lo = command->cmd_dma;
1900 command_orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1901 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1902
1903 /*
1904 * Sanity, in case our direction table is not
1905 * up-to-date
1906 */
1907 if (!scsi_request_bufflen) {
1908 command_orb->data_descriptor_hi = 0x0;
1909 command_orb->data_descriptor_lo = 0x0;
1910 command_orb->misc |= ORB_SET_DIRECTION(1);
1911 }
1912
1913 } else {
1914 /*
1915 * Need to turn this into page tables, since the
1916 * buffer is too large.
1917 */
1918 command_orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1919 command_orb->data_descriptor_lo = command->sge_dma;
1920
1921 /* Use page tables (s/g) */
1922 command_orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1923 command_orb->misc |= ORB_SET_DIRECTION(orb_direction);
1924
1925 /*
1926 * fill out our sbp-2 page tables (and split up
1927 * the large buffer)
1928 */
1929 sg_count = 0;
1930 sg_len = scsi_request_bufflen;
1931 sg_addr = command->cmd_dma;
1932 while (sg_len) {
1933 scatter_gather_element[sg_count].segment_base_lo = sg_addr;
1934 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1935 scatter_gather_element[sg_count].length_segment_base_hi =
1936 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1937 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1938 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1939 } else {
1940 scatter_gather_element[sg_count].length_segment_base_hi =
1941 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1942 sg_len = 0;
1943 }
1944 sg_count++;
1945 }
1946
1947 /* Number of page table (s/g) elements */
1948 command_orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1949
1950 sbp2util_packet_dump(scatter_gather_element,
1951 (sizeof(struct sbp2_unrestricted_page_table)) * sg_count,
1952 "sbp2 s/g list", command->sge_dma);
1953
1954 /*
1955 * Byte swap page tables if necessary
1956 */
1957 sbp2util_cpu_to_be32_buffer(scatter_gather_element,
1958 (sizeof(struct sbp2_unrestricted_page_table)) *
1959 sg_count);
1960
1961 }
1962
1963 } 1886 }
1964 1887
1965 /* 1888 /* Byte swap command ORB if necessary */
1966 * Byte swap command ORB if necessary
1967 */
1968 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb)); 1889 sbp2util_cpu_to_be32_buffer(command_orb, sizeof(struct sbp2_command_orb));
1969 1890
1970 /* 1891 /* Put our scsi command in the command ORB */
1971 * Put our scsi command in the command ORB
1972 */
1973 memset(command_orb->cdb, 0, 12); 1892 memset(command_orb->cdb, 0, 12);
1974 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd)); 1893 memcpy(command_orb->cdb, scsi_cmd, COMMAND_SIZE(*scsi_cmd));
1975
1976 return(0);
1977} 1894}
1978 1895
1979/* 1896/*
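
The transfer direction is now taken only from the request's sc_data_direction, with a warning fallback to "no data transfer"; the old per-opcode sbp2scsi_direction_table guess is gone, and DMA_BIDIRECTIONAL is rejected earlier, in sbp2scsi_queuecommand(). A condensed restatement of the new mapping (the wrapper function is only for illustration):

    static u32 sbp2_orb_direction(enum dma_data_direction dir, unsigned int len)
    {
            if (dir == DMA_TO_DEVICE && len)
                    return ORB_DIRECTION_WRITE_TO_MEDIA;
            if (dir == DMA_FROM_DEVICE && len)
                    return ORB_DIRECTION_READ_FROM_MEDIA;
            /* DMA_NONE, or an unexpected combination (warned about above) */
            return ORB_DIRECTION_NO_DATA_TRANSFER;
    }
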
@@ -1989,7 +1906,7 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
1989 1906
1990 outstanding_orb_incr; 1907 outstanding_orb_incr;
1991 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", 1908 SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x",
1992 command_orb, global_outstanding_command_orbs); 1909 command_orb, global_outstanding_command_orbs);
1993 1910
1994 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, 1911 pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma,
1995 sizeof(struct sbp2_command_orb), 1912 sizeof(struct sbp2_command_orb),
@@ -2034,10 +1951,11 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2034 * both by the sbp2 device and us. 1951 * both by the sbp2 device and us.
2035 */ 1952 */
2036 scsi_id->last_orb->next_ORB_lo = 1953 scsi_id->last_orb->next_ORB_lo =
2037 cpu_to_be32(command->command_orb_dma); 1954 cpu_to_be32(command->command_orb_dma);
2038 /* Tells hardware that this pointer is valid */ 1955 /* Tells hardware that this pointer is valid */
2039 scsi_id->last_orb->next_ORB_hi = 0x0; 1956 scsi_id->last_orb->next_ORB_hi = 0x0;
2040 pci_dma_sync_single_for_device(hi->host->pdev, scsi_id->last_orb_dma, 1957 pci_dma_sync_single_for_device(hi->host->pdev,
1958 scsi_id->last_orb_dma,
2041 sizeof(struct sbp2_command_orb), 1959 sizeof(struct sbp2_command_orb),
2042 PCI_DMA_BIDIRECTIONAL); 1960 PCI_DMA_BIDIRECTIONAL);
2043 1961
@@ -2051,14 +1969,14 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
2051 1969
2052 if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) { 1970 if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) {
2053 SBP2_ERR("sbp2util_node_write_no_wait failed"); 1971 SBP2_ERR("sbp2util_node_write_no_wait failed");
2054 return(-EIO); 1972 return -EIO;
2055 } 1973 }
2056 1974
2057 scsi_id->last_orb = command_orb; 1975 scsi_id->last_orb = command_orb;
2058 scsi_id->last_orb_dma = command->command_orb_dma; 1976 scsi_id->last_orb_dma = command->command_orb_dma;
2059 1977
2060 } 1978 }
2061 return(0); 1979 return 0;
2062} 1980}
2063 1981
2064/* 1982/*
@@ -2085,7 +2003,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2085 */ 2003 */
2086 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done); 2004 command = sbp2util_allocate_command_orb(scsi_id, SCpnt, done);
2087 if (!command) { 2005 if (!command) {
2088 return(-EIO); 2006 return -EIO;
2089 } 2007 }
2090 2008
2091 /* 2009 /*
@@ -2106,11 +2024,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2106 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg, 2024 sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg,
2107 request_bufflen, SCpnt->request_buffer, 2025 request_bufflen, SCpnt->request_buffer,
2108 SCpnt->sc_data_direction); 2026 SCpnt->sc_data_direction);
2109 /*
2110 * Update our cdb if necessary (to handle sbp2 RBC command set
2111 * differences). This is where the command set hacks go! =)
2112 */
2113 sbp2_check_sbp2_command(scsi_id, command->command_orb.cdb);
2114 2027
2115 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb), 2028 sbp2util_packet_dump(&command->command_orb, sizeof(struct sbp2_command_orb),
2116 "sbp2 command orb", command->command_orb_dma); 2029 "sbp2 command orb", command->command_orb_dma);
@@ -2125,112 +2038,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
2125 */ 2038 */
2126 sbp2_link_orb_command(scsi_id, command); 2039 sbp2_link_orb_command(scsi_id, command);
2127 2040
2128 return(0); 2041 return 0;
2129}
2130
2131
2132/*
2133 * This function deals with command set differences between Linux scsi
2134 * command set and sbp2 RBC command set.
2135 */
2136static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd)
2137{
2138 unchar new_cmd[16];
2139 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2140
2141 SBP2_DEBUG("sbp2_check_sbp2_command");
2142
2143 switch (*cmd) {
2144
2145 case READ_6:
2146
2147 if (sbp2_command_conversion_device_type(device_type)) {
2148
2149 SBP2_DEBUG("Convert READ_6 to READ_10");
2150
2151 /*
2152 * Need to turn read_6 into read_10
2153 */
2154 new_cmd[0] = 0x28;
2155 new_cmd[1] = (cmd[1] & 0xe0);
2156 new_cmd[2] = 0x0;
2157 new_cmd[3] = (cmd[1] & 0x1f);
2158 new_cmd[4] = cmd[2];
2159 new_cmd[5] = cmd[3];
2160 new_cmd[6] = 0x0;
2161 new_cmd[7] = 0x0;
2162 new_cmd[8] = cmd[4];
2163 new_cmd[9] = cmd[5];
2164
2165 memcpy(cmd, new_cmd, 10);
2166
2167 }
2168
2169 break;
2170
2171 case WRITE_6:
2172
2173 if (sbp2_command_conversion_device_type(device_type)) {
2174
2175 SBP2_DEBUG("Convert WRITE_6 to WRITE_10");
2176
2177 /*
2178 * Need to turn write_6 into write_10
2179 */
2180 new_cmd[0] = 0x2a;
2181 new_cmd[1] = (cmd[1] & 0xe0);
2182 new_cmd[2] = 0x0;
2183 new_cmd[3] = (cmd[1] & 0x1f);
2184 new_cmd[4] = cmd[2];
2185 new_cmd[5] = cmd[3];
2186 new_cmd[6] = 0x0;
2187 new_cmd[7] = 0x0;
2188 new_cmd[8] = cmd[4];
2189 new_cmd[9] = cmd[5];
2190
2191 memcpy(cmd, new_cmd, 10);
2192
2193 }
2194
2195 break;
2196
2197 case MODE_SENSE:
2198
2199 if (sbp2_command_conversion_device_type(device_type)) {
2200
2201 SBP2_DEBUG("Convert MODE_SENSE_6 to MODE_SENSE_10");
2202
2203 /*
2204 * Need to turn mode_sense_6 into mode_sense_10
2205 */
2206 new_cmd[0] = 0x5a;
2207 new_cmd[1] = cmd[1];
2208 new_cmd[2] = cmd[2];
2209 new_cmd[3] = 0x0;
2210 new_cmd[4] = 0x0;
2211 new_cmd[5] = 0x0;
2212 new_cmd[6] = 0x0;
2213 new_cmd[7] = 0x0;
2214 new_cmd[8] = cmd[4];
2215 new_cmd[9] = cmd[5];
2216
2217 memcpy(cmd, new_cmd, 10);
2218
2219 }
2220
2221 break;
2222
2223 case MODE_SELECT:
2224
2225 /*
2226 * TODO. Probably need to change mode select to 10 byte version
2227 */
2228
2229 default:
2230 break;
2231 }
2232
2233 return;
2234} 2042}
2235 2043
2236/* 2044/*
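
All of the CDB rewriting deleted here (READ_6 to READ_10, WRITE_6 to WRITE_10, MODE SENSE 6 to 10) becomes unnecessary because the SCSI midlayer can be told to issue the 10-byte forms itself; this patch sets the corresponding hints in sbp2scsi_slave_configure() further down:

    static int sbp2scsi_slave_configure(struct scsi_device *sdev)
    {
            blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
            sdev->use_10_for_rw = 1;    /* READ(10)/WRITE(10) instead of 6-byte CDBs */
            sdev->use_10_for_ms = 1;    /* MODE SENSE(10)/MODE SELECT(10) */
            return 0;
    }
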
@@ -2260,80 +2068,40 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense
2260 sense_data[14] = sbp2_status[20]; 2068 sense_data[14] = sbp2_status[20];
2261 sense_data[15] = sbp2_status[21]; 2069 sense_data[15] = sbp2_status[21];
2262 2070
2263 return(sbp2_status[8] & 0x3f); /* return scsi status */ 2071 return sbp2_status[8] & 0x3f; /* return scsi status */
2264} 2072}
2265 2073
2266/* 2074/*
2267 * This function is called after a command is completed, in order to do any necessary SBP-2 2075 * This function is called after a command is completed, in order to do any necessary SBP-2
2268 * response data translations for the SCSI stack 2076 * response data translations for the SCSI stack
2269 */ 2077 */
2270static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, 2078static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
2271 struct scsi_cmnd *SCpnt) 2079 struct scsi_cmnd *SCpnt)
2272{ 2080{
2273 u8 *scsi_buf = SCpnt->request_buffer; 2081 u8 *scsi_buf = SCpnt->request_buffer;
2274 u8 device_type = SBP2_DEVICE_TYPE (scsi_id->sbp2_device_type_and_lun);
2275 2082
2276 SBP2_DEBUG("sbp2_check_sbp2_response"); 2083 SBP2_DEBUG("sbp2_check_sbp2_response");
2277 2084
2278 switch (SCpnt->cmnd[0]) { 2085 switch (SCpnt->cmnd[0]) {
2279 2086
2280 case INQUIRY: 2087 case INQUIRY:
2281 2088 /*
2282 /* 2089 * Make sure data length is ok. Minimum length is 36 bytes
2283 * If scsi_id->sbp2_device_type_and_lun is uninitialized, then fill 2090 */
2284 * this information in from the inquiry response data. Lun is set to zero. 2091 if (scsi_buf[4] == 0) {
2285 */ 2092 scsi_buf[4] = 36 - 5;
2286 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) { 2093 }
2287 SBP2_DEBUG("Creating sbp2_device_type_and_lun from scsi inquiry data");
2288 scsi_id->sbp2_device_type_and_lun = (scsi_buf[0] & 0x1f) << 16;
2289 }
2290
2291 /*
2292 * Make sure data length is ok. Minimum length is 36 bytes
2293 */
2294 if (scsi_buf[4] == 0) {
2295 scsi_buf[4] = 36 - 5;
2296 }
2297
2298 /*
2299 * Check for Simple Direct Access Device and change it to TYPE_DISK
2300 */
2301 if ((scsi_buf[0] & 0x1f) == TYPE_RBC) {
2302 SBP2_DEBUG("Changing TYPE_RBC to TYPE_DISK");
2303 scsi_buf[0] &= 0xe0;
2304 }
2305
2306 /*
2307 * Fix ansi revision and response data format
2308 */
2309 scsi_buf[2] |= 2;
2310 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2311
2312 break;
2313
2314 case MODE_SENSE:
2315
2316 if (sbp2_command_conversion_device_type(device_type)) {
2317
2318 SBP2_DEBUG("Modify mode sense response (10 byte version)");
2319
2320 scsi_buf[0] = scsi_buf[1]; /* Mode data length */
2321 scsi_buf[1] = scsi_buf[2]; /* Medium type */
2322 scsi_buf[2] = scsi_buf[3]; /* Device specific parameter */
2323 scsi_buf[3] = scsi_buf[7]; /* Block descriptor length */
2324 memcpy(scsi_buf + 4, scsi_buf + 8, scsi_buf[0]);
2325 }
2326
2327 break;
2328 2094
2329 case MODE_SELECT: 2095 /*
2096 * Fix ansi revision and response data format
2097 */
2098 scsi_buf[2] |= 2;
2099 scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
2330 2100
2331 /* 2101 break;
2332 * TODO. Probably need to change mode select to 10 byte version
2333 */
2334 2102
2335 default: 2103 default:
2336 break; 2104 break;
2337 } 2105 }
2338 return; 2106 return;
2339} 2107}
@@ -2358,14 +2126,14 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2358 2126
2359 if (!host) { 2127 if (!host) {
2360 SBP2_ERR("host is NULL - this is bad!"); 2128 SBP2_ERR("host is NULL - this is bad!");
2361 return(RCODE_ADDRESS_ERROR); 2129 return RCODE_ADDRESS_ERROR;
2362 } 2130 }
2363 2131
2364 hi = hpsb_get_hostinfo(&sbp2_highlevel, host); 2132 hi = hpsb_get_hostinfo(&sbp2_highlevel, host);
2365 2133
2366 if (!hi) { 2134 if (!hi) {
2367 SBP2_ERR("host info is NULL - this is bad!"); 2135 SBP2_ERR("host info is NULL - this is bad!");
2368 return(RCODE_ADDRESS_ERROR); 2136 return RCODE_ADDRESS_ERROR;
2369 } 2137 }
2370 2138
2371 /* 2139 /*
@@ -2382,7 +2150,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2382 2150
2383 if (!scsi_id) { 2151 if (!scsi_id) {
2384 SBP2_ERR("scsi_id is NULL - device is gone?"); 2152 SBP2_ERR("scsi_id is NULL - device is gone?");
2385 return(RCODE_ADDRESS_ERROR); 2153 return RCODE_ADDRESS_ERROR;
2386 } 2154 }
2387 2155
2388 /* 2156 /*
@@ -2480,10 +2248,9 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
2480 SBP2_ORB_DEBUG("command orb completed"); 2248 SBP2_ORB_DEBUG("command orb completed");
2481 } 2249 }
2482 2250
2483 return(RCODE_COMPLETE); 2251 return RCODE_COMPLETE;
2484} 2252}
2485 2253
2486
2487/************************************** 2254/**************************************
2488 * SCSI interface related section 2255 * SCSI interface related section
2489 **************************************/ 2256 **************************************/
@@ -2541,6 +2308,16 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2541 } 2308 }
2542 2309
2543 /* 2310 /*
2311 * Bidirectional commands are not yet implemented,
2312 * and unknown transfer direction not handled.
2313 */
2314 if (SCpnt->sc_data_direction == DMA_BIDIRECTIONAL) {
2315 SBP2_ERR("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
2316 result = DID_ERROR << 16;
2317 goto done;
2318 }
2319
2320 /*
2544 * Try and send our SCSI command 2321 * Try and send our SCSI command
2545 */ 2322 */
2546 if (sbp2_send_command(scsi_id, SCpnt, done)) { 2323 if (sbp2_send_command(scsi_id, SCpnt, done)) {
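
Bidirectional requests are now refused up front instead of being guessed around with the old opcode table. The error is reported through the host byte of the SCSI result, which occupies bits 16-23, hence the shift (layout shown for illustration):

    /* SCpnt->result layout (standard SCSI midlayer convention):
     *   bits  0- 7   SCSI status byte
     *   bits 16-23   host byte: DID_ERROR, DID_BUS_BUSY, DID_NO_CONNECT, ...
     * so DID_ERROR << 16 reports a host-level failure with no status byte. */
    result = DID_ERROR << 16;
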
@@ -2616,55 +2393,56 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2616 * complete the command, just let it get retried at the end of the 2393 * complete the command, just let it get retried at the end of the
2617 * bus reset. 2394 * bus reset.
2618 */ 2395 */
2619 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 2396 if (!hpsb_node_entry_valid(scsi_id->ne)
2397 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2620 SBP2_ERR("Bus reset in progress - retry command later"); 2398 SBP2_ERR("Bus reset in progress - retry command later");
2621 return; 2399 return;
2622 } 2400 }
2623 2401
2624 /* 2402 /*
2625 * Switch on scsi status 2403 * Switch on scsi status
2626 */ 2404 */
2627 switch (scsi_status) { 2405 switch (scsi_status) {
2628 case SBP2_SCSI_STATUS_GOOD: 2406 case SBP2_SCSI_STATUS_GOOD:
2629 SCpnt->result = DID_OK; 2407 SCpnt->result = DID_OK;
2630 break; 2408 break;
2631 2409
2632 case SBP2_SCSI_STATUS_BUSY: 2410 case SBP2_SCSI_STATUS_BUSY:
2633 SBP2_ERR("SBP2_SCSI_STATUS_BUSY"); 2411 SBP2_ERR("SBP2_SCSI_STATUS_BUSY");
2634 SCpnt->result = DID_BUS_BUSY << 16; 2412 SCpnt->result = DID_BUS_BUSY << 16;
2635 break; 2413 break;
2636 2414
2637 case SBP2_SCSI_STATUS_CHECK_CONDITION: 2415 case SBP2_SCSI_STATUS_CHECK_CONDITION:
2638 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION"); 2416 SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
2639 SCpnt->result = CHECK_CONDITION << 1; 2417 SCpnt->result = CHECK_CONDITION << 1;
2640 2418
2641 /* 2419 /*
2642 * Debug stuff 2420 * Debug stuff
2643 */ 2421 */
2644#if CONFIG_IEEE1394_SBP2_DEBUG >= 1 2422#if CONFIG_IEEE1394_SBP2_DEBUG >= 1
2645 scsi_print_command(SCpnt); 2423 scsi_print_command(SCpnt);
2646 scsi_print_sense("bh", SCpnt); 2424 scsi_print_sense("bh", SCpnt);
2647#endif 2425#endif
2648 2426
2649 break; 2427 break;
2650 2428
2651 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT: 2429 case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
2652 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT"); 2430 SBP2_ERR("SBP2_SCSI_STATUS_SELECTION_TIMEOUT");
2653 SCpnt->result = DID_NO_CONNECT << 16; 2431 SCpnt->result = DID_NO_CONNECT << 16;
2654 scsi_print_command(SCpnt); 2432 scsi_print_command(SCpnt);
2655 break; 2433 break;
2656 2434
2657 case SBP2_SCSI_STATUS_CONDITION_MET: 2435 case SBP2_SCSI_STATUS_CONDITION_MET:
2658 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT: 2436 case SBP2_SCSI_STATUS_RESERVATION_CONFLICT:
2659 case SBP2_SCSI_STATUS_COMMAND_TERMINATED: 2437 case SBP2_SCSI_STATUS_COMMAND_TERMINATED:
2660 SBP2_ERR("Bad SCSI status = %x", scsi_status); 2438 SBP2_ERR("Bad SCSI status = %x", scsi_status);
2661 SCpnt->result = DID_ERROR << 16; 2439 SCpnt->result = DID_ERROR << 16;
2662 scsi_print_command(SCpnt); 2440 scsi_print_command(SCpnt);
2663 break; 2441 break;
2664 2442
2665 default: 2443 default:
2666 SBP2_ERR("Unsupported SCSI status = %x", scsi_status); 2444 SBP2_ERR("Unsupported SCSI status = %x", scsi_status);
2667 SCpnt->result = DID_ERROR << 16; 2445 SCpnt->result = DID_ERROR << 16;
2668 } 2446 }
2669 2447
2670 /* 2448 /*
@@ -2678,7 +2456,8 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2678 * If a bus reset is in progress and there was an error, complete 2456 * If a bus reset is in progress and there was an error, complete
2679 * the command as busy so that it will get retried. 2457 * the command as busy so that it will get retried.
2680 */ 2458 */
2681 if (!hpsb_node_entry_valid(scsi_id->ne) && (scsi_status != SBP2_SCSI_STATUS_GOOD)) { 2459 if (!hpsb_node_entry_valid(scsi_id->ne)
2460 && (scsi_status != SBP2_SCSI_STATUS_GOOD)) {
2682 SBP2_ERR("Completing command with busy (bus reset)"); 2461 SBP2_ERR("Completing command with busy (bus reset)");
2683 SCpnt->result = DID_BUS_BUSY << 16; 2462 SCpnt->result = DID_BUS_BUSY << 16;
2684 } 2463 }
@@ -2699,31 +2478,29 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2699 /* 2478 /*
2700 * Tell scsi stack that we're done with this command 2479 * Tell scsi stack that we're done with this command
2701 */ 2480 */
2702 done (SCpnt); 2481 done(SCpnt);
2703} 2482}
2704 2483
2705
2706static int sbp2scsi_slave_alloc(struct scsi_device *sdev) 2484static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2707{ 2485{
2708 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev; 2486 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
2709 return 0; 2487 return 0;
2710} 2488}
2711 2489
2712
2713static int sbp2scsi_slave_configure(struct scsi_device *sdev) 2490static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2714{ 2491{
2715 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2492 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2493 sdev->use_10_for_rw = 1;
2494 sdev->use_10_for_ms = 1;
2716 return 0; 2495 return 0;
2717} 2496}
2718 2497
2719
2720static void sbp2scsi_slave_destroy(struct scsi_device *sdev) 2498static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2721{ 2499{
2722 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; 2500 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
2723 return; 2501 return;
2724} 2502}
2725 2503
2726
2727/* 2504/*
2728 * Called by scsi stack when something has really gone wrong. Usually 2505 * Called by scsi stack when something has really gone wrong. Usually
2729 * called when a command has timed-out for some reason. 2506 * called when a command has timed-out for some reason.
@@ -2769,7 +2546,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2769 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); 2546 sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY);
2770 } 2547 }
2771 2548
2772 return(SUCCESS); 2549 return SUCCESS;
2773} 2550}
2774 2551
2775/* 2552/*
@@ -2779,28 +2556,20 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2779{ 2556{
2780 struct scsi_id_instance_data *scsi_id = 2557 struct scsi_id_instance_data *scsi_id =
2781 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2558 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2782 unsigned long flags;
2783 2559
2784 SBP2_ERR("reset requested"); 2560 SBP2_ERR("reset requested");
2785 2561
2786 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2787
2788 if (sbp2util_node_is_available(scsi_id)) { 2562 if (sbp2util_node_is_available(scsi_id)) {
2789 SBP2_ERR("Generating sbp2 fetch agent reset"); 2563 SBP2_ERR("Generating sbp2 fetch agent reset");
2790 sbp2_agent_reset(scsi_id, 0); 2564 sbp2_agent_reset(scsi_id, 0);
2791 } 2565 }
2792 2566
2793 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
2794
2795 return SUCCESS; 2567 return SUCCESS;
2796} 2568}
2797 2569
2798static const char *sbp2scsi_info (struct Scsi_Host *host) 2570static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
2799{ 2571 struct device_attribute *attr,
2800 return "SCSI emulation for IEEE-1394 SBP-2 Devices"; 2572 char *buf)
2801}
2802
2803static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr, char *buf)
2804{ 2573{
2805 struct scsi_device *sdev; 2574 struct scsi_device *sdev;
2806 struct scsi_id_instance_data *scsi_id; 2575 struct scsi_id_instance_data *scsi_id;
@@ -2812,10 +2581,7 @@ static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_att
2812 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0])) 2581 if (!(scsi_id = (struct scsi_id_instance_data *)sdev->host->hostdata[0]))
2813 return 0; 2582 return 0;
2814 2583
2815 if (scsi_id->sbp2_device_type_and_lun == SBP2_DEVICE_TYPE_LUN_UNINITIALIZED) 2584 lun = ORB_SET_LUN(scsi_id->sbp2_lun);
2816 lun = 0;
2817 else
2818 lun = ORB_SET_LUN(scsi_id->sbp2_device_type_and_lun);
2819 2585
2820 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid, 2586 return sprintf(buf, "%016Lx:%d:%d\n", (unsigned long long)scsi_id->ne->guid,
2821 scsi_id->ud->id, lun); 2587 scsi_id->ud->id, lun);
@@ -2837,12 +2603,9 @@ static struct scsi_host_template scsi_driver_template = {
2837 .module = THIS_MODULE, 2603 .module = THIS_MODULE,
2838 .name = "SBP-2 IEEE-1394", 2604 .name = "SBP-2 IEEE-1394",
2839 .proc_name = SBP2_DEVICE_NAME, 2605 .proc_name = SBP2_DEVICE_NAME,
2840 .info = sbp2scsi_info,
2841 .queuecommand = sbp2scsi_queuecommand, 2606 .queuecommand = sbp2scsi_queuecommand,
2842 .eh_abort_handler = sbp2scsi_abort, 2607 .eh_abort_handler = sbp2scsi_abort,
2843 .eh_device_reset_handler = sbp2scsi_reset, 2608 .eh_device_reset_handler = sbp2scsi_reset,
2844 .eh_bus_reset_handler = sbp2scsi_reset,
2845 .eh_host_reset_handler = sbp2scsi_reset,
2846 .slave_alloc = sbp2scsi_slave_alloc, 2609 .slave_alloc = sbp2scsi_slave_alloc,
2847 .slave_configure = sbp2scsi_slave_configure, 2610 .slave_configure = sbp2scsi_slave_configure,
2848 .slave_destroy = sbp2scsi_slave_destroy, 2611 .slave_destroy = sbp2scsi_slave_destroy,
@@ -2861,8 +2624,6 @@ static int sbp2_module_init(void)
2861 2624
2862 SBP2_DEBUG("sbp2_module_init"); 2625 SBP2_DEBUG("sbp2_module_init");
2863 2626
2864 printk(KERN_INFO "sbp2: %s\n", version);
2865
2866 /* Module load debug option to force one command at a time (serializing I/O) */ 2627 /* Module load debug option to force one command at a time (serializing I/O) */
2867 if (serialize_io) { 2628 if (serialize_io) {
2868 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)"); 2629 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
@@ -2874,7 +2635,6 @@ static int sbp2_module_init(void)
2874 /* Set max sectors (module load option). Default is 255 sectors. */ 2635 /* Set max sectors (module load option). Default is 255 sectors. */
2875 scsi_driver_template.max_sectors = max_sectors; 2636 scsi_driver_template.max_sectors = max_sectors;
2876 2637
2877
2878 /* Register our high level driver with 1394 stack */ 2638 /* Register our high level driver with 1394 stack */
2879 hpsb_register_highlevel(&sbp2_highlevel); 2639 hpsb_register_highlevel(&sbp2_highlevel);
2880 2640
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index cd425be74841..900ea1d25e71 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -119,8 +119,8 @@ struct sbp2_query_logins_response {
119struct sbp2_reconnect_orb { 119struct sbp2_reconnect_orb {
120 u32 reserved1; 120 u32 reserved1;
121 u32 reserved2; 121 u32 reserved2;
122 u32 reserved3; 122 u32 reserved3;
123 u32 reserved4; 123 u32 reserved4;
124 u32 login_ID_misc; 124 u32 login_ID_misc;
125 u32 reserved5; 125 u32 reserved5;
126 u32 status_FIFO_hi; 126 u32 status_FIFO_hi;
@@ -130,8 +130,8 @@ struct sbp2_reconnect_orb {
130struct sbp2_logout_orb { 130struct sbp2_logout_orb {
131 u32 reserved1; 131 u32 reserved1;
132 u32 reserved2; 132 u32 reserved2;
133 u32 reserved3; 133 u32 reserved3;
134 u32 reserved4; 134 u32 reserved4;
135 u32 login_ID_misc; 135 u32 login_ID_misc;
136 u32 reserved5; 136 u32 reserved5;
137 u32 status_FIFO_hi; 137 u32 status_FIFO_hi;
@@ -188,7 +188,7 @@ struct sbp2_unrestricted_page_table {
188struct sbp2_status_block { 188struct sbp2_status_block {
189 u32 ORB_offset_hi_misc; 189 u32 ORB_offset_hi_misc;
190 u32 ORB_offset_lo; 190 u32 ORB_offset_lo;
191 u8 command_set_dependent[24]; 191 u8 command_set_dependent[24];
192}; 192};
193 193
194/* 194/*
@@ -211,7 +211,7 @@ struct sbp2_status_block {
211 * specified for write posting, where the ohci controller will 211 * specified for write posting, where the ohci controller will
212 * automatically send an ack_complete when the status is written by the 212 * automatically send an ack_complete when the status is written by the
213 * sbp2 device... saving a split transaction. =) 213 * sbp2 device... saving a split transaction. =)
214 */ 214 */
215#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL 215#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
216#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe 216#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
217#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0 217#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
@@ -229,9 +229,6 @@ struct sbp2_status_block {
229#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14 229#define SBP2_DEVICE_TYPE_AND_LUN_KEY 0x14
230#define SBP2_FIRMWARE_REVISION_KEY 0x3c 230#define SBP2_FIRMWARE_REVISION_KEY 0x3c
231 231
232#define SBP2_DEVICE_TYPE(q) (((q) >> 16) & 0x1f)
233#define SBP2_DEVICE_LUN(q) ((q) & 0xffff)
234
235#define SBP2_AGENT_STATE_OFFSET 0x00ULL 232#define SBP2_AGENT_STATE_OFFSET 0x00ULL
236#define SBP2_AGENT_RESET_OFFSET 0x04ULL 233#define SBP2_AGENT_RESET_OFFSET 0x04ULL
237#define SBP2_ORB_POINTER_OFFSET 0x08ULL 234#define SBP2_ORB_POINTER_OFFSET 0x08ULL
@@ -256,8 +253,6 @@ struct sbp2_status_block {
256 */ 253 */
257#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 254#define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800
258 255
259#define SBP2_DEVICE_TYPE_LUN_UNINITIALIZED 0xffffffff
260
261/* 256/*
262 * SCSI specific stuff 257 * SCSI specific stuff
263 */ 258 */
@@ -265,45 +260,7 @@ struct sbp2_status_block {
265#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 260#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
266#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */ 261#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
267#define SBP2_MAX_SECTORS 255 /* Max sectors supported */ 262#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
268 263#define SBP2_MAX_CMDS 8 /* This should be safe */
269/*
270 * SCSI direction table...
271 * (now used as a back-up in case the direction passed down from above is "unknown")
272 *
273 * DIN = IN data direction
274 * DOU = OUT data direction
275 * DNO = No data transfer
276 * DUN = Unknown data direction
277 *
278 * Opcode 0xec (Teac specific "opc execute") possibly should be DNO,
279 * but we'll change it when somebody reports a problem with this.
280 */
281#define DIN ORB_DIRECTION_READ_FROM_MEDIA
282#define DOU ORB_DIRECTION_WRITE_TO_MEDIA
283#define DNO ORB_DIRECTION_NO_DATA_TRANSFER
284#define DUN DIN
285
286static unchar sbp2scsi_direction_table[0x100] = {
287 DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
288 DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
289 DIN,DUN,DIN,DIN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
290 DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
291 DOU,DOU,DIN,DIN,DIN,DNO,DIN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DNO,DUN,
292 DUN,DIN,DIN,DNO,DNO,DOU,DUN,DUN,DNO,DIN,DIN,DNO,DIN,DOU,DUN,DUN,
293 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
294 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
295 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
296 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
297 DUN,DNO,DOU,DOU,DIN,DNO,DNO,DNO,DIN,DNO,DOU,DUN,DNO,DIN,DOU,DOU,
298 DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DIN,DNO,DNO,DNO,DIN,DIN,DUN,
299 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
300 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
301 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
302 DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
303};
304
305/* This should be safe */
306#define SBP2_MAX_CMDS 8
307 264
308/* This is the two dma types we use for cmd_dma below */ 265/* This is the two dma types we use for cmd_dma below */
309enum cmd_dma_types { 266enum cmd_dma_types {
@@ -338,10 +295,8 @@ struct sbp2_command_info {
338#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 295#define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1
339#define SBP2_BREAKAGE_INQUIRY_HACK 0x2 296#define SBP2_BREAKAGE_INQUIRY_HACK 0x2
340 297
341
342struct sbp2scsi_host_info; 298struct sbp2scsi_host_info;
343 299
344
345/* 300/*
346 * Information needed on a per scsi id basis (one for each sbp2 device) 301 * Information needed on a per scsi id basis (one for each sbp2 device)
347 */ 302 */
@@ -379,7 +334,7 @@ struct scsi_id_instance_data {
379 u32 sbp2_command_set_spec_id; 334 u32 sbp2_command_set_spec_id;
380 u32 sbp2_command_set; 335 u32 sbp2_command_set;
381 u32 sbp2_unit_characteristics; 336 u32 sbp2_unit_characteristics;
382 u32 sbp2_device_type_and_lun; 337 u32 sbp2_lun;
383 u32 sbp2_firmware_revision; 338 u32 sbp2_firmware_revision;
384 339
385 /* 340 /*
@@ -411,7 +366,6 @@ struct scsi_id_instance_data {
411 u32 workarounds; 366 u32 workarounds;
412}; 367};
413 368
414
415/* Sbp2 host data structure (one per IEEE1394 host) */ 369/* Sbp2 host data structure (one per IEEE1394 host) */
416struct sbp2scsi_host_info { 370struct sbp2scsi_host_info {
417 struct hpsb_host *host; /* IEEE1394 host */ 371 struct hpsb_host *host; /* IEEE1394 host */
@@ -456,20 +410,12 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id);
456static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, 410static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid,
457 quadlet_t *data, u64 addr, size_t length, u16 flags); 411 quadlet_t *data, u64 addr, size_t length, u16 flags);
458static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait); 412static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait);
459static int sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id,
460 struct sbp2_command_info *command,
461 unchar *scsi_cmd,
462 unsigned int scsi_use_sg,
463 unsigned int scsi_request_bufflen,
464 void *scsi_request_buffer,
465 enum dma_data_direction dma_dir);
466static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, 413static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id,
467 struct sbp2_command_info *command); 414 struct sbp2_command_info *command);
468static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, 415static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
469 struct scsi_cmnd *SCpnt, 416 struct scsi_cmnd *SCpnt,
470 void (*done)(struct scsi_cmnd *)); 417 void (*done)(struct scsi_cmnd *));
471static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data); 418static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data);
472static void sbp2_check_sbp2_command(struct scsi_id_instance_data *scsi_id, unchar *cmd);
473static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, 419static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
474 struct scsi_cmnd *SCpnt); 420 struct scsi_cmnd *SCpnt);
475static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, 421static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
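Note on the sbp2 hunks above: the CDB-indexed direction table (DUN/DIN/DOU/DNO) is dropped, the forward declarations of sbp2_create_command_orb() and sbp2_check_sbp2_command() go away, and the sbp2_device_type_and_lun field is narrowed to sbp2_lun. A minimal sketch of the underlying idea, assuming the transfer direction is now taken from the SCSI midlayer rather than looked up by opcode; the helper name below is illustrative and the real call site is not part of these hunks:

#include <linux/dma-mapping.h>	/* enum dma_data_direction */
#include <scsi/scsi_cmnd.h>	/* struct scsi_cmnd */

/* Sketch only: the midlayer already records the direction of every
 * command in sc_data_direction, so no per-opcode table is needed.
 */
static enum dma_data_direction sbp2_cmd_direction(struct scsi_cmnd *SCpnt)
{
	return SCpnt->sc_data_direction;
}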
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 23911da50154..608479b2df14 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -19,12 +19,6 @@
19 * 19 *
20 * NOTES: 20 * NOTES:
21 * 21 *
22 * jds -- add private data to file to keep track of iso contexts associated
23 * with each open -- so release won't kill all iso transfers.
24 *
25 * Damien Douxchamps: Fix failure when the number of DMA pages per frame is
26 * one.
27 *
28 * ioctl return codes: 22 * ioctl return codes:
29 * EFAULT is only for invalid address for the argp 23 * EFAULT is only for invalid address for the argp
30 * EINVAL for out of range values 24 * EINVAL for out of range values
@@ -34,12 +28,6 @@
34 * ENOTTY for unsupported ioctl request 28 * ENOTTY for unsupported ioctl request
35 * 29 *
36 */ 30 */
37
38/* Markus Tavenrath <speedygoo@speedygoo.de> :
39 - fixed checks for valid buffer-numbers in video1394_icotl
40 - changed the ways the dma prg's are used, now it's possible to use
41 even a single dma buffer
42*/
43#include <linux/config.h> 31#include <linux/config.h>
44#include <linux/kernel.h> 32#include <linux/kernel.h>
45#include <linux/list.h> 33#include <linux/list.h>
@@ -77,14 +65,6 @@
77 65
78#define ISO_CHANNELS 64 66#define ISO_CHANNELS 64
79 67
80#ifndef virt_to_page
81#define virt_to_page(x) MAP_NR(x)
82#endif
83
84#ifndef vmalloc_32
85#define vmalloc_32(x) vmalloc(x)
86#endif
87
88struct it_dma_prg { 68struct it_dma_prg {
89 struct dma_cmd begin; 69 struct dma_cmd begin;
90 quadlet_t data[4]; 70 quadlet_t data[4];
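The #ifndef fallbacks removed above date from much older kernels; in the tree this patch applies to, both symbols always come from the core headers, so no local substitutes are needed. Illustrative includes only:

#include <linux/mm.h>		/* virt_to_page() */
#include <linux/vmalloc.h>	/* vmalloc_32() */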
@@ -206,14 +186,12 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
206 struct dma_iso_ctx *d; 186 struct dma_iso_ctx *d;
207 int i; 187 int i;
208 188
209 d = kmalloc(sizeof(struct dma_iso_ctx), GFP_KERNEL); 189 d = kzalloc(sizeof(*d), GFP_KERNEL);
210 if (d == NULL) { 190 if (!d) {
211 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx"); 191 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma_iso_ctx");
212 return NULL; 192 return NULL;
213 } 193 }
214 194
215 memset(d, 0, sizeof *d);
216
217 d->ohci = ohci; 195 d->ohci = ohci;
218 d->type = type; 196 d->type = type;
219 d->channel = channel; 197 d->channel = channel;
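This and the following hunks collapse kmalloc() plus memset() into kzalloc() and switch size expressions to sizeof(*ptr), which stays correct if the pointed-to type ever changes. A minimal sketch of the pattern, using the same dma_iso_ctx allocation as above:

	struct dma_iso_ctx *d;

	/* before: kmalloc(sizeof(struct dma_iso_ctx), ...) + memset(d, 0, ...)
	 * after:  a single call that allocates and zero-fills
	 */
	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;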
@@ -251,9 +229,8 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
251 } 229 }
252 d->ctx = d->iso_tasklet.context; 230 d->ctx = d->iso_tasklet.context;
253 231
254 d->prg_reg = kmalloc(d->num_desc * sizeof(struct dma_prog_region), 232 d->prg_reg = kmalloc(d->num_desc * sizeof(*d->prg_reg), GFP_KERNEL);
255 GFP_KERNEL); 233 if (!d->prg_reg) {
256 if (d->prg_reg == NULL) {
257 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs"); 234 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate ir prg regs");
258 free_dma_iso_ctx(d); 235 free_dma_iso_ctx(d);
259 return NULL; 236 return NULL;
@@ -268,15 +245,14 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
268 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx; 245 d->cmdPtr = OHCI1394_IsoRcvCommandPtr+32*d->ctx;
269 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx; 246 d->ctxMatch = OHCI1394_IsoRcvContextMatch+32*d->ctx;
270 247
271 d->ir_prg = kmalloc(d->num_desc * sizeof(struct dma_cmd *), 248 d->ir_prg = kzalloc(d->num_desc * sizeof(*d->ir_prg),
272 GFP_KERNEL); 249 GFP_KERNEL);
273 250
274 if (d->ir_prg == NULL) { 251 if (!d->ir_prg) {
275 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg"); 252 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate dma ir prg");
276 free_dma_iso_ctx(d); 253 free_dma_iso_ctx(d);
277 return NULL; 254 return NULL;
278 } 255 }
279 memset(d->ir_prg, 0, d->num_desc * sizeof(struct dma_cmd *));
280 256
281 d->nb_cmd = d->buf_size / PAGE_SIZE + 1; 257 d->nb_cmd = d->buf_size / PAGE_SIZE + 1;
282 d->left_size = (d->frame_size % PAGE_SIZE) ? 258 d->left_size = (d->frame_size % PAGE_SIZE) ?
@@ -297,16 +273,15 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
297 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx; 273 d->ctrlClear = OHCI1394_IsoXmitContextControlClear+16*d->ctx;
298 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx; 274 d->cmdPtr = OHCI1394_IsoXmitCommandPtr+16*d->ctx;
299 275
300 d->it_prg = kmalloc(d->num_desc * sizeof(struct it_dma_prg *), 276 d->it_prg = kzalloc(d->num_desc * sizeof(*d->it_prg),
301 GFP_KERNEL); 277 GFP_KERNEL);
302 278
303 if (d->it_prg == NULL) { 279 if (!d->it_prg) {
304 PRINT(KERN_ERR, ohci->host->id, 280 PRINT(KERN_ERR, ohci->host->id,
305 "Failed to allocate dma it prg"); 281 "Failed to allocate dma it prg");
306 free_dma_iso_ctx(d); 282 free_dma_iso_ctx(d);
307 return NULL; 283 return NULL;
308 } 284 }
309 memset(d->it_prg, 0, d->num_desc*sizeof(struct it_dma_prg *));
310 285
311 d->packet_size = packet_size; 286 d->packet_size = packet_size;
312 287
@@ -337,47 +312,24 @@ alloc_dma_iso_ctx(struct ti_ohci *ohci, int type, int num_desc,
337 } 312 }
338 } 313 }
339 314
340 d->buffer_status = kmalloc(d->num_desc * sizeof(unsigned int), 315 d->buffer_status =
341 GFP_KERNEL); 316 kzalloc(d->num_desc * sizeof(*d->buffer_status), GFP_KERNEL);
342 d->buffer_prg_assignment = kmalloc(d->num_desc * sizeof(unsigned int), 317 d->buffer_prg_assignment =
343 GFP_KERNEL); 318 kzalloc(d->num_desc * sizeof(*d->buffer_prg_assignment), GFP_KERNEL);
344 d->buffer_time = kmalloc(d->num_desc * sizeof(struct timeval), 319 d->buffer_time =
345 GFP_KERNEL); 320 kzalloc(d->num_desc * sizeof(*d->buffer_time), GFP_KERNEL);
346 d->last_used_cmd = kmalloc(d->num_desc * sizeof(unsigned int), 321 d->last_used_cmd =
347 GFP_KERNEL); 322 kzalloc(d->num_desc * sizeof(*d->last_used_cmd), GFP_KERNEL);
348 d->next_buffer = kmalloc(d->num_desc * sizeof(int), 323 d->next_buffer =
349 GFP_KERNEL); 324 kzalloc(d->num_desc * sizeof(*d->next_buffer), GFP_KERNEL);
350 325
351 if (d->buffer_status == NULL) { 326 if (!d->buffer_status || !d->buffer_prg_assignment || !d->buffer_time ||
352 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_status"); 327 !d->last_used_cmd || !d->next_buffer) {
353 free_dma_iso_ctx(d); 328 PRINT(KERN_ERR, ohci->host->id,
354 return NULL; 329 "Failed to allocate dma_iso_ctx member");
355 }
356 if (d->buffer_prg_assignment == NULL) {
357 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_prg_assignment");
358 free_dma_iso_ctx(d);
359 return NULL;
360 }
361 if (d->buffer_time == NULL) {
362 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate buffer_time");
363 free_dma_iso_ctx(d);
364 return NULL;
365 }
366 if (d->last_used_cmd == NULL) {
367 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate last_used_cmd");
368 free_dma_iso_ctx(d);
369 return NULL;
370 }
371 if (d->next_buffer == NULL) {
372 PRINT(KERN_ERR, ohci->host->id, "Failed to allocate next_buffer");
373 free_dma_iso_ctx(d); 330 free_dma_iso_ctx(d);
374 return NULL; 331 return NULL;
375 } 332 }
376 memset(d->buffer_status, 0, d->num_desc * sizeof(unsigned int));
377 memset(d->buffer_prg_assignment, 0, d->num_desc * sizeof(unsigned int));
378 memset(d->buffer_time, 0, d->num_desc * sizeof(struct timeval));
379 memset(d->last_used_cmd, 0, d->num_desc * sizeof(unsigned int));
380 memset(d->next_buffer, -1, d->num_desc * sizeof(int));
381 333
382 spin_lock_init(&d->lock); 334 spin_lock_init(&d->lock);
383 335
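The hunk above also replaces five separate allocation-failure branches with one combined test; free_dma_iso_ctx() remains the single cleanup path, so it has to cope with members that were never allocated. One visible behavioural detail: next_buffer used to be memset() to -1, whereas kzalloc() leaves it zero-filled, and no -1 initialization appears on the new side of this hunk. Sketch of the consolidated pattern (two of the five arrays shown):

	d->buffer_status = kzalloc(d->num_desc * sizeof(*d->buffer_status),
				   GFP_KERNEL);
	d->next_buffer = kzalloc(d->num_desc * sizeof(*d->next_buffer),
				 GFP_KERNEL);

	/* one check covers every allocation; a single error path cleans up */
	if (!d->buffer_status || !d->next_buffer) {
		free_dma_iso_ctx(d);
		return NULL;
	}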
@@ -539,7 +491,7 @@ static void wakeup_dma_ir_ctx(unsigned long l)
539 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) { 491 if (d->ir_prg[i][d->nb_cmd-1].status & cpu_to_le32(0xFFFF0000)) {
540 reset_ir_status(d, i); 492 reset_ir_status(d, i);
541 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY; 493 d->buffer_status[d->buffer_prg_assignment[i]] = VIDEO1394_BUFFER_READY;
542 do_gettimeofday(&d->buffer_time[i]); 494 do_gettimeofday(&d->buffer_time[d->buffer_prg_assignment[i]]);
543 } 495 }
544 } 496 }
545 497
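The one-line change above is a fix rather than a cleanup: the timestamp is now stored at the buffer index obtained through buffer_prg_assignment[], the same mapping already used for buffer_status on the previous line, instead of at the raw DMA-program slot. Sketch of the corrected branch, with the mapping factored into a local purely for clarity (the local variable is illustrative):

	if (d->ir_prg[i][d->nb_cmd - 1].status & cpu_to_le32(0xFFFF0000)) {
		unsigned int buf = d->buffer_prg_assignment[i];	/* prg slot -> buffer */

		reset_ir_status(d, i);
		d->buffer_status[buf] = VIDEO1394_BUFFER_READY;
		do_gettimeofday(&d->buffer_time[buf]);	/* was: buffer_time[i] */
	}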
@@ -1046,7 +998,6 @@ static int __video1394_ioctl(struct file *file,
1046 998
1047 /* set time of buffer */ 999 /* set time of buffer */
1048 v.filltime = d->buffer_time[v.buffer]; 1000 v.filltime = d->buffer_time[v.buffer];
1049// printk("Buffer %d time %d\n", v.buffer, (d->buffer_time[v.buffer]).tv_usec);
1050 1001
1051 /* 1002 /*
1052 * Look ahead to see how many more buffers have been received 1003 * Look ahead to see how many more buffers have been received
@@ -1085,7 +1036,7 @@ static int __video1394_ioctl(struct file *file,
1085 } 1036 }
1086 1037
1087 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) { 1038 if (d->flags & VIDEO1394_VARIABLE_PACKET_SIZE) {
1088 int buf_size = d->nb_cmd * sizeof(unsigned int); 1039 int buf_size = d->nb_cmd * sizeof(*psizes);
1089 struct video1394_queue_variable __user *p = argp; 1040 struct video1394_queue_variable __user *p = argp;
1090 unsigned int __user *qv; 1041 unsigned int __user *qv;
1091 1042
@@ -1104,7 +1055,7 @@ static int __video1394_ioctl(struct file *file,
1104 1055
1105 spin_lock_irqsave(&d->lock,flags); 1056 spin_lock_irqsave(&d->lock,flags);
1106 1057
1107 // last_buffer is last_prg 1058 /* last_buffer is last_prg */
1108 next_prg = (d->last_buffer + 1) % d->num_desc; 1059 next_prg = (d->last_buffer + 1) % d->num_desc;
1109 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) { 1060 if (d->buffer_status[v.buffer]!=VIDEO1394_BUFFER_FREE) {
1110 PRINT(KERN_ERR, ohci->host->id, 1061 PRINT(KERN_ERR, ohci->host->id,
@@ -1251,13 +1202,12 @@ static int video1394_open(struct inode *inode, struct file *file)
1251 if (ohci == NULL) 1202 if (ohci == NULL)
1252 return -EIO; 1203 return -EIO;
1253 1204
1254 ctx = kmalloc(sizeof(struct file_ctx), GFP_KERNEL); 1205 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1255 if (ctx == NULL) { 1206 if (!ctx) {
1256 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx"); 1207 PRINT(KERN_ERR, ohci->host->id, "Cannot malloc file_ctx");
1257 return -ENOMEM; 1208 return -ENOMEM;
1258 } 1209 }
1259 1210
1260 memset(ctx, 0, sizeof(struct file_ctx));
1261 ctx->ohci = ohci; 1211 ctx->ohci = ohci;
1262 INIT_LIST_HEAD(&ctx->context_list); 1212 INIT_LIST_HEAD(&ctx->context_list);
1263 ctx->current_ctx = NULL; 1213 ctx->current_ctx = NULL;
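Same kzalloc() conversion for the per-open file_ctx. Since kzalloc() already zero-fills the structure, the explicit ctx->current_ctx = NULL kept in context is now redundant (harmless, but it could be dropped in a follow-up). Sketch of the resulting open path; the final private_data assignment is assumed from the surrounding function rather than shown in this hunk:

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ohci = ohci;			/* host this descriptor talks to */
	INIT_LIST_HEAD(&ctx->context_list);	/* iso contexts owned by this open */
	file->private_data = ctx;		/* assumed: set later in video1394_open() */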