Diffstat (limited to 'drivers')
29 files changed, 1184 insertions, 1115 deletions
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 186737539cf5..2769e505f051 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -120,12 +120,19 @@ config IEEE1394_VIDEO1394 | |||
120 | this option only if you have an IEEE 1394 video device connected to | 120 | this option only if you have an IEEE 1394 video device connected to |
121 | an OHCI-1394 card. | 121 | an OHCI-1394 card. |
122 | 122 | ||
123 | comment "SBP-2 support (for storage devices) requires SCSI" | ||
124 | depends on IEEE1394 && SCSI=n | ||
125 | |||
123 | config IEEE1394_SBP2 | 126 | config IEEE1394_SBP2 |
124 | tristate "SBP-2 support (Harddisks etc.)" | 127 | tristate "SBP-2 support (Harddisks etc.)" |
125 | depends on IEEE1394 && SCSI && (PCI || BROKEN) | 128 | depends on IEEE1394 && SCSI && (PCI || BROKEN) |
126 | help | 129 | help |
127 | This option enables you to use SBP-2 devices connected to your IEEE | 130 | This option enables you to use SBP-2 devices connected to an IEEE |
128 | 1394 bus. SBP-2 devices include harddrives and DVD devices. | 131 | 1394 bus. SBP-2 devices include storage devices like harddisks and |
132 | DVD drives, also some other FireWire devices like scanners. | ||
133 | |||
134 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | ||
135 | configuration section. | ||
129 | 136 | ||
130 | config IEEE1394_SBP2_PHYS_DMA | 137 | config IEEE1394_SBP2_PHYS_DMA |
131 | bool "Enable replacement for physical DMA in SBP2" | 138 | bool "Enable replacement for physical DMA in SBP2" |
diff --git a/drivers/ieee1394/csr.c b/drivers/ieee1394/csr.c
index 149573db91c5..ab0c80f61b9d 100644
--- a/drivers/ieee1394/csr.c
+++ b/drivers/ieee1394/csr.c
@@ -17,11 +17,13 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/string.h> | 20 | #include <linux/jiffies.h> |
21 | #include <linux/kernel.h> | ||
21 | #include <linux/module.h> | 22 | #include <linux/module.h> |
22 | #include <linux/moduleparam.h> | 23 | #include <linux/moduleparam.h> |
23 | #include <linux/param.h> | 24 | #include <linux/param.h> |
24 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/string.h> | ||
25 | 27 | ||
26 | #include "csr1212.h" | 28 | #include "csr1212.h" |
27 | #include "ieee1394_types.h" | 29 | #include "ieee1394_types.h" |
@@ -149,31 +151,18 @@ static void host_reset(struct hpsb_host *host) | |||
149 | 151 | ||
150 | /* | 152 | /* |
151 | * HI == seconds (bits 0:2) | 153 | * HI == seconds (bits 0:2) |
152 | * LO == fraction units of 1/8000 of a second, as per 1394 (bits 19:31) | 154 | * LO == fractions of a second in units of 125usec (bits 19:31) |
153 | * | ||
154 | * Convert to units and then to HZ, for comparison to jiffies. | ||
155 | * | ||
156 | * By default this will end up being 800 units, or 100ms (125usec per | ||
157 | * unit). | ||
158 | * | 155 | * |
159 | * NOTE: The spec says 1/8000, but also says we can compute based on 1/8192 | 156 | * Convert SPLIT_TIMEOUT to jiffies. |
160 | * like CSR specifies. Should make our math less complex. | 157 | * The default and minimum as per 1394a-2000 clause 8.3.2.2.6 is 100ms. |
161 | */ | 158 | */ |
162 | static inline void calculate_expire(struct csr_control *csr) | 159 | static inline void calculate_expire(struct csr_control *csr) |
163 | { | 160 | { |
164 | unsigned long units; | 161 | unsigned long usecs = |
165 | 162 | (csr->split_timeout_hi & 0x07) * USEC_PER_SEC + | |
166 | /* Take the seconds, and convert to units */ | 163 | (csr->split_timeout_lo >> 19) * 125L; |
167 | units = (unsigned long)(csr->split_timeout_hi & 0x07) << 13; | ||
168 | |||
169 | /* Add in the fractional units */ | ||
170 | units += (unsigned long)(csr->split_timeout_lo >> 19); | ||
171 | |||
172 | /* Convert to jiffies */ | ||
173 | csr->expire = (unsigned long)(units * HZ) >> 13UL; | ||
174 | 164 | ||
175 | /* Just to keep from rounding low */ | 165 | csr->expire = usecs_to_jiffies(usecs > 100000L ? usecs : 100000L); |
176 | csr->expire++; | ||
177 | 166 | ||
178 | HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ); | 167 | HPSB_VERBOSE("CSR: setting expire to %lu, HZ=%u", csr->expire, HZ); |
179 | } | 168 | } |
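
The rewritten calculate_expire() above replaces the shift-based math with a plain microsecond calculation. A stand-alone sketch of the same arithmetic (user space; the function name and the local USEC_PER_SEC definition exist only in this example) shows that the default register contents still come out at the 100 ms minimum required by 1394a-2000 clause 8.3.2.2.6:

/* SPLIT_TIMEOUT conversion as in the hunk above: HI holds seconds in
 * bits 0:2, LO holds 1/8000-second units (125 us each) in bits 19:31. */
#include <stdio.h>

#define USEC_PER_SEC 1000000L

static unsigned long split_timeout_to_usecs(unsigned int hi, unsigned int lo)
{
	unsigned long usecs = (hi & 0x07) * USEC_PER_SEC + (lo >> 19) * 125L;

	/* clamp to the 100 ms default/minimum */
	return usecs > 100000L ? usecs : 100000L;
}

int main(void)
{
	/* default: 0 seconds, 800 units of 125 us -> prints "100000 usec" */
	printf("%lu usec\n", split_timeout_to_usecs(0, 800u << 19));
	return 0;
}
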
diff --git a/drivers/ieee1394/csr.h b/drivers/ieee1394/csr.h
index ea9aa4f53ab6..f11546550d84 100644
--- a/drivers/ieee1394/csr.h
+++ b/drivers/ieee1394/csr.h
@@ -1,75 +1,73 @@ | |||
1 | |||
2 | #ifndef _IEEE1394_CSR_H | 1 | #ifndef _IEEE1394_CSR_H |
3 | #define _IEEE1394_CSR_H | 2 | #define _IEEE1394_CSR_H |
4 | 3 | ||
5 | #ifdef CONFIG_PREEMPT | 4 | #include <linux/spinlock_types.h> |
6 | #include <linux/sched.h> | ||
7 | #endif | ||
8 | 5 | ||
9 | #include "csr1212.h" | 6 | #include "csr1212.h" |
7 | #include "ieee1394_types.h" | ||
10 | 8 | ||
11 | #define CSR_REGISTER_BASE 0xfffff0000000ULL | 9 | #define CSR_REGISTER_BASE 0xfffff0000000ULL |
12 | 10 | ||
13 | /* register offsets relative to CSR_REGISTER_BASE */ | 11 | /* register offsets relative to CSR_REGISTER_BASE */ |
14 | #define CSR_STATE_CLEAR 0x0 | 12 | #define CSR_STATE_CLEAR 0x0 |
15 | #define CSR_STATE_SET 0x4 | 13 | #define CSR_STATE_SET 0x4 |
16 | #define CSR_NODE_IDS 0x8 | 14 | #define CSR_NODE_IDS 0x8 |
17 | #define CSR_RESET_START 0xc | 15 | #define CSR_RESET_START 0xc |
18 | #define CSR_SPLIT_TIMEOUT_HI 0x18 | 16 | #define CSR_SPLIT_TIMEOUT_HI 0x18 |
19 | #define CSR_SPLIT_TIMEOUT_LO 0x1c | 17 | #define CSR_SPLIT_TIMEOUT_LO 0x1c |
20 | #define CSR_CYCLE_TIME 0x200 | 18 | #define CSR_CYCLE_TIME 0x200 |
21 | #define CSR_BUS_TIME 0x204 | 19 | #define CSR_BUS_TIME 0x204 |
22 | #define CSR_BUSY_TIMEOUT 0x210 | 20 | #define CSR_BUSY_TIMEOUT 0x210 |
23 | #define CSR_BUS_MANAGER_ID 0x21c | 21 | #define CSR_BUS_MANAGER_ID 0x21c |
24 | #define CSR_BANDWIDTH_AVAILABLE 0x220 | 22 | #define CSR_BANDWIDTH_AVAILABLE 0x220 |
25 | #define CSR_CHANNELS_AVAILABLE 0x224 | 23 | #define CSR_CHANNELS_AVAILABLE 0x224 |
26 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 | 24 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 |
27 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 | 25 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 |
28 | #define CSR_BROADCAST_CHANNEL 0x234 | 26 | #define CSR_BROADCAST_CHANNEL 0x234 |
29 | #define CSR_CONFIG_ROM 0x400 | 27 | #define CSR_CONFIG_ROM 0x400 |
30 | #define CSR_CONFIG_ROM_END 0x800 | 28 | #define CSR_CONFIG_ROM_END 0x800 |
31 | #define CSR_FCP_COMMAND 0xB00 | 29 | #define CSR_FCP_COMMAND 0xB00 |
32 | #define CSR_FCP_RESPONSE 0xD00 | 30 | #define CSR_FCP_RESPONSE 0xD00 |
33 | #define CSR_FCP_END 0xF00 | 31 | #define CSR_FCP_END 0xF00 |
34 | #define CSR_TOPOLOGY_MAP 0x1000 | 32 | #define CSR_TOPOLOGY_MAP 0x1000 |
35 | #define CSR_TOPOLOGY_MAP_END 0x1400 | 33 | #define CSR_TOPOLOGY_MAP_END 0x1400 |
36 | #define CSR_SPEED_MAP 0x2000 | 34 | #define CSR_SPEED_MAP 0x2000 |
37 | #define CSR_SPEED_MAP_END 0x3000 | 35 | #define CSR_SPEED_MAP_END 0x3000 |
38 | 36 | ||
39 | /* IEEE 1394 bus specific Configuration ROM Key IDs */ | 37 | /* IEEE 1394 bus specific Configuration ROM Key IDs */ |
40 | #define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30) | 38 | #define IEEE1394_KV_ID_POWER_REQUIREMENTS (0x30) |
41 | 39 | ||
42 | /* IEEE 1394 Bus Inforamation Block specifics */ | 40 | /* IEEE 1394 Bus Information Block specifics */ |
43 | #define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t)) | 41 | #define CSR_BUS_INFO_SIZE (5 * sizeof(quadlet_t)) |
44 | 42 | ||
45 | #define CSR_IRMC_SHIFT 31 | 43 | #define CSR_IRMC_SHIFT 31 |
46 | #define CSR_CMC_SHIFT 30 | 44 | #define CSR_CMC_SHIFT 30 |
47 | #define CSR_ISC_SHIFT 29 | 45 | #define CSR_ISC_SHIFT 29 |
48 | #define CSR_BMC_SHIFT 28 | 46 | #define CSR_BMC_SHIFT 28 |
49 | #define CSR_PMC_SHIFT 27 | 47 | #define CSR_PMC_SHIFT 27 |
50 | #define CSR_CYC_CLK_ACC_SHIFT 16 | 48 | #define CSR_CYC_CLK_ACC_SHIFT 16 |
51 | #define CSR_MAX_REC_SHIFT 12 | 49 | #define CSR_MAX_REC_SHIFT 12 |
52 | #define CSR_MAX_ROM_SHIFT 8 | 50 | #define CSR_MAX_ROM_SHIFT 8 |
53 | #define CSR_GENERATION_SHIFT 4 | 51 | #define CSR_GENERATION_SHIFT 4 |
54 | 52 | ||
55 | #define CSR_SET_BUS_INFO_GENERATION(csr, gen) \ | 53 | #define CSR_SET_BUS_INFO_GENERATION(csr, gen) \ |
56 | ((csr)->bus_info_data[2] = \ | 54 | ((csr)->bus_info_data[2] = \ |
57 | cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) & \ | 55 | cpu_to_be32((be32_to_cpu((csr)->bus_info_data[2]) & \ |
58 | ~(0xf << CSR_GENERATION_SHIFT)) | \ | 56 | ~(0xf << CSR_GENERATION_SHIFT)) | \ |
59 | (gen) << CSR_GENERATION_SHIFT)) | 57 | (gen) << CSR_GENERATION_SHIFT)) |
60 | 58 | ||
61 | struct csr_control { | 59 | struct csr_control { |
62 | spinlock_t lock; | 60 | spinlock_t lock; |
63 | 61 | ||
64 | quadlet_t state; | 62 | quadlet_t state; |
65 | quadlet_t node_ids; | 63 | quadlet_t node_ids; |
66 | quadlet_t split_timeout_hi, split_timeout_lo; | 64 | quadlet_t split_timeout_hi, split_timeout_lo; |
67 | unsigned long expire; // Calculated from split_timeout | 65 | unsigned long expire; /* Calculated from split_timeout */ |
68 | quadlet_t cycle_time; | 66 | quadlet_t cycle_time; |
69 | quadlet_t bus_time; | 67 | quadlet_t bus_time; |
70 | quadlet_t bus_manager_id; | 68 | quadlet_t bus_manager_id; |
71 | quadlet_t bandwidth_available; | 69 | quadlet_t bandwidth_available; |
72 | quadlet_t channels_available_hi, channels_available_lo; | 70 | quadlet_t channels_available_hi, channels_available_lo; |
73 | quadlet_t broadcast_channel; | 71 | quadlet_t broadcast_channel; |
74 | 72 | ||
75 | /* Bus Info */ | 73 | /* Bus Info */ |
@@ -84,8 +82,8 @@ struct csr_control { | |||
84 | 82 | ||
85 | struct csr1212_csr *rom; | 83 | struct csr1212_csr *rom; |
86 | 84 | ||
87 | quadlet_t topology_map[256]; | 85 | quadlet_t topology_map[256]; |
88 | quadlet_t speed_map[1024]; | 86 | quadlet_t speed_map[1024]; |
89 | }; | 87 | }; |
90 | 88 | ||
91 | extern struct csr1212_bus_ops csr_bus_ops; | 89 | extern struct csr1212_bus_ops csr_bus_ops; |
@@ -93,4 +91,9 @@ extern struct csr1212_bus_ops csr_bus_ops; | |||
93 | int init_csr(void); | 91 | int init_csr(void); |
94 | void cleanup_csr(void); | 92 | void cleanup_csr(void); |
95 | 93 | ||
94 | /* hpsb_update_config_rom() is deprecated */ | ||
95 | struct hpsb_host; | ||
96 | int hpsb_update_config_rom(struct hpsb_host *host, const quadlet_t *new_rom, | ||
97 | size_t size, unsigned char rom_version); | ||
98 | |||
96 | #endif /* _IEEE1394_CSR_H */ | 99 | #endif /* _IEEE1394_CSR_H */ |
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index ca5167de707d..c68f328e1a29 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -7,10 +7,13 @@ | |||
7 | * directory of the kernel sources for details. | 7 | * directory of the kernel sources for details. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/mm.h> | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/vmalloc.h> | 12 | #include <linux/pci.h> |
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
13 | #include <linux/mm.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <asm/scatterlist.h> | ||
16 | |||
14 | #include "dma.h" | 17 | #include "dma.h" |
15 | 18 | ||
16 | /* dma_prog_region */ | 19 | /* dma_prog_region */ |
diff --git a/drivers/ieee1394/dma.h b/drivers/ieee1394/dma.h
index 061550a6fb99..a1682aba71c7 100644
--- a/drivers/ieee1394/dma.h
+++ b/drivers/ieee1394/dma.h
@@ -10,69 +10,91 @@ | |||
10 | #ifndef IEEE1394_DMA_H | 10 | #ifndef IEEE1394_DMA_H |
11 | #define IEEE1394_DMA_H | 11 | #define IEEE1394_DMA_H |
12 | 12 | ||
13 | #include <linux/pci.h> | 13 | #include <asm/types.h> |
14 | #include <asm/scatterlist.h> | 14 | |
15 | 15 | struct pci_dev; | |
16 | /* struct dma_prog_region | 16 | struct scatterlist; |
17 | 17 | struct vm_area_struct; | |
18 | a small, physically-contiguous DMA buffer with random-access, | 18 | |
19 | synchronous usage characteristics | 19 | /** |
20 | */ | 20 | * struct dma_prog_region - small contiguous DMA buffer |
21 | 21 | * @kvirt: kernel virtual address | |
22 | * @dev: PCI device | ||
23 | * @n_pages: number of kernel pages | ||
24 | * @bus_addr: base bus address | ||
25 | * | ||
26 | * a small, physically contiguous DMA buffer with random-access, synchronous | ||
27 | * usage characteristics | ||
28 | */ | ||
22 | struct dma_prog_region { | 29 | struct dma_prog_region { |
23 | unsigned char *kvirt; /* kernel virtual address */ | 30 | unsigned char *kvirt; |
24 | struct pci_dev *dev; /* PCI device */ | 31 | struct pci_dev *dev; |
25 | unsigned int n_pages; /* # of kernel pages */ | 32 | unsigned int n_pages; |
26 | dma_addr_t bus_addr; /* base bus address */ | 33 | dma_addr_t bus_addr; |
27 | }; | 34 | }; |
28 | 35 | ||
29 | /* clear out all fields but do not allocate any memory */ | 36 | /* clear out all fields but do not allocate any memory */ |
30 | void dma_prog_region_init(struct dma_prog_region *prog); | 37 | void dma_prog_region_init(struct dma_prog_region *prog); |
31 | int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, struct pci_dev *dev); | 38 | int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes, |
39 | struct pci_dev *dev); | ||
32 | void dma_prog_region_free(struct dma_prog_region *prog); | 40 | void dma_prog_region_free(struct dma_prog_region *prog); |
33 | 41 | ||
34 | static inline dma_addr_t dma_prog_region_offset_to_bus(struct dma_prog_region *prog, unsigned long offset) | 42 | static inline dma_addr_t dma_prog_region_offset_to_bus( |
43 | struct dma_prog_region *prog, unsigned long offset) | ||
35 | { | 44 | { |
36 | return prog->bus_addr + offset; | 45 | return prog->bus_addr + offset; |
37 | } | 46 | } |
38 | 47 | ||
39 | /* struct dma_region | 48 | /** |
40 | 49 | * struct dma_region - large non-contiguous DMA buffer | |
41 | a large, non-physically-contiguous DMA buffer with streaming, | 50 | * @virt: kernel virtual address |
42 | asynchronous usage characteristics | 51 | * @dev: PCI device |
43 | */ | 52 | * @n_pages: number of kernel pages |
44 | 53 | * @n_dma_pages: number of IOMMU pages | |
54 | * @sglist: IOMMU mapping | ||
55 | * @direction: PCI_DMA_TODEVICE, etc. | ||
56 | * | ||
57 | * a large, non-physically-contiguous DMA buffer with streaming, asynchronous | ||
58 | * usage characteristics | ||
59 | */ | ||
45 | struct dma_region { | 60 | struct dma_region { |
46 | unsigned char *kvirt; /* kernel virtual address */ | 61 | unsigned char *kvirt; |
47 | struct pci_dev *dev; /* PCI device */ | 62 | struct pci_dev *dev; |
48 | unsigned int n_pages; /* # of kernel pages */ | 63 | unsigned int n_pages; |
49 | unsigned int n_dma_pages; /* # of IOMMU pages */ | 64 | unsigned int n_dma_pages; |
50 | struct scatterlist *sglist; /* IOMMU mapping */ | 65 | struct scatterlist *sglist; |
51 | int direction; /* PCI_DMA_TODEVICE, etc */ | 66 | int direction; |
52 | }; | 67 | }; |
53 | 68 | ||
54 | /* clear out all fields but do not allocate anything */ | 69 | /* clear out all fields but do not allocate anything */ |
55 | void dma_region_init(struct dma_region *dma); | 70 | void dma_region_init(struct dma_region *dma); |
56 | 71 | ||
57 | /* allocate the buffer and map it to the IOMMU */ | 72 | /* allocate the buffer and map it to the IOMMU */ |
58 | int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, struct pci_dev *dev, int direction); | 73 | int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes, |
74 | struct pci_dev *dev, int direction); | ||
59 | 75 | ||
60 | /* unmap and free the buffer */ | 76 | /* unmap and free the buffer */ |
61 | void dma_region_free(struct dma_region *dma); | 77 | void dma_region_free(struct dma_region *dma); |
62 | 78 | ||
63 | /* sync the CPU's view of the buffer */ | 79 | /* sync the CPU's view of the buffer */ |
64 | void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, unsigned long len); | 80 | void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset, |
81 | unsigned long len); | ||
82 | |||
65 | /* sync the IO bus' view of the buffer */ | 83 | /* sync the IO bus' view of the buffer */ |
66 | void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, unsigned long len); | 84 | void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset, |
85 | unsigned long len); | ||
67 | 86 | ||
68 | /* map the buffer into a user space process */ | 87 | /* map the buffer into a user space process */ |
69 | int dma_region_mmap(struct dma_region *dma, struct file *file, struct vm_area_struct *vma); | 88 | int dma_region_mmap(struct dma_region *dma, struct file *file, |
89 | struct vm_area_struct *vma); | ||
70 | 90 | ||
71 | /* macro to index into a DMA region (or dma_prog_region) */ | 91 | /* macro to index into a DMA region (or dma_prog_region) */ |
72 | #define dma_region_i(_dma, _type, _index) ( ((_type*) ((_dma)->kvirt)) + (_index) ) | 92 | #define dma_region_i(_dma, _type, _index) \ |
93 | ( ((_type*) ((_dma)->kvirt)) + (_index) ) | ||
73 | 94 | ||
74 | /* return the DMA bus address of the byte with the given offset | 95 | /* return the DMA bus address of the byte with the given offset |
75 | relative to the beginning of the dma_region */ | 96 | * relative to the beginning of the dma_region */ |
76 | dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, unsigned long offset); | 97 | dma_addr_t dma_region_offset_to_bus(struct dma_region *dma, |
98 | unsigned long offset); | ||
77 | 99 | ||
78 | #endif /* IEEE1394_DMA_H */ | 100 | #endif /* IEEE1394_DMA_H */ |
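
dma_region_i() and dma_region_offset_to_bus() give the CPU view and the bus view of the same offset. A hypothetical fragment, assuming the dma.h declarations above (struct my_desc and the function name are invented for illustration), indexing an array of fixed-size descriptors kept in a dma_region:

#include <linux/types.h>
#include "dma.h"

struct my_desc {
	u32 control;
	u32 data;
};

static dma_addr_t nth_desc_bus_addr(struct dma_region *payload, unsigned int n)
{
	/* CPU view of entry n, through the kernel virtual mapping */
	struct my_desc *d = dma_region_i(payload, struct my_desc, n);

	d->control = 0;

	/* bus view of the same entry, suitable for handing to the device */
	return dma_region_offset_to_bus(payload, n * sizeof(struct my_desc));
}
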
diff --git a/drivers/ieee1394/dv1394-private.h b/drivers/ieee1394/dv1394-private.h
index 80b5ac7fe383..7d1d2845b420 100644
--- a/drivers/ieee1394/dv1394-private.h
+++ b/drivers/ieee1394/dv1394-private.h
@@ -460,7 +460,7 @@ struct video_card { | |||
460 | int dma_running; | 460 | int dma_running; |
461 | 461 | ||
462 | /* | 462 | /* |
463 | 3) the sleeping semaphore 'sem' - this is used from process context only, | 463 | 3) the sleeping mutex 'mtx' - this is used from process context only, |
464 | to serialize various operations on the video_card. Even though only one | 464 | to serialize various operations on the video_card. Even though only one |
465 | open() is allowed, we still need to prevent multiple threads of execution | 465 | open() is allowed, we still need to prevent multiple threads of execution |
466 | from entering calls like read, write, ioctl, etc. | 466 | from entering calls like read, write, ioctl, etc. |
@@ -468,9 +468,9 @@ struct video_card { | |||
468 | I honestly can't think of a good reason to use dv1394 from several threads | 468 | I honestly can't think of a good reason to use dv1394 from several threads |
469 | at once, but we need to serialize anyway to prevent oopses =). | 469 | at once, but we need to serialize anyway to prevent oopses =). |
470 | 470 | ||
471 | NOTE: if you need both spinlock and sem, take sem first to avoid deadlock! | 471 | NOTE: if you need both spinlock and mtx, take mtx first to avoid deadlock! |
472 | */ | 472 | */ |
473 | struct semaphore sem; | 473 | struct mutex mtx; |
474 | 474 | ||
475 | /* people waiting for buffer space, please form a line here... */ | 475 | /* people waiting for buffer space, please form a line here... */ |
476 | wait_queue_head_t waitq; | 476 | wait_queue_head_t waitq; |
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 87532dd43374..6c72f04b2b5d 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -95,6 +95,7 @@ | |||
95 | #include <linux/fs.h> | 95 | #include <linux/fs.h> |
96 | #include <linux/poll.h> | 96 | #include <linux/poll.h> |
97 | #include <linux/smp_lock.h> | 97 | #include <linux/smp_lock.h> |
98 | #include <linux/mutex.h> | ||
98 | #include <linux/bitops.h> | 99 | #include <linux/bitops.h> |
99 | #include <asm/byteorder.h> | 100 | #include <asm/byteorder.h> |
100 | #include <asm/atomic.h> | 101 | #include <asm/atomic.h> |
@@ -110,15 +111,15 @@ | |||
110 | #include <linux/compat.h> | 111 | #include <linux/compat.h> |
111 | #include <linux/cdev.h> | 112 | #include <linux/cdev.h> |
112 | 113 | ||
114 | #include "dv1394.h" | ||
115 | #include "dv1394-private.h" | ||
116 | #include "highlevel.h" | ||
117 | #include "hosts.h" | ||
113 | #include "ieee1394.h" | 118 | #include "ieee1394.h" |
119 | #include "ieee1394_core.h" | ||
120 | #include "ieee1394_hotplug.h" | ||
114 | #include "ieee1394_types.h" | 121 | #include "ieee1394_types.h" |
115 | #include "nodemgr.h" | 122 | #include "nodemgr.h" |
116 | #include "hosts.h" | ||
117 | #include "ieee1394_core.h" | ||
118 | #include "highlevel.h" | ||
119 | #include "dv1394.h" | ||
120 | #include "dv1394-private.h" | ||
121 | |||
122 | #include "ohci1394.h" | 123 | #include "ohci1394.h" |
123 | 124 | ||
124 | /* DEBUG LEVELS: | 125 | /* DEBUG LEVELS: |
@@ -136,13 +137,13 @@ | |||
136 | #if DV1394_DEBUG_LEVEL >= 2 | 137 | #if DV1394_DEBUG_LEVEL >= 2 |
137 | #define irq_printk( args... ) printk( args ) | 138 | #define irq_printk( args... ) printk( args ) |
138 | #else | 139 | #else |
139 | #define irq_printk( args... ) | 140 | #define irq_printk( args... ) do {} while (0) |
140 | #endif | 141 | #endif |
141 | 142 | ||
142 | #if DV1394_DEBUG_LEVEL >= 1 | 143 | #if DV1394_DEBUG_LEVEL >= 1 |
143 | #define debug_printk( args... ) printk( args) | 144 | #define debug_printk( args... ) printk( args) |
144 | #else | 145 | #else |
145 | #define debug_printk( args... ) | 146 | #define debug_printk( args... ) do {} while (0) |
146 | #endif | 147 | #endif |
147 | 148 | ||
148 | /* issue a dummy PCI read to force the preceding write | 149 | /* issue a dummy PCI read to force the preceding write |
@@ -247,7 +248,7 @@ static void frame_delete(struct frame *f) | |||
247 | 248 | ||
248 | Frame_prepare() must be called OUTSIDE the video->spinlock. | 249 | Frame_prepare() must be called OUTSIDE the video->spinlock. |
249 | However, frame_prepare() must still be serialized, so | 250 | However, frame_prepare() must still be serialized, so |
250 | it should be called WITH the video->sem taken. | 251 | it should be called WITH the video->mtx taken. |
251 | */ | 252 | */ |
252 | 253 | ||
253 | static void frame_prepare(struct video_card *video, unsigned int this_frame) | 254 | static void frame_prepare(struct video_card *video, unsigned int this_frame) |
@@ -1271,7 +1272,7 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma) | |||
1271 | int retval = -EINVAL; | 1272 | int retval = -EINVAL; |
1272 | 1273 | ||
1273 | /* serialize mmap */ | 1274 | /* serialize mmap */ |
1274 | down(&video->sem); | 1275 | mutex_lock(&video->mtx); |
1275 | 1276 | ||
1276 | if ( ! video_card_initialized(video) ) { | 1277 | if ( ! video_card_initialized(video) ) { |
1277 | retval = do_dv1394_init_default(video); | 1278 | retval = do_dv1394_init_default(video); |
@@ -1281,7 +1282,7 @@ static int dv1394_mmap(struct file *file, struct vm_area_struct *vma) | |||
1281 | 1282 | ||
1282 | retval = dma_region_mmap(&video->dv_buf, file, vma); | 1283 | retval = dma_region_mmap(&video->dv_buf, file, vma); |
1283 | out: | 1284 | out: |
1284 | up(&video->sem); | 1285 | mutex_unlock(&video->mtx); |
1285 | return retval; | 1286 | return retval; |
1286 | } | 1287 | } |
1287 | 1288 | ||
@@ -1337,17 +1338,17 @@ static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t | |||
1337 | 1338 | ||
1338 | /* serialize this to prevent multi-threaded mayhem */ | 1339 | /* serialize this to prevent multi-threaded mayhem */ |
1339 | if (file->f_flags & O_NONBLOCK) { | 1340 | if (file->f_flags & O_NONBLOCK) { |
1340 | if (down_trylock(&video->sem)) | 1341 | if (!mutex_trylock(&video->mtx)) |
1341 | return -EAGAIN; | 1342 | return -EAGAIN; |
1342 | } else { | 1343 | } else { |
1343 | if (down_interruptible(&video->sem)) | 1344 | if (mutex_lock_interruptible(&video->mtx)) |
1344 | return -ERESTARTSYS; | 1345 | return -ERESTARTSYS; |
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | if ( !video_card_initialized(video) ) { | 1348 | if ( !video_card_initialized(video) ) { |
1348 | ret = do_dv1394_init_default(video); | 1349 | ret = do_dv1394_init_default(video); |
1349 | if (ret) { | 1350 | if (ret) { |
1350 | up(&video->sem); | 1351 | mutex_unlock(&video->mtx); |
1351 | return ret; | 1352 | return ret; |
1352 | } | 1353 | } |
1353 | } | 1354 | } |
@@ -1418,7 +1419,7 @@ static ssize_t dv1394_write(struct file *file, const char __user *buffer, size_t | |||
1418 | 1419 | ||
1419 | remove_wait_queue(&video->waitq, &wait); | 1420 | remove_wait_queue(&video->waitq, &wait); |
1420 | set_current_state(TASK_RUNNING); | 1421 | set_current_state(TASK_RUNNING); |
1421 | up(&video->sem); | 1422 | mutex_unlock(&video->mtx); |
1422 | return ret; | 1423 | return ret; |
1423 | } | 1424 | } |
1424 | 1425 | ||
@@ -1434,17 +1435,17 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count | |||
1434 | 1435 | ||
1435 | /* serialize this to prevent multi-threaded mayhem */ | 1436 | /* serialize this to prevent multi-threaded mayhem */ |
1436 | if (file->f_flags & O_NONBLOCK) { | 1437 | if (file->f_flags & O_NONBLOCK) { |
1437 | if (down_trylock(&video->sem)) | 1438 | if (!mutex_trylock(&video->mtx)) |
1438 | return -EAGAIN; | 1439 | return -EAGAIN; |
1439 | } else { | 1440 | } else { |
1440 | if (down_interruptible(&video->sem)) | 1441 | if (mutex_lock_interruptible(&video->mtx)) |
1441 | return -ERESTARTSYS; | 1442 | return -ERESTARTSYS; |
1442 | } | 1443 | } |
1443 | 1444 | ||
1444 | if ( !video_card_initialized(video) ) { | 1445 | if ( !video_card_initialized(video) ) { |
1445 | ret = do_dv1394_init_default(video); | 1446 | ret = do_dv1394_init_default(video); |
1446 | if (ret) { | 1447 | if (ret) { |
1447 | up(&video->sem); | 1448 | mutex_unlock(&video->mtx); |
1448 | return ret; | 1449 | return ret; |
1449 | } | 1450 | } |
1450 | video->continuity_counter = -1; | 1451 | video->continuity_counter = -1; |
@@ -1526,7 +1527,7 @@ static ssize_t dv1394_read(struct file *file, char __user *buffer, size_t count | |||
1526 | 1527 | ||
1527 | remove_wait_queue(&video->waitq, &wait); | 1528 | remove_wait_queue(&video->waitq, &wait); |
1528 | set_current_state(TASK_RUNNING); | 1529 | set_current_state(TASK_RUNNING); |
1529 | up(&video->sem); | 1530 | mutex_unlock(&video->mtx); |
1530 | return ret; | 1531 | return ret; |
1531 | } | 1532 | } |
1532 | 1533 | ||
@@ -1547,12 +1548,12 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1547 | 1548 | ||
1548 | /* serialize this to prevent multi-threaded mayhem */ | 1549 | /* serialize this to prevent multi-threaded mayhem */ |
1549 | if (file->f_flags & O_NONBLOCK) { | 1550 | if (file->f_flags & O_NONBLOCK) { |
1550 | if (down_trylock(&video->sem)) { | 1551 | if (!mutex_trylock(&video->mtx)) { |
1551 | unlock_kernel(); | 1552 | unlock_kernel(); |
1552 | return -EAGAIN; | 1553 | return -EAGAIN; |
1553 | } | 1554 | } |
1554 | } else { | 1555 | } else { |
1555 | if (down_interruptible(&video->sem)) { | 1556 | if (mutex_lock_interruptible(&video->mtx)) { |
1556 | unlock_kernel(); | 1557 | unlock_kernel(); |
1557 | return -ERESTARTSYS; | 1558 | return -ERESTARTSYS; |
1558 | } | 1559 | } |
@@ -1778,7 +1779,7 @@ static long dv1394_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1778 | } | 1779 | } |
1779 | 1780 | ||
1780 | out: | 1781 | out: |
1781 | up(&video->sem); | 1782 | mutex_unlock(&video->mtx); |
1782 | unlock_kernel(); | 1783 | unlock_kernel(); |
1783 | return ret; | 1784 | return ret; |
1784 | } | 1785 | } |
@@ -2253,7 +2254,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes | |||
2253 | clear_bit(0, &video->open); | 2254 | clear_bit(0, &video->open); |
2254 | spin_lock_init(&video->spinlock); | 2255 | spin_lock_init(&video->spinlock); |
2255 | video->dma_running = 0; | 2256 | video->dma_running = 0; |
2256 | init_MUTEX(&video->sem); | 2257 | mutex_init(&video->mtx); |
2257 | init_waitqueue_head(&video->waitq); | 2258 | init_waitqueue_head(&video->waitq); |
2258 | video->fasync = NULL; | 2259 | video->fasync = NULL; |
2259 | 2260 | ||
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index 2d5b57be98c3..8a7b8fab6238 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -64,19 +64,19 @@ | |||
64 | #include <linux/ethtool.h> | 64 | #include <linux/ethtool.h> |
65 | #include <asm/uaccess.h> | 65 | #include <asm/uaccess.h> |
66 | #include <asm/delay.h> | 66 | #include <asm/delay.h> |
67 | #include <asm/semaphore.h> | ||
68 | #include <net/arp.h> | 67 | #include <net/arp.h> |
69 | 68 | ||
69 | #include "config_roms.h" | ||
70 | #include "csr1212.h" | 70 | #include "csr1212.h" |
71 | #include "ieee1394_types.h" | 71 | #include "eth1394.h" |
72 | #include "highlevel.h" | ||
73 | #include "ieee1394.h" | ||
72 | #include "ieee1394_core.h" | 74 | #include "ieee1394_core.h" |
75 | #include "ieee1394_hotplug.h" | ||
73 | #include "ieee1394_transactions.h" | 76 | #include "ieee1394_transactions.h" |
74 | #include "ieee1394.h" | 77 | #include "ieee1394_types.h" |
75 | #include "highlevel.h" | ||
76 | #include "iso.h" | 78 | #include "iso.h" |
77 | #include "nodemgr.h" | 79 | #include "nodemgr.h" |
78 | #include "eth1394.h" | ||
79 | #include "config_roms.h" | ||
80 | 80 | ||
81 | #define ETH1394_PRINT_G(level, fmt, args...) \ | 81 | #define ETH1394_PRINT_G(level, fmt, args...) \ |
82 | printk(level "%s: " fmt, driver_name, ## args) | 82 | printk(level "%s: " fmt, driver_name, ## args) |
diff --git a/drivers/ieee1394/highlevel.h b/drivers/ieee1394/highlevel.h
index e119fb87e5b5..50f2dd2c7e20 100644
--- a/drivers/ieee1394/highlevel.h
+++ b/drivers/ieee1394/highlevel.h
@@ -1,60 +1,61 @@ | |||
1 | |||
2 | #ifndef IEEE1394_HIGHLEVEL_H | 1 | #ifndef IEEE1394_HIGHLEVEL_H |
3 | #define IEEE1394_HIGHLEVEL_H | 2 | #define IEEE1394_HIGHLEVEL_H |
4 | 3 | ||
4 | #include <linux/list.h> | ||
5 | #include <linux/spinlock_types.h> | ||
6 | #include <linux/types.h> | ||
5 | 7 | ||
6 | struct hpsb_address_serve { | 8 | struct module; |
7 | struct list_head host_list; /* per host list */ | ||
8 | 9 | ||
9 | struct list_head hl_list; /* hpsb_highlevel list */ | 10 | #include "ieee1394_types.h" |
10 | 11 | ||
11 | struct hpsb_address_ops *op; | 12 | struct hpsb_host; |
12 | 13 | ||
14 | /* internal to ieee1394 core */ | ||
15 | struct hpsb_address_serve { | ||
16 | struct list_head host_list; /* per host list */ | ||
17 | struct list_head hl_list; /* hpsb_highlevel list */ | ||
18 | struct hpsb_address_ops *op; | ||
13 | struct hpsb_host *host; | 19 | struct hpsb_host *host; |
14 | 20 | u64 start; /* first address handled, quadlet aligned */ | |
15 | /* first address handled and first address behind, quadlet aligned */ | 21 | u64 end; /* first address behind, quadlet aligned */ |
16 | u64 start, end; | ||
17 | }; | 22 | }; |
18 | 23 | ||
19 | 24 | /* Only the following structures are of interest to actual highlevel drivers. */ | |
20 | /* | ||
21 | * The above structs are internal to highlevel driver handling. Only the | ||
22 | * following structures are of interest to actual highlevel drivers. | ||
23 | */ | ||
24 | 25 | ||
25 | struct hpsb_highlevel { | 26 | struct hpsb_highlevel { |
26 | struct module *owner; | 27 | struct module *owner; |
27 | const char *name; | 28 | const char *name; |
28 | 29 | ||
29 | /* Any of the following pointers can legally be NULL, except for | 30 | /* Any of the following pointers can legally be NULL, except for |
30 | * iso_receive which can only be NULL when you don't request | 31 | * iso_receive which can only be NULL when you don't request |
31 | * channels. */ | 32 | * channels. */ |
32 | 33 | ||
33 | /* New host initialized. Will also be called during | 34 | /* New host initialized. Will also be called during |
34 | * hpsb_register_highlevel for all hosts already installed. */ | 35 | * hpsb_register_highlevel for all hosts already installed. */ |
35 | void (*add_host) (struct hpsb_host *host); | 36 | void (*add_host)(struct hpsb_host *host); |
36 | 37 | ||
37 | /* Host about to be removed. Will also be called during | 38 | /* Host about to be removed. Will also be called during |
38 | * hpsb_unregister_highlevel once for each host. */ | 39 | * hpsb_unregister_highlevel once for each host. */ |
39 | void (*remove_host) (struct hpsb_host *host); | 40 | void (*remove_host)(struct hpsb_host *host); |
40 | 41 | ||
41 | /* Host experienced bus reset with possible configuration changes. | 42 | /* Host experienced bus reset with possible configuration changes. |
42 | * Note that this one may occur during interrupt/bottom half handling. | 43 | * Note that this one may occur during interrupt/bottom half handling. |
43 | * You can not expect to be able to do stock hpsb_reads. */ | 44 | * You can not expect to be able to do stock hpsb_reads. */ |
44 | void (*host_reset) (struct hpsb_host *host); | 45 | void (*host_reset)(struct hpsb_host *host); |
45 | 46 | ||
46 | /* An isochronous packet was received. Channel contains the channel | 47 | /* An isochronous packet was received. Channel contains the channel |
47 | * number for your convenience, it is also contained in the included | 48 | * number for your convenience, it is also contained in the included |
48 | * packet header (first quadlet, CRCs are missing). You may get called | 49 | * packet header (first quadlet, CRCs are missing). You may get called |
49 | * for channel/host combinations you did not request. */ | 50 | * for channel/host combinations you did not request. */ |
50 | void (*iso_receive) (struct hpsb_host *host, int channel, | 51 | void (*iso_receive)(struct hpsb_host *host, int channel, |
51 | quadlet_t *data, size_t length); | 52 | quadlet_t *data, size_t length); |
52 | 53 | ||
53 | /* A write request was received on either the FCP_COMMAND (direction = | 54 | /* A write request was received on either the FCP_COMMAND (direction = |
54 | * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg | 55 | * 0) or the FCP_RESPONSE (direction = 1) register. The cts arg |
55 | * contains the cts field (first byte of data). */ | 56 | * contains the cts field (first byte of data). */ |
56 | void (*fcp_request) (struct hpsb_host *host, int nodeid, int direction, | 57 | void (*fcp_request)(struct hpsb_host *host, int nodeid, int direction, |
57 | int cts, u8 *data, size_t length); | 58 | int cts, u8 *data, size_t length); |
58 | 59 | ||
59 | /* These are initialized by the subsystem when the | 60 | /* These are initialized by the subsystem when the |
60 | * hpsb_higlevel is registered. */ | 61 | * hpsb_higlevel is registered. */ |
@@ -67,61 +68,62 @@ struct hpsb_highlevel { | |||
67 | }; | 68 | }; |
68 | 69 | ||
69 | struct hpsb_address_ops { | 70 | struct hpsb_address_ops { |
70 | /* | 71 | /* |
71 | * Null function pointers will make the respective operation complete | 72 | * Null function pointers will make the respective operation complete |
72 | * with RCODE_TYPE_ERROR. Makes for easy to implement read-only | 73 | * with RCODE_TYPE_ERROR. Makes for easy to implement read-only |
73 | * registers (just leave everything but read NULL). | 74 | * registers (just leave everything but read NULL). |
74 | * | 75 | * |
75 | * All functions shall return appropriate IEEE 1394 rcodes. | 76 | * All functions shall return appropriate IEEE 1394 rcodes. |
76 | */ | 77 | */ |
77 | 78 | ||
78 | /* These functions have to implement block reads for themselves. */ | 79 | /* These functions have to implement block reads for themselves. |
79 | /* These functions either return a response code | 80 | * |
80 | or a negative number. In the first case a response will be generated; in the | 81 | * These functions either return a response code or a negative number. |
81 | later case, no response will be sent and the driver, that handled the request | 82 | * In the first case a response will be generated. In the latter case, |
82 | will send the response itself | 83 | * no response will be sent and the driver which handled the request |
83 | */ | 84 | * will send the response itself. */ |
84 | int (*read) (struct hpsb_host *host, int nodeid, quadlet_t *buffer, | 85 | int (*read)(struct hpsb_host *host, int nodeid, quadlet_t *buffer, |
85 | u64 addr, size_t length, u16 flags); | 86 | u64 addr, size_t length, u16 flags); |
86 | int (*write) (struct hpsb_host *host, int nodeid, int destid, | 87 | int (*write)(struct hpsb_host *host, int nodeid, int destid, |
87 | quadlet_t *data, u64 addr, size_t length, u16 flags); | 88 | quadlet_t *data, u64 addr, size_t length, u16 flags); |
88 | 89 | ||
89 | /* Lock transactions: write results of ext_tcode operation into | 90 | /* Lock transactions: write results of ext_tcode operation into |
90 | * *store. */ | 91 | * *store. */ |
91 | int (*lock) (struct hpsb_host *host, int nodeid, quadlet_t *store, | 92 | int (*lock)(struct hpsb_host *host, int nodeid, quadlet_t *store, |
92 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags); | 93 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, |
93 | int (*lock64) (struct hpsb_host *host, int nodeid, octlet_t *store, | 94 | u16 flags); |
94 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags); | 95 | int (*lock64)(struct hpsb_host *host, int nodeid, octlet_t *store, |
96 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, | ||
97 | u16 flags); | ||
95 | }; | 98 | }; |
96 | 99 | ||
97 | |||
98 | void highlevel_add_host(struct hpsb_host *host); | 100 | void highlevel_add_host(struct hpsb_host *host); |
99 | void highlevel_remove_host(struct hpsb_host *host); | 101 | void highlevel_remove_host(struct hpsb_host *host); |
100 | void highlevel_host_reset(struct hpsb_host *host); | 102 | void highlevel_host_reset(struct hpsb_host *host); |
101 | 103 | ||
102 | 104 | /* | |
103 | /* these functions are called to handle transactions. They are called, when | 105 | * These functions are called to handle transactions. They are called when a |
104 | a packet arrives. The flags argument contains the second word of the first header | 106 | * packet arrives. The flags argument contains the second word of the first |
105 | quadlet of the incoming packet (containing transaction label, retry code, | 107 | * header quadlet of the incoming packet (containing transaction label, retry |
106 | transaction code and priority). These functions either return a response code | 108 | * code, transaction code and priority). These functions either return a |
107 | or a negative number. In the first case a response will be generated; in the | 109 | * response code or a negative number. In the first case a response will be |
108 | later case, no response will be sent and the driver, that handled the request | 110 | * generated. In the latter case, no response will be sent and the driver which |
109 | will send the response itself. | 111 | * handled the request will send the response itself. |
110 | */ | 112 | */ |
111 | int highlevel_read(struct hpsb_host *host, int nodeid, void *data, | 113 | int highlevel_read(struct hpsb_host *host, int nodeid, void *data, u64 addr, |
112 | u64 addr, unsigned int length, u16 flags); | 114 | unsigned int length, u16 flags); |
113 | int highlevel_write(struct hpsb_host *host, int nodeid, int destid, | 115 | int highlevel_write(struct hpsb_host *host, int nodeid, int destid, void *data, |
114 | void *data, u64 addr, unsigned int length, u16 flags); | 116 | u64 addr, unsigned int length, u16 flags); |
115 | int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store, | 117 | int highlevel_lock(struct hpsb_host *host, int nodeid, quadlet_t *store, |
116 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags); | 118 | u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, |
119 | u16 flags); | ||
117 | int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store, | 120 | int highlevel_lock64(struct hpsb_host *host, int nodeid, octlet_t *store, |
118 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags); | 121 | u64 addr, octlet_t data, octlet_t arg, int ext_tcode, |
122 | u16 flags); | ||
119 | 123 | ||
120 | void highlevel_iso_receive(struct hpsb_host *host, void *data, | 124 | void highlevel_iso_receive(struct hpsb_host *host, void *data, size_t length); |
121 | size_t length); | ||
122 | void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction, | 125 | void highlevel_fcp_request(struct hpsb_host *host, int nodeid, int direction, |
123 | void *data, size_t length); | 126 | void *data, size_t length); |
124 | |||
125 | 127 | ||
126 | /* | 128 | /* |
127 | * Register highlevel driver. The name pointer has to stay valid at all times | 129 | * Register highlevel driver. The name pointer has to stay valid at all times |
@@ -132,13 +134,15 @@ void hpsb_unregister_highlevel(struct hpsb_highlevel *hl); | |||
132 | 134 | ||
133 | /* | 135 | /* |
134 | * Register handlers for host address spaces. Start and end are 48 bit pointers | 136 | * Register handlers for host address spaces. Start and end are 48 bit pointers |
135 | * and have to be quadlet aligned (end points to the first address behind the | 137 | * and have to be quadlet aligned. Argument "end" points to the first address |
136 | * handled addresses. This function can be called multiple times for a single | 138 | * behind the handled addresses. This function can be called multiple times for |
137 | * hpsb_highlevel to implement sparse register sets. The requested region must | 139 | * a single hpsb_highlevel to implement sparse register sets. The requested |
138 | * not overlap any previously allocated region, otherwise registering will fail. | 140 | * region must not overlap any previously allocated region, otherwise |
141 | * registering will fail. | ||
139 | * | 142 | * |
140 | * It returns true for successful allocation. There is no unregister function, | 143 | * It returns true for successful allocation. Address spaces can be |
141 | * all address spaces are deallocated together with the hpsb_highlevel. | 144 | * unregistered with hpsb_unregister_addrspace. All remaining address spaces |
145 | * are automatically deallocated together with the hpsb_highlevel. | ||
142 | */ | 146 | */ |
143 | u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, | 147 | u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, |
144 | struct hpsb_host *host, | 148 | struct hpsb_host *host, |
@@ -146,20 +150,18 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, | |||
146 | u64 size, u64 alignment, | 150 | u64 size, u64 alignment, |
147 | u64 start, u64 end); | 151 | u64 start, u64 end); |
148 | int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, | 152 | int hpsb_register_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, |
149 | struct hpsb_address_ops *ops, u64 start, u64 end); | 153 | struct hpsb_address_ops *ops, u64 start, u64 end); |
150 | |||
151 | int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, | 154 | int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host, |
152 | u64 start); | 155 | u64 start); |
153 | 156 | ||
154 | /* | 157 | /* |
155 | * Enable or disable receving a certain isochronous channel through the | 158 | * Enable or disable receving a certain isochronous channel through the |
156 | * iso_receive op. | 159 | * iso_receive op. |
157 | */ | 160 | */ |
158 | int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, | 161 | int hpsb_listen_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, |
159 | unsigned int channel); | 162 | unsigned int channel); |
160 | void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, | 163 | void hpsb_unlisten_channel(struct hpsb_highlevel *hl, struct hpsb_host *host, |
161 | unsigned int channel); | 164 | unsigned int channel); |
162 | |||
163 | 165 | ||
164 | /* Retrieve a hostinfo pointer bound to this driver/host */ | 166 | /* Retrieve a hostinfo pointer bound to this driver/host */ |
165 | void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); | 167 | void *hpsb_get_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); |
@@ -172,19 +174,24 @@ void *hpsb_create_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, | |||
172 | void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); | 174 | void hpsb_destroy_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host); |
173 | 175 | ||
174 | /* Set an alternate lookup key for the hostinfo bound to this driver/host */ | 176 | /* Set an alternate lookup key for the hostinfo bound to this driver/host */ |
175 | void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, unsigned long key); | 177 | void hpsb_set_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host, |
178 | unsigned long key); | ||
176 | 179 | ||
177 | /* Retrieve the alternate lookup key for the hostinfo bound to this driver/host */ | 180 | /* Retrieve the alternate lookup key for the hostinfo bound to this |
178 | unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl, struct hpsb_host *host); | 181 | * driver/host */ |
182 | unsigned long hpsb_get_hostinfo_key(struct hpsb_highlevel *hl, | ||
183 | struct hpsb_host *host); | ||
179 | 184 | ||
180 | /* Retrieve a hostinfo pointer bound to this driver using its alternate key */ | 185 | /* Retrieve a hostinfo pointer bound to this driver using its alternate key */ |
181 | void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key); | 186 | void *hpsb_get_hostinfo_bykey(struct hpsb_highlevel *hl, unsigned long key); |
182 | 187 | ||
183 | /* Set the hostinfo pointer to something useful. Usually follows a call to | 188 | /* Set the hostinfo pointer to something useful. Usually follows a call to |
184 | * hpsb_create_hostinfo, where the size is 0. */ | 189 | * hpsb_create_hostinfo, where the size is 0. */ |
185 | int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, void *data); | 190 | int hpsb_set_hostinfo(struct hpsb_highlevel *hl, struct hpsb_host *host, |
191 | void *data); | ||
186 | 192 | ||
187 | /* Retrieve hpsb_host using a highlevel handle and a key */ | 193 | /* Retrieve hpsb_host using a highlevel handle and a key */ |
188 | struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl, unsigned long key); | 194 | struct hpsb_host *hpsb_get_host_bykey(struct hpsb_highlevel *hl, |
195 | unsigned long key); | ||
189 | 196 | ||
190 | #endif /* IEEE1394_HIGHLEVEL_H */ | 197 | #endif /* IEEE1394_HIGHLEVEL_H */ |
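
The hpsb_address_ops comment above notes that a read-only register only needs the read hook and that returning a response code makes the core generate the response. A minimal sketch under those rules, assuming the highlevel.h and ieee1394.h headers from this patch (the example_* names and the register value are invented):

#include <asm/byteorder.h>
#include "highlevel.h"
#include "ieee1394.h"

static int example_reg_read(struct hpsb_host *host, int nodeid,
			    quadlet_t *buffer, u64 addr, size_t length,
			    u16 flags)
{
	if (length != sizeof(quadlet_t))
		return RCODE_TYPE_ERROR;

	*buffer = cpu_to_be32(0x12345678);	/* register contents */
	return RCODE_COMPLETE;		/* the core sends the read response */
}

static struct hpsb_address_ops example_reg_ops = {
	.read = example_reg_read,
	/* NULL write/lock/lock64 complete with RCODE_TYPE_ERROR */
};

The ops would then be handed to hpsb_register_addrspace() together with a quadlet-aligned start/end range, as described above.
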
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index 4feead4a35c5..d90a3a1898c0 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -90,6 +90,16 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) | |||
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
92 | 92 | ||
93 | /* | ||
94 | * The pending_packet_queue is special in that it's processed | ||
95 | * from hardirq context too (such as hpsb_bus_reset()). Hence | ||
96 | * split the lock class from the usual networking skb-head | ||
97 | * lock class by using a separate key for it: | ||
98 | */ | ||
99 | static struct lock_class_key pending_packet_queue_key; | ||
100 | |||
101 | static DEFINE_MUTEX(host_num_alloc); | ||
102 | |||
93 | /** | 103 | /** |
94 | * hpsb_alloc_host - allocate a new host controller. | 104 | * hpsb_alloc_host - allocate a new host controller. |
95 | * @drv: the driver that will manage the host controller | 105 | * @drv: the driver that will manage the host controller |
@@ -105,16 +115,6 @@ static int alloc_hostnum_cb(struct hpsb_host *host, void *__data) | |||
105 | * Return Value: a pointer to the &hpsb_host if successful, %NULL if | 115 | * Return Value: a pointer to the &hpsb_host if successful, %NULL if |
106 | * no memory was available. | 116 | * no memory was available. |
107 | */ | 117 | */ |
108 | static DEFINE_MUTEX(host_num_alloc); | ||
109 | |||
110 | /* | ||
111 | * The pending_packet_queue is special in that it's processed | ||
112 | * from hardirq context too (such as hpsb_bus_reset()). Hence | ||
113 | * split the lock class from the usual networking skb-head | ||
114 | * lock class by using a separate key for it: | ||
115 | */ | ||
116 | static struct lock_class_key pending_packet_queue_key; | ||
117 | |||
118 | struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, | 118 | struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, |
119 | struct device *dev) | 119 | struct device *dev) |
120 | { | 120 | { |
@@ -143,9 +143,6 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, | |||
143 | for (i = 2; i < 16; i++) | 143 | for (i = 2; i < 16; i++) |
144 | h->csr.gen_timestamp[i] = jiffies - 60 * HZ; | 144 | h->csr.gen_timestamp[i] = jiffies - 60 * HZ; |
145 | 145 | ||
146 | for (i = 0; i < ARRAY_SIZE(h->tpool); i++) | ||
147 | HPSB_TPOOL_INIT(&h->tpool[i]); | ||
148 | |||
149 | atomic_set(&h->generation, 0); | 146 | atomic_set(&h->generation, 0); |
150 | 147 | ||
151 | INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); | 148 | INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); |
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 9ad4b2463077..bc6dbfadb891 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -2,17 +2,19 @@ | |||
2 | #define _IEEE1394_HOSTS_H | 2 | #define _IEEE1394_HOSTS_H |
3 | 3 | ||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | #include <linux/wait.h> | ||
6 | #include <linux/list.h> | 5 | #include <linux/list.h> |
7 | #include <linux/timer.h> | ||
8 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
7 | #include <linux/timer.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/workqueue.h> | ||
10 | #include <asm/atomic.h> | ||
9 | 11 | ||
10 | #include <asm/semaphore.h> | 12 | struct pci_dev; |
13 | struct module; | ||
11 | 14 | ||
12 | #include "ieee1394_types.h" | 15 | #include "ieee1394_types.h" |
13 | #include "csr.h" | 16 | #include "csr.h" |
14 | 17 | ||
15 | |||
16 | struct hpsb_packet; | 18 | struct hpsb_packet; |
17 | struct hpsb_iso; | 19 | struct hpsb_iso; |
18 | 20 | ||
@@ -33,7 +35,6 @@ struct hpsb_host { | |||
33 | int node_count; /* number of identified nodes on this bus */ | 35 | int node_count; /* number of identified nodes on this bus */ |
34 | int selfid_count; /* total number of SelfIDs received */ | 36 | int selfid_count; /* total number of SelfIDs received */ |
35 | int nodes_active; /* number of nodes with active link layer */ | 37 | int nodes_active; /* number of nodes with active link layer */ |
36 | u8 speed[ALL_NODES]; /* speed between each node and local node */ | ||
37 | 38 | ||
38 | nodeid_t node_id; /* node ID of this host */ | 39 | nodeid_t node_id; /* node ID of this host */ |
39 | nodeid_t irm_id; /* ID of this bus' isochronous resource manager */ | 40 | nodeid_t irm_id; /* ID of this bus' isochronous resource manager */ |
@@ -53,31 +54,29 @@ struct hpsb_host { | |||
53 | int reset_retries; | 54 | int reset_retries; |
54 | quadlet_t *topology_map; | 55 | quadlet_t *topology_map; |
55 | u8 *speed_map; | 56 | u8 *speed_map; |
56 | struct csr_control csr; | ||
57 | |||
58 | /* Per node tlabel pool allocation */ | ||
59 | struct hpsb_tlabel_pool tpool[ALL_NODES]; | ||
60 | 57 | ||
58 | int id; | ||
61 | struct hpsb_host_driver *driver; | 59 | struct hpsb_host_driver *driver; |
62 | |||
63 | struct pci_dev *pdev; | 60 | struct pci_dev *pdev; |
64 | |||
65 | int id; | ||
66 | |||
67 | struct device device; | 61 | struct device device; |
68 | struct class_device class_dev; | 62 | struct class_device class_dev; |
69 | 63 | ||
70 | int update_config_rom; | 64 | int update_config_rom; |
71 | struct work_struct delayed_reset; | 65 | struct work_struct delayed_reset; |
72 | |||
73 | unsigned int config_roms; | 66 | unsigned int config_roms; |
74 | 67 | ||
75 | struct list_head addr_space; | 68 | struct list_head addr_space; |
76 | u64 low_addr_space; /* upper bound of physical DMA area */ | 69 | u64 low_addr_space; /* upper bound of physical DMA area */ |
77 | u64 middle_addr_space; /* upper bound of posted write area */ | 70 | u64 middle_addr_space; /* upper bound of posted write area */ |
78 | }; | ||
79 | 71 | ||
72 | u8 speed[ALL_NODES]; /* speed between each node and local node */ | ||
73 | |||
74 | /* per node tlabel allocation */ | ||
75 | u8 next_tl[ALL_NODES]; | ||
76 | struct { DECLARE_BITMAP(map, 64); } tl_pool[ALL_NODES]; | ||
80 | 77 | ||
78 | struct csr_control csr; | ||
79 | }; | ||
81 | 80 | ||
82 | enum devctl_cmd { | 81 | enum devctl_cmd { |
83 | /* Host is requested to reset its bus and cancel all outstanding async | 82 | /* Host is requested to reset its bus and cancel all outstanding async |
@@ -112,7 +111,7 @@ enum devctl_cmd { | |||
112 | 111 | ||
113 | enum isoctl_cmd { | 112 | enum isoctl_cmd { |
114 | /* rawiso API - see iso.h for the meanings of these commands | 113 | /* rawiso API - see iso.h for the meanings of these commands |
115 | (they correspond exactly to the hpsb_iso_* API functions) | 114 | * (they correspond exactly to the hpsb_iso_* API functions) |
116 | * INIT = allocate resources | 115 | * INIT = allocate resources |
117 | * START = begin transmission/reception | 116 | * START = begin transmission/reception |
118 | * STOP = halt transmission/reception | 117 | * STOP = halt transmission/reception |
@@ -160,7 +159,8 @@ struct hpsb_host_driver { | |||
160 | /* The hardware driver may optionally support a function that is used | 159 | /* The hardware driver may optionally support a function that is used |
161 | * to set the hardware ConfigROM if the hardware supports handling | 160 | * to set the hardware ConfigROM if the hardware supports handling |
162 | * reads to the ConfigROM on its own. */ | 161 | * reads to the ConfigROM on its own. */ |
163 | void (*set_hw_config_rom) (struct hpsb_host *host, quadlet_t *config_rom); | 162 | void (*set_hw_config_rom)(struct hpsb_host *host, |
163 | quadlet_t *config_rom); | ||
164 | 164 | ||
165 | /* This function shall implement packet transmission based on | 165 | /* This function shall implement packet transmission based on |
166 | * packet->type. It shall CRC both parts of the packet (unless | 166 | * packet->type. It shall CRC both parts of the packet (unless |
@@ -170,20 +170,21 @@ struct hpsb_host_driver { | |||
170 | * called. Return 0 on success, negative errno on failure. | 170 | * called. Return 0 on success, negative errno on failure. |
171 | * NOTE: The function must be callable in interrupt context. | 171 | * NOTE: The function must be callable in interrupt context. |
172 | */ | 172 | */ |
173 | int (*transmit_packet) (struct hpsb_host *host, | 173 | int (*transmit_packet)(struct hpsb_host *host, |
174 | struct hpsb_packet *packet); | 174 | struct hpsb_packet *packet); |
175 | 175 | ||
176 | /* This function requests miscellanous services from the driver, see | 176 | /* This function requests miscellanous services from the driver, see |
177 | * above for command codes and expected actions. Return -1 for unknown | 177 | * above for command codes and expected actions. Return -1 for unknown |
178 | * command, though that should never happen. | 178 | * command, though that should never happen. |
179 | */ | 179 | */ |
180 | int (*devctl) (struct hpsb_host *host, enum devctl_cmd command, int arg); | 180 | int (*devctl)(struct hpsb_host *host, enum devctl_cmd command, int arg); |
181 | 181 | ||
182 | /* ISO transmission/reception functions. Return 0 on success, -1 | 182 | /* ISO transmission/reception functions. Return 0 on success, -1 |
183 | * (or -EXXX errno code) on failure. If the low-level driver does not | 183 | * (or -EXXX errno code) on failure. If the low-level driver does not |
184 | * support the new ISO API, set isoctl to NULL. | 184 | * support the new ISO API, set isoctl to NULL. |
185 | */ | 185 | */ |
186 | int (*isoctl) (struct hpsb_iso *iso, enum isoctl_cmd command, unsigned long arg); | 186 | int (*isoctl)(struct hpsb_iso *iso, enum isoctl_cmd command, |
187 | unsigned long arg); | ||
187 | 188 | ||
188 | /* This function is mainly to redirect local CSR reads/locks to the iso | 189 | /* This function is mainly to redirect local CSR reads/locks to the iso |
189 | * management registers (bus manager id, bandwidth available, channels | 190 | * management registers (bus manager id, bandwidth available, channels |
@@ -196,19 +197,11 @@ struct hpsb_host_driver { | |||
196 | quadlet_t data, quadlet_t compare); | 197 | quadlet_t data, quadlet_t compare); |
197 | }; | 198 | }; |
198 | 199 | ||
199 | |||
200 | struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, | 200 | struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, |
201 | struct device *dev); | 201 | struct device *dev); |
202 | int hpsb_add_host(struct hpsb_host *host); | 202 | int hpsb_add_host(struct hpsb_host *host); |
203 | void hpsb_remove_host(struct hpsb_host *h); | 203 | void hpsb_remove_host(struct hpsb_host *h); |
204 | 204 | ||
205 | /* The following 2 functions are deprecated and will be removed when the | ||
206 | * raw1394/libraw1394 update is complete. */ | ||
207 | int hpsb_update_config_rom(struct hpsb_host *host, | ||
208 | const quadlet_t *new_rom, size_t size, unsigned char rom_version); | ||
209 | int hpsb_get_config_rom(struct hpsb_host *host, quadlet_t *buffer, | ||
210 | size_t buffersize, size_t *rom_size, unsigned char *rom_version); | ||
211 | |||
212 | /* Updates the configuration rom image of a host. rom_version must be the | 205 | /* Updates the configuration rom image of a host. rom_version must be the |
213 | * current version, otherwise it will fail with return value -1. If this | 206 | * current version, otherwise it will fail with return value -1. If this |
214 | * host does not support config-rom-update, it will return -EINVAL. | 207 | * host does not support config-rom-update, it will return -EINVAL. |
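To see how a low-level driver plugs into these hooks, here is a minimal sketch; the struct and member names are the ones declared in hosts.h above, while the foo_* functions and their bodies are hypothetical placeholders, not code from this patch.

static int foo_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
        /* Must be callable in interrupt context; 0 on success, -errno on failure. */
        return -EIO;
}

static int foo_devctl(struct hpsb_host *host, enum devctl_cmd command, int arg)
{
        return -1;              /* unknown command; should never be reached */
}

static struct hpsb_host_driver foo_driver = {
        .transmit_packet = foo_transmit,
        .devctl          = foo_devctl,
        .isoctl          = NULL,        /* the new ISO API is not implemented */
};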
diff --git a/drivers/ieee1394/ieee1394-ioctl.h b/drivers/ieee1394/ieee1394-ioctl.h index 156703986348..8f207508ed1d 100644 --- a/drivers/ieee1394/ieee1394-ioctl.h +++ b/drivers/ieee1394/ieee1394-ioctl.h | |||
@@ -1,5 +1,7 @@ | |||
1 | /* Base file for all ieee1394 ioctl's. Linux-1394 has allocated base '#' | 1 | /* |
2 | * with a range of 0x00-0x3f. */ | 2 | * Base file for all ieee1394 ioctl's. |
3 | * Linux-1394 has allocated base '#' with a range of 0x00-0x3f. | ||
4 | */ | ||
3 | 5 | ||
4 | #ifndef __IEEE1394_IOCTL_H | 6 | #ifndef __IEEE1394_IOCTL_H |
5 | #define __IEEE1394_IOCTL_H | 7 | #define __IEEE1394_IOCTL_H |
@@ -96,8 +98,7 @@ | |||
96 | _IOW ('#', 0x27, struct raw1394_iso_packets) | 98 | _IOW ('#', 0x27, struct raw1394_iso_packets) |
97 | #define RAW1394_IOC_ISO_XMIT_SYNC \ | 99 | #define RAW1394_IOC_ISO_XMIT_SYNC \ |
98 | _IO ('#', 0x28) | 100 | _IO ('#', 0x28) |
99 | #define RAW1394_IOC_ISO_RECV_FLUSH \ | 101 | #define RAW1394_IOC_ISO_RECV_FLUSH \ |
100 | _IO ('#', 0x29) | 102 | _IO ('#', 0x29) |
101 | 103 | ||
102 | |||
103 | #endif /* __IEEE1394_IOCTL_H */ | 104 | #endif /* __IEEE1394_IOCTL_H */ |
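The '#' magic and the 0x00-0x3f command range mentioned in the header comment are packed into the 32-bit ioctl number by the standard _IO*/_IOC_* macros. A small userspace demonstration, assuming Linux's <sys/ioctl.h>; the value 0x28 is RAW1394_IOC_ISO_XMIT_SYNC from the header above:

#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
        unsigned int nr = _IO('#', 0x28);       /* RAW1394_IOC_ISO_XMIT_SYNC */

        printf("ioctl number 0x%08x: type '%c', nr 0x%02x\n",
               nr, _IOC_TYPE(nr), _IOC_NR(nr));
        return 0;
}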
diff --git a/drivers/ieee1394/ieee1394.h b/drivers/ieee1394/ieee1394.h index 936d776de00a..40492074c013 100644 --- a/drivers/ieee1394/ieee1394.h +++ b/drivers/ieee1394/ieee1394.h | |||
@@ -5,77 +5,78 @@ | |||
5 | #ifndef _IEEE1394_IEEE1394_H | 5 | #ifndef _IEEE1394_IEEE1394_H |
6 | #define _IEEE1394_IEEE1394_H | 6 | #define _IEEE1394_IEEE1394_H |
7 | 7 | ||
8 | #define TCODE_WRITEQ 0x0 | 8 | #define TCODE_WRITEQ 0x0 |
9 | #define TCODE_WRITEB 0x1 | 9 | #define TCODE_WRITEB 0x1 |
10 | #define TCODE_WRITE_RESPONSE 0x2 | 10 | #define TCODE_WRITE_RESPONSE 0x2 |
11 | #define TCODE_READQ 0x4 | 11 | #define TCODE_READQ 0x4 |
12 | #define TCODE_READB 0x5 | 12 | #define TCODE_READB 0x5 |
13 | #define TCODE_READQ_RESPONSE 0x6 | 13 | #define TCODE_READQ_RESPONSE 0x6 |
14 | #define TCODE_READB_RESPONSE 0x7 | 14 | #define TCODE_READB_RESPONSE 0x7 |
15 | #define TCODE_CYCLE_START 0x8 | 15 | #define TCODE_CYCLE_START 0x8 |
16 | #define TCODE_LOCK_REQUEST 0x9 | 16 | #define TCODE_LOCK_REQUEST 0x9 |
17 | #define TCODE_ISO_DATA 0xa | 17 | #define TCODE_ISO_DATA 0xa |
18 | #define TCODE_STREAM_DATA 0xa | 18 | #define TCODE_STREAM_DATA 0xa |
19 | #define TCODE_LOCK_RESPONSE 0xb | 19 | #define TCODE_LOCK_RESPONSE 0xb |
20 | 20 | ||
21 | #define RCODE_COMPLETE 0x0 | 21 | #define RCODE_COMPLETE 0x0 |
22 | #define RCODE_CONFLICT_ERROR 0x4 | 22 | #define RCODE_CONFLICT_ERROR 0x4 |
23 | #define RCODE_DATA_ERROR 0x5 | 23 | #define RCODE_DATA_ERROR 0x5 |
24 | #define RCODE_TYPE_ERROR 0x6 | 24 | #define RCODE_TYPE_ERROR 0x6 |
25 | #define RCODE_ADDRESS_ERROR 0x7 | 25 | #define RCODE_ADDRESS_ERROR 0x7 |
26 | 26 | ||
27 | #define EXTCODE_MASK_SWAP 0x1 | 27 | #define EXTCODE_MASK_SWAP 0x1 |
28 | #define EXTCODE_COMPARE_SWAP 0x2 | 28 | #define EXTCODE_COMPARE_SWAP 0x2 |
29 | #define EXTCODE_FETCH_ADD 0x3 | 29 | #define EXTCODE_FETCH_ADD 0x3 |
30 | #define EXTCODE_LITTLE_ADD 0x4 | 30 | #define EXTCODE_LITTLE_ADD 0x4 |
31 | #define EXTCODE_BOUNDED_ADD 0x5 | 31 | #define EXTCODE_BOUNDED_ADD 0x5 |
32 | #define EXTCODE_WRAP_ADD 0x6 | 32 | #define EXTCODE_WRAP_ADD 0x6 |
33 | 33 | ||
34 | #define ACK_COMPLETE 0x1 | 34 | #define ACK_COMPLETE 0x1 |
35 | #define ACK_PENDING 0x2 | 35 | #define ACK_PENDING 0x2 |
36 | #define ACK_BUSY_X 0x4 | 36 | #define ACK_BUSY_X 0x4 |
37 | #define ACK_BUSY_A 0x5 | 37 | #define ACK_BUSY_A 0x5 |
38 | #define ACK_BUSY_B 0x6 | 38 | #define ACK_BUSY_B 0x6 |
39 | #define ACK_TARDY 0xb | 39 | #define ACK_TARDY 0xb |
40 | #define ACK_CONFLICT_ERROR 0xc | 40 | #define ACK_CONFLICT_ERROR 0xc |
41 | #define ACK_DATA_ERROR 0xd | 41 | #define ACK_DATA_ERROR 0xd |
42 | #define ACK_TYPE_ERROR 0xe | 42 | #define ACK_TYPE_ERROR 0xe |
43 | #define ACK_ADDRESS_ERROR 0xf | 43 | #define ACK_ADDRESS_ERROR 0xf |
44 | 44 | ||
45 | /* Non-standard "ACK codes" for internal use */ | 45 | /* Non-standard "ACK codes" for internal use */ |
46 | #define ACKX_NONE (-1) | 46 | #define ACKX_NONE (-1) |
47 | #define ACKX_SEND_ERROR (-2) | 47 | #define ACKX_SEND_ERROR (-2) |
48 | #define ACKX_ABORTED (-3) | 48 | #define ACKX_ABORTED (-3) |
49 | #define ACKX_TIMEOUT (-4) | 49 | #define ACKX_TIMEOUT (-4) |
50 | 50 | ||
51 | 51 | #define IEEE1394_SPEED_100 0x00 | |
52 | #define IEEE1394_SPEED_100 0x00 | 52 | #define IEEE1394_SPEED_200 0x01 |
53 | #define IEEE1394_SPEED_200 0x01 | 53 | #define IEEE1394_SPEED_400 0x02 |
54 | #define IEEE1394_SPEED_400 0x02 | 54 | #define IEEE1394_SPEED_800 0x03 |
55 | #define IEEE1394_SPEED_800 0x03 | 55 | #define IEEE1394_SPEED_1600 0x04 |
56 | #define IEEE1394_SPEED_1600 0x04 | 56 | #define IEEE1394_SPEED_3200 0x05 |
57 | #define IEEE1394_SPEED_3200 0x05 | 57 | |
58 | /* The current highest tested speed supported by the subsystem */ | 58 | /* The current highest tested speed supported by the subsystem */ |
59 | #define IEEE1394_SPEED_MAX IEEE1394_SPEED_800 | 59 | #define IEEE1394_SPEED_MAX IEEE1394_SPEED_800 |
60 | 60 | ||
61 | /* Maps speed values above to a string representation */ | 61 | /* Maps speed values above to a string representation */ |
62 | extern const char *hpsb_speedto_str[]; | 62 | extern const char *hpsb_speedto_str[]; |
63 | 63 | ||
64 | |||
65 | /* 1394a cable PHY packets */ | 64 | /* 1394a cable PHY packets */ |
66 | #define SELFID_PWRCL_NO_POWER 0x0 | 65 | #define SELFID_PWRCL_NO_POWER 0x0 |
67 | #define SELFID_PWRCL_PROVIDE_15W 0x1 | 66 | #define SELFID_PWRCL_PROVIDE_15W 0x1 |
68 | #define SELFID_PWRCL_PROVIDE_30W 0x2 | 67 | #define SELFID_PWRCL_PROVIDE_30W 0x2 |
69 | #define SELFID_PWRCL_PROVIDE_45W 0x3 | 68 | #define SELFID_PWRCL_PROVIDE_45W 0x3 |
70 | #define SELFID_PWRCL_USE_1W 0x4 | 69 | #define SELFID_PWRCL_USE_1W 0x4 |
71 | #define SELFID_PWRCL_USE_3W 0x5 | 70 | #define SELFID_PWRCL_USE_3W 0x5 |
72 | #define SELFID_PWRCL_USE_6W 0x6 | 71 | #define SELFID_PWRCL_USE_6W 0x6 |
73 | #define SELFID_PWRCL_USE_10W 0x7 | 72 | #define SELFID_PWRCL_USE_10W 0x7 |
74 | 73 | ||
75 | #define SELFID_PORT_CHILD 0x3 | 74 | #define SELFID_PORT_CHILD 0x3 |
76 | #define SELFID_PORT_PARENT 0x2 | 75 | #define SELFID_PORT_PARENT 0x2 |
77 | #define SELFID_PORT_NCONN 0x1 | 76 | #define SELFID_PORT_NCONN 0x1 |
78 | #define SELFID_PORT_NONE 0x0 | 77 | #define SELFID_PORT_NONE 0x0 |
78 | |||
79 | #define SELFID_SPEED_UNKNOWN 0x3 /* 1394b PHY */ | ||
79 | 80 | ||
80 | #define PHYPACKET_LINKON 0x40000000 | 81 | #define PHYPACKET_LINKON 0x40000000 |
81 | #define PHYPACKET_PHYCONFIG_R 0x00800000 | 82 | #define PHYPACKET_PHYCONFIG_R 0x00800000 |
@@ -91,76 +92,76 @@ extern const char *hpsb_speedto_str[]; | |||
91 | 92 | ||
92 | #define EXTPHYPACKET_TYPEMASK 0xC0FC0000 | 93 | #define EXTPHYPACKET_TYPEMASK 0xC0FC0000 |
93 | 94 | ||
94 | #define PHYPACKET_PORT_SHIFT 24 | 95 | #define PHYPACKET_PORT_SHIFT 24 |
95 | #define PHYPACKET_GAPCOUNT_SHIFT 16 | 96 | #define PHYPACKET_GAPCOUNT_SHIFT 16 |
96 | 97 | ||
97 | /* 1394a PHY register map bitmasks */ | 98 | /* 1394a PHY register map bitmasks */ |
98 | #define PHY_00_PHYSICAL_ID 0xFC | 99 | #define PHY_00_PHYSICAL_ID 0xFC |
99 | #define PHY_00_R 0x02 /* Root */ | 100 | #define PHY_00_R 0x02 /* Root */ |
100 | #define PHY_00_PS 0x01 /* Power Status*/ | 101 | #define PHY_00_PS 0x01 /* Power Status*/ |
101 | #define PHY_01_RHB 0x80 /* Root Hold-Off */ | 102 | #define PHY_01_RHB 0x80 /* Root Hold-Off */ |
102 | #define PHY_01_IBR 0x80 /* Initiate Bus Reset */ | 103 | #define PHY_01_IBR 0x80 /* Initiate Bus Reset */ |
103 | #define PHY_01_GAP_COUNT 0x3F | 104 | #define PHY_01_GAP_COUNT 0x3F |
104 | #define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */ | 105 | #define PHY_02_EXTENDED 0xE0 /* 0x7 for 1394a-compliant PHY */ |
105 | #define PHY_02_TOTAL_PORTS 0x1F | 106 | #define PHY_02_TOTAL_PORTS 0x1F |
106 | #define PHY_03_MAX_SPEED 0xE0 | 107 | #define PHY_03_MAX_SPEED 0xE0 |
107 | #define PHY_03_DELAY 0x0F | 108 | #define PHY_03_DELAY 0x0F |
108 | #define PHY_04_LCTRL 0x80 /* Link Active Report Control */ | 109 | #define PHY_04_LCTRL 0x80 /* Link Active Report Control */ |
109 | #define PHY_04_CONTENDER 0x40 | 110 | #define PHY_04_CONTENDER 0x40 |
110 | #define PHY_04_JITTER 0x38 | 111 | #define PHY_04_JITTER 0x38 |
111 | #define PHY_04_PWR_CLASS 0x07 /* Power Class */ | 112 | #define PHY_04_PWR_CLASS 0x07 /* Power Class */ |
112 | #define PHY_05_WATCHDOG 0x80 | 113 | #define PHY_05_WATCHDOG 0x80 |
113 | #define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */ | 114 | #define PHY_05_ISBR 0x40 /* Initiate Short Bus Reset */ |
114 | #define PHY_05_LOOP 0x20 /* Loop Detect */ | 115 | #define PHY_05_LOOP 0x20 /* Loop Detect */ |
115 | #define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */ | 116 | #define PHY_05_PWR_FAIL 0x10 /* Cable Power Failure Detect */ |
116 | #define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */ | 117 | #define PHY_05_TIMEOUT 0x08 /* Arbitration State Machine Timeout */ |
117 | #define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */ | 118 | #define PHY_05_PORT_EVENT 0x04 /* Port Event Detect */ |
118 | #define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */ | 119 | #define PHY_05_ENAB_ACCEL 0x02 /* Enable Arbitration Acceleration */ |
119 | #define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */ | 120 | #define PHY_05_ENAB_MULTI 0x01 /* Ena. Multispeed Packet Concatenation */ |
120 | 121 | ||
121 | #include <asm/byteorder.h> | 122 | #include <asm/byteorder.h> |
122 | 123 | ||
123 | #ifdef __BIG_ENDIAN_BITFIELD | 124 | #ifdef __BIG_ENDIAN_BITFIELD |
124 | 125 | ||
125 | struct selfid { | 126 | struct selfid { |
126 | u32 packet_identifier:2; /* always binary 10 */ | 127 | u32 packet_identifier:2; /* always binary 10 */ |
127 | u32 phy_id:6; | 128 | u32 phy_id:6; |
128 | /* byte */ | 129 | /* byte */ |
129 | u32 extended:1; /* if true is struct ext_selfid */ | 130 | u32 extended:1; /* if true is struct ext_selfid */ |
130 | u32 link_active:1; | 131 | u32 link_active:1; |
131 | u32 gap_count:6; | 132 | u32 gap_count:6; |
132 | /* byte */ | 133 | /* byte */ |
133 | u32 speed:2; | 134 | u32 speed:2; |
134 | u32 phy_delay:2; | 135 | u32 phy_delay:2; |
135 | u32 contender:1; | 136 | u32 contender:1; |
136 | u32 power_class:3; | 137 | u32 power_class:3; |
137 | /* byte */ | 138 | /* byte */ |
138 | u32 port0:2; | 139 | u32 port0:2; |
139 | u32 port1:2; | 140 | u32 port1:2; |
140 | u32 port2:2; | 141 | u32 port2:2; |
141 | u32 initiated_reset:1; | 142 | u32 initiated_reset:1; |
142 | u32 more_packets:1; | 143 | u32 more_packets:1; |
143 | } __attribute__((packed)); | 144 | } __attribute__((packed)); |
144 | 145 | ||
145 | struct ext_selfid { | 146 | struct ext_selfid { |
146 | u32 packet_identifier:2; /* always binary 10 */ | 147 | u32 packet_identifier:2; /* always binary 10 */ |
147 | u32 phy_id:6; | 148 | u32 phy_id:6; |
148 | /* byte */ | 149 | /* byte */ |
149 | u32 extended:1; /* if false is struct selfid */ | 150 | u32 extended:1; /* if false is struct selfid */ |
150 | u32 seq_nr:3; | 151 | u32 seq_nr:3; |
151 | u32 reserved:2; | 152 | u32 reserved:2; |
152 | u32 porta:2; | 153 | u32 porta:2; |
153 | /* byte */ | 154 | /* byte */ |
154 | u32 portb:2; | 155 | u32 portb:2; |
155 | u32 portc:2; | 156 | u32 portc:2; |
156 | u32 portd:2; | 157 | u32 portd:2; |
157 | u32 porte:2; | 158 | u32 porte:2; |
158 | /* byte */ | 159 | /* byte */ |
159 | u32 portf:2; | 160 | u32 portf:2; |
160 | u32 portg:2; | 161 | u32 portg:2; |
161 | u32 porth:2; | 162 | u32 porth:2; |
162 | u32 reserved2:1; | 163 | u32 reserved2:1; |
163 | u32 more_packets:1; | 164 | u32 more_packets:1; |
164 | } __attribute__((packed)); | 165 | } __attribute__((packed)); |
165 | 166 | ||
166 | #elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */ | 167 | #elif defined __LITTLE_ENDIAN_BITFIELD /* __BIG_ENDIAN_BITFIELD */ |
@@ -171,49 +172,48 @@ struct ext_selfid { | |||
171 | */ | 172 | */ |
172 | 173 | ||
173 | struct selfid { | 174 | struct selfid { |
174 | u32 phy_id:6; | 175 | u32 phy_id:6; |
175 | u32 packet_identifier:2; /* always binary 10 */ | 176 | u32 packet_identifier:2; /* always binary 10 */ |
176 | /* byte */ | 177 | /* byte */ |
177 | u32 gap_count:6; | 178 | u32 gap_count:6; |
178 | u32 link_active:1; | 179 | u32 link_active:1; |
179 | u32 extended:1; /* if true is struct ext_selfid */ | 180 | u32 extended:1; /* if true is struct ext_selfid */ |
180 | /* byte */ | 181 | /* byte */ |
181 | u32 power_class:3; | 182 | u32 power_class:3; |
182 | u32 contender:1; | 183 | u32 contender:1; |
183 | u32 phy_delay:2; | 184 | u32 phy_delay:2; |
184 | u32 speed:2; | 185 | u32 speed:2; |
185 | /* byte */ | 186 | /* byte */ |
186 | u32 more_packets:1; | 187 | u32 more_packets:1; |
187 | u32 initiated_reset:1; | 188 | u32 initiated_reset:1; |
188 | u32 port2:2; | 189 | u32 port2:2; |
189 | u32 port1:2; | 190 | u32 port1:2; |
190 | u32 port0:2; | 191 | u32 port0:2; |
191 | } __attribute__((packed)); | 192 | } __attribute__((packed)); |
192 | 193 | ||
193 | struct ext_selfid { | 194 | struct ext_selfid { |
194 | u32 phy_id:6; | 195 | u32 phy_id:6; |
195 | u32 packet_identifier:2; /* always binary 10 */ | 196 | u32 packet_identifier:2; /* always binary 10 */ |
196 | /* byte */ | 197 | /* byte */ |
197 | u32 porta:2; | 198 | u32 porta:2; |
198 | u32 reserved:2; | 199 | u32 reserved:2; |
199 | u32 seq_nr:3; | 200 | u32 seq_nr:3; |
200 | u32 extended:1; /* if false is struct selfid */ | 201 | u32 extended:1; /* if false is struct selfid */ |
201 | /* byte */ | 202 | /* byte */ |
202 | u32 porte:2; | 203 | u32 porte:2; |
203 | u32 portd:2; | 204 | u32 portd:2; |
204 | u32 portc:2; | 205 | u32 portc:2; |
205 | u32 portb:2; | 206 | u32 portb:2; |
206 | /* byte */ | 207 | /* byte */ |
207 | u32 more_packets:1; | 208 | u32 more_packets:1; |
208 | u32 reserved2:1; | 209 | u32 reserved2:1; |
209 | u32 porth:2; | 210 | u32 porth:2; |
210 | u32 portg:2; | 211 | u32 portg:2; |
211 | u32 portf:2; | 212 | u32 portf:2; |
212 | } __attribute__((packed)); | 213 | } __attribute__((packed)); |
213 | 214 | ||
214 | #else | 215 | #else |
215 | #error What? PDP endian? | 216 | #error What? PDP endian? |
216 | #endif /* __BIG_ENDIAN_BITFIELD */ | 217 | #endif /* __BIG_ENDIAN_BITFIELD */ |
217 | 218 | ||
218 | |||
219 | #endif /* _IEEE1394_IEEE1394_H */ | 219 | #endif /* _IEEE1394_IEEE1394_H */ |
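Because the selfid structs above rely on the compiler's bitfield order (hence the two declarations), the same fields can also be pulled out of a raw self-ID quadlet with masks and shifts. A standalone sketch; the bit positions are derived from the big-endian declaration (most significant field first), and the quadlet value is just an example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t q = 0x807f8cc6;        /* example self-ID quadlet, bits 31:30 = 10b */

        printf("phy_id      %u\n", (unsigned)((q >> 24) & 0x3f));
        printf("extended    %u\n", (unsigned)((q >> 23) & 0x01));
        printf("link_active %u\n", (unsigned)((q >> 22) & 0x01));
        printf("gap_count   %u\n", (unsigned)((q >> 16) & 0x3f));
        printf("speed       %u\n", (unsigned)((q >> 14) & 0x03));
        printf("contender   %u\n", (unsigned)((q >> 11) & 0x01));
        printf("power_class %u\n", (unsigned)((q >>  8) & 0x07));
        printf("port0       %u\n", (unsigned)((q >>  6) & 0x03));
        return 0;
}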
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index f43739c5cab2..5fccf9f7a1d2 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/kthread.h> | 35 | #include <linux/kthread.h> |
36 | 36 | ||
37 | #include <asm/byteorder.h> | 37 | #include <asm/byteorder.h> |
38 | #include <asm/semaphore.h> | ||
39 | 38 | ||
40 | #include "ieee1394_types.h" | 39 | #include "ieee1394_types.h" |
41 | #include "ieee1394.h" | 40 | #include "ieee1394.h" |
@@ -86,7 +85,7 @@ static void dump_packet(const char *text, quadlet_t *data, int size, int speed) | |||
86 | printk("\n"); | 85 | printk("\n"); |
87 | } | 86 | } |
88 | #else | 87 | #else |
89 | #define dump_packet(a,b,c,d) | 88 | #define dump_packet(a,b,c,d) do {} while (0) |
90 | #endif | 89 | #endif |
91 | 90 | ||
92 | static void abort_requests(struct hpsb_host *host); | 91 | static void abort_requests(struct hpsb_host *host); |
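The empty dump_packet() stub becomes do {} while (0) so that a call site like "if (cond) dump_packet(...); else ..." stays one complete statement in both configurations and the compiler never sees an empty if-body. A standalone illustration of the idiom (VERBOSE and the printed text are made up for the example):

#include <stdio.h>

#ifdef VERBOSE
#define dump_packet(text, data, size, speed) \
        printf("%s: %d quadlets, speed code %d\n", text, size, speed)
#else
#define dump_packet(text, data, size, speed) do {} while (0)
#endif

int main(void)
{
        if (1)
                dump_packet("sent packet", (void *)0, 4, 2);
        else
                printf("never reached\n");
        return 0;
}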
@@ -355,10 +354,12 @@ static void build_speed_map(struct hpsb_host *host, int nodecount) | |||
355 | } | 354 | } |
356 | } | 355 | } |
357 | 356 | ||
357 | #if SELFID_SPEED_UNKNOWN != IEEE1394_SPEED_MAX | ||
358 | /* assume maximum speed for 1394b PHYs, nodemgr will correct it */ | 358 | /* assume maximum speed for 1394b PHYs, nodemgr will correct it */ |
359 | for (n = 0; n < nodecount; n++) | 359 | for (n = 0; n < nodecount; n++) |
360 | if (speedcap[n] == 3) | 360 | if (speedcap[n] == SELFID_SPEED_UNKNOWN) |
361 | speedcap[n] = IEEE1394_SPEED_MAX; | 361 | speedcap[n] = IEEE1394_SPEED_MAX; |
362 | #endif | ||
362 | } | 363 | } |
363 | 364 | ||
364 | 365 | ||
@@ -1169,7 +1170,7 @@ static void __exit ieee1394_cleanup(void) | |||
1169 | unregister_chrdev_region(IEEE1394_CORE_DEV, 256); | 1170 | unregister_chrdev_region(IEEE1394_CORE_DEV, 256); |
1170 | } | 1171 | } |
1171 | 1172 | ||
1172 | module_init(ieee1394_init); | 1173 | fs_initcall(ieee1394_init); /* same as ohci1394 */ |
1173 | module_exit(ieee1394_cleanup); | 1174 | module_exit(ieee1394_cleanup); |
1174 | 1175 | ||
1175 | /* Exported symbols */ | 1176 | /* Exported symbols */ |
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h index 0ecbf335c64f..af4a78a8ef3b 100644 --- a/drivers/ieee1394/ieee1394_core.h +++ b/drivers/ieee1394/ieee1394_core.h | |||
@@ -1,12 +1,15 @@ | |||
1 | |||
2 | #ifndef _IEEE1394_CORE_H | 1 | #ifndef _IEEE1394_CORE_H |
3 | #define _IEEE1394_CORE_H | 2 | #define _IEEE1394_CORE_H |
4 | 3 | ||
5 | #include <linux/slab.h> | 4 | #include <linux/device.h> |
5 | #include <linux/fs.h> | ||
6 | #include <linux/list.h> | ||
7 | #include <linux/skbuff.h> | ||
8 | #include <linux/types.h> | ||
6 | #include <asm/atomic.h> | 9 | #include <asm/atomic.h> |
7 | #include <asm/semaphore.h> | ||
8 | #include "hosts.h" | ||
9 | 10 | ||
11 | #include "hosts.h" | ||
12 | #include "ieee1394_types.h" | ||
10 | 13 | ||
11 | struct hpsb_packet { | 14 | struct hpsb_packet { |
12 | /* This struct is basically read-only for hosts with the exception of | 15 | /* This struct is basically read-only for hosts with the exception of |
@@ -58,7 +61,6 @@ struct hpsb_packet { | |||
58 | size_t header_size; | 61 | size_t header_size; |
59 | size_t data_size; | 62 | size_t data_size; |
60 | 63 | ||
61 | |||
62 | struct hpsb_host *host; | 64 | struct hpsb_host *host; |
63 | unsigned int generation; | 65 | unsigned int generation; |
64 | 66 | ||
@@ -80,7 +82,7 @@ struct hpsb_packet { | |||
80 | 82 | ||
81 | /* Set a task for when a packet completes */ | 83 | /* Set a task for when a packet completes */ |
82 | void hpsb_set_packet_complete_task(struct hpsb_packet *packet, | 84 | void hpsb_set_packet_complete_task(struct hpsb_packet *packet, |
83 | void (*routine)(void *), void *data); | 85 | void (*routine)(void *), void *data); |
84 | 86 | ||
85 | static inline struct hpsb_packet *driver_packet(struct list_head *l) | 87 | static inline struct hpsb_packet *driver_packet(struct list_head *l) |
86 | { | 88 | { |
@@ -92,7 +94,6 @@ void abort_timedouts(unsigned long __opaque); | |||
92 | struct hpsb_packet *hpsb_alloc_packet(size_t data_size); | 94 | struct hpsb_packet *hpsb_alloc_packet(size_t data_size); |
93 | void hpsb_free_packet(struct hpsb_packet *packet); | 95 | void hpsb_free_packet(struct hpsb_packet *packet); |
94 | 96 | ||
95 | |||
96 | /* | 97 | /* |
97 | * Generation counter for the complete 1394 subsystem. Generation gets | 98 | * Generation counter for the complete 1394 subsystem. Generation gets |
98 | * incremented on every change in the subsystem (e.g. bus reset). | 99 | * incremented on every change in the subsystem (e.g. bus reset). |
@@ -204,10 +205,14 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, | |||
204 | #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15 | 205 | #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15 |
205 | 206 | ||
206 | #define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) | 207 | #define IEEE1394_CORE_DEV MKDEV(IEEE1394_MAJOR, 0) |
207 | #define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16) | 208 | #define IEEE1394_RAW1394_DEV MKDEV(IEEE1394_MAJOR, \ |
208 | #define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) | 209 | IEEE1394_MINOR_BLOCK_RAW1394 * 16) |
209 | #define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16) | 210 | #define IEEE1394_VIDEO1394_DEV MKDEV(IEEE1394_MAJOR, \ |
210 | #define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) | 211 | IEEE1394_MINOR_BLOCK_VIDEO1394 * 16) |
212 | #define IEEE1394_DV1394_DEV MKDEV(IEEE1394_MAJOR, \ | ||
213 | IEEE1394_MINOR_BLOCK_DV1394 * 16) | ||
214 | #define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, \ | ||
215 | IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16) | ||
211 | 216 | ||
212 | /* return the index (within a minor number block) of a file */ | 217 | /* return the index (within a minor number block) of a file */ |
213 | static inline unsigned char ieee1394_file_to_instance(struct file *file) | 218 | static inline unsigned char ieee1394_file_to_instance(struct file *file) |
@@ -223,4 +228,3 @@ extern struct class hpsb_host_class; | |||
223 | extern struct class *hpsb_protocol_class; | 228 | extern struct class *hpsb_protocol_class; |
224 | 229 | ||
225 | #endif /* _IEEE1394_CORE_H */ | 230 | #endif /* _IEEE1394_CORE_H */ |
226 | |||
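Each IEEE1394_MINOR_BLOCK_* above owns 16 consecutive minor numbers, so the block and the per-block instance (what ieee1394_file_to_instance() returns) can be recovered with a divide and a modulo by 16. A small standalone sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int minor = 15 * 16 + 3;       /* experimental block, instance 3 */

        printf("minor %u -> block %u, instance %u\n",
               minor, minor / 16, minor % 16);
        return 0;
}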
diff --git a/drivers/ieee1394/ieee1394_hotplug.h b/drivers/ieee1394/ieee1394_hotplug.h index 5be70d31b007..dd5500ed8322 100644 --- a/drivers/ieee1394/ieee1394_hotplug.h +++ b/drivers/ieee1394/ieee1394_hotplug.h | |||
@@ -1,33 +1,19 @@ | |||
1 | #ifndef _IEEE1394_HOTPLUG_H | 1 | #ifndef _IEEE1394_HOTPLUG_H |
2 | #define _IEEE1394_HOTPLUG_H | 2 | #define _IEEE1394_HOTPLUG_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/mod_devicetable.h> | ||
7 | |||
8 | /* Unit spec id and sw version entry for some protocols */ | 4 | /* Unit spec id and sw version entry for some protocols */ |
9 | #define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D | 5 | #define AVC_UNIT_SPEC_ID_ENTRY 0x0000A02D |
10 | #define AVC_SW_VERSION_ENTRY 0x00010001 | 6 | #define AVC_SW_VERSION_ENTRY 0x00010001 |
11 | #define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D | 7 | #define CAMERA_UNIT_SPEC_ID_ENTRY 0x0000A02D |
12 | #define CAMERA_SW_VERSION_ENTRY 0x00000100 | 8 | #define CAMERA_SW_VERSION_ENTRY 0x00000100 |
13 | 9 | ||
14 | /* Check to make sure this all isn't already defined */ | 10 | /* /include/linux/mod_devicetable.h defines: |
15 | #ifndef IEEE1394_MATCH_VENDOR_ID | 11 | * IEEE1394_MATCH_VENDOR_ID |
16 | 12 | * IEEE1394_MATCH_MODEL_ID | |
17 | #define IEEE1394_MATCH_VENDOR_ID 0x0001 | 13 | * IEEE1394_MATCH_SPECIFIER_ID |
18 | #define IEEE1394_MATCH_MODEL_ID 0x0002 | 14 | * IEEE1394_MATCH_VERSION |
19 | #define IEEE1394_MATCH_SPECIFIER_ID 0x0004 | 15 | * struct ieee1394_device_id |
20 | #define IEEE1394_MATCH_VERSION 0x0008 | 16 | */ |
21 | 17 | #include <linux/mod_devicetable.h> | |
22 | struct ieee1394_device_id { | ||
23 | u32 match_flags; | ||
24 | u32 vendor_id; | ||
25 | u32 model_id; | ||
26 | u32 specifier_id; | ||
27 | u32 version; | ||
28 | void *driver_data; | ||
29 | }; | ||
30 | |||
31 | #endif | ||
32 | 18 | ||
33 | #endif /* _IEEE1394_HOTPLUG_H */ | 19 | #endif /* _IEEE1394_HOTPLUG_H */ |
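With the match flags and struct ieee1394_device_id now coming from <linux/mod_devicetable.h>, a protocol driver's match table looks roughly like the sketch below; the table name is hypothetical and the & 0xffffff masking mirrors how existing 1394 drivers trim the CSR entries, so treat it as an assumption rather than a requirement of this header.

#include <linux/mod_devicetable.h>

#include "ieee1394_hotplug.h"

static struct ieee1394_device_id avc_id_table[] = {
        {
                .match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
                                IEEE1394_MATCH_VERSION,
                .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
                .version      = AVC_SW_VERSION_ENTRY & 0xffffff,
        },
        { }
};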
diff --git a/drivers/ieee1394/ieee1394_transactions.c b/drivers/ieee1394/ieee1394_transactions.c index a114b91d606d..0833fc9f50c4 100644 --- a/drivers/ieee1394/ieee1394_transactions.c +++ b/drivers/ieee1394/ieee1394_transactions.c | |||
@@ -9,19 +9,17 @@ | |||
9 | * directory of the kernel sources for details. | 9 | * directory of the kernel sources for details. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
14 | #include <linux/smp_lock.h> | 13 | #include <linux/spinlock.h> |
15 | #include <linux/interrupt.h> | 14 | #include <linux/wait.h> |
16 | 15 | ||
16 | #include <asm/bug.h> | ||
17 | #include <asm/errno.h> | 17 | #include <asm/errno.h> |
18 | 18 | ||
19 | #include "ieee1394.h" | 19 | #include "ieee1394.h" |
20 | #include "ieee1394_types.h" | 20 | #include "ieee1394_types.h" |
21 | #include "hosts.h" | 21 | #include "hosts.h" |
22 | #include "ieee1394_core.h" | 22 | #include "ieee1394_core.h" |
23 | #include "highlevel.h" | ||
24 | #include "nodemgr.h" | ||
25 | #include "ieee1394_transactions.h" | 23 | #include "ieee1394_transactions.h" |
26 | 24 | ||
27 | #define PREP_ASYNC_HEAD_ADDRESS(tc) \ | 25 | #define PREP_ASYNC_HEAD_ADDRESS(tc) \ |
@@ -31,6 +29,13 @@ | |||
31 | packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \ | 29 | packet->header[1] = (packet->host->node_id << 16) | (addr >> 32); \ |
32 | packet->header[2] = addr & 0xffffffff | 30 | packet->header[2] = addr & 0xffffffff |
33 | 31 | ||
32 | #ifndef HPSB_DEBUG_TLABELS | ||
33 | static | ||
34 | #endif | ||
35 | spinlock_t hpsb_tlabel_lock = SPIN_LOCK_UNLOCKED; | ||
36 | |||
37 | static DECLARE_WAIT_QUEUE_HEAD(tlabel_wq); | ||
38 | |||
34 | static void fill_async_readquad(struct hpsb_packet *packet, u64 addr) | 39 | static void fill_async_readquad(struct hpsb_packet *packet, u64 addr) |
35 | { | 40 | { |
36 | PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); | 41 | PREP_ASYNC_HEAD_ADDRESS(TCODE_READQ); |
@@ -114,9 +119,41 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length, | |||
114 | packet->tcode = TCODE_ISO_DATA; | 119 | packet->tcode = TCODE_ISO_DATA; |
115 | } | 120 | } |
116 | 121 | ||
122 | /* same as hpsb_get_tlabel, except that it returns immediately */ | ||
123 | static int hpsb_get_tlabel_atomic(struct hpsb_packet *packet) | ||
124 | { | ||
125 | unsigned long flags, *tp; | ||
126 | u8 *next; | ||
127 | int tlabel, n = NODEID_TO_NODE(packet->node_id); | ||
128 | |||
129 | /* Broadcast transactions are complete once the request has been sent. | ||
130 | * Use the same transaction label for all broadcast transactions. */ | ||
131 | if (unlikely(n == ALL_NODES)) { | ||
132 | packet->tlabel = 0; | ||
133 | return 0; | ||
134 | } | ||
135 | tp = packet->host->tl_pool[n].map; | ||
136 | next = &packet->host->next_tl[n]; | ||
137 | |||
138 | spin_lock_irqsave(&hpsb_tlabel_lock, flags); | ||
139 | tlabel = find_next_zero_bit(tp, 64, *next); | ||
140 | if (tlabel > 63) | ||
141 | tlabel = find_first_zero_bit(tp, 64); | ||
142 | if (tlabel > 63) { | ||
143 | spin_unlock_irqrestore(&hpsb_tlabel_lock, flags); | ||
144 | return -EAGAIN; | ||
145 | } | ||
146 | __set_bit(tlabel, tp); | ||
147 | *next = (tlabel + 1) & 63; | ||
148 | spin_unlock_irqrestore(&hpsb_tlabel_lock, flags); | ||
149 | |||
150 | packet->tlabel = tlabel; | ||
151 | return 0; | ||
152 | } | ||
153 | |||
117 | /** | 154 | /** |
118 | * hpsb_get_tlabel - allocate a transaction label | 155 | * hpsb_get_tlabel - allocate a transaction label |
119 | * @packet: the packet who's tlabel/tpool we set | 156 | * @packet: the packet whose tlabel and tl_pool we set |
120 | * | 157 | * |
121 | * Every asynchronous transaction on the 1394 bus needs a transaction | 158 | * Every asynchronous transaction on the 1394 bus needs a transaction |
122 | * label to match the response to the request. This label has to be | 159 | * label to match the response to the request. This label has to be |
@@ -130,42 +167,25 @@ static void fill_async_stream_packet(struct hpsb_packet *packet, int length, | |||
130 | * Return value: Zero on success, otherwise non-zero. A non-zero return | 167 | * Return value: Zero on success, otherwise non-zero. A non-zero return |
131 | * generally means there are no available tlabels. If this is called out | 168 | * generally means there are no available tlabels. If this is called out |
132 | * of interrupt or atomic context, then it will sleep until it can return a | 169 | * of interrupt or atomic context, then it will sleep until it can return a |
133 | * tlabel. | 170 | * tlabel or a signal is received. |
134 | */ | 171 | */ |
135 | int hpsb_get_tlabel(struct hpsb_packet *packet) | 172 | int hpsb_get_tlabel(struct hpsb_packet *packet) |
136 | { | 173 | { |
137 | unsigned long flags; | 174 | if (irqs_disabled() || in_atomic()) |
138 | struct hpsb_tlabel_pool *tp; | 175 | return hpsb_get_tlabel_atomic(packet); |
139 | int n = NODEID_TO_NODE(packet->node_id); | 176 | |
140 | 177 | /* NB: The macro wait_event_interruptible() is called with a condition | |
141 | if (unlikely(n == ALL_NODES)) | 179 | * effect does not occur until the condition becomes true, and |
142 | return 0; | 179 | * effect does not occur until the condition became true, and |
143 | tp = &packet->host->tpool[n]; | 180 | * wait_event_interruptible() won't evaluate the condition again after |
144 | 181 | * that. */ | |
145 | if (irqs_disabled() || in_atomic()) { | 182 | return wait_event_interruptible(tlabel_wq, |
146 | if (down_trylock(&tp->count)) | 183 | !hpsb_get_tlabel_atomic(packet)); |
147 | return 1; | ||
148 | } else { | ||
149 | down(&tp->count); | ||
150 | } | ||
151 | |||
152 | spin_lock_irqsave(&tp->lock, flags); | ||
153 | |||
154 | packet->tlabel = find_next_zero_bit(tp->pool, 64, tp->next); | ||
155 | if (packet->tlabel > 63) | ||
156 | packet->tlabel = find_first_zero_bit(tp->pool, 64); | ||
157 | tp->next = (packet->tlabel + 1) % 64; | ||
158 | /* Should _never_ happen */ | ||
159 | BUG_ON(test_and_set_bit(packet->tlabel, tp->pool)); | ||
160 | tp->allocations++; | ||
161 | spin_unlock_irqrestore(&tp->lock, flags); | ||
162 | |||
163 | return 0; | ||
164 | } | 184 | } |
165 | 185 | ||
166 | /** | 186 | /** |
167 | * hpsb_free_tlabel - free an allocated transaction label | 187 | * hpsb_free_tlabel - free an allocated transaction label |
168 | * @packet: packet whos tlabel/tpool needs to be cleared | 188 | * @packet: packet whose tlabel and tl_pool need to be cleared |
169 | * | 189 | * |
170 | * Frees the transaction label allocated with hpsb_get_tlabel(). The | 190 | * Frees the transaction label allocated with hpsb_get_tlabel(). The |
171 | * tlabel has to be freed after the transaction is complete (i.e. response | 191 | * tlabel has to be freed after the transaction is complete (i.e. response |
@@ -176,21 +196,20 @@ int hpsb_get_tlabel(struct hpsb_packet *packet) | |||
176 | */ | 196 | */ |
177 | void hpsb_free_tlabel(struct hpsb_packet *packet) | 197 | void hpsb_free_tlabel(struct hpsb_packet *packet) |
178 | { | 198 | { |
179 | unsigned long flags; | 199 | unsigned long flags, *tp; |
180 | struct hpsb_tlabel_pool *tp; | 200 | int tlabel, n = NODEID_TO_NODE(packet->node_id); |
181 | int n = NODEID_TO_NODE(packet->node_id); | ||
182 | 201 | ||
183 | if (unlikely(n == ALL_NODES)) | 202 | if (unlikely(n == ALL_NODES)) |
184 | return; | 203 | return; |
185 | tp = &packet->host->tpool[n]; | 204 | tp = packet->host->tl_pool[n].map; |
205 | tlabel = packet->tlabel; | ||
206 | BUG_ON(tlabel > 63 || tlabel < 0); | ||
186 | 207 | ||
187 | BUG_ON(packet->tlabel > 63 || packet->tlabel < 0); | 208 | spin_lock_irqsave(&hpsb_tlabel_lock, flags); |
209 | BUG_ON(!__test_and_clear_bit(tlabel, tp)); | ||
210 | spin_unlock_irqrestore(&hpsb_tlabel_lock, flags); | ||
188 | 211 | ||
189 | spin_lock_irqsave(&tp->lock, flags); | 212 | wake_up_interruptible(&tlabel_wq); |
190 | BUG_ON(!test_and_clear_bit(packet->tlabel, tp->pool)); | ||
191 | spin_unlock_irqrestore(&tp->lock, flags); | ||
192 | |||
193 | up(&tp->count); | ||
194 | } | 213 | } |
195 | 214 | ||
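The reworked allocator keeps one 64-bit bitmap per node and a per-node next pointer: search for a free bit starting at next, wrap around once, mark the bit and advance next; if all 64 labels are busy the caller sleeps on tlabel_wq until hpsb_free_tlabel() wakes it. A standalone userspace analogue of that bitmap walk (locking and the wait queue are left out):

#include <stdint.h>
#include <stdio.h>

static uint64_t map;            /* bit i set => tlabel i in use */
static unsigned int next_tl;

static int get_tlabel(void)
{
        unsigned int i, tl;

        for (i = 0; i < 64; i++) {
                tl = (next_tl + i) & 63;
                if (!(map & ((uint64_t)1 << tl))) {
                        map |= (uint64_t)1 << tl;
                        next_tl = (tl + 1) & 63;
                        return tl;
                }
        }
        return -1;              /* all 64 labels busy: caller would wait */
}

static void free_tlabel(int tl)
{
        map &= ~((uint64_t)1 << tl);
}

int main(void)
{
        int a = get_tlabel(), b = get_tlabel();

        printf("allocated %d and %d\n", a, b);
        free_tlabel(a);
        printf("freed %d; next allocation returns %d\n", a, get_tlabel());
        return 0;
}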
196 | int hpsb_packet_success(struct hpsb_packet *packet) | 215 | int hpsb_packet_success(struct hpsb_packet *packet) |
@@ -214,7 +233,7 @@ int hpsb_packet_success(struct hpsb_packet *packet) | |||
214 | packet->node_id); | 233 | packet->node_id); |
215 | return -EAGAIN; | 234 | return -EAGAIN; |
216 | } | 235 | } |
217 | HPSB_PANIC("reached unreachable code 1 in %s", __FUNCTION__); | 236 | BUG(); |
218 | 237 | ||
219 | case ACK_BUSY_X: | 238 | case ACK_BUSY_X: |
220 | case ACK_BUSY_A: | 239 | case ACK_BUSY_A: |
@@ -261,8 +280,7 @@ int hpsb_packet_success(struct hpsb_packet *packet) | |||
261 | packet->ack_code, packet->node_id, packet->tcode); | 280 | packet->ack_code, packet->node_id, packet->tcode); |
262 | return -EAGAIN; | 281 | return -EAGAIN; |
263 | } | 282 | } |
264 | 283 | BUG(); | |
265 | HPSB_PANIC("reached unreachable code 2 in %s", __FUNCTION__); | ||
266 | } | 284 | } |
267 | 285 | ||
268 | struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, | 286 | struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, |
diff --git a/drivers/ieee1394/ieee1394_transactions.h b/drivers/ieee1394/ieee1394_transactions.h index 45ba784fe6da..c1369c41469b 100644 --- a/drivers/ieee1394/ieee1394_transactions.h +++ b/drivers/ieee1394/ieee1394_transactions.h | |||
@@ -1,32 +1,32 @@ | |||
1 | #ifndef _IEEE1394_TRANSACTIONS_H | 1 | #ifndef _IEEE1394_TRANSACTIONS_H |
2 | #define _IEEE1394_TRANSACTIONS_H | 2 | #define _IEEE1394_TRANSACTIONS_H |
3 | 3 | ||
4 | #include "ieee1394_core.h" | 4 | #include <linux/types.h> |
5 | 5 | ||
6 | #include "ieee1394_types.h" | ||
7 | |||
8 | struct hpsb_packet; | ||
9 | struct hpsb_host; | ||
6 | 10 | ||
7 | /* | ||
8 | * Get and free transaction labels. | ||
9 | */ | ||
10 | int hpsb_get_tlabel(struct hpsb_packet *packet); | 11 | int hpsb_get_tlabel(struct hpsb_packet *packet); |
11 | void hpsb_free_tlabel(struct hpsb_packet *packet); | 12 | void hpsb_free_tlabel(struct hpsb_packet *packet); |
12 | |||
13 | struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, | 13 | struct hpsb_packet *hpsb_make_readpacket(struct hpsb_host *host, nodeid_t node, |
14 | u64 addr, size_t length); | 14 | u64 addr, size_t length); |
15 | struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, | 15 | struct hpsb_packet *hpsb_make_lockpacket(struct hpsb_host *host, nodeid_t node, |
16 | u64 addr, int extcode, quadlet_t *data, | 16 | u64 addr, int extcode, quadlet_t *data, |
17 | quadlet_t arg); | 17 | quadlet_t arg); |
18 | struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, nodeid_t node, | 18 | struct hpsb_packet *hpsb_make_lock64packet(struct hpsb_host *host, |
19 | u64 addr, int extcode, octlet_t *data, | 19 | nodeid_t node, u64 addr, int extcode, |
20 | octlet_t arg); | 20 | octlet_t *data, octlet_t arg); |
21 | struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, | 21 | struct hpsb_packet *hpsb_make_phypacket(struct hpsb_host *host, quadlet_t data); |
22 | quadlet_t data) ; | 22 | struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, int length, |
23 | struct hpsb_packet *hpsb_make_isopacket(struct hpsb_host *host, | 23 | int channel, int tag, int sync); |
24 | int length, int channel, | 24 | struct hpsb_packet *hpsb_make_writepacket(struct hpsb_host *host, |
25 | int tag, int sync); | 25 | nodeid_t node, u64 addr, |
26 | struct hpsb_packet *hpsb_make_writepacket (struct hpsb_host *host, nodeid_t node, | 26 | quadlet_t *buffer, size_t length); |
27 | u64 addr, quadlet_t *buffer, size_t length); | ||
28 | struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, | 27 | struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, |
29 | int length, int channel, int tag, int sync); | 28 | int length, int channel, int tag, |
29 | int sync); | ||
30 | 30 | ||
31 | /* | 31 | /* |
32 | * hpsb_packet_success - Make sense of the ack and reply codes and | 32 | * hpsb_packet_success - Make sense of the ack and reply codes and |
@@ -40,9 +40,8 @@ struct hpsb_packet *hpsb_make_streampacket(struct hpsb_host *host, u8 *buffer, | |||
40 | */ | 40 | */ |
41 | int hpsb_packet_success(struct hpsb_packet *packet); | 41 | int hpsb_packet_success(struct hpsb_packet *packet); |
42 | 42 | ||
43 | |||
44 | /* | 43 | /* |
45 | * The generic read, write and lock functions. All recognize the local node ID | 44 | * The generic read and write functions. All recognize the local node ID |
46 | * and act accordingly. Read and write automatically use quadlet commands if | 45 | * and act accordingly. Read and write automatically use quadlet commands if |
47 | * length == 4 and block commands otherwise (however, they do not yet | 46 | * length == 4 and block commands otherwise (however, they do not yet |
48 | * support lengths that are not a multiple of 4). You must explicitly specify | 47 | * support lengths that are not a multiple of 4). You must explicitly specify |
@@ -54,4 +53,8 @@ int hpsb_read(struct hpsb_host *host, nodeid_t node, unsigned int generation, | |||
54 | int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, | 53 | int hpsb_write(struct hpsb_host *host, nodeid_t node, unsigned int generation, |
55 | u64 addr, quadlet_t *buffer, size_t length); | 54 | u64 addr, quadlet_t *buffer, size_t length); |
56 | 55 | ||
56 | #ifdef HPSB_DEBUG_TLABELS | ||
57 | extern spinlock_t hpsb_tlabel_lock; | ||
58 | #endif | ||
59 | |||
57 | #endif /* _IEEE1394_TRANSACTIONS_H */ | 60 | #endif /* _IEEE1394_TRANSACTIONS_H */ |
diff --git a/drivers/ieee1394/ieee1394_types.h b/drivers/ieee1394/ieee1394_types.h index 3165609ec1ec..9803aaa15be0 100644 --- a/drivers/ieee1394/ieee1394_types.h +++ b/drivers/ieee1394/ieee1394_types.h | |||
@@ -1,37 +1,11 @@ | |||
1 | |||
2 | #ifndef _IEEE1394_TYPES_H | 1 | #ifndef _IEEE1394_TYPES_H |
3 | #define _IEEE1394_TYPES_H | 2 | #define _IEEE1394_TYPES_H |
4 | 3 | ||
5 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
6 | #include <linux/types.h> | ||
7 | #include <linux/list.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/string.h> | 5 | #include <linux/string.h> |
11 | 6 | #include <linux/types.h> | |
12 | #include <asm/semaphore.h> | ||
13 | #include <asm/byteorder.h> | 7 | #include <asm/byteorder.h> |
14 | 8 | ||
15 | |||
16 | /* Transaction Label handling */ | ||
17 | struct hpsb_tlabel_pool { | ||
18 | DECLARE_BITMAP(pool, 64); | ||
19 | spinlock_t lock; | ||
20 | u8 next; | ||
21 | u32 allocations; | ||
22 | struct semaphore count; | ||
23 | }; | ||
24 | |||
25 | #define HPSB_TPOOL_INIT(_tp) \ | ||
26 | do { \ | ||
27 | bitmap_zero((_tp)->pool, 64); \ | ||
28 | spin_lock_init(&(_tp)->lock); \ | ||
29 | (_tp)->next = 0; \ | ||
30 | (_tp)->allocations = 0; \ | ||
31 | sema_init(&(_tp)->count, 63); \ | ||
32 | } while (0) | ||
33 | |||
34 | |||
35 | typedef u32 quadlet_t; | 9 | typedef u32 quadlet_t; |
36 | typedef u64 octlet_t; | 10 | typedef u64 octlet_t; |
37 | typedef u16 nodeid_t; | 11 | typedef u16 nodeid_t; |
@@ -54,46 +28,40 @@ typedef u16 arm_length_t; | |||
54 | #define NODE_BUS_ARGS(__host, __nodeid) \ | 28 | #define NODE_BUS_ARGS(__host, __nodeid) \ |
55 | __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid) | 29 | __host->id, NODEID_TO_NODE(__nodeid), NODEID_TO_BUS(__nodeid) |
56 | 30 | ||
57 | #define HPSB_PRINT(level, fmt, args...) printk(level "ieee1394: " fmt "\n" , ## args) | 31 | #define HPSB_PRINT(level, fmt, args...) \ |
32 | printk(level "ieee1394: " fmt "\n" , ## args) | ||
58 | 33 | ||
59 | #define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) | 34 | #define HPSB_DEBUG(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) |
60 | #define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args) | 35 | #define HPSB_INFO(fmt, args...) HPSB_PRINT(KERN_INFO, fmt , ## args) |
61 | #define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args) | 36 | #define HPSB_NOTICE(fmt, args...) HPSB_PRINT(KERN_NOTICE, fmt , ## args) |
62 | #define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args) | 37 | #define HPSB_WARN(fmt, args...) HPSB_PRINT(KERN_WARNING, fmt , ## args) |
63 | #define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args) | 38 | #define HPSB_ERR(fmt, args...) HPSB_PRINT(KERN_ERR, fmt , ## args) |
64 | 39 | ||
65 | #ifdef CONFIG_IEEE1394_VERBOSEDEBUG | 40 | #ifdef CONFIG_IEEE1394_VERBOSEDEBUG |
66 | #define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) | 41 | #define HPSB_VERBOSE(fmt, args...) HPSB_PRINT(KERN_DEBUG, fmt , ## args) |
42 | #define HPSB_DEBUG_TLABELS | ||
67 | #else | 43 | #else |
68 | #define HPSB_VERBOSE(fmt, args...) | 44 | #define HPSB_VERBOSE(fmt, args...) do {} while (0) |
69 | #endif | 45 | #endif |
70 | 46 | ||
71 | #define HPSB_PANIC(fmt, args...) panic("ieee1394: " fmt "\n" , ## args) | ||
72 | |||
73 | #define HPSB_TRACE() HPSB_PRINT(KERN_INFO, "TRACE - %s, %s(), line %d", __FILE__, __FUNCTION__, __LINE__) | ||
74 | |||
75 | |||
76 | #ifdef __BIG_ENDIAN | 47 | #ifdef __BIG_ENDIAN |
77 | 48 | ||
78 | static __inline__ void *memcpy_le32(u32 *dest, const u32 *__src, size_t count) | 49 | static inline void *memcpy_le32(u32 *dest, const u32 *__src, size_t count) |
79 | { | 50 | { |
80 | void *tmp = dest; | 51 | void *tmp = dest; |
81 | u32 *src = (u32 *)__src; | 52 | u32 *src = (u32 *)__src; |
82 | 53 | ||
83 | count /= 4; | 54 | count /= 4; |
84 | 55 | while (count--) | |
85 | while (count--) { | 56 | *dest++ = swab32p(src++); |
86 | *dest++ = swab32p(src++); | 57 | return tmp; |
87 | } | ||
88 | |||
89 | return tmp; | ||
90 | } | 58 | } |
91 | 59 | ||
92 | #else | 60 | #else |
93 | 61 | ||
94 | static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count) | 62 | static __inline__ void *memcpy_le32(u32 *dest, const u32 *src, size_t count) |
95 | { | 63 | { |
96 | return memcpy(dest, src, count); | 64 | return memcpy(dest, src, count); |
97 | } | 65 | } |
98 | 66 | ||
99 | #endif /* __BIG_ENDIAN */ | 67 | #endif /* __BIG_ENDIAN */ |
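On big-endian hosts memcpy_le32() byte-swaps every 32-bit word while copying; on little-endian hosts it is a plain memcpy. A standalone version of the big-endian branch (count is in bytes, as above):

#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
}

static void *memcpy_le32_be(uint32_t *dest, const uint32_t *src, unsigned long count)
{
        void *tmp = dest;

        count /= 4;
        while (count--)
                *dest++ = swap32(*src++);
        return tmp;
}

int main(void)
{
        uint32_t src[2] = { 0x11223344, 0xaabbccdd }, dst[2];

        memcpy_le32_be(dst, src, sizeof(src));
        printf("%08x %08x\n", (unsigned)dst[0], (unsigned)dst[1]);  /* 44332211 ddccbbaa */
        return 0;
}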
diff --git a/drivers/ieee1394/iso.c b/drivers/ieee1394/iso.c index f26680ebef7c..08bd15d2a7b6 100644 --- a/drivers/ieee1394/iso.c +++ b/drivers/ieee1394/iso.c | |||
@@ -9,8 +9,11 @@ | |||
9 | * directory of the kernel sources for details. | 9 | * directory of the kernel sources for details. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/slab.h> | 12 | #include <linux/pci.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | ||
15 | |||
16 | #include "hosts.h" | ||
14 | #include "iso.h" | 17 | #include "iso.h" |
15 | 18 | ||
16 | void hpsb_iso_stop(struct hpsb_iso *iso) | 19 | void hpsb_iso_stop(struct hpsb_iso *iso) |
diff --git a/drivers/ieee1394/iso.h b/drivers/ieee1394/iso.h index 3efc60b33a88..1210a97e8685 100644 --- a/drivers/ieee1394/iso.h +++ b/drivers/ieee1394/iso.h | |||
@@ -12,33 +12,40 @@ | |||
12 | #ifndef IEEE1394_ISO_H | 12 | #ifndef IEEE1394_ISO_H |
13 | #define IEEE1394_ISO_H | 13 | #define IEEE1394_ISO_H |
14 | 14 | ||
15 | #include "hosts.h" | 15 | #include <linux/spinlock_types.h> |
16 | #include <asm/atomic.h> | ||
17 | #include <asm/types.h> | ||
18 | |||
16 | #include "dma.h" | 19 | #include "dma.h" |
17 | 20 | ||
18 | /* high-level ISO interface */ | 21 | struct hpsb_host; |
19 | 22 | ||
20 | /* This API sends and receives isochronous packets on a large, | 23 | /* high-level ISO interface */ |
21 | virtually-contiguous kernel memory buffer. The buffer may be mapped | ||
22 | into a user-space process for zero-copy transmission and reception. | ||
23 | 24 | ||
24 | There are no explicit boundaries between packets in the buffer. A | 25 | /* |
25 | packet may be transmitted or received at any location. However, | 26 | * This API sends and receives isochronous packets on a large, |
26 | low-level drivers may impose certain restrictions on alignment or | 27 | * virtually-contiguous kernel memory buffer. The buffer may be mapped |
27 | size of packets. (e.g. in OHCI no packet may cross a page boundary, | 28 | * into a user-space process for zero-copy transmission and reception. |
28 | and packets should be quadlet-aligned) | 29 | * |
29 | */ | 30 | * There are no explicit boundaries between packets in the buffer. A |
31 | * packet may be transmitted or received at any location. However, | ||
32 | * low-level drivers may impose certain restrictions on alignment or | ||
33 | * size of packets. (e.g. in OHCI no packet may cross a page boundary, | ||
34 | * and packets should be quadlet-aligned) | ||
35 | */ | ||
30 | 36 | ||
31 | /* Packet descriptor - the API maintains a ring buffer of these packet | 37 | /* Packet descriptor - the API maintains a ring buffer of these packet |
32 | descriptors in kernel memory (hpsb_iso.infos[]). */ | 38 | * descriptors in kernel memory (hpsb_iso.infos[]). */ |
33 | |||
34 | struct hpsb_iso_packet_info { | 39 | struct hpsb_iso_packet_info { |
35 | /* offset of data payload relative to the first byte of the buffer */ | 40 | /* offset of data payload relative to the first byte of the buffer */ |
36 | __u32 offset; | 41 | __u32 offset; |
37 | 42 | ||
38 | /* length of the data payload, in bytes (not including the isochronous header) */ | 43 | /* length of the data payload, in bytes (not including the isochronous |
44 | * header) */ | ||
39 | __u16 len; | 45 | __u16 len; |
40 | 46 | ||
41 | /* (recv only) the cycle number (mod 8000) on which the packet was received */ | 47 | /* (recv only) the cycle number (mod 8000) on which the packet was |
48 | * received */ | ||
42 | __u16 cycle; | 49 | __u16 cycle; |
43 | 50 | ||
44 | /* (recv only) channel on which the packet was received */ | 51 | /* (recv only) channel on which the packet was received */ |
@@ -48,12 +55,10 @@ struct hpsb_iso_packet_info { | |||
48 | __u8 tag; | 55 | __u8 tag; |
49 | __u8 sy; | 56 | __u8 sy; |
50 | 57 | ||
51 | /* | 58 | /* length in bytes of the packet including header/trailer. |
52 | * length in bytes of the packet including header/trailer. | 59 | * MUST be at structure end, since the first part of this structure is |
53 | * MUST be at structure end, since the first part of this structure is also | 60 | * also defined in raw1394.h (i.e. struct raw1394_iso_packet_info), is |
54 | * defined in raw1394.h (i.e. struct raw1394_iso_packet_info), is copied to | 61 | * copied to userspace and is accessed there through libraw1394. */ |
55 | * userspace and is accessed there through libraw1394. | ||
56 | */ | ||
57 | __u16 total_len; | 62 | __u16 total_len; |
58 | }; | 63 | }; |
59 | 64 | ||
@@ -75,8 +80,8 @@ struct hpsb_iso { | |||
75 | void *hostdata; | 80 | void *hostdata; |
76 | 81 | ||
77 | /* a function to be called (from interrupt context) after | 82 | /* a function to be called (from interrupt context) after |
78 | outgoing packets have been sent, or incoming packets have | 83 | * outgoing packets have been sent, or incoming packets have |
79 | arrived */ | 84 | * arrived */ |
80 | void (*callback)(struct hpsb_iso*); | 85 | void (*callback)(struct hpsb_iso*); |
81 | 86 | ||
82 | /* wait for buffer space */ | 87 | /* wait for buffer space */ |
@@ -88,7 +93,7 @@ struct hpsb_iso { | |||
88 | 93 | ||
89 | 94 | ||
90 | /* greatest # of packets between interrupts - controls | 95 | /* greatest # of packets between interrupts - controls |
91 | the maximum latency of the buffer */ | 96 | * the maximum latency of the buffer */ |
92 | int irq_interval; | 97 | int irq_interval; |
93 | 98 | ||
94 | /* the buffer for packet data payloads */ | 99 | /* the buffer for packet data payloads */ |
@@ -112,8 +117,8 @@ struct hpsb_iso { | |||
112 | int pkt_dma; | 117 | int pkt_dma; |
113 | 118 | ||
114 | /* how many packets, starting at first_packet: | 119 | /* how many packets, starting at first_packet: |
115 | (transmit) are ready to be filled with data | 120 | * (transmit) are ready to be filled with data |
116 | (receive) contain received data */ | 121 | * (receive) contain received data */ |
117 | int n_ready_packets; | 122 | int n_ready_packets; |
118 | 123 | ||
119 | /* how many times the buffer has overflowed or underflowed */ | 124 | /* how many times the buffer has overflowed or underflowed */ |
@@ -134,7 +139,7 @@ struct hpsb_iso { | |||
134 | int start_cycle; | 139 | int start_cycle; |
135 | 140 | ||
136 | /* cycle at which next packet will be transmitted, | 141 | /* cycle at which next packet will be transmitted, |
137 | -1 if not known */ | 142 | * -1 if not known */ |
138 | int xmit_cycle; | 143 | int xmit_cycle; |
139 | 144 | ||
140 | /* ringbuffer of packet descriptors in regular kernel memory | 145 | /* ringbuffer of packet descriptors in regular kernel memory |
@@ -170,25 +175,30 @@ int hpsb_iso_recv_unlisten_channel(struct hpsb_iso *iso, unsigned char channel); | |||
170 | int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask); | 175 | int hpsb_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask); |
171 | 176 | ||
172 | /* start/stop DMA */ | 177 | /* start/stop DMA */ |
173 | int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, int prebuffer); | 178 | int hpsb_iso_xmit_start(struct hpsb_iso *iso, int start_on_cycle, |
174 | int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, int tag_mask, int sync); | 179 | int prebuffer); |
180 | int hpsb_iso_recv_start(struct hpsb_iso *iso, int start_on_cycle, | ||
181 | int tag_mask, int sync); | ||
175 | void hpsb_iso_stop(struct hpsb_iso *iso); | 182 | void hpsb_iso_stop(struct hpsb_iso *iso); |
176 | 183 | ||
177 | /* deallocate buffer and DMA context */ | 184 | /* deallocate buffer and DMA context */ |
178 | void hpsb_iso_shutdown(struct hpsb_iso *iso); | 185 | void hpsb_iso_shutdown(struct hpsb_iso *iso); |
179 | 186 | ||
180 | /* queue a packet for transmission. 'offset' is relative to the beginning of the | 187 | /* queue a packet for transmission. |
181 | DMA buffer, where the packet's data payload should already have been placed */ | 188 | * 'offset' is relative to the beginning of the DMA buffer, where the packet's |
182 | int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, u8 tag, u8 sy); | 189 | * data payload should already have been placed. */ |
190 | int hpsb_iso_xmit_queue_packet(struct hpsb_iso *iso, u32 offset, u16 len, | ||
191 | u8 tag, u8 sy); | ||
183 | 192 | ||
184 | /* wait until all queued packets have been transmitted to the bus */ | 193 | /* wait until all queued packets have been transmitted to the bus */ |
185 | int hpsb_iso_xmit_sync(struct hpsb_iso *iso); | 194 | int hpsb_iso_xmit_sync(struct hpsb_iso *iso); |
186 | 195 | ||
187 | /* N packets have been read out of the buffer, re-use the buffer space */ | 196 | /* N packets have been read out of the buffer, re-use the buffer space */ |
188 | int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, unsigned int n_packets); | 197 | int hpsb_iso_recv_release_packets(struct hpsb_iso *recv, |
198 | unsigned int n_packets); | ||
189 | 199 | ||
190 | /* check for arrival of new packets immediately (even if irq_interval | 200 | /* check for arrival of new packets immediately (even if irq_interval |
191 | has not yet been reached) */ | 201 | * has not yet been reached) */ |
192 | int hpsb_iso_recv_flush(struct hpsb_iso *iso); | 202 | int hpsb_iso_recv_flush(struct hpsb_iso *iso); |
193 | 203 | ||
194 | /* returns # of packets ready to send or receive */ | 204 | /* returns # of packets ready to send or receive */ |
@@ -197,14 +207,15 @@ int hpsb_iso_n_ready(struct hpsb_iso *iso); | |||
197 | /* the following are callbacks available to low-level drivers */ | 207 | /* the following are callbacks available to low-level drivers */ |
198 | 208 | ||
199 | /* call after a packet has been transmitted to the bus (interrupt context is OK) | 209 | /* call after a packet has been transmitted to the bus (interrupt context is OK) |
200 | 'cycle' is the _exact_ cycle the packet was sent on | 210 | * 'cycle' is the _exact_ cycle the packet was sent on |
201 | 'error' should be non-zero if some sort of error occurred when sending the packet | 211 | * 'error' should be non-zero if some sort of error occurred when sending the |
202 | */ | 212 | * packet */ |
203 | void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error); | 213 | void hpsb_iso_packet_sent(struct hpsb_iso *iso, int cycle, int error); |
204 | 214 | ||
205 | /* call after a packet has been received (interrupt context OK) */ | 215 | /* call after a packet has been received (interrupt context OK) */ |
206 | void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, | 216 | void hpsb_iso_packet_received(struct hpsb_iso *iso, u32 offset, u16 len, |
207 | u16 total_len, u16 cycle, u8 channel, u8 tag, u8 sy); | 217 | u16 total_len, u16 cycle, u8 channel, u8 tag, |
218 | u8 sy); | ||
208 | 219 | ||
209 | /* call to wake waiting processes after buffer space has opened up. */ | 220 | /* call to wake waiting processes after buffer space has opened up. */ |
210 | void hpsb_iso_wake(struct hpsb_iso *iso); | 221 | void hpsb_iso_wake(struct hpsb_iso *iso); |
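A rough transmit sequence using only the calls declared above; the hpsb_iso buffer is assumed to have been set up by this API's allocation path, the payload already copied into the DMA buffer at offset, and the -1 "start on any cycle" convention is an assumption rather than something shown in this hunk:

static int send_one(struct hpsb_iso *iso, u32 offset, u16 len)
{
        int ret;

        ret = hpsb_iso_xmit_queue_packet(iso, offset, len, /* tag */ 0, /* sy */ 0);
        if (ret)
                return ret;

        ret = hpsb_iso_xmit_start(iso, -1, /* prebuffer */ 1);
        if (ret)
                return ret;

        ret = hpsb_iso_xmit_sync(iso);  /* wait until the packet reached the bus */
        hpsb_iso_stop(iso);
        return ret;
}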
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index d541b508a159..3e7974c57443 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c | |||
@@ -12,26 +12,23 @@ | |||
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/smp_lock.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/kmod.h> | ||
18 | #include <linux/completion.h> | ||
19 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
20 | #include <linux/pci.h> | 16 | #include <linux/kthread.h> |
21 | #include <linux/moduleparam.h> | 17 | #include <linux/moduleparam.h> |
22 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
23 | 19 | ||
24 | #include "ieee1394_types.h" | 20 | #include "csr.h" |
21 | #include "highlevel.h" | ||
22 | #include "hosts.h" | ||
25 | #include "ieee1394.h" | 23 | #include "ieee1394.h" |
26 | #include "ieee1394_core.h" | 24 | #include "ieee1394_core.h" |
27 | #include "hosts.h" | 25 | #include "ieee1394_hotplug.h" |
26 | #include "ieee1394_types.h" | ||
28 | #include "ieee1394_transactions.h" | 27 | #include "ieee1394_transactions.h" |
29 | #include "highlevel.h" | ||
30 | #include "csr.h" | ||
31 | #include "nodemgr.h" | 28 | #include "nodemgr.h" |
32 | 29 | ||
33 | static int ignore_drivers; | 30 | static int ignore_drivers; |
34 | module_param(ignore_drivers, int, 0444); | 31 | module_param(ignore_drivers, int, S_IRUGO | S_IWUSR); |
35 | MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers."); | 32 | MODULE_PARM_DESC(ignore_drivers, "Disable automatic probing for drivers."); |
36 | 33 | ||
37 | struct nodemgr_csr_info { | 34 | struct nodemgr_csr_info { |
@@ -71,7 +68,7 @@ static int nodemgr_check_speed(struct nodemgr_csr_info *ci, u64 addr, | |||
71 | u8 i, *speed, old_speed, good_speed; | 68 | u8 i, *speed, old_speed, good_speed; |
72 | int ret; | 69 | int ret; |
73 | 70 | ||
74 | speed = ci->host->speed + NODEID_TO_NODE(ci->nodeid); | 71 | speed = &(ci->host->speed[NODEID_TO_NODE(ci->nodeid)]); |
75 | old_speed = *speed; | 72 | old_speed = *speed; |
76 | good_speed = IEEE1394_SPEED_MAX + 1; | 73 | good_speed = IEEE1394_SPEED_MAX + 1; |
77 | 74 | ||
@@ -161,16 +158,12 @@ static struct csr1212_bus_ops nodemgr_csr_ops = { | |||
161 | * but now we are much simpler because of the LDM. | 158 | * but now we are much simpler because of the LDM. |
162 | */ | 159 | */ |
163 | 160 | ||
164 | static DECLARE_MUTEX(nodemgr_serialize); | 161 | static DEFINE_MUTEX(nodemgr_serialize); |
165 | 162 | ||
166 | struct host_info { | 163 | struct host_info { |
167 | struct hpsb_host *host; | 164 | struct hpsb_host *host; |
168 | struct list_head list; | 165 | struct list_head list; |
169 | struct completion exited; | 166 | struct task_struct *thread; |
170 | struct semaphore reset_sem; | ||
171 | int pid; | ||
172 | char daemon_name[15]; | ||
173 | int kill_me; | ||
174 | }; | 167 | }; |
175 | 168 | ||
176 | static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); | 169 | static int nodemgr_bus_match(struct device * dev, struct device_driver * drv); |
@@ -334,34 +327,44 @@ static ssize_t fw_show_ne_bus_options(struct device *dev, struct device_attribut | |||
334 | static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL); | 327 | static DEVICE_ATTR(bus_options,S_IRUGO,fw_show_ne_bus_options,NULL); |
335 | 328 | ||
336 | 329 | ||
337 | /* tlabels_free, tlabels_allocations, tlabels_mask are read non-atomically | 330 | #ifdef HPSB_DEBUG_TLABELS |
338 | * here, therefore displayed values may be occasionally wrong. */ | 331 | static ssize_t fw_show_ne_tlabels_free(struct device *dev, |
339 | static ssize_t fw_show_ne_tlabels_free(struct device *dev, struct device_attribute *attr, char *buf) | 332 | struct device_attribute *attr, char *buf) |
340 | { | 333 | { |
341 | struct node_entry *ne = container_of(dev, struct node_entry, device); | 334 | struct node_entry *ne = container_of(dev, struct node_entry, device); |
342 | return sprintf(buf, "%d\n", 64 - bitmap_weight(ne->tpool->pool, 64)); | 335 | unsigned long flags; |
343 | } | 336 | unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map; |
344 | static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL); | 337 | int tf; |
345 | 338 | ||
339 | spin_lock_irqsave(&hpsb_tlabel_lock, flags); | ||
340 | tf = 64 - bitmap_weight(tp, 64); | ||
341 | spin_unlock_irqrestore(&hpsb_tlabel_lock, flags); | ||
346 | 342 | ||
347 | static ssize_t fw_show_ne_tlabels_allocations(struct device *dev, struct device_attribute *attr, char *buf) | 343 | return sprintf(buf, "%d\n", tf); |
348 | { | ||
349 | struct node_entry *ne = container_of(dev, struct node_entry, device); | ||
350 | return sprintf(buf, "%u\n", ne->tpool->allocations); | ||
351 | } | 344 | } |
352 | static DEVICE_ATTR(tlabels_allocations,S_IRUGO,fw_show_ne_tlabels_allocations,NULL); | 345 | static DEVICE_ATTR(tlabels_free,S_IRUGO,fw_show_ne_tlabels_free,NULL); |
353 | 346 | ||
354 | 347 | ||
355 | static ssize_t fw_show_ne_tlabels_mask(struct device *dev, struct device_attribute *attr, char *buf) | 348 | static ssize_t fw_show_ne_tlabels_mask(struct device *dev, |
349 | struct device_attribute *attr, char *buf) | ||
356 | { | 350 | { |
357 | struct node_entry *ne = container_of(dev, struct node_entry, device); | 351 | struct node_entry *ne = container_of(dev, struct node_entry, device); |
352 | unsigned long flags; | ||
353 | unsigned long *tp = ne->host->tl_pool[NODEID_TO_NODE(ne->nodeid)].map; | ||
354 | u64 tm; | ||
355 | |||
356 | spin_lock_irqsave(&hpsb_tlabel_lock, flags); | ||
358 | #if (BITS_PER_LONG <= 32) | 357 | #if (BITS_PER_LONG <= 32) |
359 | return sprintf(buf, "0x%08lx%08lx\n", ne->tpool->pool[0], ne->tpool->pool[1]); | 358 | tm = ((u64)tp[0] << 32) + tp[1]; |
360 | #else | 359 | #else |
361 | return sprintf(buf, "0x%016lx\n", ne->tpool->pool[0]); | 360 | tm = tp[0]; |
362 | #endif | 361 | #endif |
362 | spin_unlock_irqrestore(&hpsb_tlabel_lock, flags); | ||
363 | |||
364 | return sprintf(buf, "0x%016llx\n", tm); | ||
363 | } | 365 | } |
364 | static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL); | 366 | static DEVICE_ATTR(tlabels_mask, S_IRUGO, fw_show_ne_tlabels_mask, NULL); |
367 | #endif /* HPSB_DEBUG_TLABELS */ | ||
365 | 368 | ||
366 | 369 | ||
367 | static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 370 | static ssize_t fw_set_ignore_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) |
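The tlabels attributes above follow the usual sysfs show-function shape: resolve the containing object, snapshot the value under a lock, print it into buf. A generic, hypothetical version of that shape (the demo_* names are not from the driver):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct demo_dev {
        struct device device;
        spinlock_t lock;
        unsigned int counter;
};

static ssize_t demo_show_counter(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct demo_dev *dd = container_of(dev, struct demo_dev, device);
        unsigned long flags;
        unsigned int v;

        /* Snapshot under the lock so the printed value is consistent. */
        spin_lock_irqsave(&dd->lock, flags);
        v = dd->counter;
        spin_unlock_irqrestore(&dd->lock, flags);

        return sprintf(buf, "%u\n", v);
}
static DEVICE_ATTR(counter, S_IRUGO, demo_show_counter, NULL);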
@@ -408,26 +411,11 @@ static ssize_t fw_get_destroy_node(struct bus_type *bus, char *buf) | |||
408 | } | 411 | } |
409 | static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node); | 412 | static BUS_ATTR(destroy_node, S_IWUSR | S_IRUGO, fw_get_destroy_node, fw_set_destroy_node); |
410 | 413 | ||
411 | static int nodemgr_rescan_bus_thread(void *__unused) | ||
412 | { | ||
413 | /* No userlevel access needed */ | ||
414 | daemonize("kfwrescan"); | ||
415 | |||
416 | bus_rescan_devices(&ieee1394_bus_type); | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | 414 | ||
421 | static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count) | 415 | static ssize_t fw_set_rescan(struct bus_type *bus, const char *buf, size_t count) |
422 | { | 416 | { |
423 | int state = simple_strtoul(buf, NULL, 10); | 417 | if (simple_strtoul(buf, NULL, 10) == 1) |
424 | 418 | bus_rescan_devices(&ieee1394_bus_type); | |
425 | /* Don't wait for this, or care about errors. Root could do | ||
426 | * something stupid and spawn this a lot of times, but that's | ||
427 | * root's fault. */ | ||
428 | if (state == 1) | ||
429 | kernel_thread(nodemgr_rescan_bus_thread, NULL, CLONE_KERNEL); | ||
430 | |||
431 | return count; | 419 | return count; |
432 | } | 420 | } |
433 | static ssize_t fw_get_rescan(struct bus_type *bus, char *buf) | 421 | static ssize_t fw_get_rescan(struct bus_type *bus, char *buf) |
@@ -483,9 +471,10 @@ static struct device_attribute *const fw_ne_attrs[] = { | |||
483 | &dev_attr_ne_vendor_id, | 471 | &dev_attr_ne_vendor_id, |
484 | &dev_attr_ne_nodeid, | 472 | &dev_attr_ne_nodeid, |
485 | &dev_attr_bus_options, | 473 | &dev_attr_bus_options, |
474 | #ifdef HPSB_DEBUG_TLABELS | ||
486 | &dev_attr_tlabels_free, | 475 | &dev_attr_tlabels_free, |
487 | &dev_attr_tlabels_allocations, | ||
488 | &dev_attr_tlabels_mask, | 476 | &dev_attr_tlabels_mask, |
477 | #endif | ||
489 | }; | 478 | }; |
490 | 479 | ||
491 | 480 | ||
@@ -804,8 +793,6 @@ static struct node_entry *nodemgr_create_node(octlet_t guid, struct csr1212_csr | |||
804 | if (!ne) | 793 | if (!ne) |
805 | return NULL; | 794 | return NULL; |
806 | 795 | ||
807 | ne->tpool = &host->tpool[nodeid & NODE_MASK]; | ||
808 | |||
809 | ne->host = host; | 796 | ne->host = host; |
810 | ne->nodeid = nodeid; | 797 | ne->nodeid = nodeid; |
811 | ne->generation = generation; | 798 | ne->generation = generation; |
@@ -1251,6 +1238,7 @@ static void nodemgr_node_scan_one(struct host_info *hi, | |||
1251 | octlet_t guid; | 1238 | octlet_t guid; |
1252 | struct csr1212_csr *csr; | 1239 | struct csr1212_csr *csr; |
1253 | struct nodemgr_csr_info *ci; | 1240 | struct nodemgr_csr_info *ci; |
1241 | u8 *speed; | ||
1254 | 1242 | ||
1255 | ci = kmalloc(sizeof(*ci), GFP_KERNEL); | 1243 | ci = kmalloc(sizeof(*ci), GFP_KERNEL); |
1256 | if (!ci) | 1244 | if (!ci) |
@@ -1259,8 +1247,12 @@ static void nodemgr_node_scan_one(struct host_info *hi, | |||
1259 | ci->host = host; | 1247 | ci->host = host; |
1260 | ci->nodeid = nodeid; | 1248 | ci->nodeid = nodeid; |
1261 | ci->generation = generation; | 1249 | ci->generation = generation; |
1262 | ci->speed_unverified = | 1250 | |
1263 | host->speed[NODEID_TO_NODE(nodeid)] > IEEE1394_SPEED_100; | 1251 | /* Prepare for speed probe which occurs when reading the ROM */ |
1252 | speed = &(host->speed[NODEID_TO_NODE(nodeid)]); | ||
1253 | if (*speed > host->csr.lnk_spd) | ||
1254 | *speed = host->csr.lnk_spd; | ||
1255 | ci->speed_unverified = *speed > IEEE1394_SPEED_100; | ||
1264 | 1256 | ||
1265 | /* We need to detect when the ConfigROM's generation has changed, | 1257 | /* We need to detect when the ConfigROM's generation has changed, |
1266 | * so we only update the node's info when it needs to be. */ | 1258 | * so we only update the node's info when it needs to be. */ |
@@ -1300,8 +1292,6 @@ static void nodemgr_node_scan_one(struct host_info *hi, | |||
1300 | nodemgr_create_node(guid, csr, hi, nodeid, generation); | 1292 | nodemgr_create_node(guid, csr, hi, nodeid, generation); |
1301 | else | 1293 | else |
1302 | nodemgr_update_node(ne, csr, hi, nodeid, generation); | 1294 | nodemgr_update_node(ne, csr, hi, nodeid, generation); |
1303 | |||
1304 | return; | ||
1305 | } | 1295 | } |
1306 | 1296 | ||
1307 | 1297 | ||
@@ -1326,6 +1316,7 @@ static void nodemgr_node_scan(struct host_info *hi, int generation) | |||
1326 | } | 1316 | } |
1327 | 1317 | ||
1328 | 1318 | ||
1319 | /* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */ | ||
1329 | static void nodemgr_suspend_ne(struct node_entry *ne) | 1320 | static void nodemgr_suspend_ne(struct node_entry *ne) |
1330 | { | 1321 | { |
1331 | struct class_device *cdev; | 1322 | struct class_device *cdev; |
@@ -1361,6 +1352,7 @@ static void nodemgr_resume_ne(struct node_entry *ne) | |||
1361 | ne->in_limbo = 0; | 1352 | ne->in_limbo = 0; |
1362 | device_remove_file(&ne->device, &dev_attr_ne_in_limbo); | 1353 | device_remove_file(&ne->device, &dev_attr_ne_in_limbo); |
1363 | 1354 | ||
1355 | down_read(&nodemgr_ud_class.subsys.rwsem); | ||
1364 | down_read(&ne->device.bus->subsys.rwsem); | 1356 | down_read(&ne->device.bus->subsys.rwsem); |
1365 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { | 1357 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { |
1366 | ud = container_of(cdev, struct unit_directory, class_dev); | 1358 | ud = container_of(cdev, struct unit_directory, class_dev); |
@@ -1372,21 +1364,21 @@ static void nodemgr_resume_ne(struct node_entry *ne) | |||
1372 | ud->device.driver->resume(&ud->device); | 1364 | ud->device.driver->resume(&ud->device); |
1373 | } | 1365 | } |
1374 | up_read(&ne->device.bus->subsys.rwsem); | 1366 | up_read(&ne->device.bus->subsys.rwsem); |
1367 | up_read(&nodemgr_ud_class.subsys.rwsem); | ||
1375 | 1368 | ||
1376 | HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", | 1369 | HPSB_DEBUG("Node resumed: ID:BUS[" NODE_BUS_FMT "] GUID[%016Lx]", |
1377 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); | 1370 | NODE_BUS_ARGS(ne->host, ne->nodeid), (unsigned long long)ne->guid); |
1378 | } | 1371 | } |
1379 | 1372 | ||
1380 | 1373 | ||
1374 | /* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader. */ | ||
1381 | static void nodemgr_update_pdrv(struct node_entry *ne) | 1375 | static void nodemgr_update_pdrv(struct node_entry *ne) |
1382 | { | 1376 | { |
1383 | struct unit_directory *ud; | 1377 | struct unit_directory *ud; |
1384 | struct hpsb_protocol_driver *pdrv; | 1378 | struct hpsb_protocol_driver *pdrv; |
1385 | struct class *class = &nodemgr_ud_class; | ||
1386 | struct class_device *cdev; | 1379 | struct class_device *cdev; |
1387 | 1380 | ||
1388 | down_read(&class->subsys.rwsem); | 1381 | list_for_each_entry(cdev, &nodemgr_ud_class.children, node) { |
1389 | list_for_each_entry(cdev, &class->children, node) { | ||
1390 | ud = container_of(cdev, struct unit_directory, class_dev); | 1382 | ud = container_of(cdev, struct unit_directory, class_dev); |
1391 | if (ud->ne != ne || !ud->device.driver) | 1383 | if (ud->ne != ne || !ud->device.driver) |
1392 | continue; | 1384 | continue; |
@@ -1399,7 +1391,6 @@ static void nodemgr_update_pdrv(struct node_entry *ne) | |||
1399 | up_write(&ud->device.bus->subsys.rwsem); | 1391 | up_write(&ud->device.bus->subsys.rwsem); |
1400 | } | 1392 | } |
1401 | } | 1393 | } |
1402 | up_read(&class->subsys.rwsem); | ||
1403 | } | 1394 | } |
1404 | 1395 | ||
1405 | 1396 | ||
@@ -1430,6 +1421,8 @@ static void nodemgr_irm_write_bc(struct node_entry *ne, int generation) | |||
1430 | } | 1421 | } |
1431 | 1422 | ||
1432 | 1423 | ||
1424 | /* Caller needs to hold nodemgr_ud_class.subsys.rwsem as reader because the | ||
1425 | * calls to nodemgr_update_pdrv() and nodemgr_suspend_ne() here require it. */ | ||
1433 | static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation) | 1426 | static void nodemgr_probe_ne(struct host_info *hi, struct node_entry *ne, int generation) |
1434 | { | 1427 | { |
1435 | struct device *dev; | 1428 | struct device *dev; |
@@ -1492,9 +1485,8 @@ static void nodemgr_node_probe(struct host_info *hi, int generation) | |||
1492 | /* If we had a bus reset while we were scanning the bus, it is | 1485 | /* If we had a bus reset while we were scanning the bus, it is |
1493 | * possible that we did not probe all nodes. In that case, we | 1486 | * possible that we did not probe all nodes. In that case, we |
1494 | * skip the clean up for now, since we could remove nodes that | 1487 | * skip the clean up for now, since we could remove nodes that |
1495 | * were still on the bus. The bus reset increased hi->reset_sem, | 1488 | * were still on the bus. Another bus scan is pending which will |
1496 | * so there's a bus scan pending which will do the clean up | 1489 | * do the clean up eventually. |
1497 | * eventually. | ||
1498 | * | 1490 | * |
1499 | * Now let's tell the bus to rescan our devices. This may seem | 1491 | * Now let's tell the bus to rescan our devices. This may seem |
1500 | * like overhead, but the driver-model core will only scan a | 1492 | * like overhead, but the driver-model core will only scan a |
@@ -1622,41 +1614,37 @@ static int nodemgr_host_thread(void *__hi) | |||
1622 | { | 1614 | { |
1623 | struct host_info *hi = (struct host_info *)__hi; | 1615 | struct host_info *hi = (struct host_info *)__hi; |
1624 | struct hpsb_host *host = hi->host; | 1616 | struct hpsb_host *host = hi->host; |
1625 | int reset_cycles = 0; | 1617 | unsigned int g, generation = get_hpsb_generation(host) - 1; |
1626 | 1618 | int i, reset_cycles = 0; | |
1627 | /* No userlevel access needed */ | ||
1628 | daemonize(hi->daemon_name); | ||
1629 | 1619 | ||
1630 | /* Setup our device-model entries */ | 1620 | /* Setup our device-model entries */ |
1631 | nodemgr_create_host_dev_files(host); | 1621 | nodemgr_create_host_dev_files(host); |
1632 | 1622 | ||
1633 | /* Sit and wait for a signal to probe the nodes on the bus. This | 1623 | for (;;) { |
1634 | * happens when we get a bus reset. */ | 1624 | /* Sleep until next bus reset */ |
1635 | while (1) { | 1625 | set_current_state(TASK_INTERRUPTIBLE); |
1636 | unsigned int generation = 0; | 1626 | if (get_hpsb_generation(host) == generation) |
1637 | int i; | 1627 | schedule(); |
1628 | __set_current_state(TASK_RUNNING); | ||
1629 | |||
1630 | /* Thread may have been woken up to freeze or to exit */ | ||
1631 | if (try_to_freeze()) | ||
1632 | continue; | ||
1633 | if (kthread_should_stop()) | ||
1634 | goto exit; | ||
1638 | 1635 | ||
1639 | if (down_interruptible(&hi->reset_sem) || | 1636 | if (mutex_lock_interruptible(&nodemgr_serialize)) { |
1640 | down_interruptible(&nodemgr_serialize)) { | ||
1641 | if (try_to_freeze()) | 1637 | if (try_to_freeze()) |
1642 | continue; | 1638 | continue; |
1643 | printk("NodeMgr: received unexpected signal?!\n" ); | 1639 | goto exit; |
1644 | break; | ||
1645 | } | ||
1646 | |||
1647 | if (hi->kill_me) { | ||
1648 | up(&nodemgr_serialize); | ||
1649 | break; | ||
1650 | } | 1640 | } |
1651 | 1641 | ||
1652 | /* Pause for 1/4 second in 1/16 second intervals, | 1642 | /* Pause for 1/4 second in 1/16 second intervals, |
1653 | * to make sure things settle down. */ | 1643 | * to make sure things settle down. */ |
1644 | g = get_hpsb_generation(host); | ||
1654 | for (i = 0; i < 4 ; i++) { | 1645 | for (i = 0; i < 4 ; i++) { |
1655 | set_current_state(TASK_INTERRUPTIBLE); | 1646 | if (msleep_interruptible(63) || kthread_should_stop()) |
1656 | if (msleep_interruptible(63)) { | 1647 | goto unlock_exit; |
1657 | up(&nodemgr_serialize); | ||
1658 | goto caught_signal; | ||
1659 | } | ||
1660 | 1648 | ||
1661 | /* Now get the generation in which the node IDs we collect | 1649 |
1662 | * are valid. During the bus scan we will use this generation | 1650 | * are valid. During the bus scan we will use this generation |
@@ -1667,20 +1655,14 @@ static int nodemgr_host_thread(void *__hi) | |||
1667 | 1655 | ||
1668 | /* If we get a reset before we are done waiting, then | 1656 | /* If we get a reset before we are done waiting, then |
1669 | * start the waiting over again */ | 1657 | * start the waiting over again */ |
1670 | while (!down_trylock(&hi->reset_sem)) | 1658 | if (generation != g) |
1671 | i = 0; | 1659 | g = generation, i = 0; |
1672 | |||
1673 | /* Check the kill_me again */ | ||
1674 | if (hi->kill_me) { | ||
1675 | up(&nodemgr_serialize); | ||
1676 | goto caught_signal; | ||
1677 | } | ||
1678 | } | 1660 | } |
1679 | 1661 | ||
1680 | if (!nodemgr_check_irm_capability(host, reset_cycles) || | 1662 | if (!nodemgr_check_irm_capability(host, reset_cycles) || |
1681 | !nodemgr_do_irm_duties(host, reset_cycles)) { | 1663 | !nodemgr_do_irm_duties(host, reset_cycles)) { |
1682 | reset_cycles++; | 1664 | reset_cycles++; |
1683 | up(&nodemgr_serialize); | 1665 | mutex_unlock(&nodemgr_serialize); |
1684 | continue; | 1666 | continue; |
1685 | } | 1667 | } |
1686 | reset_cycles = 0; | 1668 | reset_cycles = 0; |
@@ -1698,13 +1680,13 @@ static int nodemgr_host_thread(void *__hi) | |||
1698 | /* Update some of our sysfs symlinks */ | 1680 | /* Update some of our sysfs symlinks */ |
1699 | nodemgr_update_host_dev_links(host); | 1681 | nodemgr_update_host_dev_links(host); |
1700 | 1682 | ||
1701 | up(&nodemgr_serialize); | 1683 | mutex_unlock(&nodemgr_serialize); |
1702 | } | 1684 | } |
1703 | 1685 | unlock_exit: | |
1704 | caught_signal: | 1686 | mutex_unlock(&nodemgr_serialize); |
1687 | exit: | ||
1705 | HPSB_VERBOSE("NodeMgr: Exiting thread"); | 1688 | HPSB_VERBOSE("NodeMgr: Exiting thread"); |
1706 | 1689 | return 0; | |
1707 | complete_and_exit(&hi->exited, 0); | ||
1708 | } | 1690 | } |
1709 | 1691 | ||
1710 | int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)) | 1692 | int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)) |
@@ -1764,41 +1746,27 @@ static void nodemgr_add_host(struct hpsb_host *host) | |||
1764 | struct host_info *hi; | 1746 | struct host_info *hi; |
1765 | 1747 | ||
1766 | hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi)); | 1748 | hi = hpsb_create_hostinfo(&nodemgr_highlevel, host, sizeof(*hi)); |
1767 | |||
1768 | if (!hi) { | 1749 | if (!hi) { |
1769 | HPSB_ERR ("NodeMgr: out of memory in add host"); | 1750 | HPSB_ERR("NodeMgr: out of memory in add host"); |
1770 | return; | 1751 | return; |
1771 | } | 1752 | } |
1772 | |||
1773 | hi->host = host; | 1753 | hi->host = host; |
1774 | init_completion(&hi->exited); | 1754 | hi->thread = kthread_run(nodemgr_host_thread, hi, "knodemgrd_%d", |
1775 | sema_init(&hi->reset_sem, 0); | 1755 | host->id); |
1776 | 1756 | if (IS_ERR(hi->thread)) { | |
1777 | sprintf(hi->daemon_name, "knodemgrd_%d", host->id); | 1757 | HPSB_ERR("NodeMgr: cannot start thread for host %d", host->id); |
1778 | |||
1779 | hi->pid = kernel_thread(nodemgr_host_thread, hi, CLONE_KERNEL); | ||
1780 | |||
1781 | if (hi->pid < 0) { | ||
1782 | HPSB_ERR ("NodeMgr: failed to start %s thread for %s", | ||
1783 | hi->daemon_name, host->driver->name); | ||
1784 | hpsb_destroy_hostinfo(&nodemgr_highlevel, host); | 1758 | hpsb_destroy_hostinfo(&nodemgr_highlevel, host); |
1785 | return; | ||
1786 | } | 1759 | } |
1787 | |||
1788 | return; | ||
1789 | } | 1760 | } |
1790 | 1761 | ||
1791 | static void nodemgr_host_reset(struct hpsb_host *host) | 1762 | static void nodemgr_host_reset(struct hpsb_host *host) |
1792 | { | 1763 | { |
1793 | struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); | 1764 | struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); |
1794 | 1765 | ||
1795 | if (hi != NULL) { | 1766 | if (hi) { |
1796 | HPSB_VERBOSE("NodeMgr: Processing host reset for %s", hi->daemon_name); | 1767 | HPSB_VERBOSE("NodeMgr: Processing reset for host %d", host->id); |
1797 | up(&hi->reset_sem); | 1768 | wake_up_process(hi->thread); |
1798 | } else | 1769 | } |
1799 | HPSB_ERR ("NodeMgr: could not process reset of unused host"); | ||
1800 | |||
1801 | return; | ||
1802 | } | 1770 | } |
1803 | 1771 | ||
1804 | static void nodemgr_remove_host(struct hpsb_host *host) | 1772 | static void nodemgr_remove_host(struct hpsb_host *host) |
@@ -1806,18 +1774,9 @@ static void nodemgr_remove_host(struct hpsb_host *host) | |||
1806 | struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); | 1774 | struct host_info *hi = hpsb_get_hostinfo(&nodemgr_highlevel, host); |
1807 | 1775 | ||
1808 | if (hi) { | 1776 | if (hi) { |
1809 | if (hi->pid >= 0) { | 1777 | kthread_stop(hi->thread); |
1810 | hi->kill_me = 1; | 1778 | nodemgr_remove_host_dev(&host->device); |
1811 | mb(); | 1779 | } |
1812 | up(&hi->reset_sem); | ||
1813 | wait_for_completion(&hi->exited); | ||
1814 | nodemgr_remove_host_dev(&host->device); | ||
1815 | } | ||
1816 | } else | ||
1817 | HPSB_ERR("NodeMgr: host %s does not exist, cannot remove", | ||
1818 | host->driver->name); | ||
1819 | |||
1820 | return; | ||
1821 | } | 1780 | } |
1822 | 1781 | ||
1823 | static struct hpsb_highlevel nodemgr_highlevel = { | 1782 | static struct hpsb_highlevel nodemgr_highlevel = { |
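The nodemgr.c changes above reduce to one recurring conversion: the hand-rolled kernel_thread()/daemonize() daemon with its reset_sem, kill_me flag and exit completion is replaced by the kthread API plus wake_up_process(). Below is a minimal sketch of that pattern; every demo_* name and the generation counter are illustrative, not taken from the driver.

/*
 * Sketch only: a kthread that sleeps until woken, and handles freezing and
 * clean shutdown.  try_to_freeze() comes from <linux/sched.h> in kernels of
 * this era (later kernels want <linux/freezer.h>).
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <asm/atomic.h>

struct demo_ctx {
        struct task_struct *thread;
        atomic_t generation;            /* bumped by the event source */
};

static int demo_thread(void *data)
{
        struct demo_ctx *ctx = data;
        int seen = atomic_read(&ctx->generation);

        for (;;) {
                /* Sleep until demo_event() calls wake_up_process(). */
                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&ctx->generation) == seen)
                        schedule();
                __set_current_state(TASK_RUNNING);

                /* Woken up to freeze or to exit? */
                if (try_to_freeze())
                        continue;
                if (kthread_should_stop())
                        break;

                seen = atomic_read(&ctx->generation);
                /* ... process the new event here ... */
        }
        return 0;
}

static int demo_start(struct demo_ctx *ctx)
{
        atomic_set(&ctx->generation, 0);
        ctx->thread = kthread_run(demo_thread, ctx, "demo_thread");
        return IS_ERR(ctx->thread) ? PTR_ERR(ctx->thread) : 0;
}

static void demo_event(struct demo_ctx *ctx)    /* stands in for up(&reset_sem) */
{
        atomic_inc(&ctx->generation);
        wake_up_process(ctx->thread);
}

static void demo_stop(struct demo_ctx *ctx)     /* stands in for kill_me + completion */
{
        kthread_stop(ctx->thread);
}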
diff --git a/drivers/ieee1394/nodemgr.h b/drivers/ieee1394/nodemgr.h index 0b26616e16c3..0e1e7d930783 100644 --- a/drivers/ieee1394/nodemgr.h +++ b/drivers/ieee1394/nodemgr.h | |||
@@ -21,9 +21,15 @@ | |||
21 | #define _IEEE1394_NODEMGR_H | 21 | #define _IEEE1394_NODEMGR_H |
22 | 22 | ||
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include "csr1212.h" | 24 | #include <asm/types.h> |
25 | |||
25 | #include "ieee1394_core.h" | 26 | #include "ieee1394_core.h" |
26 | #include "ieee1394_hotplug.h" | 27 | #include "ieee1394_types.h" |
28 | |||
29 | struct csr1212_csr; | ||
30 | struct csr1212_keyval; | ||
31 | struct hpsb_host; | ||
32 | struct ieee1394_device_id; | ||
27 | 33 | ||
28 | /* '1' '3' '9' '4' in ASCII */ | 34 | /* '1' '3' '9' '4' in ASCII */ |
29 | #define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934) | 35 | #define IEEE1394_BUSID_MAGIC __constant_cpu_to_be32(0x31333934) |
@@ -44,7 +50,6 @@ struct bus_options { | |||
44 | u16 max_rec; /* Maximum packet size node can receive */ | 50 | u16 max_rec; /* Maximum packet size node can receive */ |
45 | }; | 51 | }; |
46 | 52 | ||
47 | |||
48 | #define UNIT_DIRECTORY_VENDOR_ID 0x01 | 53 | #define UNIT_DIRECTORY_VENDOR_ID 0x01 |
49 | #define UNIT_DIRECTORY_MODEL_ID 0x02 | 54 | #define UNIT_DIRECTORY_MODEL_ID 0x02 |
50 | #define UNIT_DIRECTORY_SPECIFIER_ID 0x04 | 55 | #define UNIT_DIRECTORY_SPECIFIER_ID 0x04 |
@@ -59,8 +64,8 @@ struct bus_options { | |||
59 | * unit directory for each of these protocols. | 64 | * unit directory for each of these protocols. |
60 | */ | 65 | */ |
61 | struct unit_directory { | 66 | struct unit_directory { |
62 | struct node_entry *ne; /* The node which this directory belongs to */ | 67 | struct node_entry *ne; /* The node which this directory belongs to */ |
63 | octlet_t address; /* Address of the unit directory on the node */ | 68 | octlet_t address; /* Address of the unit directory on the node */ |
64 | u8 flags; /* Indicates which entries were read */ | 69 | u8 flags; /* Indicates which entries were read */ |
65 | 70 | ||
66 | quadlet_t vendor_id; | 71 | quadlet_t vendor_id; |
@@ -79,11 +84,10 @@ struct unit_directory { | |||
79 | int length; /* Number of quadlets */ | 84 | int length; /* Number of quadlets */ |
80 | 85 | ||
81 | struct device device; | 86 | struct device device; |
82 | |||
83 | struct class_device class_dev; | 87 | struct class_device class_dev; |
84 | 88 | ||
85 | struct csr1212_keyval *ud_kv; | 89 | struct csr1212_keyval *ud_kv; |
86 | u32 lun; /* logical unit number immediate value */ | 90 | u32 lun; /* logical unit number immediate value */ |
87 | }; | 91 | }; |
88 | 92 | ||
89 | struct node_entry { | 93 | struct node_entry { |
@@ -103,10 +107,8 @@ struct node_entry { | |||
103 | const char *vendor_oui; | 107 | const char *vendor_oui; |
104 | 108 | ||
105 | u32 capabilities; | 109 | u32 capabilities; |
106 | struct hpsb_tlabel_pool *tpool; | ||
107 | 110 | ||
108 | struct device device; | 111 | struct device device; |
109 | |||
110 | struct class_device class_dev; | 112 | struct class_device class_dev; |
111 | 113 | ||
112 | /* Means this node is not attached anymore */ | 114 | /* Means this node is not attached anymore */ |
@@ -153,8 +155,8 @@ static inline int hpsb_node_entry_valid(struct node_entry *ne) | |||
153 | /* | 155 | /* |
154 | * This will fill in the given, pre-initialised hpsb_packet with the current | 156 | * This will fill in the given, pre-initialised hpsb_packet with the current |
155 | * information from the node entry (host, node ID, generation number). It will | 157 | * information from the node entry (host, node ID, generation number). It will |
156 | * return false if the node owning the GUID is not accessible (and not modify the | 158 | * return false if the node owning the GUID is not accessible (and not modify |
157 | * hpsb_packet) and return true otherwise. | 159 | * the hpsb_packet) and return true otherwise. |
158 | * | 160 | * |
159 | * Note that packet sending may still fail in hpsb_send_packet if a bus reset | 161 | * Note that packet sending may still fail in hpsb_send_packet if a bus reset |
160 | * happens while you are trying to set up the packet (due to obsolete generation | 162 | * happens while you are trying to set up the packet (due to obsolete generation |
@@ -170,16 +172,13 @@ int hpsb_node_write(struct node_entry *ne, u64 addr, | |||
170 | int hpsb_node_lock(struct node_entry *ne, u64 addr, | 172 | int hpsb_node_lock(struct node_entry *ne, u64 addr, |
171 | int extcode, quadlet_t *data, quadlet_t arg); | 173 | int extcode, quadlet_t *data, quadlet_t arg); |
172 | 174 | ||
173 | |||
174 | /* Iterate the hosts, calling a given function with supplied data for each | 175 | /* Iterate the hosts, calling a given function with supplied data for each |
175 | * host. */ | 176 | * host. */ |
176 | int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)); | 177 | int nodemgr_for_each_host(void *__data, int (*cb)(struct hpsb_host *, void *)); |
177 | 178 | ||
178 | |||
179 | int init_ieee1394_nodemgr(void); | 179 | int init_ieee1394_nodemgr(void); |
180 | void cleanup_ieee1394_nodemgr(void); | 180 | void cleanup_ieee1394_nodemgr(void); |
181 | 181 | ||
182 | |||
183 | /* The template for a host device */ | 182 | /* The template for a host device */ |
184 | extern struct device nodemgr_dev_template_host; | 183 | extern struct device nodemgr_dev_template_host; |
185 | 184 | ||
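The nodemgr.h hunk above trades a heavy #include for forward declarations, which works because the header only ever refers to those types through pointers. A tiny hypothetical header showing the same idea (example_api.h and example_parse_rom() are made-up names):

/* example_api.h -- illustration only */
#ifndef EXAMPLE_API_H
#define EXAMPLE_API_H

struct csr1212_csr;     /* opaque here; callers that dereference it include csr1212.h */

int example_parse_rom(struct csr1212_csr *csr);

#endif /* EXAMPLE_API_H */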
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 448df2773377..8fd0030475ba 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c | |||
@@ -136,7 +136,7 @@ | |||
136 | #define DBGMSG(fmt, args...) \ | 136 | #define DBGMSG(fmt, args...) \ |
137 | printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) | 137 | printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) |
138 | #else | 138 | #else |
139 | #define DBGMSG(fmt, args...) | 139 | #define DBGMSG(fmt, args...) do {} while (0) |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG | 142 | #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG |
@@ -148,8 +148,8 @@ printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host-> | |||
148 | --global_outstanding_dmas, ## args) | 148 | --global_outstanding_dmas, ## args) |
149 | static int global_outstanding_dmas = 0; | 149 | static int global_outstanding_dmas = 0; |
150 | #else | 150 | #else |
151 | #define OHCI_DMA_ALLOC(fmt, args...) | 151 | #define OHCI_DMA_ALLOC(fmt, args...) do {} while (0) |
152 | #define OHCI_DMA_FREE(fmt, args...) | 152 | #define OHCI_DMA_FREE(fmt, args...) do {} while (0) |
153 | #endif | 153 | #endif |
154 | 154 | ||
155 | /* print general (card independent) information */ | 155 | /* print general (card independent) information */ |
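Several hunks in this patch give formerly empty debug macros a do {} while (0) body. The reason is syntactic, as the invented example below illustrates: the expansion remains a single statement that still requires its trailing semicolon, so it drops cleanly into unbraced if/else without leaving an empty statement behind.

#include <linux/kernel.h>

#ifdef DEMO_DEBUG
#define DEMO_DBG(fmt, args...) \
        printk(KERN_DEBUG "demo: " fmt "\n", ## args)
#else
#define DEMO_DBG(fmt, args...)  do {} while (0)
#endif

static void demo_recover(void)
{
        /* ... */
}

static void demo_handle(int err)
{
        if (err)
                DEMO_DBG("error %d", err);      /* one well-formed statement */
        else
                demo_recover();
}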
@@ -181,36 +181,35 @@ static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d, | |||
181 | static void ohci1394_pci_remove(struct pci_dev *pdev); | 181 | static void ohci1394_pci_remove(struct pci_dev *pdev); |
182 | 182 | ||
183 | #ifndef __LITTLE_ENDIAN | 183 | #ifndef __LITTLE_ENDIAN |
184 | static unsigned hdr_sizes[] = | 184 | const static size_t hdr_sizes[] = { |
185 | { | ||
186 | 3, /* TCODE_WRITEQ */ | 185 | 3, /* TCODE_WRITEQ */ |
187 | 4, /* TCODE_WRITEB */ | 186 | 4, /* TCODE_WRITEB */ |
188 | 3, /* TCODE_WRITE_RESPONSE */ | 187 | 3, /* TCODE_WRITE_RESPONSE */ |
189 | 0, /* ??? */ | 188 | 0, /* reserved */ |
190 | 3, /* TCODE_READQ */ | 189 | 3, /* TCODE_READQ */ |
191 | 4, /* TCODE_READB */ | 190 | 4, /* TCODE_READB */ |
192 | 3, /* TCODE_READQ_RESPONSE */ | 191 | 3, /* TCODE_READQ_RESPONSE */ |
193 | 4, /* TCODE_READB_RESPONSE */ | 192 | 4, /* TCODE_READB_RESPONSE */ |
194 | 1, /* TCODE_CYCLE_START (???) */ | 193 | 1, /* TCODE_CYCLE_START */ |
195 | 4, /* TCODE_LOCK_REQUEST */ | 194 | 4, /* TCODE_LOCK_REQUEST */ |
196 | 2, /* TCODE_ISO_DATA */ | 195 | 2, /* TCODE_ISO_DATA */ |
197 | 4, /* TCODE_LOCK_RESPONSE */ | 196 | 4, /* TCODE_LOCK_RESPONSE */ |
197 | /* rest is reserved or link-internal */ | ||
198 | }; | 198 | }; |
199 | 199 | ||
200 | /* Swap headers */ | 200 | static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode) |
201 | static inline void packet_swab(quadlet_t *data, int tcode) | ||
202 | { | 201 | { |
203 | size_t size = hdr_sizes[tcode]; | 202 | size_t size; |
204 | 203 | ||
205 | if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0) | 204 | if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes))) |
206 | return; | 205 | return; |
207 | 206 | ||
207 | size = hdr_sizes[tcode]; | ||
208 | while (size--) | 208 | while (size--) |
209 | data[size] = swab32(data[size]); | 209 | data[size] = le32_to_cpu(data[size]); |
210 | } | 210 | } |
211 | #else | 211 | #else |
212 | /* Don't waste cycles on same sex byte swaps */ | 212 | #define header_le32_to_cpu(w,x) do {} while (0) |
213 | #define packet_swab(w,x) | ||
214 | #endif /* !LITTLE_ENDIAN */ | 213 | #endif /* !LITTLE_ENDIAN */ |
215 | 214 | ||
216 | /*********************************** | 215 | /*********************************** |
@@ -701,7 +700,7 @@ static void insert_packet(struct ti_ohci *ohci, | |||
701 | d->prg_cpu[idx]->data[2] = packet->header[2]; | 700 | d->prg_cpu[idx]->data[2] = packet->header[2]; |
702 | d->prg_cpu[idx]->data[3] = packet->header[3]; | 701 | d->prg_cpu[idx]->data[3] = packet->header[3]; |
703 | } | 702 | } |
704 | packet_swab(d->prg_cpu[idx]->data, packet->tcode); | 703 | header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode); |
705 | } | 704 | } |
706 | 705 | ||
707 | if (packet->data_size) { /* block transmit */ | 706 | if (packet->data_size) { /* block transmit */ |
@@ -777,7 +776,7 @@ static void insert_packet(struct ti_ohci *ohci, | |||
777 | d->prg_cpu[idx]->data[0] = packet->speed_code<<16 | | 776 | d->prg_cpu[idx]->data[0] = packet->speed_code<<16 | |
778 | (packet->header[0] & 0xFFFF); | 777 | (packet->header[0] & 0xFFFF); |
779 | d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000; | 778 | d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000; |
780 | packet_swab(d->prg_cpu[idx]->data, packet->tcode); | 779 | header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode); |
781 | 780 | ||
782 | d->prg_cpu[idx]->begin.control = | 781 | d->prg_cpu[idx]->begin.control = |
783 | cpu_to_le32(DMA_CTL_OUTPUT_MORE | | 782 | cpu_to_le32(DMA_CTL_OUTPUT_MORE | |
@@ -2598,8 +2597,9 @@ static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0, | |||
2598 | * Determine the length of a packet in the buffer | 2597 | * Determine the length of a packet in the buffer |
2599 | * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca> | 2598 | * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca> |
2600 | */ | 2599 | */ |
2601 | static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr, | 2600 | static inline int packet_length(struct dma_rcv_ctx *d, int idx, |
2602 | int offset, unsigned char tcode, int noswap) | 2601 | quadlet_t *buf_ptr, int offset, |
2602 | unsigned char tcode, int noswap) | ||
2603 | { | 2603 | { |
2604 | int length = -1; | 2604 | int length = -1; |
2605 | 2605 | ||
@@ -2730,7 +2730,7 @@ static void dma_rcv_tasklet (unsigned long data) | |||
2730 | * bus reset. We always ignore it. */ | 2730 | * bus reset. We always ignore it. */ |
2731 | if (tcode != OHCI1394_TCODE_PHY) { | 2731 | if (tcode != OHCI1394_TCODE_PHY) { |
2732 | if (!ohci->no_swap_incoming) | 2732 | if (!ohci->no_swap_incoming) |
2733 | packet_swab(d->spb, tcode); | 2733 | header_le32_to_cpu(d->spb, tcode); |
2734 | DBGMSG("Packet received from node" | 2734 | DBGMSG("Packet received from node" |
2735 | " %d ack=0x%02X spd=%d tcode=0x%X" | 2735 | " %d ack=0x%02X spd=%d tcode=0x%X" |
2736 | " length=%d ctx=%d tlabel=%d", | 2736 | " length=%d ctx=%d tlabel=%d", |
@@ -2738,7 +2738,7 @@ static void dma_rcv_tasklet (unsigned long data) | |||
2738 | (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f, | 2738 | (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f, |
2739 | (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3, | 2739 | (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3, |
2740 | tcode, length, d->ctx, | 2740 | tcode, length, d->ctx, |
2741 | (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f); | 2741 | (d->spb[0]>>10)&0x3f); |
2742 | 2742 | ||
2743 | ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f) | 2743 | ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f) |
2744 | == 0x11) ? 1 : 0; | 2744 | == 0x11) ? 1 : 0; |
@@ -3529,9 +3529,10 @@ static void ohci1394_pci_remove(struct pci_dev *pdev) | |||
3529 | put_device(dev); | 3529 | put_device(dev); |
3530 | } | 3530 | } |
3531 | 3531 | ||
3532 | 3532 | #ifdef CONFIG_PM | |
3533 | static int ohci1394_pci_resume (struct pci_dev *pdev) | 3533 | static int ohci1394_pci_resume (struct pci_dev *pdev) |
3534 | { | 3534 | { |
3535 | /* PowerMac resume code comes first */ | ||
3535 | #ifdef CONFIG_PPC_PMAC | 3536 | #ifdef CONFIG_PPC_PMAC |
3536 | if (machine_is(powermac)) { | 3537 | if (machine_is(powermac)) { |
3537 | struct device_node *of_node; | 3538 | struct device_node *of_node; |
@@ -3543,17 +3544,23 @@ static int ohci1394_pci_resume (struct pci_dev *pdev) | |||
3543 | } | 3544 | } |
3544 | #endif /* CONFIG_PPC_PMAC */ | 3545 | #endif /* CONFIG_PPC_PMAC */ |
3545 | 3546 | ||
3547 | pci_set_power_state(pdev, PCI_D0); | ||
3546 | pci_restore_state(pdev); | 3548 | pci_restore_state(pdev); |
3547 | pci_enable_device(pdev); | 3549 | return pci_enable_device(pdev); |
3548 | |||
3549 | return 0; | ||
3550 | } | 3550 | } |
3551 | 3551 | ||
3552 | |||
3553 | static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) | 3552 | static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) |
3554 | { | 3553 | { |
3555 | pci_save_state(pdev); | 3554 | int err; |
3555 | |||
3556 | err = pci_save_state(pdev); | ||
3557 | if (err) | ||
3558 | goto out; | ||
3559 | err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); | ||
3560 | if (err) | ||
3561 | goto out; | ||
3556 | 3562 | ||
3563 | /* PowerMac suspend code comes last */ | ||
3557 | #ifdef CONFIG_PPC_PMAC | 3564 | #ifdef CONFIG_PPC_PMAC |
3558 | if (machine_is(powermac)) { | 3565 | if (machine_is(powermac)) { |
3559 | struct device_node *of_node; | 3566 | struct device_node *of_node; |
@@ -3563,11 +3570,11 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state) | |||
3563 | if (of_node) | 3570 | if (of_node) |
3564 | pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0); | 3571 | pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0); |
3565 | } | 3572 | } |
3566 | #endif | 3573 | #endif /* CONFIG_PPC_PMAC */ |
3567 | 3574 | out: | |
3568 | return 0; | 3575 | return err; |
3569 | } | 3576 | } |
3570 | 3577 | #endif /* CONFIG_PM */ | |
3571 | 3578 | ||
3572 | #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10) | 3579 | #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10) |
3573 | 3580 | ||
@@ -3590,8 +3597,10 @@ static struct pci_driver ohci1394_pci_driver = { | |||
3590 | .id_table = ohci1394_pci_tbl, | 3597 | .id_table = ohci1394_pci_tbl, |
3591 | .probe = ohci1394_pci_probe, | 3598 | .probe = ohci1394_pci_probe, |
3592 | .remove = ohci1394_pci_remove, | 3599 | .remove = ohci1394_pci_remove, |
3600 | #ifdef CONFIG_PM | ||
3593 | .resume = ohci1394_pci_resume, | 3601 | .resume = ohci1394_pci_resume, |
3594 | .suspend = ohci1394_pci_suspend, | 3602 | .suspend = ohci1394_pci_suspend, |
3603 | #endif | ||
3595 | }; | 3604 | }; |
3596 | 3605 | ||
3597 | /*********************************** | 3606 | /*********************************** |
@@ -3718,5 +3727,7 @@ static int __init ohci1394_init(void) | |||
3718 | return pci_register_driver(&ohci1394_pci_driver); | 3727 | return pci_register_driver(&ohci1394_pci_driver); |
3719 | } | 3728 | } |
3720 | 3729 | ||
3721 | module_init(ohci1394_init); | 3730 | /* Register before most other device drivers. |
3731 | * Useful for remote debugging via physical DMA, e.g. using firescope. */ | ||
3732 | fs_initcall(ohci1394_init); | ||
3722 | module_exit(ohci1394_cleanup); | 3733 | module_exit(ohci1394_cleanup); |
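The power-management hunks above wrap the PCI suspend/resume hooks in CONFIG_PM and add the save-state/set-power-state and set-power-state/restore-state sequences. A hedged sketch of that hook shape, with placeholder demo_* names rather than the ohci1394 ones:

#include <linux/pci.h>

#ifdef CONFIG_PM
static int demo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        int err;

        err = pci_save_state(pdev);             /* keep config space for resume */
        if (err)
                return err;
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int demo_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);      /* back to full power first */
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}
#endif /* CONFIG_PM */

static struct pci_driver demo_pci_driver = {
        .name           = "demo",
        /* .id_table, .probe and .remove omitted in this sketch */
#ifdef CONFIG_PM
        .suspend        = demo_pci_suspend,
        .resume         = demo_pci_resume,
#endif
};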
diff --git a/drivers/ieee1394/raw1394-private.h b/drivers/ieee1394/raw1394-private.h index c93587be9cab..c7731d1bcd89 100644 --- a/drivers/ieee1394/raw1394-private.h +++ b/drivers/ieee1394/raw1394-private.h | |||
@@ -29,9 +29,8 @@ struct file_info { | |||
29 | 29 | ||
30 | struct list_head req_pending; | 30 | struct list_head req_pending; |
31 | struct list_head req_complete; | 31 | struct list_head req_complete; |
32 | struct semaphore complete_sem; | ||
33 | spinlock_t reqlists_lock; | 32 | spinlock_t reqlists_lock; |
34 | wait_queue_head_t poll_wait_complete; | 33 | wait_queue_head_t wait_complete; |
35 | 34 | ||
36 | struct list_head addr_list; | 35 | struct list_head addr_list; |
37 | 36 | ||
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index 571ea68c0cf2..47e667593244 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c | |||
@@ -44,14 +44,15 @@ | |||
44 | #include <linux/compat.h> | 44 | #include <linux/compat.h> |
45 | 45 | ||
46 | #include "csr1212.h" | 46 | #include "csr1212.h" |
47 | #include "highlevel.h" | ||
48 | #include "hosts.h" | ||
47 | #include "ieee1394.h" | 49 | #include "ieee1394.h" |
48 | #include "ieee1394_types.h" | ||
49 | #include "ieee1394_core.h" | 50 | #include "ieee1394_core.h" |
50 | #include "nodemgr.h" | 51 | #include "ieee1394_hotplug.h" |
51 | #include "hosts.h" | ||
52 | #include "highlevel.h" | ||
53 | #include "iso.h" | ||
54 | #include "ieee1394_transactions.h" | 52 | #include "ieee1394_transactions.h" |
53 | #include "ieee1394_types.h" | ||
54 | #include "iso.h" | ||
55 | #include "nodemgr.h" | ||
55 | #include "raw1394.h" | 56 | #include "raw1394.h" |
56 | #include "raw1394-private.h" | 57 | #include "raw1394-private.h" |
57 | 58 | ||
@@ -66,7 +67,7 @@ | |||
66 | #define DBGMSG(fmt, args...) \ | 67 | #define DBGMSG(fmt, args...) \ |
67 | printk(KERN_INFO "raw1394:" fmt "\n" , ## args) | 68 | printk(KERN_INFO "raw1394:" fmt "\n" , ## args) |
68 | #else | 69 | #else |
69 | #define DBGMSG(fmt, args...) | 70 | #define DBGMSG(fmt, args...) do {} while (0) |
70 | #endif | 71 | #endif |
71 | 72 | ||
72 | static LIST_HEAD(host_info_list); | 73 | static LIST_HEAD(host_info_list); |
@@ -132,10 +133,9 @@ static void free_pending_request(struct pending_request *req) | |||
132 | static void __queue_complete_req(struct pending_request *req) | 133 | static void __queue_complete_req(struct pending_request *req) |
133 | { | 134 | { |
134 | struct file_info *fi = req->file_info; | 135 | struct file_info *fi = req->file_info; |
135 | list_move_tail(&req->list, &fi->req_complete); | ||
136 | 136 | ||
137 | up(&fi->complete_sem); | 137 | list_move_tail(&req->list, &fi->req_complete); |
138 | wake_up_interruptible(&fi->poll_wait_complete); | 138 | wake_up(&fi->wait_complete); |
139 | } | 139 | } |
140 | 140 | ||
141 | static void queue_complete_req(struct pending_request *req) | 141 | static void queue_complete_req(struct pending_request *req) |
@@ -463,13 +463,36 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r) | |||
463 | 463 | ||
464 | #endif | 464 | #endif |
465 | 465 | ||
466 | /* get next completed request (caller must hold fi->reqlists_lock) */ | ||
467 | static inline struct pending_request *__next_complete_req(struct file_info *fi) | ||
468 | { | ||
469 | struct list_head *lh; | ||
470 | struct pending_request *req = NULL; | ||
471 | |||
472 | if (!list_empty(&fi->req_complete)) { | ||
473 | lh = fi->req_complete.next; | ||
474 | list_del(lh); | ||
475 | req = list_entry(lh, struct pending_request, list); | ||
476 | } | ||
477 | return req; | ||
478 | } | ||
479 | |||
480 | /* atomically get next completed request */ | ||
481 | static struct pending_request *next_complete_req(struct file_info *fi) | ||
482 | { | ||
483 | unsigned long flags; | ||
484 | struct pending_request *req; | ||
485 | |||
486 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
487 | req = __next_complete_req(fi); | ||
488 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
489 | return req; | ||
490 | } | ||
466 | 491 | ||
467 | static ssize_t raw1394_read(struct file *file, char __user * buffer, | 492 | static ssize_t raw1394_read(struct file *file, char __user * buffer, |
468 | size_t count, loff_t * offset_is_ignored) | 493 | size_t count, loff_t * offset_is_ignored) |
469 | { | 494 | { |
470 | unsigned long flags; | ||
471 | struct file_info *fi = (struct file_info *)file->private_data; | 495 | struct file_info *fi = (struct file_info *)file->private_data; |
472 | struct list_head *lh; | ||
473 | struct pending_request *req; | 496 | struct pending_request *req; |
474 | ssize_t ret; | 497 | ssize_t ret; |
475 | 498 | ||
@@ -487,22 +510,21 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer, | |||
487 | } | 510 | } |
488 | 511 | ||
489 | if (file->f_flags & O_NONBLOCK) { | 512 | if (file->f_flags & O_NONBLOCK) { |
490 | if (down_trylock(&fi->complete_sem)) { | 513 | if (!(req = next_complete_req(fi))) |
491 | return -EAGAIN; | 514 | return -EAGAIN; |
492 | } | ||
493 | } else { | 515 | } else { |
494 | if (down_interruptible(&fi->complete_sem)) { | 516 | /* |
517 | * NB: We call the macro wait_event_interruptible() with a | ||
518 | * condition argument with side effect. This is only possible | ||
519 | * because the side effect does not occur until the condition | ||
520 | * became true, and wait_event_interruptible() won't evaluate | ||
521 | * the condition again after that. | ||
522 | */ | ||
523 | if (wait_event_interruptible(fi->wait_complete, | ||
524 | (req = next_complete_req(fi)))) | ||
495 | return -ERESTARTSYS; | 525 | return -ERESTARTSYS; |
496 | } | ||
497 | } | 526 | } |
498 | 527 | ||
499 | spin_lock_irqsave(&fi->reqlists_lock, flags); | ||
500 | lh = fi->req_complete.next; | ||
501 | list_del(lh); | ||
502 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | ||
503 | |||
504 | req = list_entry(lh, struct pending_request, list); | ||
505 | |||
506 | if (req->req.length) { | 528 | if (req->req.length) { |
507 | if (copy_to_user(int2ptr(req->req.recvb), req->data, | 529 | if (copy_to_user(int2ptr(req->req.recvb), req->data, |
508 | req->req.length)) { | 530 | req->req.length)) { |
@@ -2744,7 +2766,7 @@ static unsigned int raw1394_poll(struct file *file, poll_table * pt) | |||
2744 | unsigned int mask = POLLOUT | POLLWRNORM; | 2766 | unsigned int mask = POLLOUT | POLLWRNORM; |
2745 | unsigned long flags; | 2767 | unsigned long flags; |
2746 | 2768 | ||
2747 | poll_wait(file, &fi->poll_wait_complete, pt); | 2769 | poll_wait(file, &fi->wait_complete, pt); |
2748 | 2770 | ||
2749 | spin_lock_irqsave(&fi->reqlists_lock, flags); | 2771 | spin_lock_irqsave(&fi->reqlists_lock, flags); |
2750 | if (!list_empty(&fi->req_complete)) { | 2772 | if (!list_empty(&fi->req_complete)) { |
@@ -2769,9 +2791,8 @@ static int raw1394_open(struct inode *inode, struct file *file) | |||
2769 | fi->state = opened; | 2791 | fi->state = opened; |
2770 | INIT_LIST_HEAD(&fi->req_pending); | 2792 | INIT_LIST_HEAD(&fi->req_pending); |
2771 | INIT_LIST_HEAD(&fi->req_complete); | 2793 | INIT_LIST_HEAD(&fi->req_complete); |
2772 | sema_init(&fi->complete_sem, 0); | ||
2773 | spin_lock_init(&fi->reqlists_lock); | 2794 | spin_lock_init(&fi->reqlists_lock); |
2774 | init_waitqueue_head(&fi->poll_wait_complete); | 2795 | init_waitqueue_head(&fi->wait_complete); |
2775 | INIT_LIST_HEAD(&fi->addr_list); | 2796 | INIT_LIST_HEAD(&fi->addr_list); |
2776 | 2797 | ||
2777 | file->private_data = fi; | 2798 | file->private_data = fi; |
@@ -2784,7 +2805,7 @@ static int raw1394_release(struct inode *inode, struct file *file) | |||
2784 | struct file_info *fi = file->private_data; | 2805 | struct file_info *fi = file->private_data; |
2785 | struct list_head *lh; | 2806 | struct list_head *lh; |
2786 | struct pending_request *req; | 2807 | struct pending_request *req; |
2787 | int done = 0, i, fail = 0; | 2808 | int i, fail; |
2788 | int retval = 0; | 2809 | int retval = 0; |
2789 | struct list_head *entry; | 2810 | struct list_head *entry; |
2790 | struct arm_addr *addr = NULL; | 2811 | struct arm_addr *addr = NULL; |
@@ -2864,25 +2885,28 @@ static int raw1394_release(struct inode *inode, struct file *file) | |||
2864 | "error(s) occurred \n"); | 2885 | "error(s) occurred \n"); |
2865 | } | 2886 | } |
2866 | 2887 | ||
2867 | while (!done) { | 2888 | for (;;) { |
2889 | /* This locked section guarantees that neither | ||
2890 | * complete nor pending requests exist once i!=0 */ | ||
2868 | spin_lock_irqsave(&fi->reqlists_lock, flags); | 2891 | spin_lock_irqsave(&fi->reqlists_lock, flags); |
2869 | 2892 | while ((req = __next_complete_req(fi))) | |
2870 | while (!list_empty(&fi->req_complete)) { | ||
2871 | lh = fi->req_complete.next; | ||
2872 | list_del(lh); | ||
2873 | |||
2874 | req = list_entry(lh, struct pending_request, list); | ||
2875 | |||
2876 | free_pending_request(req); | 2893 | free_pending_request(req); |
2877 | } | ||
2878 | |||
2879 | if (list_empty(&fi->req_pending)) | ||
2880 | done = 1; | ||
2881 | 2894 | ||
2895 | i = list_empty(&fi->req_pending); | ||
2882 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); | 2896 | spin_unlock_irqrestore(&fi->reqlists_lock, flags); |
2883 | 2897 | ||
2884 | if (!done) | 2898 | if (i) |
2885 | down_interruptible(&fi->complete_sem); | 2899 | break; |
2900 | /* | ||
2901 | * Sleep until more requests can be freed. | ||
2902 | * | ||
2903 | * NB: We call the macro wait_event() with a condition argument | ||
2904 | * with side effect. This is only possible because the side | ||
2905 | * effect does not occur until the condition becomes true, and | ||
2906 | * wait_event() won't evaluate the condition again after that. | ||
2907 | */ | ||
2908 | wait_event(fi->wait_complete, (req = next_complete_req(fi))); | ||
2909 | free_pending_request(req); | ||
2886 | } | 2910 | } |
2887 | 2911 | ||
2888 | /* Remove any sub-trees left by user space programs */ | 2912 | /* Remove any sub-trees left by user space programs */ |
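raw1394's counting semaphore is gone after the hunks above: a completed request is now handed over by a spinlock-protected list pop plus a plain wait queue. The sketch below shows that idiom in isolation, under made-up demo_* names and with initialization kept to the minimum.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_req {
        struct list_head list;
};

struct demo_queue {
        spinlock_t lock;
        struct list_head complete;
        wait_queue_head_t wait;
};

static void demo_init(struct demo_queue *q)
{
        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->complete);
        init_waitqueue_head(&q->wait);
}

/* Atomically take the next completed request, or NULL if none. */
static struct demo_req *demo_next(struct demo_queue *q)
{
        struct demo_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&q->complete)) {
                req = list_entry(q->complete.next, struct demo_req, list);
                list_del(&req->list);
        }
        spin_unlock_irqrestore(&q->lock, flags);
        return req;
}

/* Producer: queue a finished request and wake any sleeper. */
static void demo_complete(struct demo_queue *q, struct demo_req *req)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(&req->list, &q->complete);
        spin_unlock_irqrestore(&q->lock, flags);
        wake_up(&q->wait);
}

/*
 * Consumer: the condition dequeues as a side effect, which is safe because
 * wait_event_interruptible() stops evaluating it once it has become true.
 */
static struct demo_req *demo_wait(struct demo_queue *q)
{
        struct demo_req *req;

        if (wait_event_interruptible(q->wait, (req = demo_next(q))))
                return NULL;    /* interrupted by a signal */
        return req;
}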
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index b08755e2e68f..6986ac188281 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -38,31 +38,36 @@ | |||
38 | * but the code needs additional debugging. | 38 | * but the code needs additional debugging. |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #include <linux/blkdev.h> | ||
42 | #include <linux/compiler.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/device.h> | ||
45 | #include <linux/dma-mapping.h> | ||
46 | #include <linux/gfp.h> | ||
47 | #include <linux/init.h> | ||
41 | #include <linux/kernel.h> | 48 | #include <linux/kernel.h> |
42 | #include <linux/list.h> | 49 | #include <linux/list.h> |
43 | #include <linux/string.h> | ||
44 | #include <linux/stringify.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/interrupt.h> | ||
47 | #include <linux/fs.h> | ||
48 | #include <linux/poll.h> | ||
49 | #include <linux/module.h> | 50 | #include <linux/module.h> |
50 | #include <linux/moduleparam.h> | 51 | #include <linux/moduleparam.h> |
51 | #include <linux/types.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include <linux/sched.h> | ||
54 | #include <linux/blkdev.h> | ||
55 | #include <linux/smp_lock.h> | ||
56 | #include <linux/init.h> | ||
57 | #include <linux/pci.h> | 52 | #include <linux/pci.h> |
53 | #include <linux/slab.h> | ||
54 | #include <linux/spinlock.h> | ||
55 | #include <linux/stat.h> | ||
56 | #include <linux/string.h> | ||
57 | #include <linux/stringify.h> | ||
58 | #include <linux/types.h> | ||
59 | #include <linux/wait.h> | ||
58 | 60 | ||
59 | #include <asm/current.h> | ||
60 | #include <asm/uaccess.h> | ||
61 | #include <asm/io.h> | ||
62 | #include <asm/byteorder.h> | 61 | #include <asm/byteorder.h> |
63 | #include <asm/atomic.h> | 62 | #include <asm/errno.h> |
64 | #include <asm/system.h> | 63 | #include <asm/param.h> |
65 | #include <asm/scatterlist.h> | 64 | #include <asm/scatterlist.h> |
65 | #include <asm/system.h> | ||
66 | #include <asm/types.h> | ||
67 | |||
68 | #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA | ||
69 | #include <asm/io.h> /* for bus_to_virt */ | ||
70 | #endif | ||
66 | 71 | ||
67 | #include <scsi/scsi.h> | 72 | #include <scsi/scsi.h> |
68 | #include <scsi/scsi_cmnd.h> | 73 | #include <scsi/scsi_cmnd.h> |
@@ -71,13 +76,14 @@ | |||
71 | #include <scsi/scsi_host.h> | 76 | #include <scsi/scsi_host.h> |
72 | 77 | ||
73 | #include "csr1212.h" | 78 | #include "csr1212.h" |
79 | #include "highlevel.h" | ||
80 | #include "hosts.h" | ||
74 | #include "ieee1394.h" | 81 | #include "ieee1394.h" |
75 | #include "ieee1394_types.h" | ||
76 | #include "ieee1394_core.h" | 82 | #include "ieee1394_core.h" |
77 | #include "nodemgr.h" | 83 | #include "ieee1394_hotplug.h" |
78 | #include "hosts.h" | ||
79 | #include "highlevel.h" | ||
80 | #include "ieee1394_transactions.h" | 84 | #include "ieee1394_transactions.h" |
85 | #include "ieee1394_types.h" | ||
86 | #include "nodemgr.h" | ||
81 | #include "sbp2.h" | 87 | #include "sbp2.h" |
82 | 88 | ||
83 | /* | 89 | /* |
@@ -173,11 +179,6 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
173 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 179 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
174 | ", or a combination)"); | 180 | ", or a combination)"); |
175 | 181 | ||
176 | /* legacy parameter */ | ||
177 | static int force_inquiry_hack; | ||
178 | module_param(force_inquiry_hack, int, 0644); | ||
179 | MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'"); | ||
180 | |||
181 | /* | 182 | /* |
182 | * Export information about protocols/devices supported by this driver. | 183 | * Export information about protocols/devices supported by this driver. |
183 | */ | 184 | */ |
@@ -208,9 +209,9 @@ static u32 global_outstanding_command_orbs = 0; | |||
208 | #define outstanding_orb_incr global_outstanding_command_orbs++ | 209 | #define outstanding_orb_incr global_outstanding_command_orbs++ |
209 | #define outstanding_orb_decr global_outstanding_command_orbs-- | 210 | #define outstanding_orb_decr global_outstanding_command_orbs-- |
210 | #else | 211 | #else |
211 | #define SBP2_ORB_DEBUG(fmt, args...) | 212 | #define SBP2_ORB_DEBUG(fmt, args...) do {} while (0) |
212 | #define outstanding_orb_incr | 213 | #define outstanding_orb_incr do {} while (0) |
213 | #define outstanding_orb_decr | 214 | #define outstanding_orb_decr do {} while (0) |
214 | #endif | 215 | #endif |
215 | 216 | ||
216 | #ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA | 217 | #ifdef CONFIG_IEEE1394_SBP2_DEBUG_DMA |
@@ -222,8 +223,8 @@ static u32 global_outstanding_command_orbs = 0; | |||
222 | --global_outstanding_dmas, ## args) | 223 | --global_outstanding_dmas, ## args) |
223 | static u32 global_outstanding_dmas = 0; | 224 | static u32 global_outstanding_dmas = 0; |
224 | #else | 225 | #else |
225 | #define SBP2_DMA_ALLOC(fmt, args...) | 226 | #define SBP2_DMA_ALLOC(fmt, args...) do {} while (0) |
226 | #define SBP2_DMA_FREE(fmt, args...) | 227 | #define SBP2_DMA_FREE(fmt, args...) do {} while (0) |
227 | #endif | 228 | #endif |
228 | 229 | ||
229 | #if CONFIG_IEEE1394_SBP2_DEBUG >= 2 | 230 | #if CONFIG_IEEE1394_SBP2_DEBUG >= 2 |
@@ -237,7 +238,7 @@ static u32 global_outstanding_dmas = 0; | |||
237 | #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) | 238 | #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) |
238 | #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) | 239 | #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) |
239 | #else | 240 | #else |
240 | #define SBP2_DEBUG(fmt, args...) | 241 | #define SBP2_DEBUG(fmt, args...) do {} while (0) |
241 | #define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args) | 242 | #define SBP2_INFO(fmt, args...) HPSB_INFO("sbp2: "fmt, ## args) |
242 | #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) | 243 | #define SBP2_NOTICE(fmt, args...) HPSB_NOTICE("sbp2: "fmt, ## args) |
243 | #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) | 244 | #define SBP2_WARN(fmt, args...) HPSB_WARN("sbp2: "fmt, ## args) |
@@ -356,7 +357,7 @@ static const struct { | |||
356 | /* | 357 | /* |
357 | * Converts a buffer from be32 to cpu byte ordering. Length is in bytes. | 358 | * Converts a buffer from be32 to cpu byte ordering. Length is in bytes. |
358 | */ | 359 | */ |
359 | static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length) | 360 | static inline void sbp2util_be32_to_cpu_buffer(void *buffer, int length) |
360 | { | 361 | { |
361 | u32 *temp = buffer; | 362 | u32 *temp = buffer; |
362 | 363 | ||
@@ -369,7 +370,7 @@ static __inline__ void sbp2util_be32_to_cpu_buffer(void *buffer, int length) | |||
369 | /* | 370 | /* |
370 | * Converts a buffer from cpu to be32 byte ordering. Length is in bytes. | 371 | * Converts a buffer from cpu to be32 byte ordering. Length is in bytes. |
371 | */ | 372 | */ |
372 | static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length) | 373 | static inline void sbp2util_cpu_to_be32_buffer(void *buffer, int length) |
373 | { | 374 | { |
374 | u32 *temp = buffer; | 375 | u32 *temp = buffer; |
375 | 376 | ||
@@ -380,8 +381,8 @@ static __inline__ void sbp2util_cpu_to_be32_buffer(void *buffer, int length) | |||
380 | } | 381 | } |
381 | #else /* BIG_ENDIAN */ | 382 | #else /* BIG_ENDIAN */ |
382 | /* Why waste the cpu cycles? */ | 383 | /* Why waste the cpu cycles? */ |
383 | #define sbp2util_be32_to_cpu_buffer(x,y) | 384 | #define sbp2util_be32_to_cpu_buffer(x,y) do {} while (0) |
384 | #define sbp2util_cpu_to_be32_buffer(x,y) | 385 | #define sbp2util_cpu_to_be32_buffer(x,y) do {} while (0) |
385 | #endif | 386 | #endif |
386 | 387 | ||
387 | #ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP | 388 | #ifdef CONFIG_IEEE1394_SBP2_PACKET_DUMP |
@@ -417,24 +418,26 @@ static void sbp2util_packet_dump(void *buffer, int length, char *dump_name, | |||
417 | return; | 418 | return; |
418 | } | 419 | } |
419 | #else | 420 | #else |
420 | #define sbp2util_packet_dump(w,x,y,z) | 421 | #define sbp2util_packet_dump(w,x,y,z) do {} while (0) |
421 | #endif | 422 | #endif |
422 | 423 | ||
424 | static DECLARE_WAIT_QUEUE_HEAD(access_wq); | ||
425 | |||
423 | /* | 426 | /* |
424 | * Goofy routine that basically does a down_timeout function. | 427 | * Waits for completion of an SBP-2 access request. |
428 | * Returns nonzero if timed out or prematurely interrupted. | ||
425 | */ | 429 | */ |
426 | static int sbp2util_down_timeout(atomic_t *done, int timeout) | 430 | static int sbp2util_access_timeout(struct scsi_id_instance_data *scsi_id, |
431 | int timeout) | ||
427 | { | 432 | { |
428 | int i; | 433 | long leftover = wait_event_interruptible_timeout( |
434 | access_wq, scsi_id->access_complete, timeout); | ||
429 | 435 | ||
430 | for (i = timeout; (i > 0 && atomic_read(done) == 0); i-= HZ/10) { | 436 | scsi_id->access_complete = 0; |
431 | if (msleep_interruptible(100)) /* 100ms */ | 437 | return leftover <= 0; |
432 | return 1; | ||
433 | } | ||
434 | return (i > 0) ? 0 : 1; | ||
435 | } | 438 | } |
436 | 439 | ||
437 | /* Free's an allocated packet */ | 440 | /* Frees an allocated packet */ |
438 | static void sbp2_free_packet(struct hpsb_packet *packet) | 441 | static void sbp2_free_packet(struct hpsb_packet *packet) |
439 | { | 442 | { |
440 | hpsb_free_tlabel(packet); | 443 | hpsb_free_tlabel(packet); |
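sbp2util_access_timeout() above replaces a 100 ms polling loop with wait_event_interruptible_timeout(). A stripped-down sketch of the same wait-for-a-flag-with-timeout pattern, under invented demo_* names:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

/* Completion side, e.g. called from a status-write handler. */
static void demo_signal(void)
{
        demo_done = 1;
        wake_up(&demo_wq);
}

/* Returns nonzero if the wait timed out or was interrupted by a signal. */
static int demo_wait_timeout(long timeout_jiffies)
{
        long left = wait_event_interruptible_timeout(demo_wq, demo_done,
                                                     timeout_jiffies);
        demo_done = 0;
        return left <= 0;
}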
@@ -468,6 +471,44 @@ static int sbp2util_node_write_no_wait(struct node_entry *ne, u64 addr, | |||
468 | return 0; | 471 | return 0; |
469 | } | 472 | } |
470 | 473 | ||
474 | static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, | ||
475 | u64 offset, quadlet_t *data, size_t len) | ||
476 | { | ||
477 | /* | ||
478 | * There is a small window after a bus reset within which the node | ||
479 | * entry's generation is current but the reconnect has not yet completed. | ||
480 | */ | ||
481 | if (unlikely(atomic_read(&scsi_id->state) == SBP2LU_STATE_IN_RESET)) | ||
482 | return; | ||
483 | |||
484 | if (hpsb_node_write(scsi_id->ne, | ||
485 | scsi_id->sbp2_command_block_agent_addr + offset, | ||
486 | data, len)) | ||
487 | SBP2_ERR("sbp2util_notify_fetch_agent failed."); | ||
488 | /* | ||
489 | * Now accept new SCSI commands, unless a bus reset happened during | ||
490 | * hpsb_node_write. | ||
491 | */ | ||
492 | if (likely(atomic_read(&scsi_id->state) != SBP2LU_STATE_IN_RESET)) | ||
493 | scsi_unblock_requests(scsi_id->scsi_host); | ||
494 | } | ||
495 | |||
496 | static void sbp2util_write_orb_pointer(void *p) | ||
497 | { | ||
498 | quadlet_t data[2]; | ||
499 | |||
500 | data[0] = ORB_SET_NODE_ID( | ||
501 | ((struct scsi_id_instance_data *)p)->hi->host->node_id); | ||
502 | data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma; | ||
503 | sbp2util_cpu_to_be32_buffer(data, 8); | ||
504 | sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); | ||
505 | } | ||
506 | |||
507 | static void sbp2util_write_doorbell(void *p) | ||
508 | { | ||
509 | sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); | ||
510 | } | ||
511 | |||
471 | /* | 512 | /* |
472 | * This function is called to create a pool of command orbs used for | 513 | * This function is called to create a pool of command orbs used for |
473 | * command processing. It is called when a new sbp2 device is detected. | 514 | * command processing. It is called when a new sbp2 device is detected. |
@@ -492,7 +533,7 @@ static int sbp2util_create_command_orb_pool(struct scsi_id_instance_data *scsi_i | |||
492 | command->command_orb_dma = | 533 | command->command_orb_dma = |
493 | pci_map_single(hi->host->pdev, &command->command_orb, | 534 | pci_map_single(hi->host->pdev, &command->command_orb, |
494 | sizeof(struct sbp2_command_orb), | 535 | sizeof(struct sbp2_command_orb), |
495 | PCI_DMA_BIDIRECTIONAL); | 536 | PCI_DMA_TODEVICE); |
496 | SBP2_DMA_ALLOC("single command orb DMA"); | 537 | SBP2_DMA_ALLOC("single command orb DMA"); |
497 | command->sge_dma = | 538 | command->sge_dma = |
498 | pci_map_single(hi->host->pdev, | 539 | pci_map_single(hi->host->pdev, |
@@ -525,7 +566,7 @@ static void sbp2util_remove_command_orb_pool(struct scsi_id_instance_data *scsi_ | |||
525 | /* Release our generic DMA's */ | 566 | /* Release our generic DMA's */ |
526 | pci_unmap_single(host->pdev, command->command_orb_dma, | 567 | pci_unmap_single(host->pdev, command->command_orb_dma, |
527 | sizeof(struct sbp2_command_orb), | 568 | sizeof(struct sbp2_command_orb), |
528 | PCI_DMA_BIDIRECTIONAL); | 569 | PCI_DMA_TODEVICE); |
529 | SBP2_DMA_FREE("single command orb DMA"); | 570 | SBP2_DMA_FREE("single command orb DMA"); |
530 | pci_unmap_single(host->pdev, command->sge_dma, | 571 | pci_unmap_single(host->pdev, command->sge_dma, |
531 | sizeof(command->scatter_gather_element), | 572 | sizeof(command->scatter_gather_element), |
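Command ORBs are now mapped PCI_DMA_TODEVICE: the CPU only writes them and the target only fetches them, so a bidirectional mapping was unnecessarily conservative. A reduced sketch of the map/sync lifecycle under the pci_* DMA API used throughout this driver; the structure and function names are invented for illustration:

#include <linux/pci.h>

/* Hypothetical ORB-like buffer: CPU writes it, device only reads it. */
struct demo_orb {
	u32 next_ORB_hi, next_ORB_lo, misc;
};

static dma_addr_t demo_map(struct pci_dev *pdev, struct demo_orb *orb)
{
	return pci_map_single(pdev, orb, sizeof(*orb), PCI_DMA_TODEVICE);
}

static void demo_update(struct pci_dev *pdev, struct demo_orb *orb,
			dma_addr_t dma)
{
	/* Reclaim the buffer for the CPU, change it, hand it back. */
	pci_dma_sync_single_for_cpu(pdev, dma, sizeof(*orb),
				    PCI_DMA_TODEVICE);
	orb->misc = 0;
	pci_dma_sync_single_for_device(pdev, dma, sizeof(*orb),
				       PCI_DMA_TODEVICE);
}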
@@ -715,6 +756,7 @@ static int sbp2_remove(struct device *dev) | |||
715 | sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); | 756 | sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); |
716 | /* scsi_remove_device() will trigger shutdown functions of SCSI | 757 | /* scsi_remove_device() will trigger shutdown functions of SCSI |
717 | * highlevel drivers which would deadlock if blocked. */ | 758 | * highlevel drivers which would deadlock if blocked. */ |
759 | atomic_set(&scsi_id->state, SBP2LU_STATE_IN_SHUTDOWN); | ||
718 | scsi_unblock_requests(scsi_id->scsi_host); | 760 | scsi_unblock_requests(scsi_id->scsi_host); |
719 | } | 761 | } |
720 | sdev = scsi_id->sdev; | 762 | sdev = scsi_id->sdev; |
@@ -766,10 +808,12 @@ static int sbp2_update(struct unit_directory *ud) | |||
766 | */ | 808 | */ |
767 | sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); | 809 | sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); |
768 | 810 | ||
769 | /* Make sure we unblock requests (since this is likely after a bus | 811 | /* Accept new commands unless there was another bus reset in the |
770 | * reset). */ | 812 | * meantime. */ |
771 | scsi_unblock_requests(scsi_id->scsi_host); | 813 | if (hpsb_node_entry_valid(scsi_id->ne)) { |
772 | 814 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); | |
815 | scsi_unblock_requests(scsi_id->scsi_host); | ||
816 | } | ||
773 | return 0; | 817 | return 0; |
774 | } | 818 | } |
775 | 819 | ||
@@ -794,11 +838,12 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud | |||
794 | scsi_id->speed_code = IEEE1394_SPEED_100; | 838 | scsi_id->speed_code = IEEE1394_SPEED_100; |
795 | scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100]; | 839 | scsi_id->max_payload_size = sbp2_speedto_max_payload[IEEE1394_SPEED_100]; |
796 | scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE; | 840 | scsi_id->status_fifo_addr = CSR1212_INVALID_ADDR_SPACE; |
797 | atomic_set(&scsi_id->sbp2_login_complete, 0); | ||
798 | INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse); | 841 | INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_inuse); |
799 | INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); | 842 | INIT_LIST_HEAD(&scsi_id->sbp2_command_orb_completed); |
800 | INIT_LIST_HEAD(&scsi_id->scsi_list); | 843 | INIT_LIST_HEAD(&scsi_id->scsi_list); |
801 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); | 844 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); |
845 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); | ||
846 | INIT_WORK(&scsi_id->protocol_work, NULL, NULL); | ||
802 | 847 | ||
803 | ud->device.driver_data = scsi_id; | 848 | ud->device.driver_data = scsi_id; |
804 | 849 | ||
@@ -881,11 +926,14 @@ static void sbp2_host_reset(struct hpsb_host *host) | |||
881 | struct scsi_id_instance_data *scsi_id; | 926 | struct scsi_id_instance_data *scsi_id; |
882 | 927 | ||
883 | hi = hpsb_get_hostinfo(&sbp2_highlevel, host); | 928 | hi = hpsb_get_hostinfo(&sbp2_highlevel, host); |
884 | 929 | if (!hi) | |
885 | if (hi) { | 930 | return; |
886 | list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list) | 931 | list_for_each_entry(scsi_id, &hi->scsi_ids, scsi_list) |
932 | if (likely(atomic_read(&scsi_id->state) != | ||
933 | SBP2LU_STATE_IN_SHUTDOWN)) { | ||
934 | atomic_set(&scsi_id->state, SBP2LU_STATE_IN_RESET); | ||
887 | scsi_block_requests(scsi_id->scsi_host); | 935 | scsi_block_requests(scsi_id->scsi_host); |
888 | } | 936 | } |
889 | } | 937 | } |
890 | 938 | ||
891 | /* | 939 | /* |
@@ -970,8 +1018,7 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id) | |||
970 | * connected to the sbp2 device being removed. That host would | 1018 | * connected to the sbp2 device being removed. That host would |
971 | * have a certain amount of time to relogin before the sbp2 device | 1019 | * have a certain amount of time to relogin before the sbp2 device |
972 | * allows someone else to login instead. One second makes sense. */ | 1020 | * allows someone else to login instead. One second makes sense. */ |
973 | msleep_interruptible(1000); | 1021 | if (msleep_interruptible(1000)) { |
974 | if (signal_pending(current)) { | ||
975 | sbp2_remove_device(scsi_id); | 1022 | sbp2_remove_device(scsi_id); |
976 | return -EINTR; | 1023 | return -EINTR; |
977 | } | 1024 | } |
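msleep_interruptible() already returns the number of milliseconds left when a signal cuts the sleep short, so the separate signal_pending() test was redundant. A tiny sketch of the idiom, assuming a hypothetical one-second grace period as in sbp2_start_device():

#include <linux/delay.h>

static int demo_grace_period(void)
{
	/* Nonzero return means a signal arrived before the time ran out. */
	if (msleep_interruptible(1000))
		return -EINTR;
	return 0;
}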
@@ -1036,7 +1083,7 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id) | |||
1036 | scsi_remove_host(scsi_id->scsi_host); | 1083 | scsi_remove_host(scsi_id->scsi_host); |
1037 | scsi_host_put(scsi_id->scsi_host); | 1084 | scsi_host_put(scsi_id->scsi_host); |
1038 | } | 1085 | } |
1039 | 1086 | flush_scheduled_work(); | |
1040 | sbp2util_remove_command_orb_pool(scsi_id); | 1087 | sbp2util_remove_command_orb_pool(scsi_id); |
1041 | 1088 | ||
1042 | list_del(&scsi_id->scsi_list); | 1089 | list_del(&scsi_id->scsi_list); |
@@ -1182,17 +1229,14 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) | |||
1182 | "sbp2 query logins orb", scsi_id->query_logins_orb_dma); | 1229 | "sbp2 query logins orb", scsi_id->query_logins_orb_dma); |
1183 | 1230 | ||
1184 | memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response)); | 1231 | memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response)); |
1185 | memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block)); | ||
1186 | 1232 | ||
1187 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); | 1233 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); |
1188 | data[1] = scsi_id->query_logins_orb_dma; | 1234 | data[1] = scsi_id->query_logins_orb_dma; |
1189 | sbp2util_cpu_to_be32_buffer(data, 8); | 1235 | sbp2util_cpu_to_be32_buffer(data, 8); |
1190 | 1236 | ||
1191 | atomic_set(&scsi_id->sbp2_login_complete, 0); | ||
1192 | |||
1193 | hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); | 1237 | hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); |
1194 | 1238 | ||
1195 | if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) { | 1239 | if (sbp2util_access_timeout(scsi_id, 2*HZ)) { |
1196 | SBP2_INFO("Error querying logins to SBP-2 device - timed out"); | 1240 | SBP2_INFO("Error querying logins to SBP-2 device - timed out"); |
1197 | return -EIO; | 1241 | return -EIO; |
1198 | } | 1242 | } |
@@ -1202,11 +1246,8 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) | |||
1202 | return -EIO; | 1246 | return -EIO; |
1203 | } | 1247 | } |
1204 | 1248 | ||
1205 | if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || | 1249 | if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { |
1206 | STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) || | 1250 | SBP2_INFO("Error querying logins to SBP-2 device - failed"); |
1207 | STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { | ||
1208 | |||
1209 | SBP2_INFO("Error querying logins to SBP-2 device - timed out"); | ||
1210 | return -EIO; | 1251 | return -EIO; |
1211 | } | 1252 | } |
1212 | 1253 | ||
@@ -1278,21 +1319,18 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) | |||
1278 | "sbp2 login orb", scsi_id->login_orb_dma); | 1319 | "sbp2 login orb", scsi_id->login_orb_dma); |
1279 | 1320 | ||
1280 | memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response)); | 1321 | memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response)); |
1281 | memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block)); | ||
1282 | 1322 | ||
1283 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); | 1323 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); |
1284 | data[1] = scsi_id->login_orb_dma; | 1324 | data[1] = scsi_id->login_orb_dma; |
1285 | sbp2util_cpu_to_be32_buffer(data, 8); | 1325 | sbp2util_cpu_to_be32_buffer(data, 8); |
1286 | 1326 | ||
1287 | atomic_set(&scsi_id->sbp2_login_complete, 0); | ||
1288 | |||
1289 | hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); | 1327 | hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8); |
1290 | 1328 | ||
1291 | /* | 1329 | /* |
1292 | * Wait for login status (up to 20 seconds)... | 1330 | * Wait for login status (up to 20 seconds)... |
1293 | */ | 1331 | */ |
1294 | if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 20*HZ)) { | 1332 | if (sbp2util_access_timeout(scsi_id, 20*HZ)) { |
1295 | SBP2_ERR("Error logging into SBP-2 device - login timed-out"); | 1333 | SBP2_ERR("Error logging into SBP-2 device - timed out"); |
1296 | return -EIO; | 1334 | return -EIO; |
1297 | } | 1335 | } |
1298 | 1336 | ||
@@ -1300,18 +1338,12 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) | |||
1300 | * Sanity. Make sure status returned matches login orb. | 1338 | * Sanity. Make sure status returned matches login orb. |
1301 | */ | 1339 | */ |
1302 | if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { | 1340 | if (scsi_id->status_block.ORB_offset_lo != scsi_id->login_orb_dma) { |
1303 | SBP2_ERR("Error logging into SBP-2 device - login timed-out"); | 1341 | SBP2_ERR("Error logging into SBP-2 device - timed out"); |
1304 | return -EIO; | 1342 | return -EIO; |
1305 | } | 1343 | } |
1306 | 1344 | ||
1307 | /* | 1345 | if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { |
1308 | * Check status | 1346 | SBP2_ERR("Error logging into SBP-2 device - failed"); |
1309 | */ | ||
1310 | if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || | ||
1311 | STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) || | ||
1312 | STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { | ||
1313 | |||
1314 | SBP2_ERR("Error logging into SBP-2 device - login failed"); | ||
1315 | return -EIO; | 1347 | return -EIO; |
1316 | } | 1348 | } |
1317 | 1349 | ||
@@ -1335,9 +1367,7 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) | |||
1335 | scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL; | 1367 | scsi_id->sbp2_command_block_agent_addr &= 0x0000ffffffffffffULL; |
1336 | 1368 | ||
1337 | SBP2_INFO("Logged into SBP-2 device"); | 1369 | SBP2_INFO("Logged into SBP-2 device"); |
1338 | |||
1339 | return 0; | 1370 | return 0; |
1340 | |||
1341 | } | 1371 | } |
1342 | 1372 | ||
1343 | /* | 1373 | /* |
@@ -1387,21 +1417,17 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id) | |||
1387 | data[1] = scsi_id->logout_orb_dma; | 1417 | data[1] = scsi_id->logout_orb_dma; |
1388 | sbp2util_cpu_to_be32_buffer(data, 8); | 1418 | sbp2util_cpu_to_be32_buffer(data, 8); |
1389 | 1419 | ||
1390 | atomic_set(&scsi_id->sbp2_login_complete, 0); | ||
1391 | |||
1392 | error = hpsb_node_write(scsi_id->ne, | 1420 | error = hpsb_node_write(scsi_id->ne, |
1393 | scsi_id->sbp2_management_agent_addr, data, 8); | 1421 | scsi_id->sbp2_management_agent_addr, data, 8); |
1394 | if (error) | 1422 | if (error) |
1395 | return error; | 1423 | return error; |
1396 | 1424 | ||
1397 | /* Wait for device to logout...1 second. */ | 1425 | /* Wait for device to logout...1 second. */ |
1398 | if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) | 1426 | if (sbp2util_access_timeout(scsi_id, HZ)) |
1399 | return -EIO; | 1427 | return -EIO; |
1400 | 1428 | ||
1401 | SBP2_INFO("Logged out of SBP-2 device"); | 1429 | SBP2_INFO("Logged out of SBP-2 device"); |
1402 | |||
1403 | return 0; | 1430 | return 0; |
1404 | |||
1405 | } | 1431 | } |
1406 | 1432 | ||
1407 | /* | 1433 | /* |
@@ -1445,20 +1471,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) | |||
1445 | sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb), | 1471 | sbp2util_packet_dump(scsi_id->reconnect_orb, sizeof(struct sbp2_reconnect_orb), |
1446 | "sbp2 reconnect orb", scsi_id->reconnect_orb_dma); | 1472 | "sbp2 reconnect orb", scsi_id->reconnect_orb_dma); |
1447 | 1473 | ||
1448 | /* | ||
1449 | * Initialize status fifo | ||
1450 | */ | ||
1451 | memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block)); | ||
1452 | |||
1453 | /* | ||
1454 | * Ok, let's write to the target's management agent register | ||
1455 | */ | ||
1456 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); | 1474 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); |
1457 | data[1] = scsi_id->reconnect_orb_dma; | 1475 | data[1] = scsi_id->reconnect_orb_dma; |
1458 | sbp2util_cpu_to_be32_buffer(data, 8); | 1476 | sbp2util_cpu_to_be32_buffer(data, 8); |
1459 | 1477 | ||
1460 | atomic_set(&scsi_id->sbp2_login_complete, 0); | ||
1461 | |||
1462 | error = hpsb_node_write(scsi_id->ne, | 1478 | error = hpsb_node_write(scsi_id->ne, |
1463 | scsi_id->sbp2_management_agent_addr, data, 8); | 1479 | scsi_id->sbp2_management_agent_addr, data, 8); |
1464 | if (error) | 1480 | if (error) |
@@ -1467,8 +1483,8 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) | |||
1467 | /* | 1483 | /* |
1468 | * Wait for reconnect status (up to 1 second)... | 1484 | * Wait for reconnect status (up to 1 second)... |
1469 | */ | 1485 | */ |
1470 | if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, HZ)) { | 1486 | if (sbp2util_access_timeout(scsi_id, HZ)) { |
1471 | SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); | 1487 | SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); |
1472 | return -EIO; | 1488 | return -EIO; |
1473 | } | 1489 | } |
1474 | 1490 | ||
@@ -1476,25 +1492,17 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) | |||
1476 | * Sanity. Make sure status returned matches reconnect orb. | 1492 | * Sanity. Make sure status returned matches reconnect orb. |
1477 | */ | 1493 | */ |
1478 | if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { | 1494 | if (scsi_id->status_block.ORB_offset_lo != scsi_id->reconnect_orb_dma) { |
1479 | SBP2_ERR("Error reconnecting to SBP-2 device - reconnect timed-out"); | 1495 | SBP2_ERR("Error reconnecting to SBP-2 device - timed out"); |
1480 | return -EIO; | 1496 | return -EIO; |
1481 | } | 1497 | } |
1482 | 1498 | ||
1483 | /* | 1499 | if (STATUS_TEST_RDS(scsi_id->status_block.ORB_offset_hi_misc)) { |
1484 | * Check status | 1500 | SBP2_ERR("Error reconnecting to SBP-2 device - failed"); |
1485 | */ | ||
1486 | if (STATUS_GET_RESP(scsi_id->status_block.ORB_offset_hi_misc) || | ||
1487 | STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc) || | ||
1488 | STATUS_GET_SBP_STATUS(scsi_id->status_block.ORB_offset_hi_misc)) { | ||
1489 | |||
1490 | SBP2_ERR("Error reconnecting to SBP-2 device - reconnect failed"); | ||
1491 | return -EIO; | 1501 | return -EIO; |
1492 | } | 1502 | } |
1493 | 1503 | ||
1494 | HPSB_DEBUG("Reconnected to SBP-2 device"); | 1504 | HPSB_DEBUG("Reconnected to SBP-2 device"); |
1495 | |||
1496 | return 0; | 1505 | return 0; |
1497 | |||
1498 | } | 1506 | } |
1499 | 1507 | ||
1500 | /* | 1508 | /* |
@@ -1592,11 +1600,6 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
1592 | } | 1600 | } |
1593 | 1601 | ||
1594 | workarounds = sbp2_default_workarounds; | 1602 | workarounds = sbp2_default_workarounds; |
1595 | if (force_inquiry_hack) { | ||
1596 | SBP2_WARN("force_inquiry_hack is deprecated. " | ||
1597 | "Use parameter 'workarounds' instead."); | ||
1598 | workarounds |= SBP2_WORKAROUND_INQUIRY_36; | ||
1599 | } | ||
1600 | 1603 | ||
1601 | if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) | 1604 | if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) |
1602 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { | 1605 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { |
@@ -1705,9 +1708,14 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) | |||
1705 | quadlet_t data; | 1708 | quadlet_t data; |
1706 | u64 addr; | 1709 | u64 addr; |
1707 | int retval; | 1710 | int retval; |
1711 | unsigned long flags; | ||
1708 | 1712 | ||
1709 | SBP2_DEBUG_ENTER(); | 1713 | SBP2_DEBUG_ENTER(); |
1710 | 1714 | ||
1715 | cancel_delayed_work(&scsi_id->protocol_work); | ||
1716 | if (wait) | ||
1717 | flush_scheduled_work(); | ||
1718 | |||
1711 | data = ntohl(SBP2_AGENT_RESET_DATA); | 1719 | data = ntohl(SBP2_AGENT_RESET_DATA); |
1712 | addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET; | 1720 | addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET; |
1713 | 1721 | ||
@@ -1724,7 +1732,9 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait) | |||
1724 | /* | 1732 | /* |
1725 | * Need to make sure orb pointer is written on next command | 1733 | * Need to make sure orb pointer is written on next command |
1726 | */ | 1734 | */ |
1735 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); | ||
1727 | scsi_id->last_orb = NULL; | 1736 | scsi_id->last_orb = NULL; |
1737 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); | ||
1728 | 1738 | ||
1729 | return 0; | 1739 | return 0; |
1730 | } | 1740 | } |
@@ -1961,13 +1971,17 @@ static void sbp2_create_command_orb(struct scsi_id_instance_data *scsi_id, | |||
1961 | /* | 1971 | /* |
1962 | * This function is called in order to begin a regular SBP-2 command. | 1972 | * This function is called in order to begin a regular SBP-2 command. |
1963 | */ | 1973 | */ |
1964 | static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, | 1974 | static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, |
1965 | struct sbp2_command_info *command) | 1975 | struct sbp2_command_info *command) |
1966 | { | 1976 | { |
1967 | struct sbp2scsi_host_info *hi = scsi_id->hi; | 1977 | struct sbp2scsi_host_info *hi = scsi_id->hi; |
1968 | struct sbp2_command_orb *command_orb = &command->command_orb; | 1978 | struct sbp2_command_orb *command_orb = &command->command_orb; |
1969 | struct node_entry *ne = scsi_id->ne; | 1979 | struct sbp2_command_orb *last_orb; |
1970 | u64 addr; | 1980 | dma_addr_t last_orb_dma; |
1981 | u64 addr = scsi_id->sbp2_command_block_agent_addr; | ||
1982 | quadlet_t data[2]; | ||
1983 | size_t length; | ||
1984 | unsigned long flags; | ||
1971 | 1985 | ||
1972 | outstanding_orb_incr; | 1986 | outstanding_orb_incr; |
1973 | SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", | 1987 | SBP2_ORB_DEBUG("sending command orb %p, total orbs = %x", |
@@ -1975,73 +1989,70 @@ static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, | |||
1975 | 1989 | ||
1976 | pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, | 1990 | pci_dma_sync_single_for_device(hi->host->pdev, command->command_orb_dma, |
1977 | sizeof(struct sbp2_command_orb), | 1991 | sizeof(struct sbp2_command_orb), |
1978 | PCI_DMA_BIDIRECTIONAL); | 1992 | PCI_DMA_TODEVICE); |
1979 | pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma, | 1993 | pci_dma_sync_single_for_device(hi->host->pdev, command->sge_dma, |
1980 | sizeof(command->scatter_gather_element), | 1994 | sizeof(command->scatter_gather_element), |
1981 | PCI_DMA_BIDIRECTIONAL); | 1995 | PCI_DMA_BIDIRECTIONAL); |
1982 | /* | 1996 | /* |
1983 | * Check to see if there are any previous orbs to use | 1997 | * Check to see if there are any previous orbs to use |
1984 | */ | 1998 | */ |
1985 | if (scsi_id->last_orb == NULL) { | 1999 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); |
1986 | quadlet_t data[2]; | 2000 | last_orb = scsi_id->last_orb; |
1987 | 2001 | last_orb_dma = scsi_id->last_orb_dma; | |
2002 | if (!last_orb) { | ||
1988 | /* | 2003 | /* |
1989 | * Ok, let's write to the target's management agent register | 2004 | * last_orb == NULL means: We know that the target's fetch agent |
2005 | * is not active right now. | ||
1990 | */ | 2006 | */ |
1991 | addr = scsi_id->sbp2_command_block_agent_addr + SBP2_ORB_POINTER_OFFSET; | 2007 | addr += SBP2_ORB_POINTER_OFFSET; |
1992 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); | 2008 | data[0] = ORB_SET_NODE_ID(hi->host->node_id); |
1993 | data[1] = command->command_orb_dma; | 2009 | data[1] = command->command_orb_dma; |
1994 | sbp2util_cpu_to_be32_buffer(data, 8); | 2010 | sbp2util_cpu_to_be32_buffer(data, 8); |
1995 | 2011 | length = 8; | |
1996 | SBP2_ORB_DEBUG("write command agent, command orb %p", command_orb); | ||
1997 | |||
1998 | if (sbp2util_node_write_no_wait(ne, addr, data, 8) < 0) { | ||
1999 | SBP2_ERR("sbp2util_node_write_no_wait failed.\n"); | ||
2000 | return -EIO; | ||
2001 | } | ||
2002 | |||
2003 | SBP2_ORB_DEBUG("write command agent complete"); | ||
2004 | |||
2005 | scsi_id->last_orb = command_orb; | ||
2006 | scsi_id->last_orb_dma = command->command_orb_dma; | ||
2007 | |||
2008 | } else { | 2012 | } else { |
2009 | quadlet_t data; | ||
2010 | |||
2011 | /* | 2013 | /* |
2012 | * We have an orb already sent (maybe or maybe not | 2014 | * last_orb != NULL means: We know that the target's fetch agent |
2013 | * processed) that we can append this orb to. So do so, | 2015 | * is (very probably) not dead or in reset state right now. |
2014 | * and ring the doorbell. Have to be very careful | 2016 | * We have an ORB already sent that we can append a new one to. |
2015 | * modifying these next orb pointers, as they are accessed | 2017 | * The target's fetch agent may or may not have read this |
2016 | * both by the sbp2 device and us. | 2018 | * previous ORB yet. |
2017 | */ | 2019 | */ |
2018 | scsi_id->last_orb->next_ORB_lo = | 2020 | pci_dma_sync_single_for_cpu(hi->host->pdev, last_orb_dma, |
2019 | cpu_to_be32(command->command_orb_dma); | 2021 | sizeof(struct sbp2_command_orb), |
2022 | PCI_DMA_TODEVICE); | ||
2023 | last_orb->next_ORB_lo = cpu_to_be32(command->command_orb_dma); | ||
2024 | wmb(); | ||
2020 | /* Tells hardware that this pointer is valid */ | 2025 | /* Tells hardware that this pointer is valid */ |
2021 | scsi_id->last_orb->next_ORB_hi = 0x0; | 2026 | last_orb->next_ORB_hi = 0; |
2022 | pci_dma_sync_single_for_device(hi->host->pdev, | 2027 | pci_dma_sync_single_for_device(hi->host->pdev, last_orb_dma, |
2023 | scsi_id->last_orb_dma, | ||
2024 | sizeof(struct sbp2_command_orb), | 2028 | sizeof(struct sbp2_command_orb), |
2025 | PCI_DMA_BIDIRECTIONAL); | 2029 | PCI_DMA_TODEVICE); |
2030 | addr += SBP2_DOORBELL_OFFSET; | ||
2031 | data[0] = 0; | ||
2032 | length = 4; | ||
2033 | } | ||
2034 | scsi_id->last_orb = command_orb; | ||
2035 | scsi_id->last_orb_dma = command->command_orb_dma; | ||
2036 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); | ||
2026 | 2037 | ||
2038 | SBP2_ORB_DEBUG("write to %s register, command orb %p", | ||
2039 | last_orb ? "DOORBELL" : "ORB_POINTER", command_orb); | ||
2040 | if (sbp2util_node_write_no_wait(scsi_id->ne, addr, data, length)) { | ||
2027 | /* | 2041 | /* |
2028 | * Ring the doorbell | 2042 | * sbp2util_node_write_no_wait failed. We certainly ran out |
2043 | * of transaction labels, perhaps just because there were no | ||
2044 | * context switches which gave khpsbpkt a chance to collect | ||
2045 | * free tlabels. Try again in non-atomic context. If necessary, | ||
2046 | * the workqueue job will sleep until it gets a tlabel. | ||
2047 | * We do not accept new commands until the job is over. | ||
2029 | */ | 2048 | */ |
2030 | data = cpu_to_be32(command->command_orb_dma); | 2049 | scsi_block_requests(scsi_id->scsi_host); |
2031 | addr = scsi_id->sbp2_command_block_agent_addr + SBP2_DOORBELL_OFFSET; | 2050 | PREPARE_WORK(&scsi_id->protocol_work, |
2032 | 2051 | last_orb ? sbp2util_write_doorbell : | |
2033 | SBP2_ORB_DEBUG("ring doorbell, command orb %p", command_orb); | 2052 | sbp2util_write_orb_pointer, |
2034 | 2053 | scsi_id); | |
2035 | if (sbp2util_node_write_no_wait(ne, addr, &data, 4) < 0) { | 2054 | schedule_work(&scsi_id->protocol_work); |
2036 | SBP2_ERR("sbp2util_node_write_no_wait failed"); | ||
2037 | return -EIO; | ||
2038 | } | ||
2039 | |||
2040 | scsi_id->last_orb = command_orb; | ||
2041 | scsi_id->last_orb_dma = command->command_orb_dma; | ||
2042 | |||
2043 | } | 2055 | } |
2044 | return 0; | ||
2045 | } | 2056 | } |
2046 | 2057 | ||
2047 | /* | 2058 | /* |
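When the non-blocking register write fails for lack of a free transaction label, the write is redone from process context through the per-LU work item while new SCSI commands are held off. A condensed sketch of that deferral with hypothetical names, using the three-argument work_struct API (the work function takes a void * data pointer) that this diff itself relies on:

#include <linux/workqueue.h>

struct demo_lu {
	struct work_struct protocol_work;  /* as added to scsi_id_instance_data */
};

/* Runs in process context, so a blocking wait for a free tlabel is fine. */
static void demo_retry_write(void *p)
{
	struct demo_lu *lu = p;
	/* ... redo the ORB_POINTER or DOORBELL write here ... */
	(void)lu;
}

static void demo_init(struct demo_lu *lu)
{
	INIT_WORK(&lu->protocol_work, NULL, NULL);	/* handler set later */
}

static void demo_defer(struct demo_lu *lu)
{
	PREPARE_WORK(&lu->protocol_work, demo_retry_write, lu);
	schedule_work(&lu->protocol_work);
}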
@@ -2078,11 +2089,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, | |||
2078 | "sbp2 command orb", command->command_orb_dma); | 2089 | "sbp2 command orb", command->command_orb_dma); |
2079 | 2090 | ||
2080 | /* | 2091 | /* |
2081 | * Initialize status fifo | ||
2082 | */ | ||
2083 | memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block)); | ||
2084 | |||
2085 | /* | ||
2086 | * Link up the orb, and ring the doorbell if needed | 2092 | * Link up the orb, and ring the doorbell if needed |
2087 | */ | 2093 | */ |
2088 | sbp2_link_orb_command(scsi_id, command); | 2094 | sbp2_link_orb_command(scsi_id, command); |
@@ -2123,12 +2129,14 @@ static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense | |||
2123 | /* | 2129 | /* |
2124 | * This function deals with status writes from the SBP-2 device | 2130 | * This function deals with status writes from the SBP-2 device |
2125 | */ | 2131 | */ |
2126 | static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, | 2132 | static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, |
2127 | quadlet_t *data, u64 addr, size_t length, u16 fl) | 2133 | int destid, quadlet_t *data, u64 addr, |
2134 | size_t length, u16 fl) | ||
2128 | { | 2135 | { |
2129 | struct sbp2scsi_host_info *hi; | 2136 | struct sbp2scsi_host_info *hi; |
2130 | struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; | 2137 | struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; |
2131 | struct scsi_cmnd *SCpnt = NULL; | 2138 | struct scsi_cmnd *SCpnt = NULL; |
2139 | struct sbp2_status_block *sb; | ||
2132 | u32 scsi_status = SBP2_SCSI_STATUS_GOOD; | 2140 | u32 scsi_status = SBP2_SCSI_STATUS_GOOD; |
2133 | struct sbp2_command_info *command; | 2141 | struct sbp2_command_info *command; |
2134 | unsigned long flags; | 2142 | unsigned long flags; |
@@ -2137,18 +2145,19 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest | |||
2137 | 2145 | ||
2138 | sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr); | 2146 | sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr); |
2139 | 2147 | ||
2140 | if (!host) { | 2148 | if (unlikely(length < 8 || length > sizeof(struct sbp2_status_block))) { |
2149 | SBP2_ERR("Wrong size of status block"); | ||
2150 | return RCODE_ADDRESS_ERROR; | ||
2151 | } | ||
2152 | if (unlikely(!host)) { | ||
2141 | SBP2_ERR("host is NULL - this is bad!"); | 2153 | SBP2_ERR("host is NULL - this is bad!"); |
2142 | return RCODE_ADDRESS_ERROR; | 2154 | return RCODE_ADDRESS_ERROR; |
2143 | } | 2155 | } |
2144 | |||
2145 | hi = hpsb_get_hostinfo(&sbp2_highlevel, host); | 2156 | hi = hpsb_get_hostinfo(&sbp2_highlevel, host); |
2146 | 2157 | if (unlikely(!hi)) { | |
2147 | if (!hi) { | ||
2148 | SBP2_ERR("host info is NULL - this is bad!"); | 2158 | SBP2_ERR("host info is NULL - this is bad!"); |
2149 | return RCODE_ADDRESS_ERROR; | 2159 | return RCODE_ADDRESS_ERROR; |
2150 | } | 2160 | } |
2151 | |||
2152 | /* | 2161 | /* |
2153 | * Find our scsi_id structure by looking at the status fifo address | 2162 | * Find our scsi_id structure by looking at the status fifo address |
2154 | * written to by the sbp2 device. | 2163 | * written to by the sbp2 device. |
@@ -2160,32 +2169,35 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest | |||
2160 | break; | 2169 | break; |
2161 | } | 2170 | } |
2162 | } | 2171 | } |
2163 | 2172 | if (unlikely(!scsi_id)) { | |
2164 | if (!scsi_id) { | ||
2165 | SBP2_ERR("scsi_id is NULL - device is gone?"); | 2173 | SBP2_ERR("scsi_id is NULL - device is gone?"); |
2166 | return RCODE_ADDRESS_ERROR; | 2174 | return RCODE_ADDRESS_ERROR; |
2167 | } | 2175 | } |
2168 | 2176 | ||
2169 | /* | 2177 | /* |
2170 | * Put response into scsi_id status fifo... | 2178 | * Put response into scsi_id status fifo buffer. The first two bytes |
2179 | * come in big endian bit order. Often the target writes only a | ||
2180 | * truncated status block, minimally the first two quadlets. The rest | ||
2181 | * is implied to be zeros. | ||
2171 | */ | 2182 | */ |
2172 | memcpy(&scsi_id->status_block, data, length); | 2183 | sb = &scsi_id->status_block; |
2184 | memset(sb->command_set_dependent, 0, sizeof(sb->command_set_dependent)); | ||
2185 | memcpy(sb, data, length); | ||
2186 | sbp2util_be32_to_cpu_buffer(sb, 8); | ||
2173 | 2187 | ||
2174 | /* | 2188 | /* |
2175 | * Byte swap first two quadlets (8 bytes) of status for processing | 2189 | * Ignore unsolicited status. Handle command ORB status. |
2176 | */ | 2190 | */ |
2177 | sbp2util_be32_to_cpu_buffer(&scsi_id->status_block, 8); | 2191 | if (unlikely(STATUS_GET_SRC(sb->ORB_offset_hi_misc) == 2)) |
2178 | 2192 | command = NULL; | |
2179 | /* | 2193 | else |
2180 | * Handle command ORB status here if necessary. First, need to match status with command. | 2194 | command = sbp2util_find_command_for_orb(scsi_id, |
2181 | */ | 2195 | sb->ORB_offset_lo); |
2182 | command = sbp2util_find_command_for_orb(scsi_id, scsi_id->status_block.ORB_offset_lo); | ||
2183 | if (command) { | 2196 | if (command) { |
2184 | |||
2185 | SBP2_DEBUG("Found status for command ORB"); | 2197 | SBP2_DEBUG("Found status for command ORB"); |
2186 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, | 2198 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, |
2187 | sizeof(struct sbp2_command_orb), | 2199 | sizeof(struct sbp2_command_orb), |
2188 | PCI_DMA_BIDIRECTIONAL); | 2200 | PCI_DMA_TODEVICE); |
2189 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, | 2201 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, |
2190 | sizeof(command->scatter_gather_element), | 2202 | sizeof(command->scatter_gather_element), |
2191 | PCI_DMA_BIDIRECTIONAL); | 2203 | PCI_DMA_BIDIRECTIONAL); |
@@ -2194,7 +2206,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest | |||
2194 | outstanding_orb_decr; | 2206 | outstanding_orb_decr; |
2195 | 2207 | ||
2196 | /* | 2208 | /* |
2197 | * Matched status with command, now grab scsi command pointers and check status | 2209 | * Matched status with command, now grab scsi command pointers |
2210 | * and check status. | ||
2211 | */ | ||
2212 | /* | ||
2213 | * FIXME: If the src field in the status is 1, the ORB DMA must | ||
2214 | * not be reused until status for a subsequent ORB is received. | ||
2198 | */ | 2215 | */ |
2199 | SCpnt = command->Current_SCpnt; | 2216 | SCpnt = command->Current_SCpnt; |
2200 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); | 2217 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); |
@@ -2202,61 +2219,64 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest | |||
2202 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); | 2219 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); |
2203 | 2220 | ||
2204 | if (SCpnt) { | 2221 | if (SCpnt) { |
2205 | 2222 | u32 h = sb->ORB_offset_hi_misc; | |
2223 | u32 r = STATUS_GET_RESP(h); | ||
2224 | |||
2225 | if (r != RESP_STATUS_REQUEST_COMPLETE) { | ||
2226 | SBP2_WARN("resp 0x%x, sbp_status 0x%x", | ||
2227 | r, STATUS_GET_SBP_STATUS(h)); | ||
2228 | scsi_status = | ||
2229 | r == RESP_STATUS_TRANSPORT_FAILURE ? | ||
2230 | SBP2_SCSI_STATUS_BUSY : | ||
2231 | SBP2_SCSI_STATUS_COMMAND_TERMINATED; | ||
2232 | } | ||
2206 | /* | 2233 | /* |
2207 | * See if the target stored any scsi status information | 2234 | * See if the target stored any scsi status information. |
2208 | */ | 2235 | */ |
2209 | if (STATUS_GET_LENGTH(scsi_id->status_block.ORB_offset_hi_misc) > 1) { | 2236 | if (STATUS_GET_LEN(h) > 1) { |
2210 | /* | ||
2211 | * Translate SBP-2 status to SCSI sense data | ||
2212 | */ | ||
2213 | SBP2_DEBUG("CHECK CONDITION"); | 2237 | SBP2_DEBUG("CHECK CONDITION"); |
2214 | scsi_status = sbp2_status_to_sense_data((unchar *)&scsi_id->status_block, SCpnt->sense_buffer); | 2238 | scsi_status = sbp2_status_to_sense_data( |
2239 | (unchar *)sb, SCpnt->sense_buffer); | ||
2215 | } | 2240 | } |
2216 | |||
2217 | /* | 2241 | /* |
2218 | * Check to see if the dead bit is set. If so, we'll have to initiate | 2242 | * Check to see if the dead bit is set. If so, we'll |
2219 | * a fetch agent reset. | 2243 | * have to initiate a fetch agent reset. |
2220 | */ | 2244 | */ |
2221 | if (STATUS_GET_DEAD_BIT(scsi_id->status_block.ORB_offset_hi_misc)) { | 2245 | if (STATUS_TEST_DEAD(h)) { |
2222 | 2246 | SBP2_DEBUG("Dead bit set - " | |
2223 | /* | 2247 | "initiating fetch agent reset"); |
2224 | * Initiate a fetch agent reset. | ||
2225 | */ | ||
2226 | SBP2_DEBUG("Dead bit set - initiating fetch agent reset"); | ||
2227 | sbp2_agent_reset(scsi_id, 0); | 2248 | sbp2_agent_reset(scsi_id, 0); |
2228 | } | 2249 | } |
2229 | |||
2230 | SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb); | 2250 | SBP2_ORB_DEBUG("completing command orb %p", &command->command_orb); |
2231 | } | 2251 | } |
2232 | 2252 | ||
2233 | /* | 2253 | /* |
2234 | * Check here to see if there are no commands in-use. If there are none, we can | 2254 | * Check here to see if there are no commands in-use. If there |
2235 | * null out last orb so that next time around we write directly to the orb pointer... | 2255 | * are none, we know that the fetch agent left the active state |
2236 | * Quick start saves one 1394 bus transaction. | 2256 | * _and_ that we did not reactivate it yet. Therefore clear |
2257 | * last_orb so that next time we write directly to the | ||
2258 | * ORB_POINTER register. That way the fetch agent does not need | ||
2259 | * to refetch the next_ORB. | ||
2237 | */ | 2260 | */ |
2238 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); | 2261 | spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags); |
2239 | if (list_empty(&scsi_id->sbp2_command_orb_inuse)) { | 2262 | if (list_empty(&scsi_id->sbp2_command_orb_inuse)) |
2240 | scsi_id->last_orb = NULL; | 2263 | scsi_id->last_orb = NULL; |
2241 | } | ||
2242 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); | 2264 | spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); |
2243 | 2265 | ||
2244 | } else { | 2266 | } else { |
2245 | |||
2246 | /* | 2267 | /* |
2247 | * It's probably a login/logout/reconnect status. | 2268 | * It's probably a login/logout/reconnect status. |
2248 | */ | 2269 | */ |
2249 | if ((scsi_id->login_orb_dma == scsi_id->status_block.ORB_offset_lo) || | 2270 | if ((sb->ORB_offset_lo == scsi_id->reconnect_orb_dma) || |
2250 | (scsi_id->query_logins_orb_dma == scsi_id->status_block.ORB_offset_lo) || | 2271 | (sb->ORB_offset_lo == scsi_id->login_orb_dma) || |
2251 | (scsi_id->reconnect_orb_dma == scsi_id->status_block.ORB_offset_lo) || | 2272 | (sb->ORB_offset_lo == scsi_id->query_logins_orb_dma) || |
2252 | (scsi_id->logout_orb_dma == scsi_id->status_block.ORB_offset_lo)) { | 2273 | (sb->ORB_offset_lo == scsi_id->logout_orb_dma)) { |
2253 | atomic_set(&scsi_id->sbp2_login_complete, 1); | 2274 | scsi_id->access_complete = 1; |
2275 | wake_up_interruptible(&access_wq); | ||
2254 | } | 2276 | } |
2255 | } | 2277 | } |
2256 | 2278 | ||
2257 | if (SCpnt) { | 2279 | if (SCpnt) { |
2258 | |||
2259 | /* Complete the SCSI command. */ | ||
2260 | SBP2_DEBUG("Completing SCSI command"); | 2280 | SBP2_DEBUG("Completing SCSI command"); |
2261 | sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt, | 2281 | sbp2scsi_complete_command(scsi_id, scsi_status, SCpnt, |
2262 | command->Current_done); | 2282 | command->Current_done); |
@@ -2372,7 +2392,7 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id | |||
2372 | command = list_entry(lh, struct sbp2_command_info, list); | 2392 | command = list_entry(lh, struct sbp2_command_info, list); |
2373 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, | 2393 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->command_orb_dma, |
2374 | sizeof(struct sbp2_command_orb), | 2394 | sizeof(struct sbp2_command_orb), |
2375 | PCI_DMA_BIDIRECTIONAL); | 2395 | PCI_DMA_TODEVICE); |
2376 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, | 2396 | pci_dma_sync_single_for_cpu(hi->host->pdev, command->sge_dma, |
2377 | sizeof(command->scatter_gather_element), | 2397 | sizeof(command->scatter_gather_element), |
2378 | PCI_DMA_BIDIRECTIONAL); | 2398 | PCI_DMA_BIDIRECTIONAL); |
@@ -2495,6 +2515,7 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
2495 | (struct scsi_id_instance_data *)sdev->host->hostdata[0]; | 2515 | (struct scsi_id_instance_data *)sdev->host->hostdata[0]; |
2496 | 2516 | ||
2497 | scsi_id->sdev = sdev; | 2517 | scsi_id->sdev = sdev; |
2518 | sdev->allow_restart = 1; | ||
2498 | 2519 | ||
2499 | if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) | 2520 | if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) |
2500 | sdev->inquiry_len = 36; | 2521 | sdev->inquiry_len = 36; |
@@ -2508,16 +2529,12 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev) | |||
2508 | 2529 | ||
2509 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); | 2530 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); |
2510 | sdev->use_10_for_rw = 1; | 2531 | sdev->use_10_for_rw = 1; |
2511 | sdev->use_10_for_ms = 1; | ||
2512 | 2532 | ||
2513 | if (sdev->type == TYPE_DISK && | 2533 | if (sdev->type == TYPE_DISK && |
2514 | scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) | 2534 | scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) |
2515 | sdev->skip_ms_page_8 = 1; | 2535 | sdev->skip_ms_page_8 = 1; |
2516 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | 2536 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) |
2517 | sdev->fix_capacity = 1; | 2537 | sdev->fix_capacity = 1; |
2518 | if (scsi_id->ne->guid_vendor_id == 0x0010b9 && /* Maxtor's OUI */ | ||
2519 | (sdev->type == TYPE_DISK || sdev->type == TYPE_RBC)) | ||
2520 | sdev->allow_restart = 1; | ||
2521 | return 0; | 2538 | return 0; |
2522 | } | 2539 | } |
2523 | 2540 | ||
@@ -2555,7 +2572,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) | |||
2555 | pci_dma_sync_single_for_cpu(hi->host->pdev, | 2572 | pci_dma_sync_single_for_cpu(hi->host->pdev, |
2556 | command->command_orb_dma, | 2573 | command->command_orb_dma, |
2557 | sizeof(struct sbp2_command_orb), | 2574 | sizeof(struct sbp2_command_orb), |
2558 | PCI_DMA_BIDIRECTIONAL); | 2575 | PCI_DMA_TODEVICE); |
2559 | pci_dma_sync_single_for_cpu(hi->host->pdev, | 2576 | pci_dma_sync_single_for_cpu(hi->host->pdev, |
2560 | command->sge_dma, | 2577 | command->sge_dma, |
2561 | sizeof(command->scatter_gather_element), | 2578 | sizeof(command->scatter_gather_element), |
@@ -2571,7 +2588,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) | |||
2571 | /* | 2588 | /* |
2572 | * Initiate a fetch agent reset. | 2589 | * Initiate a fetch agent reset. |
2573 | */ | 2590 | */ |
2574 | sbp2_agent_reset(scsi_id, 0); | 2591 | sbp2_agent_reset(scsi_id, 1); |
2575 | sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); | 2592 | sbp2scsi_complete_all_commands(scsi_id, DID_BUS_BUSY); |
2576 | } | 2593 | } |
2577 | 2594 | ||
@@ -2590,7 +2607,7 @@ static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) | |||
2590 | 2607 | ||
2591 | if (sbp2util_node_is_available(scsi_id)) { | 2608 | if (sbp2util_node_is_available(scsi_id)) { |
2592 | SBP2_ERR("Generating sbp2 fetch agent reset"); | 2609 | SBP2_ERR("Generating sbp2 fetch agent reset"); |
2593 | sbp2_agent_reset(scsi_id, 0); | 2610 | sbp2_agent_reset(scsi_id, 1); |
2594 | } | 2611 | } |
2595 | 2612 | ||
2596 | return SUCCESS; | 2613 | return SUCCESS; |
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index b22ce1aa8fe4..abbe48e646c3 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -46,8 +46,8 @@ | |||
46 | #define ORB_SET_DIRECTION(value) ((value & 0x1) << 27) | 46 | #define ORB_SET_DIRECTION(value) ((value & 0x1) << 27) |
47 | 47 | ||
48 | struct sbp2_command_orb { | 48 | struct sbp2_command_orb { |
49 | volatile u32 next_ORB_hi; | 49 | u32 next_ORB_hi; |
50 | volatile u32 next_ORB_lo; | 50 | u32 next_ORB_lo; |
51 | u32 data_descriptor_hi; | 51 | u32 data_descriptor_hi; |
52 | u32 data_descriptor_lo; | 52 | u32 data_descriptor_lo; |
53 | u32 misc; | 53 | u32 misc; |
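Dropping volatile from the next_ORB fields goes together with the chained-ORB rework in sbp2.c: the ordering of the two stores (low quadlet first, then the high quadlet that marks the pointer valid) is now enforced explicitly with wmb() and the DMA sync calls rather than through volatile. A reduced sketch of that ordering, modeled on sbp2_link_orb_command():

static void demo_chain_orb(struct pci_dev *pdev,
			   struct sbp2_command_orb *last_orb,
			   dma_addr_t last_orb_dma, dma_addr_t new_orb_dma)
{
	pci_dma_sync_single_for_cpu(pdev, last_orb_dma, sizeof(*last_orb),
				    PCI_DMA_TODEVICE);
	last_orb->next_ORB_lo = cpu_to_be32(new_orb_dma);
	wmb();				/* low quadlet must be visible first */
	last_orb->next_ORB_hi = 0;	/* zero marks the pointer as valid */
	pci_dma_sync_single_for_device(pdev, last_orb_dma, sizeof(*last_orb),
				       PCI_DMA_TODEVICE);
}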
@@ -180,12 +180,14 @@ struct sbp2_unrestricted_page_table { | |||
180 | 180 | ||
181 | #define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff | 181 | #define SBP2_SCSI_STATUS_SELECTION_TIMEOUT 0xff |
182 | 182 | ||
183 | #define STATUS_GET_ORB_OFFSET_HI(value) (value & 0xffff) | 183 | #define STATUS_GET_SRC(value) (((value) >> 30) & 0x3) |
184 | #define STATUS_GET_SBP_STATUS(value) ((value >> 16) & 0xff) | 184 | #define STATUS_GET_RESP(value) (((value) >> 28) & 0x3) |
185 | #define STATUS_GET_LENGTH(value) ((value >> 24) & 0x7) | 185 | #define STATUS_GET_LEN(value) (((value) >> 24) & 0x7) |
186 | #define STATUS_GET_DEAD_BIT(value) ((value >> 27) & 0x1) | 186 | #define STATUS_GET_SBP_STATUS(value) (((value) >> 16) & 0xff) |
187 | #define STATUS_GET_RESP(value) ((value >> 28) & 0x3) | 187 | #define STATUS_GET_ORB_OFFSET_HI(value) ((value) & 0x0000ffff) |
188 | #define STATUS_GET_SRC(value) ((value >> 30) & 0x3) | 188 | #define STATUS_TEST_DEAD(value) ((value) & 0x08000000) |
189 | /* test 'resp' | 'dead' | 'sbp2_status' */ | ||
190 | #define STATUS_TEST_RDS(value) ((value) & 0x38ff0000) | ||
189 | 191 | ||
190 | struct sbp2_status_block { | 192 | struct sbp2_status_block { |
191 | u32 ORB_offset_hi_misc; | 193 | u32 ORB_offset_hi_misc; |
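STATUS_TEST_RDS() folds the old three-field test into one mask over the first status quadlet: resp occupies bits 28-29 (0x30000000), dead is bit 27 (0x08000000) and sbp_status covers bits 16-23 (0x00ff0000), which OR together to 0x38ff0000. A comparison sketch using the macros above:

/* Old form: three separate shifts and masks per status write. */
static inline int status_failed_old(u32 h)
{
	return STATUS_GET_RESP(h) != 0 || STATUS_TEST_DEAD(h) ||
	       STATUS_GET_SBP_STATUS(h) != 0;
}

/* New form: one constant mask, same truth value as status_failed_old(). */
static inline int status_failed_new(u32 h)
{
	return STATUS_TEST_RDS(h) != 0;
}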
@@ -318,9 +320,9 @@ struct scsi_id_instance_data { | |||
318 | u64 status_fifo_addr; | 320 | u64 status_fifo_addr; |
319 | 321 | ||
320 | /* | 322 | /* |
321 | * Variable used for logins, reconnects, logouts, query logins | 323 | * Waitqueue flag for logins, reconnects, logouts, query logins |
322 | */ | 324 | */ |
323 | atomic_t sbp2_login_complete; | 325 | int access_complete:1; |
324 | 326 | ||
325 | /* | 327 | /* |
326 | * Pool of command orbs, so we can have more than one overlapped command per id | 328 | * Pool of command orbs, so we can have more than one overlapped command per id |
@@ -344,6 +346,16 @@ struct scsi_id_instance_data { | |||
344 | 346 | ||
345 | /* Device specific workarounds/brokeness */ | 347 | /* Device specific workarounds/brokeness */ |
346 | unsigned workarounds; | 348 | unsigned workarounds; |
349 | |||
350 | atomic_t state; | ||
351 | struct work_struct protocol_work; | ||
352 | }; | ||
353 | |||
354 | /* For use in scsi_id_instance_data.state */ | ||
355 | enum sbp2lu_state_types { | ||
356 | SBP2LU_STATE_RUNNING, /* all normal */ | ||
357 | SBP2LU_STATE_IN_RESET, /* between bus reset and reconnect */ | ||
358 | SBP2LU_STATE_IN_SHUTDOWN /* when sbp2_remove was called */ | ||
347 | }; | 359 | }; |
348 | 360 | ||
349 | /* Sbp2 host data structure (one per IEEE1394 host) */ | 361 | /* Sbp2 host data structure (one per IEEE1394 host) */ |
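The per-LU state lives in an atomic_t so the bus-reset and shutdown paths can flag the logical unit without taking any lock. A minimal sketch of such a state check, mirroring the guards in sbp2util_notify_fetch_agent(); the helper name is hypothetical:

static int demo_lu_usable(struct scsi_id_instance_data *scsi_id)
{
	switch (atomic_read(&scsi_id->state)) {
	case SBP2LU_STATE_IN_RESET:	/* bus reset seen, reconnect pending */
	case SBP2LU_STATE_IN_SHUTDOWN:	/* sbp2_remove() in progress */
		return 0;
	default:			/* SBP2LU_STATE_RUNNING */
		return 1;
	}
}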
@@ -390,11 +402,6 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id); | |||
390 | static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, | 402 | static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int destid, |
391 | quadlet_t *data, u64 addr, size_t length, u16 flags); | 403 | quadlet_t *data, u64 addr, size_t length, u16 flags); |
392 | static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait); | 404 | static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait); |
393 | static int sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, | ||
394 | struct sbp2_command_info *command); | ||
395 | static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, | ||
396 | struct scsi_cmnd *SCpnt, | ||
397 | void (*done)(struct scsi_cmnd *)); | ||
398 | static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, | 405 | static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, |
399 | unchar *sense_data); | 406 | unchar *sense_data); |
400 | static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | 407 | static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, |
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index c6e3f02bc6d7..9bc65059cc69 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c | |||
@@ -49,16 +49,16 @@ | |||
49 | #include <linux/compat.h> | 49 | #include <linux/compat.h> |
50 | #include <linux/cdev.h> | 50 | #include <linux/cdev.h> |
51 | 51 | ||
52 | #include "ieee1394.h" | 52 | #include "dma.h" |
53 | #include "ieee1394_types.h" | 53 | #include "highlevel.h" |
54 | #include "hosts.h" | 54 | #include "hosts.h" |
55 | #include "ieee1394.h" | ||
55 | #include "ieee1394_core.h" | 56 | #include "ieee1394_core.h" |
56 | #include "highlevel.h" | 57 | #include "ieee1394_hotplug.h" |
57 | #include "video1394.h" | 58 | #include "ieee1394_types.h" |
58 | #include "nodemgr.h" | 59 | #include "nodemgr.h" |
59 | #include "dma.h" | ||
60 | |||
61 | #include "ohci1394.h" | 60 | #include "ohci1394.h" |
61 | #include "video1394.h" | ||
62 | 62 | ||
63 | #define ISO_CHANNELS 64 | 63 | #define ISO_CHANNELS 64 |
64 | 64 | ||
@@ -129,7 +129,7 @@ struct file_ctx { | |||
129 | #define DBGMSG(card, fmt, args...) \ | 129 | #define DBGMSG(card, fmt, args...) \ |
130 | printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args) | 130 | printk(KERN_INFO "video1394_%d: " fmt "\n" , card , ## args) |
131 | #else | 131 | #else |
132 | #define DBGMSG(card, fmt, args...) | 132 | #define DBGMSG(card, fmt, args...) do {} while (0) |
133 | #endif | 133 | #endif |
134 | 134 | ||
135 | /* print general (card independent) information */ | 135 | /* print general (card independent) information */ |
@@ -1181,7 +1181,8 @@ static int video1394_mmap(struct file *file, struct vm_area_struct *vma) | |||
1181 | 1181 | ||
1182 | lock_kernel(); | 1182 | lock_kernel(); |
1183 | if (ctx->current_ctx == NULL) { | 1183 | if (ctx->current_ctx == NULL) { |
1184 | PRINT(KERN_ERR, ctx->ohci->host->id, "Current iso context not set"); | 1184 | PRINT(KERN_ERR, ctx->ohci->host->id, |
1185 | "Current iso context not set"); | ||
1185 | } else | 1186 | } else |
1186 | res = dma_region_mmap(&ctx->current_ctx->dma, file, vma); | 1187 | res = dma_region_mmap(&ctx->current_ctx->dma, file, vma); |
1187 | unlock_kernel(); | 1188 | unlock_kernel(); |
@@ -1189,6 +1190,40 @@ static int video1394_mmap(struct file *file, struct vm_area_struct *vma) | |||
1189 | return res; | 1190 | return res; |
1190 | } | 1191 | } |
1191 | 1192 | ||
1193 | static unsigned int video1394_poll(struct file *file, poll_table *pt) | ||
1194 | { | ||
1195 | struct file_ctx *ctx; | ||
1196 | unsigned int mask = 0; | ||
1197 | unsigned long flags; | ||
1198 | struct dma_iso_ctx *d; | ||
1199 | int i; | ||
1200 | |||
1201 | lock_kernel(); | ||
1202 | ctx = file->private_data; | ||
1203 | d = ctx->current_ctx; | ||
1204 | if (d == NULL) { | ||
1205 | PRINT(KERN_ERR, ctx->ohci->host->id, | ||
1206 | "Current iso context not set"); | ||
1207 | mask = POLLERR; | ||
1208 | goto done; | ||
1209 | } | ||
1210 | |||
1211 | poll_wait(file, &d->waitq, pt); | ||
1212 | |||
1213 | spin_lock_irqsave(&d->lock, flags); | ||
1214 | for (i = 0; i < d->num_desc; i++) { | ||
1215 | if (d->buffer_status[i] == VIDEO1394_BUFFER_READY) { | ||
1216 | mask |= POLLIN | POLLRDNORM; | ||
1217 | break; | ||
1218 | } | ||
1219 | } | ||
1220 | spin_unlock_irqrestore(&d->lock, flags); | ||
1221 | done: | ||
1222 | unlock_kernel(); | ||
1223 | |||
1224 | return mask; | ||
1225 | } | ||
1226 | |||
1192 | static int video1394_open(struct inode *inode, struct file *file) | 1227 | static int video1394_open(struct inode *inode, struct file *file) |
1193 | { | 1228 | { |
1194 | int i = ieee1394_file_to_instance(file); | 1229 | int i = ieee1394_file_to_instance(file); |
@@ -1257,6 +1292,7 @@ static struct file_operations video1394_fops= | |||
1257 | #ifdef CONFIG_COMPAT | 1292 | #ifdef CONFIG_COMPAT |
1258 | .compat_ioctl = video1394_compat_ioctl, | 1293 | .compat_ioctl = video1394_compat_ioctl, |
1259 | #endif | 1294 | #endif |
1295 | .poll = video1394_poll, | ||
1260 | .mmap = video1394_mmap, | 1296 | .mmap = video1394_mmap, |
1261 | .open = video1394_open, | 1297 | .open = video1394_open, |
1262 | .release = video1394_release | 1298 | .release = video1394_release |