diff options
139 files changed, 10038 insertions, 1599 deletions
diff --git a/Documentation/filesystems/squashfs.txt b/Documentation/filesystems/squashfs.txt new file mode 100644 index 000000000000..3e79e4a7a392 --- /dev/null +++ b/Documentation/filesystems/squashfs.txt | |||
@@ -0,0 +1,225 @@ | |||
1 | SQUASHFS 4.0 FILESYSTEM | ||
2 | ======================= | ||
3 | |||
4 | Squashfs is a compressed read-only filesystem for Linux. | ||
5 | It uses zlib compression to compress files, inodes and directories. | ||
6 | Inodes in the system are very small and all blocks are packed to minimise | ||
7 | data overhead. Block sizes greater than 4K are supported up to a maximum | ||
8 | of 1Mbytes (default block size 128K). | ||
9 | |||
10 | Squashfs is intended for general read-only filesystem use, for archival | ||
11 | use (i.e. in cases where a .tar.gz file may be used), and in constrained | ||
12 | block device/memory systems (e.g. embedded systems) where low overhead is | ||
13 | needed. | ||
14 | |||
15 | Mailing list: squashfs-devel@lists.sourceforge.net | ||
16 | Web site: www.squashfs.org | ||
17 | |||
18 | 1. FILESYSTEM FEATURES | ||
19 | ---------------------- | ||
20 | |||
21 | Squashfs filesystem features versus Cramfs: | ||
22 | |||
23 | Squashfs Cramfs | ||
24 | |||
25 | Max filesystem size: 2^64 16 MiB | ||
26 | Max file size: ~ 2 TiB 16 MiB | ||
27 | Max files: unlimited unlimited | ||
28 | Max directories: unlimited unlimited | ||
29 | Max entries per directory: unlimited unlimited | ||
30 | Max block size: 1 MiB 4 KiB | ||
31 | Metadata compression: yes no | ||
32 | Directory indexes: yes no | ||
33 | Sparse file support: yes no | ||
34 | Tail-end packing (fragments): yes no | ||
35 | Exportable (NFS etc.): yes no | ||
36 | Hard link support: yes no | ||
37 | "." and ".." in readdir: yes no | ||
38 | Real inode numbers: yes no | ||
39 | 32-bit uids/gids: yes no | ||
40 | File creation time: yes no | ||
41 | Xattr and ACL support: no no | ||
42 | |||
43 | Squashfs compresses data, inodes and directories. In addition, inode and | ||
44 | directory data are highly compacted, and packed on byte boundaries. Each | ||
45 | compressed inode is on average 8 bytes in length (the exact length varies on | ||
46 | file type, i.e. regular file, directory, symbolic link, and block/char device | ||
47 | inodes have different sizes). | ||
48 | |||
49 | 2. USING SQUASHFS | ||
50 | ----------------- | ||
51 | |||
52 | As squashfs is a read-only filesystem, the mksquashfs program must be used to | ||
53 | create populated squashfs filesystems. This and other squashfs utilities | ||
54 | can be obtained from http://www.squashfs.org. Usage instructions can be | ||
55 | obtained from this site also. | ||
56 | |||
57 | |||
58 | 3. SQUASHFS FILESYSTEM DESIGN | ||
59 | ----------------------------- | ||
60 | |||
61 | A squashfs filesystem consists of seven parts, packed together on a byte | ||
62 | alignment: | ||
63 | |||
64 | --------------- | ||
65 | | superblock | | ||
66 | |---------------| | ||
67 | | datablocks | | ||
68 | | & fragments | | ||
69 | |---------------| | ||
70 | | inode table | | ||
71 | |---------------| | ||
72 | | directory | | ||
73 | | table | | ||
74 | |---------------| | ||
75 | | fragment | | ||
76 | | table | | ||
77 | |---------------| | ||
78 | | export | | ||
79 | | table | | ||
80 | |---------------| | ||
81 | | uid/gid | | ||
82 | | lookup table | | ||
83 | --------------- | ||
84 | |||
85 | Compressed data blocks are written to the filesystem as files are read from | ||
86 | the source directory, and checked for duplicates. Once all file data has been | ||
87 | written the completed inode, directory, fragment, export and uid/gid lookup | ||
88 | tables are written. | ||
89 | |||
90 | 3.1 Inodes | ||
91 | ---------- | ||
92 | |||
93 | Metadata (inodes and directories) are compressed in 8Kbyte blocks. Each | ||
94 | compressed block is prefixed by a two byte length, the top bit is set if the | ||
95 | block is uncompressed. A block will be uncompressed if the -noI option is set, | ||
96 | or if the compressed block was larger than the uncompressed block. | ||
97 | |||
98 | Inodes are packed into the metadata blocks, and are not aligned to block | ||
99 | boundaries, therefore inodes overlap compressed blocks. Inodes are identified | ||
100 | by a 48-bit number which encodes the location of the compressed metadata block | ||
101 | containing the inode, and the byte offset into that block where the inode is | ||
102 | placed (<block, offset>). | ||
103 | |||
104 | To maximise compression there are different inodes for each file type | ||
105 | (regular file, directory, device, etc.), the inode contents and length | ||
106 | varying with the type. | ||
107 | |||
108 | To further maximise compression, two types of regular file inode and | ||
109 | directory inode are defined: inodes optimised for frequently occurring | ||
110 | regular files and directories, and extended types where extra | ||
111 | information has to be stored. | ||
112 | |||
113 | 3.2 Directories | ||
114 | --------------- | ||
115 | |||
116 | Like inodes, directories are packed into compressed metadata blocks, stored | ||
117 | in a directory table. Directories are accessed using the start address of | ||
118 | the metablock containing the directory and the offset into the | ||
119 | decompressed block (<block, offset>). | ||
120 | |||
121 | Directories are organised in a slightly complex way, and are not simply | ||
122 | a list of file names. The organisation takes advantage of the | ||
123 | fact that (in most cases) the inodes of the files will be in the same | ||
124 | compressed metadata block, and therefore, can share the start block. | ||
125 | Directories are therefore organised in a two level list, a directory | ||
126 | header containing the shared start block value, and a sequence of directory | ||
127 | entries, each of which share the shared start block. A new directory header | ||
128 | is written once/if the inode start block changes. The directory | ||
129 | header/directory entry list is repeated as many times as necessary. | ||
130 | |||
131 | Directories are sorted, and can contain a directory index to speed up | ||
132 | file lookup. Directory indexes store one entry per metablock, each entry | ||
133 | storing the index/filename mapping to the first directory header | ||
134 | in each metadata block. Directories are sorted in alphabetical order, | ||
135 | and at lookup the index is scanned linearly looking for the first filename | ||
136 | alphabetically larger than the filename being looked up. At this point the | ||
137 | location of the metadata block the filename is in has been found. | ||
138 | The general idea of the index is to ensure only one metadata block needs to be | ||
139 | decompressed to do a lookup irrespective of the length of the directory. | ||
140 | This scheme has the advantage that it doesn't require extra memory overhead | ||
141 | and doesn't require much extra storage on disk. | ||
142 | |||
143 | 3.3 File data | ||
144 | ------------- | ||
145 | |||
146 | Regular files consist of a sequence of contiguous compressed blocks, and/or a | ||
147 | compressed fragment block (tail-end packed block). The compressed size | ||
148 | of each datablock is stored in a block list contained within the | ||
149 | file inode. | ||
150 | |||
151 | To speed up access to datablocks when reading 'large' files (256 Mbytes or | ||
152 | larger), the code implements an index cache that caches the mapping from | ||
153 | block index to datablock location on disk. | ||
154 | |||
155 | The index cache allows Squashfs to handle large files (up to 1.75 TiB) while | ||
156 | retaining a simple and space-efficient block list on disk. The cache | ||
157 | is split into slots, caching up to eight 224 GiB files (128 KiB blocks). | ||
158 | Larger files use multiple slots, with 1.75 TiB files using all 8 slots. | ||
159 | The index cache is designed to be memory efficient, and by default uses | ||
160 | 16 KiB. | ||
161 | |||
162 | 3.4 Fragment lookup table | ||
163 | ------------------------- | ||
164 | |||
165 | Regular files can contain a fragment index which is mapped to a fragment | ||
166 | location on disk and compressed size using a fragment lookup table. This | ||
167 | fragment lookup table is itself stored compressed into metadata blocks. | ||
168 | A second index table is used to locate these. This second index table for | ||
169 | speed of access (and because it is small) is read at mount time and cached | ||
170 | in memory. | ||
171 | |||
172 | 3.5 Uid/gid lookup table | ||
173 | ------------------------ | ||
174 | |||
175 | For space efficiency regular files store uid and gid indexes, which are | ||
176 | converted to 32-bit uids/gids using an id look up table. This table is | ||
177 | stored compressed into metadata blocks. A second index table is used to | ||
178 | locate these. This second index table for speed of access (and because it | ||
179 | is small) is read at mount time and cached in memory. | ||
180 | |||
181 | 3.6 Export table | ||
182 | ---------------- | ||
183 | |||
184 | To enable Squashfs filesystems to be exportable (via NFS etc.) filesystems | ||
185 | can optionally (disabled with the -no-exports Mksquashfs option) contain | ||
186 | an inode number to inode disk location lookup table. This is required to | ||
187 | enable Squashfs to map inode numbers passed in filehandles to the inode | ||
188 | location on disk, which is necessary when the export code reinstantiates | ||
189 | expired/flushed inodes. | ||
190 | |||
191 | This table is stored compressed into metadata blocks. A second index table is | ||
192 | used to locate these. This second index table for speed of access (and because | ||
193 | it is small) is read at mount time and cached in memory. | ||
194 | |||
195 | |||
196 | 4. TODOS AND OUTSTANDING ISSUES | ||
197 | ------------------------------- | ||
198 | |||
199 | 4.1 Todo list | ||
200 | ------------- | ||
201 | |||
202 | Implement Xattr and ACL support. The Squashfs 4.0 filesystem layout has hooks | ||
203 | for these but the code has not been written. Once the code has been written | ||
204 | the existing layout should not require modification. | ||
205 | |||
206 | 4.2 Squashfs internal cache | ||
207 | --------------------------- | ||
208 | |||
209 | Blocks in Squashfs are compressed. To avoid repeatedly decompressing | ||
210 | recently accessed data Squashfs uses two small metadata and fragment caches. | ||
211 | |||
212 | The cache is not used for file datablocks, these are decompressed and cached in | ||
213 | the page-cache in the normal way. The cache is used to temporarily cache | ||
214 | fragment and metadata blocks which have been read as a result of a metadata | ||
215 | (i.e. inode or directory) or fragment access. Because metadata and fragments | ||
216 | are packed together into blocks (to gain greater compression) the read of a | ||
217 | particular piece of metadata or fragment will retrieve other metadata/fragments | ||
218 | which have been packed with it, these because of locality-of-reference may be | ||
219 | read in the near future. Temporarily caching them ensures they are available | ||
220 | for near future access without requiring an additional read and decompress. | ||
221 | |||
222 | In the future this internal cache may be replaced with an implementation which | ||
223 | uses the kernel page cache. Because the page cache operates on page sized | ||
224 | units this may introduce additional complexity in terms of locking and | ||
225 | associated race conditions. | ||
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index fcc48bf722a8..8511d3532c27 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -834,8 +834,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
834 | 834 | ||
835 | hlt [BUGS=ARM,SH] | 835 | hlt [BUGS=ARM,SH] |
836 | 836 | ||
837 | hvc_iucv= [S390] Number of z/VM IUCV Hypervisor console (HVC) | 837 | hvc_iucv= [S390] Number of z/VM IUCV hypervisor console (HVC) |
838 | back-ends. Valid parameters: 0..8 | 838 | terminal devices. Valid values: 0..8 |
839 | 839 | ||
840 | i8042.debug [HW] Toggle i8042 debug mode | 840 | i8042.debug [HW] Toggle i8042 debug mode |
841 | i8042.direct [HW] Put keyboard port into non-translated mode | 841 | i8042.direct [HW] Put keyboard port into non-translated mode |
diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt index 7714f57caad5..b565e8279d13 100644 --- a/Documentation/nommu-mmap.txt +++ b/Documentation/nommu-mmap.txt | |||
@@ -109,12 +109,18 @@ and it's also much more restricted in the latter case: | |||
109 | FURTHER NOTES ON NO-MMU MMAP | 109 | FURTHER NOTES ON NO-MMU MMAP |
110 | ============================ | 110 | ============================ |
111 | 111 | ||
112 | (*) A request for a private mapping of less than a page in size may not return | 112 | (*) A request for a private mapping of a file may return a buffer that is not |
113 | a page-aligned buffer. This is because the kernel calls kmalloc() to | 113 | page-aligned. This is because XIP may take place, and the data may not be |
114 | allocate the buffer, not get_free_page(). | 114 | page aligned in the backing store. |
115 | 115 | ||
116 | (*) A list of all the mappings on the system is visible through /proc/maps in | 116 | (*) A request for an anonymous mapping will always be page aligned. If |
117 | no-MMU mode. | 117 | possible the size of the request should be a power of two otherwise some |
118 | of the space may be wasted as the kernel must allocate a power-of-2 | ||
119 | granule but will only discard the excess if appropriately configured as | ||
120 | this has an effect on fragmentation. | ||
121 | |||
122 | (*) A list of all the private copy and anonymous mappings on the system is | ||
123 | visible through /proc/maps in no-MMU mode. | ||
118 | 124 | ||
119 | (*) A list of all the mappings in use by a process is visible through | 125 | (*) A list of all the mappings in use by a process is visible through |
120 | /proc/<pid>/maps in no-MMU mode. | 126 | /proc/<pid>/maps in no-MMU mode. |
@@ -242,3 +248,18 @@ PROVIDING SHAREABLE BLOCK DEVICE SUPPORT | |||
242 | Provision of shared mappings on block device files is exactly the same as for | 248 | Provision of shared mappings on block device files is exactly the same as for |
243 | character devices. If there isn't a real device underneath, then the driver | 249 | character devices. If there isn't a real device underneath, then the driver |
244 | should allocate sufficient contiguous memory to honour any supported mapping. | 250 | should allocate sufficient contiguous memory to honour any supported mapping. |
251 | |||
252 | |||
253 | ================================= | ||
254 | ADJUSTING PAGE TRIMMING BEHAVIOUR | ||
255 | ================================= | ||
256 | |||
257 | NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages | ||
258 | when performing an allocation. This can have adverse effects on memory | ||
259 | fragmentation, and as such, is left configurable. The default behaviour is to | ||
260 | aggressively trim allocations and discard any excess pages back in to the page | ||
261 | allocator. In order to retain finer-grained control over fragmentation, this | ||
262 | behaviour can either be disabled completely, or bumped up to a higher page | ||
263 | watermark where trimming begins. | ||
264 | |||
265 | Page trimming behaviour is configurable via the sysctl `vm.nr_trim_pages'. | ||
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index cd05994a49e6..a3415070bcac 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt | |||
@@ -38,6 +38,7 @@ Currently, these files are in /proc/sys/vm: | |||
38 | - numa_zonelist_order | 38 | - numa_zonelist_order |
39 | - nr_hugepages | 39 | - nr_hugepages |
40 | - nr_overcommit_hugepages | 40 | - nr_overcommit_hugepages |
41 | - nr_trim_pages (only if CONFIG_MMU=n) | ||
41 | 42 | ||
42 | ============================================================== | 43 | ============================================================== |
43 | 44 | ||
@@ -348,3 +349,20 @@ Change the maximum size of the hugepage pool. The maximum is | |||
348 | nr_hugepages + nr_overcommit_hugepages. | 349 | nr_hugepages + nr_overcommit_hugepages. |
349 | 350 | ||
350 | See Documentation/vm/hugetlbpage.txt | 351 | See Documentation/vm/hugetlbpage.txt |
352 | |||
353 | ============================================================== | ||
354 | |||
355 | nr_trim_pages | ||
356 | |||
357 | This is available only on NOMMU kernels. | ||
358 | |||
359 | This value adjusts the excess page trimming behaviour of power-of-2 aligned | ||
360 | NOMMU mmap allocations. | ||
361 | |||
362 | A value of 0 disables trimming of allocations entirely, while a value of 1 | ||
363 | trims excess pages aggressively. Any value >= 1 acts as the watermark where | ||
364 | trimming of allocations is initiated. | ||
365 | |||
366 | The default value is 1. | ||
367 | |||
368 | See Documentation/nommu-mmap.txt for more information. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 57e0309243cc..6f65a269cb17 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4081,6 +4081,13 @@ L: cbe-oss-dev@ozlabs.org | |||
4081 | W: http://www.ibm.com/developerworks/power/cell/ | 4081 | W: http://www.ibm.com/developerworks/power/cell/ |
4082 | S: Supported | 4082 | S: Supported |
4083 | 4083 | ||
4084 | SQUASHFS FILE SYSTEM | ||
4085 | P: Phillip Lougher | ||
4086 | M: phillip@lougher.demon.co.uk | ||
4087 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) | ||
4088 | W: http://squashfs.org.uk | ||
4089 | S: Maintained | ||
4090 | |||
4084 | SRM (Alpha) environment access | 4091 | SRM (Alpha) environment access |
4085 | P: Jan-Benedict Glaw | 4092 | P: Jan-Benedict Glaw |
4086 | M: jbglaw@lug-owl.de | 4093 | M: jbglaw@lug-owl.de |
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index 53099d4ee421..b561584d04a1 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h | |||
@@ -24,7 +24,6 @@ typedef struct { | |||
24 | * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> | 24 | * modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com> |
25 | */ | 25 | */ |
26 | typedef struct { | 26 | typedef struct { |
27 | struct vm_list_struct *vmlist; | ||
28 | unsigned long end_brk; | 27 | unsigned long end_brk; |
29 | } mm_context_t; | 28 | } mm_context_t; |
30 | 29 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 67960017dc8f..310e479309ef 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock); | |||
71 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | 71 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() |
72 | * would have to initialise this each time prior to calling vm_region_alloc(). | 72 | * would have to initialise this each time prior to calling vm_region_alloc(). |
73 | */ | 73 | */ |
74 | struct vm_region { | 74 | struct arm_vm_region { |
75 | struct list_head vm_list; | 75 | struct list_head vm_list; |
76 | unsigned long vm_start; | 76 | unsigned long vm_start; |
77 | unsigned long vm_end; | 77 | unsigned long vm_end; |
@@ -79,20 +79,20 @@ struct vm_region { | |||
79 | int vm_active; | 79 | int vm_active; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static struct vm_region consistent_head = { | 82 | static struct arm_vm_region consistent_head = { |
83 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), | 83 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), |
84 | .vm_start = CONSISTENT_BASE, | 84 | .vm_start = CONSISTENT_BASE, |
85 | .vm_end = CONSISTENT_END, | 85 | .vm_end = CONSISTENT_END, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static struct vm_region * | 88 | static struct arm_vm_region * |
89 | vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) | 89 | arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp) |
90 | { | 90 | { |
91 | unsigned long addr = head->vm_start, end = head->vm_end - size; | 91 | unsigned long addr = head->vm_start, end = head->vm_end - size; |
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | struct vm_region *c, *new; | 93 | struct arm_vm_region *c, *new; |
94 | 94 | ||
95 | new = kmalloc(sizeof(struct vm_region), gfp); | 95 | new = kmalloc(sizeof(struct arm_vm_region), gfp); |
96 | if (!new) | 96 | if (!new) |
97 | goto out; | 97 | goto out; |
98 | 98 | ||
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) | |||
127 | return NULL; | 127 | return NULL; |
128 | } | 128 | } |
129 | 129 | ||
130 | static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) | 130 | static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr) |
131 | { | 131 | { |
132 | struct vm_region *c; | 132 | struct arm_vm_region *c; |
133 | 133 | ||
134 | list_for_each_entry(c, &head->vm_list, vm_list) { | 134 | list_for_each_entry(c, &head->vm_list, vm_list) { |
135 | if (c->vm_active && c->vm_start == addr) | 135 | if (c->vm_active && c->vm_start == addr) |
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
149 | pgprot_t prot) | 149 | pgprot_t prot) |
150 | { | 150 | { |
151 | struct page *page; | 151 | struct page *page; |
152 | struct vm_region *c; | 152 | struct arm_vm_region *c; |
153 | unsigned long order; | 153 | unsigned long order; |
154 | u64 mask = ISA_DMA_THRESHOLD, limit; | 154 | u64 mask = ISA_DMA_THRESHOLD, limit; |
155 | 155 | ||
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
214 | /* | 214 | /* |
215 | * Allocate a virtual address in the consistent mapping region. | 215 | * Allocate a virtual address in the consistent mapping region. |
216 | */ | 216 | */ |
217 | c = vm_region_alloc(&consistent_head, size, | 217 | c = arm_vm_region_alloc(&consistent_head, size, |
218 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); | 218 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); |
219 | if (c) { | 219 | if (c) { |
220 | pte_t *pte; | 220 | pte_t *pte; |
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
311 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | 311 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
312 | { | 312 | { |
313 | unsigned long flags, user_size, kern_size; | 313 | unsigned long flags, user_size, kern_size; |
314 | struct vm_region *c; | 314 | struct arm_vm_region *c; |
315 | int ret = -ENXIO; | 315 | int ret = -ENXIO; |
316 | 316 | ||
317 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 317 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
318 | 318 | ||
319 | spin_lock_irqsave(&consistent_lock, flags); | 319 | spin_lock_irqsave(&consistent_lock, flags); |
320 | c = vm_region_find(&consistent_head, (unsigned long)cpu_addr); | 320 | c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); |
321 | spin_unlock_irqrestore(&consistent_lock, flags); | 321 | spin_unlock_irqrestore(&consistent_lock, flags); |
322 | 322 | ||
323 | if (c) { | 323 | if (c) { |
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine); | |||
359 | */ | 359 | */ |
360 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) | 360 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) |
361 | { | 361 | { |
362 | struct vm_region *c; | 362 | struct arm_vm_region *c; |
363 | unsigned long flags, addr; | 363 | unsigned long flags, addr; |
364 | pte_t *ptep; | 364 | pte_t *ptep; |
365 | int idx; | 365 | int idx; |
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr | |||
378 | size = PAGE_ALIGN(size); | 378 | size = PAGE_ALIGN(size); |
379 | 379 | ||
380 | spin_lock_irqsave(&consistent_lock, flags); | 380 | spin_lock_irqsave(&consistent_lock, flags); |
381 | c = vm_region_find(&consistent_head, (unsigned long)cpu_addr); | 381 | c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); |
382 | if (!c) | 382 | if (!c) |
383 | goto no_area; | 383 | goto no_area; |
384 | 384 | ||
diff --git a/arch/blackfin/include/asm/mmu.h b/arch/blackfin/include/asm/mmu.h index 757e43906ed4..dbfd686360e6 100644 --- a/arch/blackfin/include/asm/mmu.h +++ b/arch/blackfin/include/asm/mmu.h | |||
@@ -10,7 +10,6 @@ struct sram_list_struct { | |||
10 | }; | 10 | }; |
11 | 11 | ||
12 | typedef struct { | 12 | typedef struct { |
13 | struct vm_list_struct *vmlist; | ||
14 | unsigned long end_brk; | 13 | unsigned long end_brk; |
15 | unsigned long stack_start; | 14 | unsigned long stack_start; |
16 | 15 | ||
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index d2d388536630..594e325b40e4 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c | |||
@@ -160,15 +160,15 @@ put_reg(struct task_struct *task, int regno, unsigned long data) | |||
160 | static inline int is_user_addr_valid(struct task_struct *child, | 160 | static inline int is_user_addr_valid(struct task_struct *child, |
161 | unsigned long start, unsigned long len) | 161 | unsigned long start, unsigned long len) |
162 | { | 162 | { |
163 | struct vm_list_struct *vml; | 163 | struct vm_area_struct *vma; |
164 | struct sram_list_struct *sraml; | 164 | struct sram_list_struct *sraml; |
165 | 165 | ||
166 | /* overflow */ | 166 | /* overflow */ |
167 | if (start + len < start) | 167 | if (start + len < start) |
168 | return -EIO; | 168 | return -EIO; |
169 | 169 | ||
170 | for (vml = child->mm->context.vmlist; vml; vml = vml->next) | 170 | vma = find_vma(child->mm, start); |
171 | if (start >= vml->vma->vm_start && start + len < vml->vma->vm_end) | 171 | if (vma && start >= vma->vm_start && start + len <= vma->vm_end) |
172 | return 0; | 172 | return 0; |
173 | 173 | ||
174 | for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next) | 174 | for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next) |
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 17d8e4172896..5b0667da8d05 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/kallsyms.h> | 33 | #include <linux/kallsyms.h> |
34 | #include <linux/fs.h> | 34 | #include <linux/fs.h> |
35 | #include <linux/rbtree.h> | ||
35 | #include <asm/traps.h> | 36 | #include <asm/traps.h> |
36 | #include <asm/cacheflush.h> | 37 | #include <asm/cacheflush.h> |
37 | #include <asm/cplb.h> | 38 | #include <asm/cplb.h> |
@@ -83,6 +84,7 @@ static void decode_address(char *buf, unsigned long address) | |||
83 | struct mm_struct *mm; | 84 | struct mm_struct *mm; |
84 | unsigned long flags, offset; | 85 | unsigned long flags, offset; |
85 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | 86 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); |
87 | struct rb_node *n; | ||
86 | 88 | ||
87 | #ifdef CONFIG_KALLSYMS | 89 | #ifdef CONFIG_KALLSYMS |
88 | unsigned long symsize; | 90 | unsigned long symsize; |
@@ -128,9 +130,10 @@ static void decode_address(char *buf, unsigned long address) | |||
128 | if (!mm) | 130 | if (!mm) |
129 | continue; | 131 | continue; |
130 | 132 | ||
131 | vml = mm->context.vmlist; | 133 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { |
132 | while (vml) { | 134 | struct vm_area_struct *vma; |
133 | struct vm_area_struct *vma = vml->vma; | 135 | |
136 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
134 | 137 | ||
135 | if (address >= vma->vm_start && address < vma->vm_end) { | 138 | if (address >= vma->vm_start && address < vma->vm_end) { |
136 | char _tmpbuf[256]; | 139 | char _tmpbuf[256]; |
@@ -176,8 +179,6 @@ static void decode_address(char *buf, unsigned long address) | |||
176 | 179 | ||
177 | goto done; | 180 | goto done; |
178 | } | 181 | } |
179 | |||
180 | vml = vml->next; | ||
181 | } | 182 | } |
182 | if (!in_atomic) | 183 | if (!in_atomic) |
183 | mmput(mm); | 184 | mmput(mm); |
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c index 709e9bdc6126..5e7d401d21e7 100644 --- a/arch/frv/kernel/ptrace.c +++ b/arch/frv/kernel/ptrace.c | |||
@@ -69,7 +69,8 @@ static inline int put_reg(struct task_struct *task, int regno, | |||
69 | } | 69 | } |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * check that an address falls within the bounds of the target process's memory mappings | 72 | * check that an address falls within the bounds of the target process's memory |
73 | * mappings | ||
73 | */ | 74 | */ |
74 | static inline int is_user_addr_valid(struct task_struct *child, | 75 | static inline int is_user_addr_valid(struct task_struct *child, |
75 | unsigned long start, unsigned long len) | 76 | unsigned long start, unsigned long len) |
@@ -79,11 +80,11 @@ static inline int is_user_addr_valid(struct task_struct *child, | |||
79 | return -EIO; | 80 | return -EIO; |
80 | return 0; | 81 | return 0; |
81 | #else | 82 | #else |
82 | struct vm_list_struct *vml; | 83 | struct vm_area_struct *vma; |
83 | 84 | ||
84 | for (vml = child->mm->context.vmlist; vml; vml = vml->next) | 85 | vma = find_vma(child->mm, start); |
85 | if (start >= vml->vma->vm_start && start + len <= vml->vma->vm_end) | 86 | if (vma && start >= vma->vm_start && start + len <= vma->vm_end) |
86 | return 0; | 87 | return 0; |
87 | 88 | ||
88 | return -EIO; | 89 | return -EIO; |
89 | #endif | 90 | #endif |
diff --git a/arch/h8300/include/asm/mmu.h b/arch/h8300/include/asm/mmu.h index 2ce06ea46104..31309969df70 100644 --- a/arch/h8300/include/asm/mmu.h +++ b/arch/h8300/include/asm/mmu.h | |||
@@ -4,7 +4,6 @@ | |||
4 | /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */ | 4 | /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */ |
5 | 5 | ||
6 | typedef struct { | 6 | typedef struct { |
7 | struct vm_list_struct *vmlist; | ||
8 | unsigned long end_brk; | 7 | unsigned long end_brk; |
9 | } mm_context_t; | 8 | } mm_context_t; |
10 | 9 | ||
diff --git a/arch/m68knommu/include/asm/mmu.h b/arch/m68knommu/include/asm/mmu.h index 5fa6b68353ba..e2da1e6f09fe 100644 --- a/arch/m68knommu/include/asm/mmu.h +++ b/arch/m68knommu/include/asm/mmu.h | |||
@@ -4,7 +4,6 @@ | |||
4 | /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */ | 4 | /* Copyright (C) 2002, David McCullough <davidm@snapgear.com> */ |
5 | 5 | ||
6 | typedef struct { | 6 | typedef struct { |
7 | struct vm_list_struct *vmlist; | ||
8 | unsigned long end_brk; | 7 | unsigned long end_brk; |
9 | } mm_context_t; | 8 | } mm_context_t; |
10 | 9 | ||
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h index dfe3c7f3439a..fc71d8a6709b 100644 --- a/arch/s390/include/asm/chpid.h +++ b/arch/s390/include/asm/chpid.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #define _ASM_S390_CHPID_H _ASM_S390_CHPID_H | 9 | #define _ASM_S390_CHPID_H _ASM_S390_CHPID_H |
10 | 10 | ||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <asm/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #define __MAX_CHPID 255 | 14 | #define __MAX_CHPID 255 |
15 | 15 | ||
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index d38d0cf62d4b..807997f7414b 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #ifndef _ASM_CHSC_H | 8 | #ifndef _ASM_CHSC_H |
9 | #define _ASM_CHSC_H | 9 | #define _ASM_CHSC_H |
10 | 10 | ||
11 | #include <linux/types.h> | ||
11 | #include <asm/chpid.h> | 12 | #include <asm/chpid.h> |
12 | #include <asm/schid.h> | 13 | #include <asm/schid.h> |
13 | 14 | ||
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h index 50196857d27a..39ae03294794 100644 --- a/arch/s390/include/asm/cmb.h +++ b/arch/s390/include/asm/cmb.h | |||
@@ -1,5 +1,8 @@ | |||
1 | #ifndef S390_CMB_H | 1 | #ifndef S390_CMB_H |
2 | #define S390_CMB_H | 2 | #define S390_CMB_H |
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
3 | /** | 6 | /** |
4 | * struct cmbdata - channel measurement block data for user space | 7 | * struct cmbdata - channel measurement block data for user space |
5 | * @size: size of the stored data | 8 | * @size: size of the stored data |
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h index 55b2b80cdf6e..e2db6f16d9c8 100644 --- a/arch/s390/include/asm/dasd.h +++ b/arch/s390/include/asm/dasd.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #ifndef DASD_H | 15 | #ifndef DASD_H |
16 | #define DASD_H | 16 | #define DASD_H |
17 | #include <linux/types.h> | ||
17 | #include <linux/ioctl.h> | 18 | #include <linux/ioctl.h> |
18 | 19 | ||
19 | #define DASD_IOCTL_LETTER 'D' | 20 | #define DASD_IOCTL_LETTER 'D' |
@@ -78,6 +79,7 @@ typedef struct dasd_information2_t { | |||
78 | #define DASD_FEATURE_USEDIAG 0x02 | 79 | #define DASD_FEATURE_USEDIAG 0x02 |
79 | #define DASD_FEATURE_INITIAL_ONLINE 0x04 | 80 | #define DASD_FEATURE_INITIAL_ONLINE 0x04 |
80 | #define DASD_FEATURE_ERPLOG 0x08 | 81 | #define DASD_FEATURE_ERPLOG 0x08 |
82 | #define DASD_FEATURE_FAILFAST 0x10 | ||
81 | 83 | ||
82 | #define DASD_PARTN_BITS 2 | 84 | #define DASD_PARTN_BITS 2 |
83 | 85 | ||
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h index d74002f95794..e1f54654e3ae 100644 --- a/arch/s390/include/asm/kvm.h +++ b/arch/s390/include/asm/kvm.h | |||
@@ -13,7 +13,7 @@ | |||
13 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 13 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
14 | * Christian Borntraeger <borntraeger@de.ibm.com> | 14 | * Christian Borntraeger <borntraeger@de.ibm.com> |
15 | */ | 15 | */ |
16 | #include <asm/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ | 18 | /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ |
19 | struct kvm_pic_state { | 19 | struct kvm_pic_state { |
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h index 397d93fba3a7..8cc113f92523 100644 --- a/arch/s390/include/asm/posix_types.h +++ b/arch/s390/include/asm/posix_types.h | |||
@@ -68,11 +68,7 @@ typedef unsigned short __kernel_old_dev_t; | |||
68 | #endif /* __s390x__ */ | 68 | #endif /* __s390x__ */ |
69 | 69 | ||
70 | typedef struct { | 70 | typedef struct { |
71 | #if defined(__KERNEL__) || defined(__USE_ALL) | ||
72 | int val[2]; | 71 | int val[2]; |
73 | #else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/ | ||
74 | int __val[2]; | ||
75 | #endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/ | ||
76 | } __kernel_fsid_t; | 72 | } __kernel_fsid_t; |
77 | 73 | ||
78 | 74 | ||
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 5396f9f12263..8920025c3c02 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h | |||
@@ -272,12 +272,15 @@ typedef struct | |||
272 | #define PSW_ASC_SECONDARY 0x0000800000000000UL | 272 | #define PSW_ASC_SECONDARY 0x0000800000000000UL |
273 | #define PSW_ASC_HOME 0x0000C00000000000UL | 273 | #define PSW_ASC_HOME 0x0000C00000000000UL |
274 | 274 | ||
275 | extern long psw_user32_bits; | ||
276 | |||
277 | #endif /* __s390x__ */ | 275 | #endif /* __s390x__ */ |
278 | 276 | ||
277 | #ifdef __KERNEL__ | ||
279 | extern long psw_kernel_bits; | 278 | extern long psw_kernel_bits; |
280 | extern long psw_user_bits; | 279 | extern long psw_user_bits; |
280 | #ifdef CONFIG_64BIT | ||
281 | extern long psw_user32_bits; | ||
282 | #endif | ||
283 | #endif | ||
281 | 284 | ||
282 | /* This macro merges a NEW PSW mask specified by the user into | 285 | /* This macro merges a NEW PSW mask specified by the user into |
283 | the currently active PSW mask CURRENT, modifying only those | 286 | the currently active PSW mask CURRENT, modifying only those |
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h index 930d378ef75a..06cbd1e8c943 100644 --- a/arch/s390/include/asm/qeth.h +++ b/arch/s390/include/asm/qeth.h | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #ifndef __ASM_S390_QETH_IOCTL_H__ | 11 | #ifndef __ASM_S390_QETH_IOCTL_H__ |
12 | #define __ASM_S390_QETH_IOCTL_H__ | 12 | #define __ASM_S390_QETH_IOCTL_H__ |
13 | #include <linux/types.h> | ||
13 | #include <linux/ioctl.h> | 14 | #include <linux/ioctl.h> |
14 | 15 | ||
15 | #define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE) | 16 | #define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE) |
diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h index 825503cf3dc2..3e4d401b4e45 100644 --- a/arch/s390/include/asm/schid.h +++ b/arch/s390/include/asm/schid.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef ASM_SCHID_H | 1 | #ifndef ASM_SCHID_H |
2 | #define ASM_SCHID_H | 2 | #define ASM_SCHID_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | struct subchannel_id { | 6 | struct subchannel_id { |
5 | __u32 cssid : 8; | 7 | __u32 cssid : 8; |
6 | __u32 : 4; | 8 | __u32 : 4; |
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h index bd9321aa55a9..eb18dc1f327b 100644 --- a/arch/s390/include/asm/swab.h +++ b/arch/s390/include/asm/swab.h | |||
@@ -9,7 +9,7 @@ | |||
9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <asm/types.h> | 12 | #include <linux/types.h> |
13 | 13 | ||
14 | #ifndef __s390x__ | 14 | #ifndef __s390x__ |
15 | # define __SWAB_64_THRU_32__ | 15 | # define __SWAB_64_THRU_32__ |
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index 41c547656130..3dc3fc228812 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h | |||
@@ -9,11 +9,7 @@ | |||
9 | #ifndef _S390_TYPES_H | 9 | #ifndef _S390_TYPES_H |
10 | #define _S390_TYPES_H | 10 | #define _S390_TYPES_H |
11 | 11 | ||
12 | #ifndef __s390x__ | 12 | #include <asm-generic/int-ll64.h> |
13 | # include <asm-generic/int-ll64.h> | ||
14 | #else | ||
15 | # include <asm-generic/int-l64.h> | ||
16 | #endif | ||
17 | 13 | ||
18 | #ifndef __ASSEMBLY__ | 14 | #ifndef __ASSEMBLY__ |
19 | 15 | ||
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 6b1896345eda..a65afc91e8aa 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h | |||
@@ -54,7 +54,5 @@ long sys_sigreturn(void); | |||
54 | long sys_rt_sigreturn(void); | 54 | long sys_rt_sigreturn(void); |
55 | long sys32_sigreturn(void); | 55 | long sys32_sigreturn(void); |
56 | long sys32_rt_sigreturn(void); | 56 | long sys32_rt_sigreturn(void); |
57 | long old_select(struct sel_arg_struct __user *arg); | ||
58 | long sys_ptrace(long request, long pid, long addr, long data); | ||
59 | 57 | ||
60 | #endif /* _ENTRY_H */ | 58 | #endif /* _ENTRY_H */ |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 9c0ccb532a45..2d337cbb9329 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -685,7 +685,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
685 | if (MACHINE_HAS_IEEE) | 685 | if (MACHINE_HAS_IEEE) |
686 | lowcore->extended_save_area_addr = (u32) save_area; | 686 | lowcore->extended_save_area_addr = (u32) save_area; |
687 | #else | 687 | #else |
688 | BUG_ON(vdso_alloc_per_cpu(smp_processor_id(), lowcore)); | 688 | if (vdso_alloc_per_cpu(smp_processor_id(), lowcore)) |
689 | BUG(); | ||
689 | #endif | 690 | #endif |
690 | set_prefix((u32)(unsigned long) lowcore); | 691 | set_prefix((u32)(unsigned long) lowcore); |
691 | local_mcck_enable(); | 692 | local_mcck_enable(); |
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 4fe952e557ac..c34be4568b80 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c | |||
@@ -103,25 +103,6 @@ out: | |||
103 | return error; | 103 | return error; |
104 | } | 104 | } |
105 | 105 | ||
106 | #ifndef CONFIG_64BIT | ||
107 | struct sel_arg_struct { | ||
108 | unsigned long n; | ||
109 | fd_set __user *inp, *outp, *exp; | ||
110 | struct timeval __user *tvp; | ||
111 | }; | ||
112 | |||
113 | asmlinkage long old_select(struct sel_arg_struct __user *arg) | ||
114 | { | ||
115 | struct sel_arg_struct a; | ||
116 | |||
117 | if (copy_from_user(&a, arg, sizeof(a))) | ||
118 | return -EFAULT; | ||
119 | /* sys_select() does the appropriate kernel locking */ | ||
120 | return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); | ||
121 | |||
122 | } | ||
123 | #endif /* CONFIG_64BIT */ | ||
124 | |||
125 | /* | 106 | /* |
126 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | 107 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. |
127 | * | 108 | * |
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 25a6a82f1c02..690e17819686 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -322,7 +322,8 @@ static int __init vdso_init(void) | |||
322 | vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); | 322 | vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); |
323 | vdso64_pagelist[vdso64_pages] = NULL; | 323 | vdso64_pagelist[vdso64_pages] = NULL; |
324 | #ifndef CONFIG_SMP | 324 | #ifndef CONFIG_SMP |
325 | BUG_ON(vdso_alloc_per_cpu(0, S390_lowcore)); | 325 | if (vdso_alloc_per_cpu(0, &S390_lowcore)) |
326 | BUG(); | ||
326 | #endif | 327 | #endif |
327 | vdso_init_cr5(); | 328 | vdso_init_cr5(); |
328 | #endif /* CONFIG_64BIT */ | 329 | #endif /* CONFIG_64BIT */ |
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S index c32f29c3d70c..ad8acfc949fb 100644 --- a/arch/s390/kernel/vdso32/gettimeofday.S +++ b/arch/s390/kernel/vdso32/gettimeofday.S | |||
@@ -13,10 +13,6 @@ | |||
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/unistd.h> | 14 | #include <asm/unistd.h> |
15 | 15 | ||
16 | #include <asm/vdso.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/unistd.h> | ||
19 | |||
20 | .text | 16 | .text |
21 | .align 4 | 17 | .align 4 |
22 | .globl __kernel_gettimeofday | 18 | .globl __kernel_gettimeofday |
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index a0775e1f08df..8300309698fa 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -47,7 +47,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
47 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; | 47 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; |
48 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; | 48 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; |
49 | vcpu->run->exit_reason = KVM_EXIT_S390_RESET; | 49 | vcpu->run->exit_reason = KVM_EXIT_S390_RESET; |
50 | VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx", | 50 | VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx", |
51 | vcpu->run->s390_reset_flags); | 51 | vcpu->run->s390_reset_flags); |
52 | return -EREMOTE; | 52 | return -EREMOTE; |
53 | } | 53 | } |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 2960702b4824..f4fe28a2521a 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -160,7 +160,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, | |||
160 | break; | 160 | break; |
161 | 161 | ||
162 | case KVM_S390_INT_VIRTIO: | 162 | case KVM_S390_INT_VIRTIO: |
163 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx", | 163 | VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", |
164 | inti->ext.ext_params, inti->ext.ext_params2); | 164 | inti->ext.ext_params, inti->ext.ext_params2); |
165 | vcpu->stat.deliver_virtio_interrupt++; | 165 | vcpu->stat.deliver_virtio_interrupt++; |
166 | rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); | 166 | rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603); |
@@ -360,7 +360,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | |||
360 | vcpu->arch.ckc_timer.expires = jiffies + sltime; | 360 | vcpu->arch.ckc_timer.expires = jiffies + sltime; |
361 | 361 | ||
362 | add_timer(&vcpu->arch.ckc_timer); | 362 | add_timer(&vcpu->arch.ckc_timer); |
363 | VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime); | 363 | VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime); |
364 | no_timer: | 364 | no_timer: |
365 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); | 365 | spin_lock_bh(&vcpu->arch.local_int.float_int->lock); |
366 | spin_lock_bh(&vcpu->arch.local_int.lock); | 366 | spin_lock_bh(&vcpu->arch.local_int.lock); |
@@ -491,7 +491,7 @@ int kvm_s390_inject_vm(struct kvm *kvm, | |||
491 | 491 | ||
492 | switch (s390int->type) { | 492 | switch (s390int->type) { |
493 | case KVM_S390_INT_VIRTIO: | 493 | case KVM_S390_INT_VIRTIO: |
494 | VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx", | 494 | VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", |
495 | s390int->parm, s390int->parm64); | 495 | s390int->parm, s390int->parm64); |
496 | inti->type = s390int->type; | 496 | inti->type = s390int->type; |
497 | inti->ext.ext_params = s390int->parm; | 497 | inti->ext.ext_params = s390int->parm; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index cce40ff2913b..3605df45dd41 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -118,7 +118,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) | |||
118 | goto out; | 118 | goto out; |
119 | } | 119 | } |
120 | 120 | ||
121 | VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr); | 121 | VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); |
122 | out: | 122 | out: |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h index fdcb93bc6d11..6c43625bb1a5 100644 --- a/arch/sh/include/asm/mmu.h +++ b/arch/sh/include/asm/mmu.h | |||
@@ -9,7 +9,6 @@ typedef struct { | |||
9 | mm_context_id_t id; | 9 | mm_context_id_t id; |
10 | void *vdso; | 10 | void *vdso; |
11 | #else | 11 | #else |
12 | struct vm_list_struct *vmlist; | ||
13 | unsigned long end_brk; | 12 | unsigned long end_brk; |
14 | #endif | 13 | #endif |
15 | #ifdef CONFIG_BINFMT_ELF_FDPIC | 14 | #ifdef CONFIG_BINFMT_ELF_FDPIC |
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c index 5ea7d7713fca..a53496828b76 100644 --- a/drivers/char/hvc_iucv.c +++ b/drivers/char/hvc_iucv.c | |||
@@ -1,26 +1,30 @@ | |||
1 | /* | 1 | /* |
2 | * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC) | 2 | * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver |
3 | * | 3 | * |
4 | * This back-end for HVC provides terminal access via | 4 | * This HVC device driver provides terminal access using |
5 | * z/VM IUCV communication paths. | 5 | * z/VM IUCV communication paths. |
6 | * | 6 | * |
7 | * Copyright IBM Corp. 2008. | 7 | * Copyright IBM Corp. 2008 |
8 | * | 8 | * |
9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | 9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> |
10 | */ | 10 | */ |
11 | #define KMSG_COMPONENT "hvc_iucv" | 11 | #define KMSG_COMPONENT "hvc_iucv" |
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | 13 | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <asm/ebcdic.h> | 15 | #include <asm/ebcdic.h> |
16 | #include <linux/delay.h> | ||
17 | #include <linux/init.h> | ||
15 | #include <linux/mempool.h> | 18 | #include <linux/mempool.h> |
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/tty.h> | 20 | #include <linux/tty.h> |
21 | #include <linux/wait.h> | ||
18 | #include <net/iucv/iucv.h> | 22 | #include <net/iucv/iucv.h> |
19 | 23 | ||
20 | #include "hvc_console.h" | 24 | #include "hvc_console.h" |
21 | 25 | ||
22 | 26 | ||
23 | /* HVC backend for z/VM IUCV */ | 27 | /* General device driver settings */ |
24 | #define HVC_IUCV_MAGIC 0xc9e4c3e5 | 28 | #define HVC_IUCV_MAGIC 0xc9e4c3e5 |
25 | #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS | 29 | #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS |
26 | #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) | 30 | #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) |
@@ -33,14 +37,14 @@ | |||
33 | #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ | 37 | #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ |
34 | #define MSG_TYPE_DATA 0x10 /* Terminal data */ | 38 | #define MSG_TYPE_DATA 0x10 /* Terminal data */ |
35 | 39 | ||
36 | #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) | ||
37 | struct iucv_tty_msg { | 40 | struct iucv_tty_msg { |
38 | u8 version; /* Message version */ | 41 | u8 version; /* Message version */ |
39 | u8 type; /* Message type */ | 42 | u8 type; /* Message type */ |
40 | #define MSG_MAX_DATALEN (~(u16)0) | 43 | #define MSG_MAX_DATALEN ((u16)(~0)) |
41 | u16 datalen; /* Payload length */ | 44 | u16 datalen; /* Payload length */ |
42 | u8 data[]; /* Payload buffer */ | 45 | u8 data[]; /* Payload buffer */ |
43 | } __attribute__((packed)); | 46 | } __attribute__((packed)); |
47 | #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) | ||
44 | 48 | ||
45 | enum iucv_state_t { | 49 | enum iucv_state_t { |
46 | IUCV_DISCONN = 0, | 50 | IUCV_DISCONN = 0, |
@@ -54,19 +58,26 @@ enum tty_state_t { | |||
54 | }; | 58 | }; |
55 | 59 | ||
56 | struct hvc_iucv_private { | 60 | struct hvc_iucv_private { |
57 | struct hvc_struct *hvc; /* HVC console struct reference */ | 61 | struct hvc_struct *hvc; /* HVC struct reference */ |
58 | u8 srv_name[8]; /* IUCV service name (ebcdic) */ | 62 | u8 srv_name[8]; /* IUCV service name (ebcdic) */ |
63 | unsigned char is_console; /* Linux console usage flag */ | ||
59 | enum iucv_state_t iucv_state; /* IUCV connection status */ | 64 | enum iucv_state_t iucv_state; /* IUCV connection status */ |
60 | enum tty_state_t tty_state; /* TTY status */ | 65 | enum tty_state_t tty_state; /* TTY status */ |
61 | struct iucv_path *path; /* IUCV path pointer */ | 66 | struct iucv_path *path; /* IUCV path pointer */ |
62 | spinlock_t lock; /* hvc_iucv_private lock */ | 67 | spinlock_t lock; /* hvc_iucv_private lock */ |
68 | #define SNDBUF_SIZE (PAGE_SIZE) /* must be < MSG_MAX_DATALEN */ | ||
69 | void *sndbuf; /* send buffer */ | ||
70 | size_t sndbuf_len; /* length of send buffer */ | ||
71 | #define QUEUE_SNDBUF_DELAY (HZ / 25) | ||
72 | struct delayed_work sndbuf_work; /* work: send iucv msg(s) */ | ||
73 | wait_queue_head_t sndbuf_waitq; /* wait for send completion */ | ||
63 | struct list_head tty_outqueue; /* outgoing IUCV messages */ | 74 | struct list_head tty_outqueue; /* outgoing IUCV messages */ |
64 | struct list_head tty_inqueue; /* incoming IUCV messages */ | 75 | struct list_head tty_inqueue; /* incoming IUCV messages */ |
65 | }; | 76 | }; |
66 | 77 | ||
67 | struct iucv_tty_buffer { | 78 | struct iucv_tty_buffer { |
68 | struct list_head list; /* list pointer */ | 79 | struct list_head list; /* list pointer */ |
69 | struct iucv_message msg; /* store an incoming IUCV message */ | 80 | struct iucv_message msg; /* store an IUCV message */ |
70 | size_t offset; /* data buffer offset */ | 81 | size_t offset; /* data buffer offset */ |
71 | struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ | 82 | struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ |
72 | }; | 83 | }; |
@@ -78,11 +89,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); | |||
78 | static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); | 89 | static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); |
79 | 90 | ||
80 | 91 | ||
81 | /* Kernel module parameters */ | 92 | /* Kernel module parameter: use one terminal device as default */ |
82 | static unsigned long hvc_iucv_devices; | 93 | static unsigned long hvc_iucv_devices = 1; |
83 | 94 | ||
84 | /* Array of allocated hvc iucv tty lines... */ | 95 | /* Array of allocated hvc iucv tty lines... */ |
85 | static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; | 96 | static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; |
97 | #define IUCV_HVC_CON_IDX (0) | ||
86 | 98 | ||
87 | /* Kmem cache and mempool for iucv_tty_buffer elements */ | 99 | /* Kmem cache and mempool for iucv_tty_buffer elements */ |
88 | static struct kmem_cache *hvc_iucv_buffer_cache; | 100 | static struct kmem_cache *hvc_iucv_buffer_cache; |
@@ -112,7 +124,7 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) | |||
112 | } | 124 | } |
113 | 125 | ||
114 | /** | 126 | /** |
115 | * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element. | 127 | * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element. |
116 | * @size: Size of the internal buffer used to store data. | 128 | * @size: Size of the internal buffer used to store data. |
117 | * @flags: Memory allocation flags passed to mempool. | 129 | * @flags: Memory allocation flags passed to mempool. |
118 | * | 130 | * |
@@ -120,7 +132,6 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) | |||
120 | * allocates an internal data buffer with the specified size @size. | 132 | * allocates an internal data buffer with the specified size @size. |
121 | * Note: The total message size arises from the internal buffer size and the | 133 | * Note: The total message size arises from the internal buffer size and the |
122 | * members of the iucv_tty_msg structure. | 134 | * members of the iucv_tty_msg structure. |
123 | * | ||
124 | * The function returns NULL if memory allocation has failed. | 135 | * The function returns NULL if memory allocation has failed. |
125 | */ | 136 | */ |
126 | static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) | 137 | static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) |
@@ -130,7 +141,7 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) | |||
130 | bufp = mempool_alloc(hvc_iucv_mempool, flags); | 141 | bufp = mempool_alloc(hvc_iucv_mempool, flags); |
131 | if (!bufp) | 142 | if (!bufp) |
132 | return NULL; | 143 | return NULL; |
133 | memset(bufp, 0, sizeof(struct iucv_tty_buffer)); | 144 | memset(bufp, 0, sizeof(*bufp)); |
134 | 145 | ||
135 | if (size > 0) { | 146 | if (size > 0) { |
136 | bufp->msg.length = MSG_SIZE(size); | 147 | bufp->msg.length = MSG_SIZE(size); |
@@ -149,9 +160,6 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) | |||
149 | /** | 160 | /** |
150 | * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. | 161 | * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. |
151 | * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. | 162 | * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. |
152 | * | ||
153 | * The destroy_tty_buffer() function frees the internal data buffer and returns | ||
154 | * the struct iucv_tty_buffer element back to the mempool for freeing. | ||
155 | */ | 163 | */ |
156 | static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) | 164 | static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) |
157 | { | 165 | { |
@@ -161,11 +169,7 @@ static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) | |||
161 | 169 | ||
162 | /** | 170 | /** |
163 | * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. | 171 | * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. |
164 | * @list: List head pointer to a list containing struct iucv_tty_buffer | 172 | * @list: List containing struct iucv_tty_buffer elements. |
165 | * elements. | ||
166 | * | ||
167 | * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the | ||
168 | * list @list. | ||
169 | */ | 173 | */ |
170 | static void destroy_tty_buffer_list(struct list_head *list) | 174 | static void destroy_tty_buffer_list(struct list_head *list) |
171 | { | 175 | { |
@@ -178,24 +182,24 @@ static void destroy_tty_buffer_list(struct list_head *list) | |||
178 | } | 182 | } |
179 | 183 | ||
180 | /** | 184 | /** |
181 | * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer. | 185 | * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer. |
182 | * @priv: Pointer to hvc_iucv_private structure. | 186 | * @priv: Pointer to struct hvc_iucv_private |
183 | * @buf: HVC console buffer for writing received terminal data. | 187 | * @buf: HVC buffer for writing received terminal data. |
184 | * @count: HVC console buffer size. | 188 | * @count: HVC buffer size. |
185 | * @has_more_data: Pointer to an int variable. | 189 | * @has_more_data: Pointer to an int variable. |
186 | * | 190 | * |
187 | * The function picks up pending messages from the input queue and receives | 191 | * The function picks up pending messages from the input queue and receives |
188 | * the message data that is then written to the specified buffer @buf. | 192 | * the message data that is then written to the specified buffer @buf. |
189 | * If the buffer size @count is less than the data message size, then the | 193 | * If the buffer size @count is less than the data message size, the |
190 | * message is kept on the input queue and @has_more_data is set to 1. | 194 | * message is kept on the input queue and @has_more_data is set to 1. |
191 | * If the message data has been entirely written, the message is removed from | 195 | * If all message data has been written, the message is removed from |
192 | * the input queue. | 196 | * the input queue. |
193 | * | 197 | * |
194 | * The function returns the number of bytes written to the terminal, zero if | 198 | * The function returns the number of bytes written to the terminal, zero if |
195 | * there are no pending data messages available or if there is no established | 199 | * there are no pending data messages available or if there is no established |
196 | * IUCV path. | 200 | * IUCV path. |
197 | * If the IUCV path has been severed, then -EPIPE is returned to cause a | 201 | * If the IUCV path has been severed, then -EPIPE is returned to cause a |
198 | * hang up (that is issued by the HVC console layer). | 202 | * hang up (that is issued by the HVC layer). |
199 | */ | 203 | */ |
200 | static int hvc_iucv_write(struct hvc_iucv_private *priv, | 204 | static int hvc_iucv_write(struct hvc_iucv_private *priv, |
201 | char *buf, int count, int *has_more_data) | 205 | char *buf, int count, int *has_more_data) |
@@ -204,12 +208,12 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv, | |||
204 | int written; | 208 | int written; |
205 | int rc; | 209 | int rc; |
206 | 210 | ||
207 | /* Immediately return if there is no IUCV connection */ | 211 | /* immediately return if there is no IUCV connection */ |
208 | if (priv->iucv_state == IUCV_DISCONN) | 212 | if (priv->iucv_state == IUCV_DISCONN) |
209 | return 0; | 213 | return 0; |
210 | 214 | ||
211 | /* If the IUCV path has been severed, return -EPIPE to inform the | 215 | /* if the IUCV path has been severed, return -EPIPE to inform the |
212 | * hvc console layer to hang up the tty device. */ | 216 | * HVC layer to hang up the tty device. */ |
213 | if (priv->iucv_state == IUCV_SEVERED) | 217 | if (priv->iucv_state == IUCV_SEVERED) |
214 | return -EPIPE; | 218 | return -EPIPE; |
215 | 219 | ||
@@ -217,7 +221,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv, | |||
217 | if (list_empty(&priv->tty_inqueue)) | 221 | if (list_empty(&priv->tty_inqueue)) |
218 | return 0; | 222 | return 0; |
219 | 223 | ||
220 | /* receive a iucv message and flip data to the tty (ldisc) */ | 224 | /* receive an iucv message and flip data to the tty (ldisc) */ |
221 | rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); | 225 | rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); |
222 | 226 | ||
223 | written = 0; | 227 | written = 0; |
@@ -260,7 +264,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv, | |||
260 | case MSG_TYPE_WINSIZE: | 264 | case MSG_TYPE_WINSIZE: |
261 | if (rb->mbuf->datalen != sizeof(struct winsize)) | 265 | if (rb->mbuf->datalen != sizeof(struct winsize)) |
262 | break; | 266 | break; |
263 | hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data)); | 267 | hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); |
264 | break; | 268 | break; |
265 | 269 | ||
266 | case MSG_TYPE_ERROR: /* ignored ... */ | 270 | case MSG_TYPE_ERROR: /* ignored ... */ |
@@ -284,10 +288,9 @@ out_written: | |||
284 | * @buf: Pointer to a buffer to store data | 288 | * @buf: Pointer to a buffer to store data |
285 | * @count: Size of buffer available for writing | 289 | * @count: Size of buffer available for writing |
286 | * | 290 | * |
287 | * The hvc_console thread calls this method to read characters from | 291 | * The HVC thread calls this method to read characters from the back-end. |
288 | * the terminal backend. If an IUCV communication path has been established, | 292 | * If an IUCV communication path has been established, pending IUCV messages |
289 | * pending IUCV messages are received and data is copied into buffer @buf | 293 | * are received and data is copied into buffer @buf up to @count bytes. |
290 | * up to @count bytes. | ||
291 | * | 294 | * |
292 | * Locking: The routine gets called under an irqsave() spinlock; and | 295 | * Locking: The routine gets called under an irqsave() spinlock; and |
293 | * the routine locks the struct hvc_iucv_private->lock to call | 296 | * the routine locks the struct hvc_iucv_private->lock to call |
@@ -318,66 +321,122 @@ static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) | |||
318 | } | 321 | } |
319 | 322 | ||
320 | /** | 323 | /** |
321 | * hvc_iucv_send() - Send an IUCV message containing terminal data. | 324 | * hvc_iucv_queue() - Buffer terminal data for sending. |
322 | * @priv: Pointer to struct hvc_iucv_private instance. | 325 | * @priv: Pointer to struct hvc_iucv_private instance. |
323 | * @buf: Buffer containing data to send. | 326 | * @buf: Buffer containing data to send. |
324 | * @size: Size of buffer and amount of data to send. | 327 | * @count: Size of buffer and amount of data to send. |
328 | * | ||
329 | * The function queues data for sending. To actually send the buffered data, | ||
330 | * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY). | ||
331 | * The function returns the number of data bytes that has been buffered. | ||
325 | * | 332 | * |
326 | * If an IUCV communication path is established, the function copies the buffer | 333 | * If the device is not connected, data is ignored and the function returns |
327 | * data to a newly allocated struct iucv_tty_buffer element, sends the data and | 334 | * @count. |
328 | * puts the element to the outqueue. | 335 | * If the buffer is full, the function returns 0. |
336 | * If an existing IUCV communicaton path has been severed, -EPIPE is returned | ||
337 | * (that can be passed to HVC layer to cause a tty hangup). | ||
338 | */ | ||
339 | static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf, | ||
340 | int count) | ||
341 | { | ||
342 | size_t len; | ||
343 | |||
344 | if (priv->iucv_state == IUCV_DISCONN) | ||
345 | return count; /* ignore data */ | ||
346 | |||
347 | if (priv->iucv_state == IUCV_SEVERED) | ||
348 | return -EPIPE; | ||
349 | |||
350 | len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len); | ||
351 | if (!len) | ||
352 | return 0; | ||
353 | |||
354 | memcpy(priv->sndbuf + priv->sndbuf_len, buf, len); | ||
355 | priv->sndbuf_len += len; | ||
356 | |||
357 | if (priv->iucv_state == IUCV_CONNECTED) | ||
358 | schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY); | ||
359 | |||
360 | return len; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * hvc_iucv_send() - Send an IUCV message containing terminal data. | ||
365 | * @priv: Pointer to struct hvc_iucv_private instance. | ||
329 | * | 366 | * |
330 | * If there is no IUCV communication path established, the function returns 0. | 367 | * If an IUCV communication path has been established, the buffered output data |
331 | * If an existing IUCV communicaton path has been severed, the function returns | 368 | * is sent via an IUCV message and the number of bytes sent is returned. |
332 | * -EPIPE (can be passed to HVC layer to cause a tty hangup). | 369 | * Returns 0 if there is no established IUCV communication path or |
370 | * -EPIPE if an existing IUCV communicaton path has been severed. | ||
333 | */ | 371 | */ |
334 | static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, | 372 | static int hvc_iucv_send(struct hvc_iucv_private *priv) |
335 | int count) | ||
336 | { | 373 | { |
337 | struct iucv_tty_buffer *sb; | 374 | struct iucv_tty_buffer *sb; |
338 | int rc; | 375 | int rc, len; |
339 | u16 len; | ||
340 | 376 | ||
341 | if (priv->iucv_state == IUCV_SEVERED) | 377 | if (priv->iucv_state == IUCV_SEVERED) |
342 | return -EPIPE; | 378 | return -EPIPE; |
343 | 379 | ||
344 | if (priv->iucv_state == IUCV_DISCONN) | 380 | if (priv->iucv_state == IUCV_DISCONN) |
345 | return 0; | 381 | return -EIO; |
346 | 382 | ||
347 | len = min_t(u16, MSG_MAX_DATALEN, count); | 383 | if (!priv->sndbuf_len) |
384 | return 0; | ||
348 | 385 | ||
349 | /* allocate internal buffer to store msg data and also compute total | 386 | /* allocate internal buffer to store msg data and also compute total |
350 | * message length */ | 387 | * message length */ |
351 | sb = alloc_tty_buffer(len, GFP_ATOMIC); | 388 | sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC); |
352 | if (!sb) | 389 | if (!sb) |
353 | return -ENOMEM; | 390 | return -ENOMEM; |
354 | 391 | ||
355 | sb->mbuf->datalen = len; | 392 | memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len); |
356 | memcpy(sb->mbuf->data, buf, len); | 393 | sb->mbuf->datalen = (u16) priv->sndbuf_len; |
394 | sb->msg.length = MSG_SIZE(sb->mbuf->datalen); | ||
357 | 395 | ||
358 | list_add_tail(&sb->list, &priv->tty_outqueue); | 396 | list_add_tail(&sb->list, &priv->tty_outqueue); |
359 | 397 | ||
360 | rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, | 398 | rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, |
361 | (void *) sb->mbuf, sb->msg.length); | 399 | (void *) sb->mbuf, sb->msg.length); |
362 | if (rc) { | 400 | if (rc) { |
401 | /* drop the message here; however we might want to handle | ||
402 | * 0x03 (msg limit reached) by trying again... */ | ||
363 | list_del(&sb->list); | 403 | list_del(&sb->list); |
364 | destroy_tty_buffer(sb); | 404 | destroy_tty_buffer(sb); |
365 | len = 0; | ||
366 | } | 405 | } |
406 | len = priv->sndbuf_len; | ||
407 | priv->sndbuf_len = 0; | ||
367 | 408 | ||
368 | return len; | 409 | return len; |
369 | } | 410 | } |
370 | 411 | ||
371 | /** | 412 | /** |
413 | * hvc_iucv_sndbuf_work() - Send buffered data over IUCV | ||
414 | * @work: Work structure. | ||
415 | * | ||
416 | * This work queue function sends buffered output data over IUCV and, | ||
417 | * if not all buffered data could be sent, reschedules itself. | ||
418 | */ | ||
419 | static void hvc_iucv_sndbuf_work(struct work_struct *work) | ||
420 | { | ||
421 | struct hvc_iucv_private *priv; | ||
422 | |||
423 | priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work); | ||
424 | if (!priv) | ||
425 | return; | ||
426 | |||
427 | spin_lock_bh(&priv->lock); | ||
428 | hvc_iucv_send(priv); | ||
429 | spin_unlock_bh(&priv->lock); | ||
430 | } | ||
431 | |||
432 | /** | ||
372 | * hvc_iucv_put_chars() - HVC put_chars operation. | 433 | * hvc_iucv_put_chars() - HVC put_chars operation. |
373 | * @vtermno: HVC virtual terminal number. | 434 | * @vtermno: HVC virtual terminal number. |
374 | * @buf: Pointer to an buffer to read data from | 435 | * @buf: Pointer to an buffer to read data from |
375 | * @count: Size of buffer available for reading | 436 | * @count: Size of buffer available for reading |
376 | * | 437 | * |
377 | * The hvc_console thread calls this method to write characters from | 438 | * The HVC thread calls this method to write characters to the back-end. |
378 | * to the terminal backend. | 439 | * The function calls hvc_iucv_queue() to queue terminal data for sending. |
379 | * The function calls hvc_iucv_send() under the lock of the | ||
380 | * struct hvc_iucv_private instance that corresponds to the tty @vtermno. | ||
381 | * | 440 | * |
382 | * Locking: The method gets called under an irqsave() spinlock; and | 441 | * Locking: The method gets called under an irqsave() spinlock; and |
383 | * locks struct hvc_iucv_private->lock. | 442 | * locks struct hvc_iucv_private->lock. |
@@ -385,7 +444,7 @@ static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, | |||
385 | static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) | 444 | static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) |
386 | { | 445 | { |
387 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); | 446 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); |
388 | int sent; | 447 | int queued; |
389 | 448 | ||
390 | if (count <= 0) | 449 | if (count <= 0) |
391 | return 0; | 450 | return 0; |
@@ -394,10 +453,10 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) | |||
394 | return -ENODEV; | 453 | return -ENODEV; |
395 | 454 | ||
396 | spin_lock(&priv->lock); | 455 | spin_lock(&priv->lock); |
397 | sent = hvc_iucv_send(priv, buf, count); | 456 | queued = hvc_iucv_queue(priv, buf, count); |
398 | spin_unlock(&priv->lock); | 457 | spin_unlock(&priv->lock); |
399 | 458 | ||
400 | return sent; | 459 | return queued; |
401 | } | 460 | } |
402 | 461 | ||
403 | /** | 462 | /** |
@@ -406,7 +465,7 @@ static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) | |||
406 | * @id: Additional data (originally passed to hvc_alloc): the index of an struct | 465 | * @id: Additional data (originally passed to hvc_alloc): the index of an struct |
407 | * hvc_iucv_private instance. | 466 | * hvc_iucv_private instance. |
408 | * | 467 | * |
409 | * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private | 468 | * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private |
410 | * instance that is derived from @id. Always returns 0. | 469 | * instance that is derived from @id. Always returns 0. |
411 | * | 470 | * |
412 | * Locking: struct hvc_iucv_private->lock, spin_lock_bh | 471 | * Locking: struct hvc_iucv_private->lock, spin_lock_bh |
@@ -427,12 +486,8 @@ static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) | |||
427 | } | 486 | } |
428 | 487 | ||
429 | /** | 488 | /** |
430 | * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed. | 489 | * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance. |
431 | * @priv: Pointer to the struct hvc_iucv_private instance. | 490 | * @priv: Pointer to the struct hvc_iucv_private instance. |
432 | * | ||
433 | * The functions severs the established IUCV communication path (if any), and | ||
434 | * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally, | ||
435 | * the functions resets the states to TTY_CLOSED and IUCV_DISCONN. | ||
436 | */ | 491 | */ |
437 | static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) | 492 | static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) |
438 | { | 493 | { |
@@ -441,25 +496,62 @@ static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) | |||
441 | 496 | ||
442 | priv->tty_state = TTY_CLOSED; | 497 | priv->tty_state = TTY_CLOSED; |
443 | priv->iucv_state = IUCV_DISCONN; | 498 | priv->iucv_state = IUCV_DISCONN; |
499 | |||
500 | priv->sndbuf_len = 0; | ||
444 | } | 501 | } |
445 | 502 | ||
446 | /** | 503 | /** |
447 | * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups. | 504 | * tty_outqueue_empty() - Test if the tty outq is empty |
448 | * @hp: Pointer to the HVC device (struct hvc_struct) | 505 | * @priv: Pointer to struct hvc_iucv_private instance. |
449 | * @id: Additional data (originally passed to hvc_alloc): the index of an struct | 506 | */ |
450 | * hvc_iucv_private instance. | 507 | static inline int tty_outqueue_empty(struct hvc_iucv_private *priv) |
508 | { | ||
509 | int rc; | ||
510 | |||
511 | spin_lock_bh(&priv->lock); | ||
512 | rc = list_empty(&priv->tty_outqueue); | ||
513 | spin_unlock_bh(&priv->lock); | ||
514 | |||
515 | return rc; | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * flush_sndbuf_sync() - Flush send buffer and wait for completion | ||
520 | * @priv: Pointer to struct hvc_iucv_private instance. | ||
451 | * | 521 | * |
452 | * This routine notifies the HVC backend that a tty hangup (carrier loss, | 522 | * The routine cancels a pending sndbuf work, calls hvc_iucv_send() |
453 | * virtual or otherwise) has occured. | 523 | * to flush any buffered terminal output data and waits for completion. |
524 | */ | ||
525 | static void flush_sndbuf_sync(struct hvc_iucv_private *priv) | ||
526 | { | ||
527 | int sync_wait; | ||
528 | |||
529 | cancel_delayed_work_sync(&priv->sndbuf_work); | ||
530 | |||
531 | spin_lock_bh(&priv->lock); | ||
532 | hvc_iucv_send(priv); /* force sending buffered data */ | ||
533 | sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */ | ||
534 | spin_unlock_bh(&priv->lock); | ||
535 | |||
536 | if (sync_wait) | ||
537 | wait_event_timeout(priv->sndbuf_waitq, | ||
538 | tty_outqueue_empty(priv), HZ); | ||
539 | } | ||
540 | |||
541 | /** | ||
542 | * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups. | ||
543 | * @hp: Pointer to the HVC device (struct hvc_struct) | ||
544 | * @id: Additional data (originally passed to hvc_alloc): | ||
545 | * the index of an struct hvc_iucv_private instance. | ||
454 | * | 546 | * |
455 | * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep | 547 | * This routine notifies the HVC back-end that a tty hangup (carrier loss, |
456 | * an existing IUCV communication path established. | 548 | * virtual or otherwise) has occured. |
549 | * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup()) | ||
550 | * to keep an existing IUCV communication path established. | ||
457 | * (Background: vhangup() is called from user space (by getty or login) to | 551 | * (Background: vhangup() is called from user space (by getty or login) to |
458 | * disable writing to the tty by other applications). | 552 | * disable writing to the tty by other applications). |
459 | * | 553 | * If the tty has been opened and an established IUCV path has been severed |
460 | * If the tty has been opened (e.g. getty) and an established IUCV path has been | 554 | * (we caused the tty hangup), the function calls hvc_iucv_cleanup(). |
461 | * severed (we caused the tty hangup in that case), then the functions invokes | ||
462 | * hvc_iucv_cleanup() to clean up. | ||
463 | * | 555 | * |
464 | * Locking: struct hvc_iucv_private->lock | 556 | * Locking: struct hvc_iucv_private->lock |
465 | */ | 557 | */ |
@@ -471,12 +563,12 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id) | |||
471 | if (!priv) | 563 | if (!priv) |
472 | return; | 564 | return; |
473 | 565 | ||
566 | flush_sndbuf_sync(priv); | ||
567 | |||
474 | spin_lock_bh(&priv->lock); | 568 | spin_lock_bh(&priv->lock); |
475 | /* NOTE: If the hangup was scheduled by ourself (from the iucv | 569 | /* NOTE: If the hangup was scheduled by ourself (from the iucv |
476 | * path_servered callback [IUCV_SEVERED]), then we have to | 570 | * path_servered callback [IUCV_SEVERED]), we have to clean up |
477 | * finally clean up the tty backend structure and set state to | 571 | * our structure and to set state to TTY_CLOSED. |
478 | * TTY_CLOSED. | ||
479 | * | ||
480 | * If the tty was hung up otherwise (e.g. vhangup()), then we | 572 | * If the tty was hung up otherwise (e.g. vhangup()), then we |
481 | * ignore this hangup and keep an established IUCV path open... | 573 | * ignore this hangup and keep an established IUCV path open... |
482 | * (...the reason is that we are not able to connect back to the | 574 | * (...the reason is that we are not able to connect back to the |
@@ -494,10 +586,9 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id) | |||
494 | * @id: Additional data (originally passed to hvc_alloc): | 586 | * @id: Additional data (originally passed to hvc_alloc): |
495 | * the index of an struct hvc_iucv_private instance. | 587 | * the index of an struct hvc_iucv_private instance. |
496 | * | 588 | * |
497 | * This routine notifies the HVC backend that the last tty device file | 589 | * This routine notifies the HVC back-end that the last tty device fd has been |
498 | * descriptor has been closed. | 590 | * closed. The function calls hvc_iucv_cleanup() to clean up the struct |
499 | * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private | 591 | * hvc_iucv_private instance. |
500 | * instance. | ||
501 | * | 592 | * |
502 | * Locking: struct hvc_iucv_private->lock | 593 | * Locking: struct hvc_iucv_private->lock |
503 | */ | 594 | */ |
@@ -510,6 +601,8 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id) | |||
510 | if (!priv) | 601 | if (!priv) |
511 | return; | 602 | return; |
512 | 603 | ||
604 | flush_sndbuf_sync(priv); | ||
605 | |||
513 | spin_lock_bh(&priv->lock); | 606 | spin_lock_bh(&priv->lock); |
514 | path = priv->path; /* save reference to IUCV path */ | 607 | path = priv->path; /* save reference to IUCV path */ |
515 | priv->path = NULL; | 608 | priv->path = NULL; |
@@ -527,20 +620,18 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id) | |||
527 | /** | 620 | /** |
528 | * hvc_iucv_path_pending() - IUCV handler to process a connection request. | 621 | * hvc_iucv_path_pending() - IUCV handler to process a connection request. |
529 | * @path: Pending path (struct iucv_path) | 622 | * @path: Pending path (struct iucv_path) |
530 | * @ipvmid: Originator z/VM system identifier | 623 | * @ipvmid: z/VM system identifier of originator |
531 | * @ipuser: User specified data for this path | 624 | * @ipuser: User specified data for this path |
532 | * (AF_IUCV: port/service name and originator port) | 625 | * (AF_IUCV: port/service name and originator port) |
533 | * | 626 | * |
534 | * The function uses the @ipuser data to check to determine if the pending | 627 | * The function uses the @ipuser data to determine if the pending path belongs |
535 | * path belongs to a terminal managed by this HVC backend. | 628 | * to a terminal managed by this device driver. |
536 | * If the check is successful, then an additional check is done to ensure | 629 | * If the path belongs to this driver, ensure that the terminal is not accessed |
537 | * that a terminal cannot be accessed multiple times (only one connection | 630 | * multiple times (only one connection to a terminal is allowed). |
538 | * to a terminal is allowed). In that particular case, the pending path is | 631 | * If the terminal is not yet connected, the pending path is accepted and is |
539 | * severed. If it is the first connection, the pending path is accepted and | 632 | * associated to the appropriate struct hvc_iucv_private instance. |
540 | * associated to the struct hvc_iucv_private. The iucv state is updated to | ||
541 | * reflect that a communication path has been established. | ||
542 | * | 633 | * |
543 | * Returns 0 if the path belongs to a terminal managed by the this HVC backend; | 634 | * Returns 0 if @path belongs to a terminal managed by the this device driver; |
544 | * otherwise returns -ENODEV in order to dispatch this path to other handlers. | 635 | * otherwise returns -ENODEV in order to dispatch this path to other handlers. |
545 | * | 636 | * |
546 | * Locking: struct hvc_iucv_private->lock | 637 | * Locking: struct hvc_iucv_private->lock |
@@ -559,7 +650,6 @@ static int hvc_iucv_path_pending(struct iucv_path *path, | |||
559 | priv = hvc_iucv_table[i]; | 650 | priv = hvc_iucv_table[i]; |
560 | break; | 651 | break; |
561 | } | 652 | } |
562 | |||
563 | if (!priv) | 653 | if (!priv) |
564 | return -ENODEV; | 654 | return -ENODEV; |
565 | 655 | ||
@@ -588,6 +678,9 @@ static int hvc_iucv_path_pending(struct iucv_path *path, | |||
588 | priv->path = path; | 678 | priv->path = path; |
589 | priv->iucv_state = IUCV_CONNECTED; | 679 | priv->iucv_state = IUCV_CONNECTED; |
590 | 680 | ||
681 | /* flush buffered output data... */ | ||
682 | schedule_delayed_work(&priv->sndbuf_work, 5); | ||
683 | |||
591 | out_path_handled: | 684 | out_path_handled: |
592 | spin_unlock(&priv->lock); | 685 | spin_unlock(&priv->lock); |
593 | return 0; | 686 | return 0; |
@@ -603,8 +696,7 @@ out_path_handled: | |||
603 | * sets the iucv state to IUCV_SEVERED for the associated struct | 696 | * sets the iucv state to IUCV_SEVERED for the associated struct |
604 | * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty | 697 | * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty |
605 | * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). | 698 | * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). |
606 | * | 699 | * If tty portion of the HVC is closed, clean up the outqueue. |
607 | * If tty portion of the HVC is closed then clean up the outqueue in addition. | ||
608 | * | 700 | * |
609 | * Locking: struct hvc_iucv_private->lock | 701 | * Locking: struct hvc_iucv_private->lock |
610 | */ | 702 | */ |
@@ -615,15 +707,25 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
615 | spin_lock(&priv->lock); | 707 | spin_lock(&priv->lock); |
616 | priv->iucv_state = IUCV_SEVERED; | 708 | priv->iucv_state = IUCV_SEVERED; |
617 | 709 | ||
618 | /* NOTE: If the tty has not yet been opened by a getty program | 710 | /* If the tty has not yet been opened, clean up the hvc_iucv_private |
619 | * (e.g. to see console messages), then cleanup the | 711 | * structure to allow re-connects. |
620 | * hvc_iucv_private structure to allow re-connects. | 712 | * This is also done for our console device because console hangups |
713 | * are handled specially and no notifier is called by HVC. | ||
714 | * The tty session is active (TTY_OPEN) and ready for re-connects... | ||
621 | * | 715 | * |
622 | * If the tty has been opened, the get_chars() callback returns | 716 | * If it has been opened, let get_chars() return -EPIPE to signal the |
623 | * -EPIPE to signal the hvc console layer to hang up the tty. */ | 717 | * HVC layer to hang up the tty. |
718 | * If so, we need to wake up the HVC thread to call get_chars()... | ||
719 | */ | ||
624 | priv->path = NULL; | 720 | priv->path = NULL; |
625 | if (priv->tty_state == TTY_CLOSED) | 721 | if (priv->tty_state == TTY_CLOSED) |
626 | hvc_iucv_cleanup(priv); | 722 | hvc_iucv_cleanup(priv); |
723 | else | ||
724 | if (priv->is_console) { | ||
725 | hvc_iucv_cleanup(priv); | ||
726 | priv->tty_state = TTY_OPENED; | ||
727 | } else | ||
728 | hvc_kick(); | ||
627 | spin_unlock(&priv->lock); | 729 | spin_unlock(&priv->lock); |
628 | 730 | ||
629 | /* finally sever path (outside of priv->lock due to lock ordering) */ | 731 | /* finally sever path (outside of priv->lock due to lock ordering) */ |
@@ -636,9 +738,9 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
636 | * @path: Pending path (struct iucv_path) | 738 | * @path: Pending path (struct iucv_path) |
637 | * @msg: Pointer to the IUCV message | 739 | * @msg: Pointer to the IUCV message |
638 | * | 740 | * |
639 | * The function stores an incoming message on the input queue for later | 741 | * The function puts an incoming message on the input queue for later |
640 | * processing (by hvc_iucv_get_chars() / hvc_iucv_write()). | 742 | * processing (by hvc_iucv_get_chars() / hvc_iucv_write()). |
641 | * However, if the tty has not yet been opened, the message is rejected. | 743 | * If the tty has not yet been opened, the message is rejected. |
642 | * | 744 | * |
643 | * Locking: struct hvc_iucv_private->lock | 745 | * Locking: struct hvc_iucv_private->lock |
644 | */ | 746 | */ |
@@ -648,6 +750,12 @@ static void hvc_iucv_msg_pending(struct iucv_path *path, | |||
648 | struct hvc_iucv_private *priv = path->private; | 750 | struct hvc_iucv_private *priv = path->private; |
649 | struct iucv_tty_buffer *rb; | 751 | struct iucv_tty_buffer *rb; |
650 | 752 | ||
753 | /* reject messages that exceed max size of iucv_tty_msg->datalen */ | ||
754 | if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) { | ||
755 | iucv_message_reject(path, msg); | ||
756 | return; | ||
757 | } | ||
758 | |||
651 | spin_lock(&priv->lock); | 759 | spin_lock(&priv->lock); |
652 | 760 | ||
653 | /* reject messages if tty has not yet been opened */ | 761 | /* reject messages if tty has not yet been opened */ |
@@ -656,7 +764,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path, | |||
656 | goto unlock_return; | 764 | goto unlock_return; |
657 | } | 765 | } |
658 | 766 | ||
659 | /* allocate buffer an empty buffer element */ | 767 | /* allocate tty buffer to save iucv msg only */ |
660 | rb = alloc_tty_buffer(0, GFP_ATOMIC); | 768 | rb = alloc_tty_buffer(0, GFP_ATOMIC); |
661 | if (!rb) { | 769 | if (!rb) { |
662 | iucv_message_reject(path, msg); | 770 | iucv_message_reject(path, msg); |
@@ -666,7 +774,7 @@ static void hvc_iucv_msg_pending(struct iucv_path *path, | |||
666 | 774 | ||
667 | list_add_tail(&rb->list, &priv->tty_inqueue); | 775 | list_add_tail(&rb->list, &priv->tty_inqueue); |
668 | 776 | ||
669 | hvc_kick(); /* wakup hvc console thread */ | 777 | hvc_kick(); /* wake up hvc thread */ |
670 | 778 | ||
671 | unlock_return: | 779 | unlock_return: |
672 | spin_unlock(&priv->lock); | 780 | spin_unlock(&priv->lock); |
@@ -677,10 +785,10 @@ unlock_return: | |||
677 | * @path: Pending path (struct iucv_path) | 785 | * @path: Pending path (struct iucv_path) |
678 | * @msg: Pointer to the IUCV message | 786 | * @msg: Pointer to the IUCV message |
679 | * | 787 | * |
680 | * The function is called upon completion of message delivery and the | 788 | * The function is called upon completion of message delivery to remove the |
681 | * message is removed from the outqueue. Additional delivery information | 789 | * message from the outqueue. Additional delivery information can be found |
682 | * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and | 790 | * msg->audit: rejected messages (0x040000 (IPADRJCT)), and |
683 | * purged messages (0x010000 (IPADPGNR)). | 791 | * purged messages (0x010000 (IPADPGNR)). |
684 | * | 792 | * |
685 | * Locking: struct hvc_iucv_private->lock | 793 | * Locking: struct hvc_iucv_private->lock |
686 | */ | 794 | */ |
@@ -697,6 +805,7 @@ static void hvc_iucv_msg_complete(struct iucv_path *path, | |||
697 | list_move(&ent->list, &list_remove); | 805 | list_move(&ent->list, &list_remove); |
698 | break; | 806 | break; |
699 | } | 807 | } |
808 | wake_up(&priv->sndbuf_waitq); | ||
700 | spin_unlock(&priv->lock); | 809 | spin_unlock(&priv->lock); |
701 | destroy_tty_buffer_list(&list_remove); | 810 | destroy_tty_buffer_list(&list_remove); |
702 | } | 811 | } |
@@ -713,13 +822,14 @@ static struct hv_ops hvc_iucv_ops = { | |||
713 | 822 | ||
714 | /** | 823 | /** |
715 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance | 824 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance |
716 | * @id: hvc_iucv_table index | 825 | * @id: hvc_iucv_table index |
826 | * @is_console: Flag if the instance is used as Linux console | ||
717 | * | 827 | * |
718 | * This function allocates a new hvc_iucv_private struct and put the | 828 | * This function allocates a new hvc_iucv_private structure and stores |
719 | * instance into hvc_iucv_table at index @id. | 829 | * the instance in hvc_iucv_table at index @id. |
720 | * Returns 0 on success; otherwise non-zero. | 830 | * Returns 0 on success; otherwise non-zero. |
721 | */ | 831 | */ |
722 | static int __init hvc_iucv_alloc(int id) | 832 | static int __init hvc_iucv_alloc(int id, unsigned int is_console) |
723 | { | 833 | { |
724 | struct hvc_iucv_private *priv; | 834 | struct hvc_iucv_private *priv; |
725 | char name[9]; | 835 | char name[9]; |
@@ -732,18 +842,33 @@ static int __init hvc_iucv_alloc(int id) | |||
732 | spin_lock_init(&priv->lock); | 842 | spin_lock_init(&priv->lock); |
733 | INIT_LIST_HEAD(&priv->tty_outqueue); | 843 | INIT_LIST_HEAD(&priv->tty_outqueue); |
734 | INIT_LIST_HEAD(&priv->tty_inqueue); | 844 | INIT_LIST_HEAD(&priv->tty_inqueue); |
845 | INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work); | ||
846 | init_waitqueue_head(&priv->sndbuf_waitq); | ||
847 | |||
848 | priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL); | ||
849 | if (!priv->sndbuf) { | ||
850 | kfree(priv); | ||
851 | return -ENOMEM; | ||
852 | } | ||
853 | |||
854 | /* set console flag */ | ||
855 | priv->is_console = is_console; | ||
735 | 856 | ||
736 | /* Finally allocate hvc */ | 857 | /* finally allocate hvc */ |
737 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, | 858 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */ |
738 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE); | 859 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); |
739 | if (IS_ERR(priv->hvc)) { | 860 | if (IS_ERR(priv->hvc)) { |
740 | rc = PTR_ERR(priv->hvc); | 861 | rc = PTR_ERR(priv->hvc); |
862 | free_page((unsigned long) priv->sndbuf); | ||
741 | kfree(priv); | 863 | kfree(priv); |
742 | return rc; | 864 | return rc; |
743 | } | 865 | } |
744 | 866 | ||
867 | /* notify HVC thread instead of using polling */ | ||
868 | priv->hvc->irq_requested = 1; | ||
869 | |||
745 | /* setup iucv related information */ | 870 | /* setup iucv related information */ |
746 | snprintf(name, 9, "ihvc%-4d", id); | 871 | snprintf(name, 9, "lnxhvc%-2d", id); |
747 | memcpy(priv->srv_name, name, 8); | 872 | memcpy(priv->srv_name, name, 8); |
748 | ASCEBC(priv->srv_name, 8); | 873 | ASCEBC(priv->srv_name, 8); |
749 | 874 | ||
@@ -752,15 +877,16 @@ static int __init hvc_iucv_alloc(int id) | |||
752 | } | 877 | } |
753 | 878 | ||
754 | /** | 879 | /** |
755 | * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV | 880 | * hvc_iucv_init() - z/VM IUCV HVC device driver initialization |
756 | */ | 881 | */ |
757 | static int __init hvc_iucv_init(void) | 882 | static int __init hvc_iucv_init(void) |
758 | { | 883 | { |
759 | int rc, i; | 884 | int rc; |
885 | unsigned int i; | ||
760 | 886 | ||
761 | if (!MACHINE_IS_VM) { | 887 | if (!MACHINE_IS_VM) { |
762 | pr_warning("The z/VM IUCV Hypervisor console cannot be " | 888 | pr_info("The z/VM IUCV HVC device driver cannot " |
763 | "used without z/VM.\n"); | 889 | "be used without z/VM\n"); |
764 | return -ENODEV; | 890 | return -ENODEV; |
765 | } | 891 | } |
766 | 892 | ||
@@ -774,26 +900,33 @@ static int __init hvc_iucv_init(void) | |||
774 | sizeof(struct iucv_tty_buffer), | 900 | sizeof(struct iucv_tty_buffer), |
775 | 0, 0, NULL); | 901 | 0, 0, NULL); |
776 | if (!hvc_iucv_buffer_cache) { | 902 | if (!hvc_iucv_buffer_cache) { |
777 | pr_err("Not enough memory for driver initialization " | 903 | pr_err("Allocating memory failed with reason code=%d\n", 1); |
778 | "(rs=%d).\n", 1); | ||
779 | return -ENOMEM; | 904 | return -ENOMEM; |
780 | } | 905 | } |
781 | 906 | ||
782 | hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, | 907 | hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, |
783 | hvc_iucv_buffer_cache); | 908 | hvc_iucv_buffer_cache); |
784 | if (!hvc_iucv_mempool) { | 909 | if (!hvc_iucv_mempool) { |
785 | pr_err("Not enough memory for driver initialization " | 910 | pr_err("Allocating memory failed with reason code=%d\n", 2); |
786 | "(rs=%d).\n", 2); | ||
787 | kmem_cache_destroy(hvc_iucv_buffer_cache); | 911 | kmem_cache_destroy(hvc_iucv_buffer_cache); |
788 | return -ENOMEM; | 912 | return -ENOMEM; |
789 | } | 913 | } |
790 | 914 | ||
915 | /* register the first terminal device as console | ||
916 | * (must be done before allocating hvc terminal devices) */ | ||
917 | rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops); | ||
918 | if (rc) { | ||
919 | pr_err("Registering HVC terminal device as " | ||
920 | "Linux console failed\n"); | ||
921 | goto out_error_memory; | ||
922 | } | ||
923 | |||
791 | /* allocate hvc_iucv_private structs */ | 924 | /* allocate hvc_iucv_private structs */ |
792 | for (i = 0; i < hvc_iucv_devices; i++) { | 925 | for (i = 0; i < hvc_iucv_devices; i++) { |
793 | rc = hvc_iucv_alloc(i); | 926 | rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0); |
794 | if (rc) { | 927 | if (rc) { |
795 | pr_err("Could not create new z/VM IUCV HVC backend " | 928 | pr_err("Creating a new HVC terminal device " |
796 | "rc=%d.\n", rc); | 929 | "failed with error code=%d\n", rc); |
797 | goto out_error_hvc; | 930 | goto out_error_hvc; |
798 | } | 931 | } |
799 | } | 932 | } |
@@ -801,7 +934,8 @@ static int __init hvc_iucv_init(void) | |||
801 | /* register IUCV callback handler */ | 934 | /* register IUCV callback handler */ |
802 | rc = iucv_register(&hvc_iucv_handler, 0); | 935 | rc = iucv_register(&hvc_iucv_handler, 0); |
803 | if (rc) { | 936 | if (rc) { |
804 | pr_err("Could not register iucv handler (rc=%d).\n", rc); | 937 | pr_err("Registering IUCV handlers failed with error code=%d\n", |
938 | rc); | ||
805 | goto out_error_iucv; | 939 | goto out_error_iucv; |
806 | } | 940 | } |
807 | 941 | ||
@@ -816,22 +950,13 @@ out_error_hvc: | |||
816 | hvc_remove(hvc_iucv_table[i]->hvc); | 950 | hvc_remove(hvc_iucv_table[i]->hvc); |
817 | kfree(hvc_iucv_table[i]); | 951 | kfree(hvc_iucv_table[i]); |
818 | } | 952 | } |
953 | out_error_memory: | ||
819 | mempool_destroy(hvc_iucv_mempool); | 954 | mempool_destroy(hvc_iucv_mempool); |
820 | kmem_cache_destroy(hvc_iucv_buffer_cache); | 955 | kmem_cache_destroy(hvc_iucv_buffer_cache); |
821 | return rc; | 956 | return rc; |
822 | } | 957 | } |
823 | 958 | ||
824 | /** | 959 | /** |
825 | * hvc_iucv_console_init() - Early console initialization | ||
826 | */ | ||
827 | static int __init hvc_iucv_console_init(void) | ||
828 | { | ||
829 | if (!MACHINE_IS_VM || !hvc_iucv_devices) | ||
830 | return -ENODEV; | ||
831 | return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops); | ||
832 | } | ||
833 | |||
834 | /** | ||
835 | * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter | 960 | * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter |
836 | * @val: Parameter value (numeric) | 961 | * @val: Parameter value (numeric) |
837 | */ | 962 | */ |
@@ -841,10 +966,5 @@ static int __init hvc_iucv_config(char *val) | |||
841 | } | 966 | } |
842 | 967 | ||
843 | 968 | ||
844 | module_init(hvc_iucv_init); | 969 | device_initcall(hvc_iucv_init); |
845 | console_initcall(hvc_iucv_console_init); | ||
846 | __setup("hvc_iucv=", hvc_iucv_config); | 970 | __setup("hvc_iucv=", hvc_iucv_config); |
847 | |||
848 | MODULE_LICENSE("GPL"); | ||
849 | MODULE_DESCRIPTION("HVC back-end for z/VM IUCV."); | ||
850 | MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>"); | ||
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig index 14793480c453..fd112ae252cf 100644 --- a/drivers/isdn/hardware/mISDN/Kconfig +++ b/drivers/isdn/hardware/mISDN/Kconfig | |||
@@ -23,3 +23,10 @@ config MISDN_HFCMULTI | |||
23 | * HFC-8S (8 S/T interfaces on one chip) | 23 | * HFC-8S (8 S/T interfaces on one chip) |
24 | * HFC-E1 (E1 interface for 2Mbit ISDN) | 24 | * HFC-E1 (E1 interface for 2Mbit ISDN) |
25 | 25 | ||
26 | config MISDN_HFCUSB | ||
27 | tristate "Support for HFC-S USB based TAs" | ||
28 | depends on USB | ||
29 | help | ||
30 | Enable support for USB ISDN TAs with Cologne Chip AG's | ||
31 | HFC-S USB ISDN Controller | ||
32 | |||
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile index 1e7ca5332ad7..b0403526bbba 100644 --- a/drivers/isdn/hardware/mISDN/Makefile +++ b/drivers/isdn/hardware/mISDN/Makefile | |||
@@ -5,3 +5,4 @@ | |||
5 | 5 | ||
6 | obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o | 6 | obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o |
7 | obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o | 7 | obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o |
8 | obj-$(CONFIG_MISDN_HFCUSB) += hfcsusb.o | ||
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h index 7bbf7300593d..663b77f578be 100644 --- a/drivers/isdn/hardware/mISDN/hfc_multi.h +++ b/drivers/isdn/hardware/mISDN/hfc_multi.h | |||
@@ -2,10 +2,6 @@ | |||
2 | * see notice in hfc_multi.c | 2 | * see notice in hfc_multi.c |
3 | */ | 3 | */ |
4 | 4 | ||
5 | extern void ztdummy_extern_interrupt(void); | ||
6 | extern void ztdummy_register_interrupt(void); | ||
7 | extern int ztdummy_unregister_interrupt(void); | ||
8 | |||
9 | #define DEBUG_HFCMULTI_FIFO 0x00010000 | 5 | #define DEBUG_HFCMULTI_FIFO 0x00010000 |
10 | #define DEBUG_HFCMULTI_CRC 0x00020000 | 6 | #define DEBUG_HFCMULTI_CRC 0x00020000 |
11 | #define DEBUG_HFCMULTI_INIT 0x00040000 | 7 | #define DEBUG_HFCMULTI_INIT 0x00040000 |
@@ -13,6 +9,7 @@ extern int ztdummy_unregister_interrupt(void); | |||
13 | #define DEBUG_HFCMULTI_MODE 0x00100000 | 9 | #define DEBUG_HFCMULTI_MODE 0x00100000 |
14 | #define DEBUG_HFCMULTI_MSG 0x00200000 | 10 | #define DEBUG_HFCMULTI_MSG 0x00200000 |
15 | #define DEBUG_HFCMULTI_STATE 0x00400000 | 11 | #define DEBUG_HFCMULTI_STATE 0x00400000 |
12 | #define DEBUG_HFCMULTI_FILL 0x00800000 | ||
16 | #define DEBUG_HFCMULTI_SYNC 0x01000000 | 13 | #define DEBUG_HFCMULTI_SYNC 0x01000000 |
17 | #define DEBUG_HFCMULTI_DTMF 0x02000000 | 14 | #define DEBUG_HFCMULTI_DTMF 0x02000000 |
18 | #define DEBUG_HFCMULTI_LOCK 0x80000000 | 15 | #define DEBUG_HFCMULTI_LOCK 0x80000000 |
@@ -170,6 +167,8 @@ struct hfc_multi { | |||
170 | 167 | ||
171 | u_long chip; /* chip configuration */ | 168 | u_long chip; /* chip configuration */ |
172 | int masterclk; /* port that provides master clock -1=off */ | 169 | int masterclk; /* port that provides master clock -1=off */ |
170 | unsigned char silence;/* silence byte */ | ||
171 | unsigned char silence_data[128];/* silence block */ | ||
173 | int dtmf; /* flag that dtmf is currently in process */ | 172 | int dtmf; /* flag that dtmf is currently in process */ |
174 | int Flen; /* F-buffer size */ | 173 | int Flen; /* F-buffer size */ |
175 | int Zlen; /* Z-buffer size (must be int for calculation)*/ | 174 | int Zlen; /* Z-buffer size (must be int for calculation)*/ |
@@ -198,6 +197,9 @@ struct hfc_multi { | |||
198 | 197 | ||
199 | spinlock_t lock; /* the lock */ | 198 | spinlock_t lock; /* the lock */ |
200 | 199 | ||
200 | struct mISDNclock *iclock; /* isdn clock support */ | ||
201 | int iclock_on; | ||
202 | |||
201 | /* | 203 | /* |
202 | * the channel index is counted from 0, regardless where the channel | 204 | * the channel index is counted from 0, regardless where the channel |
203 | * is located on the hfc-channel. | 205 | * is located on the hfc-channel. |
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h index 5783d22a18fe..3132ddc99fcd 100644 --- a/drivers/isdn/hardware/mISDN/hfc_pci.h +++ b/drivers/isdn/hardware/mISDN/hfc_pci.h | |||
@@ -26,7 +26,7 @@ | |||
26 | * change mask and threshold simultaneously | 26 | * change mask and threshold simultaneously |
27 | */ | 27 | */ |
28 | #define HFCPCI_BTRANS_THRESHOLD 128 | 28 | #define HFCPCI_BTRANS_THRESHOLD 128 |
29 | #define HFCPCI_BTRANS_MAX 256 | 29 | #define HFCPCI_FILLEMPTY 64 |
30 | #define HFCPCI_BTRANS_THRESMASK 0x00 | 30 | #define HFCPCI_BTRANS_THRESMASK 0x00 |
31 | 31 | ||
32 | /* defines for PCI config */ | 32 | /* defines for PCI config */ |
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index c63e2f49da8a..97f4708b3879 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c | |||
@@ -133,6 +133,12 @@ | |||
133 | * Give the value of the clock control register (A_ST_CLK_DLY) | 133 | * Give the value of the clock control register (A_ST_CLK_DLY) |
134 | * of the S/T interfaces in TE mode. | 134 | * of the S/T interfaces in TE mode. |
135 | * This register is needed for the TBR3 certification, so don't change it. | 135 | * This register is needed for the TBR3 certification, so don't change it. |
136 | * | ||
137 | * clock: | ||
138 | * NOTE: only one clock value must be given once | ||
139 | * Selects interface with clock source for mISDN and applications. | ||
140 | * Set to card number starting with 1. Set to -1 to disable. | ||
141 | * By default, the first card is used as clock source. | ||
136 | */ | 142 | */ |
137 | 143 | ||
138 | /* | 144 | /* |
@@ -140,7 +146,7 @@ | |||
140 | * #define HFC_REGISTER_DEBUG | 146 | * #define HFC_REGISTER_DEBUG |
141 | */ | 147 | */ |
142 | 148 | ||
143 | static const char *hfcmulti_revision = "2.02"; | 149 | #define HFC_MULTI_VERSION "2.03" |
144 | 150 | ||
145 | #include <linux/module.h> | 151 | #include <linux/module.h> |
146 | #include <linux/pci.h> | 152 | #include <linux/pci.h> |
@@ -165,10 +171,6 @@ static LIST_HEAD(HFClist); | |||
165 | static spinlock_t HFClock; /* global hfc list lock */ | 171 | static spinlock_t HFClock; /* global hfc list lock */ |
166 | 172 | ||
167 | static void ph_state_change(struct dchannel *); | 173 | static void ph_state_change(struct dchannel *); |
168 | static void (*hfc_interrupt)(void); | ||
169 | static void (*register_interrupt)(void); | ||
170 | static int (*unregister_interrupt)(void); | ||
171 | static int interrupt_registered; | ||
172 | 174 | ||
173 | static struct hfc_multi *syncmaster; | 175 | static struct hfc_multi *syncmaster; |
174 | static int plxsd_master; /* if we have a master card (yet) */ | 176 | static int plxsd_master; /* if we have a master card (yet) */ |
@@ -184,7 +186,6 @@ static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 }; | |||
184 | #define CLKDEL_TE 0x0f /* CLKDEL in TE mode */ | 186 | #define CLKDEL_TE 0x0f /* CLKDEL in TE mode */ |
185 | #define CLKDEL_NT 0x6c /* CLKDEL in NT mode | 187 | #define CLKDEL_NT 0x6c /* CLKDEL in NT mode |
186 | (0x60 MUST be included!) */ | 188 | (0x60 MUST be included!) */ |
187 | static u_char silence = 0xff; /* silence by LAW */ | ||
188 | 189 | ||
189 | #define DIP_4S 0x1 /* DIP Switches for Beronet 1S/2S/4S cards */ | 190 | #define DIP_4S 0x1 /* DIP Switches for Beronet 1S/2S/4S cards */ |
190 | #define DIP_8S 0x2 /* DIP Switches for Beronet 8S+ cards */ | 191 | #define DIP_8S 0x2 /* DIP Switches for Beronet 8S+ cards */ |
@@ -195,12 +196,13 @@ static u_char silence = 0xff; /* silence by LAW */ | |||
195 | */ | 196 | */ |
196 | 197 | ||
197 | static uint type[MAX_CARDS]; | 198 | static uint type[MAX_CARDS]; |
198 | static uint pcm[MAX_CARDS]; | 199 | static int pcm[MAX_CARDS]; |
199 | static uint dslot[MAX_CARDS]; | 200 | static int dslot[MAX_CARDS]; |
200 | static uint iomode[MAX_CARDS]; | 201 | static uint iomode[MAX_CARDS]; |
201 | static uint port[MAX_PORTS]; | 202 | static uint port[MAX_PORTS]; |
202 | static uint debug; | 203 | static uint debug; |
203 | static uint poll; | 204 | static uint poll; |
205 | static int clock; | ||
204 | static uint timer; | 206 | static uint timer; |
205 | static uint clockdelay_te = CLKDEL_TE; | 207 | static uint clockdelay_te = CLKDEL_TE; |
206 | static uint clockdelay_nt = CLKDEL_NT; | 208 | static uint clockdelay_nt = CLKDEL_NT; |
@@ -209,14 +211,16 @@ static int HFC_cnt, Port_cnt, PCM_cnt = 99; | |||
209 | 211 | ||
210 | MODULE_AUTHOR("Andreas Eversberg"); | 212 | MODULE_AUTHOR("Andreas Eversberg"); |
211 | MODULE_LICENSE("GPL"); | 213 | MODULE_LICENSE("GPL"); |
214 | MODULE_VERSION(HFC_MULTI_VERSION); | ||
212 | module_param(debug, uint, S_IRUGO | S_IWUSR); | 215 | module_param(debug, uint, S_IRUGO | S_IWUSR); |
213 | module_param(poll, uint, S_IRUGO | S_IWUSR); | 216 | module_param(poll, uint, S_IRUGO | S_IWUSR); |
217 | module_param(clock, int, S_IRUGO | S_IWUSR); | ||
214 | module_param(timer, uint, S_IRUGO | S_IWUSR); | 218 | module_param(timer, uint, S_IRUGO | S_IWUSR); |
215 | module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR); | 219 | module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR); |
216 | module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR); | 220 | module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR); |
217 | module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); | 221 | module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); |
218 | module_param_array(pcm, uint, NULL, S_IRUGO | S_IWUSR); | 222 | module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR); |
219 | module_param_array(dslot, uint, NULL, S_IRUGO | S_IWUSR); | 223 | module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR); |
220 | module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR); | 224 | module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR); |
221 | module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); | 225 | module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); |
222 | 226 | ||
@@ -1419,19 +1423,6 @@ controller_fail: | |||
1419 | HFC_outb(hc, R_TI_WD, poll_timer); | 1423 | HFC_outb(hc, R_TI_WD, poll_timer); |
1420 | hc->hw.r_irqmsk_misc |= V_TI_IRQMSK; | 1424 | hc->hw.r_irqmsk_misc |= V_TI_IRQMSK; |
1421 | 1425 | ||
1422 | /* | ||
1423 | * set up 125us interrupt, only if function pointer is available | ||
1424 | * and module parameter timer is set | ||
1425 | */ | ||
1426 | if (timer && hfc_interrupt && register_interrupt) { | ||
1427 | /* only one chip should use this interrupt */ | ||
1428 | timer = 0; | ||
1429 | interrupt_registered = 1; | ||
1430 | hc->hw.r_irqmsk_misc |= V_PROC_IRQMSK; | ||
1431 | /* deactivate other interrupts in ztdummy */ | ||
1432 | register_interrupt(); | ||
1433 | } | ||
1434 | |||
1435 | /* set E1 state machine IRQ */ | 1426 | /* set E1 state machine IRQ */ |
1436 | if (hc->type == 1) | 1427 | if (hc->type == 1) |
1437 | hc->hw.r_irqmsk_misc |= V_STA_IRQMSK; | 1428 | hc->hw.r_irqmsk_misc |= V_STA_IRQMSK; |
@@ -1991,6 +1982,17 @@ next_frame: | |||
1991 | return; /* no data */ | 1982 | return; /* no data */ |
1992 | } | 1983 | } |
1993 | 1984 | ||
1985 | /* "fill fifo if empty" feature */ | ||
1986 | if (bch && test_bit(FLG_FILLEMPTY, &bch->Flags) | ||
1987 | && !test_bit(FLG_HDLC, &bch->Flags) && z2 == z1) { | ||
1988 | if (debug & DEBUG_HFCMULTI_FILL) | ||
1989 | printk(KERN_DEBUG "%s: buffer empty, so we have " | ||
1990 | "underrun\n", __func__); | ||
1991 | /* fill buffer, to prevent future underrun */ | ||
1992 | hc->write_fifo(hc, hc->silence_data, poll >> 1); | ||
1993 | Zspace -= (poll >> 1); | ||
1994 | } | ||
1995 | |||
1994 | /* if audio data and connected slot */ | 1996 | /* if audio data and connected slot */ |
1995 | if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending) | 1997 | if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending) |
1996 | && slot_tx >= 0) { | 1998 | && slot_tx >= 0) { |
@@ -2027,7 +2029,6 @@ next_frame: | |||
2027 | __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i, | 2029 | __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i, |
2028 | temp ? "HDLC":"TRANS"); | 2030 | temp ? "HDLC":"TRANS"); |
2029 | 2031 | ||
2030 | |||
2031 | /* Have to prep the audio data */ | 2032 | /* Have to prep the audio data */ |
2032 | hc->write_fifo(hc, d, ii - i); | 2033 | hc->write_fifo(hc, d, ii - i); |
2033 | *idxp = ii; | 2034 | *idxp = ii; |
@@ -2066,7 +2067,7 @@ next_frame: | |||
2066 | * no more data at all. this prevents sending an undefined value. | 2067 | * no more data at all. this prevents sending an undefined value. |
2067 | */ | 2068 | */ |
2068 | if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags)) | 2069 | if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags)) |
2069 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence); | 2070 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); |
2070 | } | 2071 | } |
2071 | 2072 | ||
2072 | 2073 | ||
@@ -2583,7 +2584,6 @@ hfcmulti_interrupt(int intno, void *dev_id) | |||
2583 | static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0, | 2584 | static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0, |
2584 | iq5 = 0, iq6 = 0, iqcnt = 0; | 2585 | iq5 = 0, iq6 = 0, iqcnt = 0; |
2585 | #endif | 2586 | #endif |
2586 | static int count; | ||
2587 | struct hfc_multi *hc = dev_id; | 2587 | struct hfc_multi *hc = dev_id; |
2588 | struct dchannel *dch; | 2588 | struct dchannel *dch; |
2589 | u_char r_irq_statech, status, r_irq_misc, r_irq_oview; | 2589 | u_char r_irq_statech, status, r_irq_misc, r_irq_oview; |
@@ -2637,6 +2637,7 @@ hfcmulti_interrupt(int intno, void *dev_id) | |||
2637 | iqcnt = 0; | 2637 | iqcnt = 0; |
2638 | } | 2638 | } |
2639 | #endif | 2639 | #endif |
2640 | |||
2640 | if (!r_irq_statech && | 2641 | if (!r_irq_statech && |
2641 | !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA | | 2642 | !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA | |
2642 | V_MISC_IRQSTA | V_FR_IRQSTA))) { | 2643 | V_MISC_IRQSTA | V_FR_IRQSTA))) { |
@@ -2657,6 +2658,7 @@ hfcmulti_interrupt(int intno, void *dev_id) | |||
2657 | if (status & V_MISC_IRQSTA) { | 2658 | if (status & V_MISC_IRQSTA) { |
2658 | /* misc IRQ */ | 2659 | /* misc IRQ */ |
2659 | r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC); | 2660 | r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC); |
2661 | r_irq_misc &= hc->hw.r_irqmsk_misc; /* ignore disabled irqs */ | ||
2660 | if (r_irq_misc & V_STA_IRQ) { | 2662 | if (r_irq_misc & V_STA_IRQ) { |
2661 | if (hc->type == 1) { | 2663 | if (hc->type == 1) { |
2662 | /* state machine */ | 2664 | /* state machine */ |
@@ -2691,23 +2693,20 @@ hfcmulti_interrupt(int intno, void *dev_id) | |||
2691 | plxsd_checksync(hc, 0); | 2693 | plxsd_checksync(hc, 0); |
2692 | } | 2694 | } |
2693 | } | 2695 | } |
2694 | if (r_irq_misc & V_TI_IRQ) | 2696 | if (r_irq_misc & V_TI_IRQ) { |
2697 | if (hc->iclock_on) | ||
2698 | mISDN_clock_update(hc->iclock, poll, NULL); | ||
2695 | handle_timer_irq(hc); | 2699 | handle_timer_irq(hc); |
2700 | } | ||
2696 | 2701 | ||
2697 | if (r_irq_misc & V_DTMF_IRQ) { | 2702 | if (r_irq_misc & V_DTMF_IRQ) { |
2698 | /* -> DTMF IRQ */ | ||
2699 | hfcmulti_dtmf(hc); | 2703 | hfcmulti_dtmf(hc); |
2700 | } | 2704 | } |
2701 | /* TODO: REPLACE !!!! 125 us Interrupts are not acceptable */ | ||
2702 | if (r_irq_misc & V_IRQ_PROC) { | 2705 | if (r_irq_misc & V_IRQ_PROC) { |
2703 | /* IRQ every 125us */ | 2706 | static int irq_proc_cnt; |
2704 | count++; | 2707 | if (!irq_proc_cnt++) |
2705 | /* generate 1kHz signal */ | 2708 | printk(KERN_WARNING "%s: got V_IRQ_PROC -" |
2706 | if (count == 8) { | 2709 | " this should not happen\n", __func__); |
2707 | if (hfc_interrupt) | ||
2708 | hfc_interrupt(); | ||
2709 | count = 0; | ||
2710 | } | ||
2711 | } | 2710 | } |
2712 | 2711 | ||
2713 | } | 2712 | } |
@@ -2954,7 +2953,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, | |||
2954 | HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); | 2953 | HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); |
2955 | HFC_wait(hc); | 2954 | HFC_wait(hc); |
2956 | /* tx silence */ | 2955 | /* tx silence */ |
2957 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence); | 2956 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); |
2958 | HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + | 2957 | HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + |
2959 | ((ch % 4) * 4)) << 1); | 2958 | ((ch % 4) * 4)) << 1); |
2960 | HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1)); | 2959 | HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1)); |
@@ -2969,7 +2968,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx, | |||
2969 | HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); | 2968 | HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); |
2970 | HFC_wait(hc); | 2969 | HFC_wait(hc); |
2971 | /* tx silence */ | 2970 | /* tx silence */ |
2972 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence); | 2971 | HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); |
2973 | /* enable RX fifo */ | 2972 | /* enable RX fifo */ |
2974 | HFC_outb(hc, R_FIFO, (ch<<1)|1); | 2973 | HFC_outb(hc, R_FIFO, (ch<<1)|1); |
2975 | HFC_wait(hc); | 2974 | HFC_wait(hc); |
@@ -3461,7 +3460,7 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) | |||
3461 | switch (cq->op) { | 3460 | switch (cq->op) { |
3462 | case MISDN_CTRL_GETOP: | 3461 | case MISDN_CTRL_GETOP: |
3463 | cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP | 3462 | cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP |
3464 | | MISDN_CTRL_RX_OFF; | 3463 | | MISDN_CTRL_RX_OFF | MISDN_CTRL_FILL_EMPTY; |
3465 | break; | 3464 | break; |
3466 | case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */ | 3465 | case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */ |
3467 | hc->chan[bch->slot].rx_off = !!cq->p1; | 3466 | hc->chan[bch->slot].rx_off = !!cq->p1; |
@@ -3476,6 +3475,12 @@ channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) | |||
3476 | printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n", | 3475 | printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n", |
3477 | __func__, bch->nr, hc->chan[bch->slot].rx_off); | 3476 | __func__, bch->nr, hc->chan[bch->slot].rx_off); |
3478 | break; | 3477 | break; |
3478 | case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ | ||
3479 | test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); | ||
3480 | if (debug & DEBUG_HFCMULTI_MSG) | ||
3481 | printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " | ||
3482 | "off=%d)\n", __func__, bch->nr, !!cq->p1); | ||
3483 | break; | ||
3479 | case MISDN_CTRL_HW_FEATURES: /* fill features structure */ | 3484 | case MISDN_CTRL_HW_FEATURES: /* fill features structure */ |
3480 | if (debug & DEBUG_HFCMULTI_MSG) | 3485 | if (debug & DEBUG_HFCMULTI_MSG) |
3481 | printk(KERN_DEBUG "%s: HW_FEATURE request\n", | 3486 | printk(KERN_DEBUG "%s: HW_FEATURE request\n", |
@@ -3992,6 +3997,7 @@ open_bchannel(struct hfc_multi *hc, struct dchannel *dch, | |||
3992 | } | 3997 | } |
3993 | if (test_and_set_bit(FLG_OPEN, &bch->Flags)) | 3998 | if (test_and_set_bit(FLG_OPEN, &bch->Flags)) |
3994 | return -EBUSY; /* b-channel can be only open once */ | 3999 | return -EBUSY; /* b-channel can be only open once */ |
4000 | test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); | ||
3995 | bch->ch.protocol = rq->protocol; | 4001 | bch->ch.protocol = rq->protocol; |
3996 | hc->chan[ch].rx_off = 0; | 4002 | hc->chan[ch].rx_off = 0; |
3997 | rq->ch = &bch->ch; | 4003 | rq->ch = &bch->ch; |
@@ -4081,6 +4087,15 @@ hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) | |||
4081 | return err; | 4087 | return err; |
4082 | } | 4088 | } |
4083 | 4089 | ||
4090 | static int | ||
4091 | clockctl(void *priv, int enable) | ||
4092 | { | ||
4093 | struct hfc_multi *hc = priv; | ||
4094 | |||
4095 | hc->iclock_on = enable; | ||
4096 | return 0; | ||
4097 | } | ||
4098 | |||
4084 | /* | 4099 | /* |
4085 | * initialize the card | 4100 | * initialize the card |
4086 | */ | 4101 | */ |
@@ -4495,10 +4510,14 @@ release_card(struct hfc_multi *hc) | |||
4495 | printk(KERN_WARNING "%s: release card (%d) entered\n", | 4510 | printk(KERN_WARNING "%s: release card (%d) entered\n", |
4496 | __func__, hc->id); | 4511 | __func__, hc->id); |
4497 | 4512 | ||
4513 | /* unregister clock source */ | ||
4514 | if (hc->iclock) | ||
4515 | mISDN_unregister_clock(hc->iclock); | ||
4516 | |||
4517 | /* disable irq */ | ||
4498 | spin_lock_irqsave(&hc->lock, flags); | 4518 | spin_lock_irqsave(&hc->lock, flags); |
4499 | disable_hwirq(hc); | 4519 | disable_hwirq(hc); |
4500 | spin_unlock_irqrestore(&hc->lock, flags); | 4520 | spin_unlock_irqrestore(&hc->lock, flags); |
4501 | |||
4502 | udelay(1000); | 4521 | udelay(1000); |
4503 | 4522 | ||
4504 | /* dimm leds */ | 4523 | /* dimm leds */ |
@@ -4699,7 +4718,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m) | |||
4699 | } else | 4718 | } else |
4700 | hc->chan[hc->dslot].jitter = 2; /* default */ | 4719 | hc->chan[hc->dslot].jitter = 2; /* default */ |
4701 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); | 4720 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); |
4702 | ret = mISDN_register_device(&dch->dev, name); | 4721 | ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name); |
4703 | if (ret) | 4722 | if (ret) |
4704 | goto free_chan; | 4723 | goto free_chan; |
4705 | hc->created[0] = 1; | 4724 | hc->created[0] = 1; |
@@ -4807,9 +4826,9 @@ init_multi_port(struct hfc_multi *hc, int pt) | |||
4807 | test_and_set_bit(HFC_CFG_DIS_ECHANNEL, | 4826 | test_and_set_bit(HFC_CFG_DIS_ECHANNEL, |
4808 | &hc->chan[i + 2].cfg); | 4827 | &hc->chan[i + 2].cfg); |
4809 | } | 4828 | } |
4810 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d/%d", | 4829 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d-%d", |
4811 | hc->type, HFC_cnt + 1, pt + 1); | 4830 | hc->type, HFC_cnt + 1, pt + 1); |
4812 | ret = mISDN_register_device(&dch->dev, name); | 4831 | ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name); |
4813 | if (ret) | 4832 | if (ret) |
4814 | goto free_chan; | 4833 | goto free_chan; |
4815 | hc->created[pt] = 1; | 4834 | hc->created[pt] = 1; |
@@ -4828,6 +4847,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4828 | struct hfc_multi *hc; | 4847 | struct hfc_multi *hc; |
4829 | u_long flags; | 4848 | u_long flags; |
4830 | u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */ | 4849 | u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */ |
4850 | int i; | ||
4831 | 4851 | ||
4832 | if (HFC_cnt >= MAX_CARDS) { | 4852 | if (HFC_cnt >= MAX_CARDS) { |
4833 | printk(KERN_ERR "too many cards (max=%d).\n", | 4853 | printk(KERN_ERR "too many cards (max=%d).\n", |
@@ -4861,11 +4881,11 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4861 | hc->id = HFC_cnt; | 4881 | hc->id = HFC_cnt; |
4862 | hc->pcm = pcm[HFC_cnt]; | 4882 | hc->pcm = pcm[HFC_cnt]; |
4863 | hc->io_mode = iomode[HFC_cnt]; | 4883 | hc->io_mode = iomode[HFC_cnt]; |
4864 | if (dslot[HFC_cnt] < 0) { | 4884 | if (dslot[HFC_cnt] < 0 && hc->type == 1) { |
4865 | hc->dslot = 0; | 4885 | hc->dslot = 0; |
4866 | printk(KERN_INFO "HFC-E1 card has disabled D-channel, but " | 4886 | printk(KERN_INFO "HFC-E1 card has disabled D-channel, but " |
4867 | "31 B-channels\n"); | 4887 | "31 B-channels\n"); |
4868 | } if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32) { | 4888 | } if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 && hc->type == 1) { |
4869 | hc->dslot = dslot[HFC_cnt]; | 4889 | hc->dslot = dslot[HFC_cnt]; |
4870 | printk(KERN_INFO "HFC-E1 card has alternating D-channel on " | 4890 | printk(KERN_INFO "HFC-E1 card has alternating D-channel on " |
4871 | "time slot %d\n", dslot[HFC_cnt]); | 4891 | "time slot %d\n", dslot[HFC_cnt]); |
@@ -4876,9 +4896,17 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4876 | hc->masterclk = -1; | 4896 | hc->masterclk = -1; |
4877 | if (type[HFC_cnt] & 0x100) { | 4897 | if (type[HFC_cnt] & 0x100) { |
4878 | test_and_set_bit(HFC_CHIP_ULAW, &hc->chip); | 4898 | test_and_set_bit(HFC_CHIP_ULAW, &hc->chip); |
4879 | silence = 0xff; /* ulaw silence */ | 4899 | hc->silence = 0xff; /* ulaw silence */ |
4880 | } else | 4900 | } else |
4881 | silence = 0x2a; /* alaw silence */ | 4901 | hc->silence = 0x2a; /* alaw silence */ |
4902 | if ((poll >> 1) > sizeof(hc->silence_data)) { | ||
4903 | printk(KERN_ERR "HFCMULTI error: silence_data too small, " | ||
4904 | "please fix\n"); | ||
4905 | return -EINVAL; | ||
4906 | } | ||
4907 | for (i = 0; i < (poll >> 1); i++) | ||
4908 | hc->silence_data[i] = hc->silence; | ||
4909 | |||
4882 | if (!(type[HFC_cnt] & 0x200)) | 4910 | if (!(type[HFC_cnt] & 0x200)) |
4883 | test_and_set_bit(HFC_CHIP_DTMF, &hc->chip); | 4911 | test_and_set_bit(HFC_CHIP_DTMF, &hc->chip); |
4884 | 4912 | ||
@@ -4945,9 +4973,7 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4945 | switch (m->dip_type) { | 4973 | switch (m->dip_type) { |
4946 | case DIP_4S: | 4974 | case DIP_4S: |
4947 | /* | 4975 | /* |
4948 | * get DIP Setting for beroNet 1S/2S/4S cards | 4976 | * Get DIP setting for beroNet 1S/2S/4S cards |
4949 | * check if Port Jumper config matches | ||
4950 | * module param 'protocol' | ||
4951 | * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) + | 4977 | * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) + |
4952 | * GPI 19/23 (R_GPI_IN2)) | 4978 | * GPI 19/23 (R_GPI_IN2)) |
4953 | */ | 4979 | */ |
@@ -4966,9 +4992,8 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4966 | break; | 4992 | break; |
4967 | case DIP_8S: | 4993 | case DIP_8S: |
4968 | /* | 4994 | /* |
4969 | * get DIP Setting for beroNet 8S0+ cards | 4995 | * Get DIP Setting for beroNet 8S0+ cards |
4970 | * | 4996 | * Enable PCI auxbridge function |
4971 | * enable PCI auxbridge function | ||
4972 | */ | 4997 | */ |
4973 | HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK); | 4998 | HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK); |
4974 | /* prepare access to auxport */ | 4999 | /* prepare access to auxport */ |
@@ -5003,6 +5028,10 @@ hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5003 | list_add_tail(&hc->list, &HFClist); | 5028 | list_add_tail(&hc->list, &HFClist); |
5004 | spin_unlock_irqrestore(&HFClock, flags); | 5029 | spin_unlock_irqrestore(&HFClock, flags); |
5005 | 5030 | ||
5031 | /* use as clock source */ | ||
5032 | if (clock == HFC_cnt + 1) | ||
5033 | hc->iclock = mISDN_register_clock("HFCMulti", 0, clockctl, hc); | ||
5034 | |||
5006 | /* initialize hardware */ | 5035 | /* initialize hardware */ |
5007 | ret_err = init_card(hc); | 5036 | ret_err = init_card(hc); |
5008 | if (ret_err) { | 5037 | if (ret_err) { |
@@ -5137,8 +5166,7 @@ static struct pci_device_id hfmultipci_ids[] __devinitdata = { | |||
5137 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, | 5166 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, |
5138 | PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */ | 5167 | PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */ |
5139 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, | 5168 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, |
5140 | PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)}, | 5169 | PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)}, /* IOB8ST Recording */ |
5141 | /* IOB8ST Recording */ | ||
5142 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, | 5170 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, |
5143 | PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST */ | 5171 | PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST */ |
5144 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, | 5172 | { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD, |
@@ -5188,18 +5216,16 @@ hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5188 | struct hm_map *m = (struct hm_map *)ent->driver_data; | 5216 | struct hm_map *m = (struct hm_map *)ent->driver_data; |
5189 | int ret; | 5217 | int ret; |
5190 | 5218 | ||
5191 | if (m == NULL) { | 5219 | if (m == NULL && ent->vendor == PCI_VENDOR_ID_CCD && ( |
5192 | if (ent->vendor == PCI_VENDOR_ID_CCD) | 5220 | ent->device == PCI_DEVICE_ID_CCD_HFC4S || |
5193 | if (ent->device == PCI_DEVICE_ID_CCD_HFC4S || | 5221 | ent->device == PCI_DEVICE_ID_CCD_HFC8S || |
5194 | ent->device == PCI_DEVICE_ID_CCD_HFC8S || | 5222 | ent->device == PCI_DEVICE_ID_CCD_HFCE1)) { |
5195 | ent->device == PCI_DEVICE_ID_CCD_HFCE1) | 5223 | printk(KERN_ERR |
5196 | printk(KERN_ERR | 5224 | "Unknown HFC multiport controller (vendor:%x device:%x " |
5197 | "unknown HFC multiport controller " | 5225 | "subvendor:%x subdevice:%x)\n", ent->vendor, ent->device, |
5198 | "(vendor:%x device:%x subvendor:%x " | 5226 | ent->subvendor, ent->subdevice); |
5199 | "subdevice:%x) Please contact the " | 5227 | printk(KERN_ERR |
5200 | "driver maintainer for support.\n", | 5228 | "Please contact the driver maintainer for support.\n"); |
5201 | ent->vendor, ent->device, | ||
5202 | ent->subvendor, ent->subdevice); | ||
5203 | return -ENODEV; | 5229 | return -ENODEV; |
5204 | } | 5230 | } |
5205 | ret = hfcmulti_init(pdev, ent); | 5231 | ret = hfcmulti_init(pdev, ent); |
@@ -5222,22 +5248,9 @@ HFCmulti_cleanup(void) | |||
5222 | { | 5248 | { |
5223 | struct hfc_multi *card, *next; | 5249 | struct hfc_multi *card, *next; |
5224 | 5250 | ||
5225 | /* unload interrupt function symbol */ | 5251 | /* get rid of all devices of this driver */ |
5226 | if (hfc_interrupt) | ||
5227 | symbol_put(ztdummy_extern_interrupt); | ||
5228 | if (register_interrupt) | ||
5229 | symbol_put(ztdummy_register_interrupt); | ||
5230 | if (unregister_interrupt) { | ||
5231 | if (interrupt_registered) { | ||
5232 | interrupt_registered = 0; | ||
5233 | unregister_interrupt(); | ||
5234 | } | ||
5235 | symbol_put(ztdummy_unregister_interrupt); | ||
5236 | } | ||
5237 | |||
5238 | list_for_each_entry_safe(card, next, &HFClist, list) | 5252 | list_for_each_entry_safe(card, next, &HFClist, list) |
5239 | release_card(card); | 5253 | release_card(card); |
5240 | /* get rid of all devices of this driver */ | ||
5241 | pci_unregister_driver(&hfcmultipci_driver); | 5254 | pci_unregister_driver(&hfcmultipci_driver); |
5242 | } | 5255 | } |
5243 | 5256 | ||
@@ -5246,8 +5259,10 @@ HFCmulti_init(void) | |||
5246 | { | 5259 | { |
5247 | int err; | 5260 | int err; |
5248 | 5261 | ||
5262 | printk(KERN_INFO "mISDN: HFC-multi driver %s\n", HFC_MULTI_VERSION); | ||
5263 | |||
5249 | #ifdef IRQ_DEBUG | 5264 | #ifdef IRQ_DEBUG |
5250 | printk(KERN_ERR "%s: IRQ_DEBUG IS ENABLED!\n", __func__); | 5265 | printk(KERN_DEBUG "%s: IRQ_DEBUG IS ENABLED!\n", __func__); |
5251 | #endif | 5266 | #endif |
5252 | 5267 | ||
5253 | spin_lock_init(&HFClock); | 5268 | spin_lock_init(&HFClock); |
@@ -5256,22 +5271,11 @@ HFCmulti_init(void) | |||
5256 | if (debug & DEBUG_HFCMULTI_INIT) | 5271 | if (debug & DEBUG_HFCMULTI_INIT) |
5257 | printk(KERN_DEBUG "%s: init entered\n", __func__); | 5272 | printk(KERN_DEBUG "%s: init entered\n", __func__); |
5258 | 5273 | ||
5259 | hfc_interrupt = symbol_get(ztdummy_extern_interrupt); | ||
5260 | register_interrupt = symbol_get(ztdummy_register_interrupt); | ||
5261 | unregister_interrupt = symbol_get(ztdummy_unregister_interrupt); | ||
5262 | printk(KERN_INFO "mISDN: HFC-multi driver %s\n", | ||
5263 | hfcmulti_revision); | ||
5264 | |||
5265 | switch (poll) { | 5274 | switch (poll) { |
5266 | case 0: | 5275 | case 0: |
5267 | poll_timer = 6; | 5276 | poll_timer = 6; |
5268 | poll = 128; | 5277 | poll = 128; |
5269 | break; | 5278 | break; |
5270 | /* | ||
5271 | * wenn dieses break nochmal verschwindet, | ||
5272 | * gibt es heisse ohren :-) | ||
5273 | * "without the break you will get hot ears ???" | ||
5274 | */ | ||
5275 | case 8: | 5279 | case 8: |
5276 | poll_timer = 2; | 5280 | poll_timer = 2; |
5277 | break; | 5281 | break; |
@@ -5298,20 +5302,12 @@ HFCmulti_init(void) | |||
5298 | 5302 | ||
5299 | } | 5303 | } |
5300 | 5304 | ||
5305 | if (!clock) | ||
5306 | clock = 1; | ||
5307 | |||
5301 | err = pci_register_driver(&hfcmultipci_driver); | 5308 | err = pci_register_driver(&hfcmultipci_driver); |
5302 | if (err < 0) { | 5309 | if (err < 0) { |
5303 | printk(KERN_ERR "error registering pci driver: %x\n", err); | 5310 | printk(KERN_ERR "error registering pci driver: %x\n", err); |
5304 | if (hfc_interrupt) | ||
5305 | symbol_put(ztdummy_extern_interrupt); | ||
5306 | if (register_interrupt) | ||
5307 | symbol_put(ztdummy_register_interrupt); | ||
5308 | if (unregister_interrupt) { | ||
5309 | if (interrupt_registered) { | ||
5310 | interrupt_registered = 0; | ||
5311 | unregister_interrupt(); | ||
5312 | } | ||
5313 | symbol_put(ztdummy_unregister_interrupt); | ||
5314 | } | ||
5315 | return err; | 5311 | return err; |
5316 | } | 5312 | } |
5317 | return 0; | 5313 | return 0; |
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index cd8302af40eb..917bf41a293b 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c | |||
@@ -23,6 +23,25 @@ | |||
23 | * along with this program; if not, write to the Free Software | 23 | * along with this program; if not, write to the Free Software |
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
25 | * | 25 | * |
26 | * Module options: | ||
27 | * | ||
28 | * debug: | ||
29 | * NOTE: only one poll value must be given for all cards | ||
30 | * See hfc_pci.h for debug flags. | ||
31 | * | ||
32 | * poll: | ||
33 | * NOTE: only one poll value must be given for all cards | ||
34 | * Give the number of samples for each fifo process. | ||
35 | * By default 128 is used. Decrease to reduce delay, increase to | ||
36 | * reduce cpu load. If unsure, don't mess with it! | ||
37 | * A value of 128 will use controller's interrupt. Other values will | ||
38 | * use kernel timer, because the controller will not allow lower values | ||
39 | * than 128. | ||
40 | * Also note that the value depends on the kernel timer frequency. | ||
41 | * If kernel uses a frequency of 1000 Hz, steps of 8 samples are possible. | ||
42 | * If the kernel uses 100 Hz, steps of 80 samples are possible. | ||
43 | * If the kernel uses 300 Hz, steps of about 26 samples are possible. | ||
44 | * | ||
26 | */ | 45 | */ |
27 | 46 | ||
28 | #include <linux/module.h> | 47 | #include <linux/module.h> |
@@ -34,16 +53,16 @@ | |||
34 | 53 | ||
35 | static const char *hfcpci_revision = "2.0"; | 54 | static const char *hfcpci_revision = "2.0"; |
36 | 55 | ||
37 | #define MAX_CARDS 8 | ||
38 | static int HFC_cnt; | 56 | static int HFC_cnt; |
39 | static uint debug; | 57 | static uint debug; |
58 | static uint poll, tics; | ||
59 | struct timer_list hfc_tl; | ||
60 | u32 hfc_jiffies; | ||
40 | 61 | ||
41 | MODULE_AUTHOR("Karsten Keil"); | 62 | MODULE_AUTHOR("Karsten Keil"); |
42 | MODULE_LICENSE("GPL"); | 63 | MODULE_LICENSE("GPL"); |
43 | module_param(debug, uint, 0); | 64 | module_param(debug, uint, 0); |
44 | 65 | module_param(poll, uint, S_IRUGO | S_IWUSR); | |
45 | static LIST_HEAD(HFClist); | ||
46 | static DEFINE_RWLOCK(HFClock); | ||
47 | 66 | ||
48 | enum { | 67 | enum { |
49 | HFC_CCD_2BD0, | 68 | HFC_CCD_2BD0, |
@@ -114,7 +133,6 @@ struct hfcPCI_hw { | |||
114 | 133 | ||
115 | 134 | ||
116 | struct hfc_pci { | 135 | struct hfc_pci { |
117 | struct list_head list; | ||
118 | u_char subtype; | 136 | u_char subtype; |
119 | u_char chanlimit; | 137 | u_char chanlimit; |
120 | u_char initdone; | 138 | u_char initdone; |
@@ -520,9 +538,9 @@ receive_dmsg(struct hfc_pci *hc) | |||
520 | } | 538 | } |
521 | 539 | ||
522 | /* | 540 | /* |
523 | * check for transparent receive data and read max one threshold size if avail | 541 | * check for transparent receive data and read max one 'poll' size if avail |
524 | */ | 542 | */ |
525 | static int | 543 | static void |
526 | hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) | 544 | hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) |
527 | { | 545 | { |
528 | __le16 *z1r, *z2r; | 546 | __le16 *z1r, *z2r; |
@@ -534,17 +552,19 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) | |||
534 | 552 | ||
535 | fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r); | 553 | fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r); |
536 | if (!fcnt) | 554 | if (!fcnt) |
537 | return 0; /* no data avail */ | 555 | return; /* no data avail */ |
538 | 556 | ||
539 | if (fcnt <= 0) | 557 | if (fcnt <= 0) |
540 | fcnt += B_FIFO_SIZE; /* bytes actually buffered */ | 558 | fcnt += B_FIFO_SIZE; /* bytes actually buffered */ |
541 | if (fcnt > HFCPCI_BTRANS_THRESHOLD) | ||
542 | fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */ | ||
543 | |||
544 | new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */ | 559 | new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */ |
545 | if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) | 560 | if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL)) |
546 | new_z2 -= B_FIFO_SIZE; /* buffer wrap */ | 561 | new_z2 -= B_FIFO_SIZE; /* buffer wrap */ |
547 | 562 | ||
563 | if (fcnt > MAX_DATA_SIZE) { /* flush, if oversized */ | ||
564 | *z2r = cpu_to_le16(new_z2); /* new position */ | ||
565 | return; | ||
566 | } | ||
567 | |||
548 | bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC); | 568 | bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC); |
549 | if (bch->rx_skb) { | 569 | if (bch->rx_skb) { |
550 | ptr = skb_put(bch->rx_skb, fcnt); | 570 | ptr = skb_put(bch->rx_skb, fcnt); |
@@ -569,7 +589,6 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) | |||
569 | printk(KERN_WARNING "HFCPCI: receive out of memory\n"); | 589 | printk(KERN_WARNING "HFCPCI: receive out of memory\n"); |
570 | 590 | ||
571 | *z2r = cpu_to_le16(new_z2); /* new position */ | 591 | *z2r = cpu_to_le16(new_z2); /* new position */ |
572 | return 1; | ||
573 | } | 592 | } |
574 | 593 | ||
575 | /* | 594 | /* |
@@ -580,12 +599,11 @@ main_rec_hfcpci(struct bchannel *bch) | |||
580 | { | 599 | { |
581 | struct hfc_pci *hc = bch->hw; | 600 | struct hfc_pci *hc = bch->hw; |
582 | int rcnt, real_fifo; | 601 | int rcnt, real_fifo; |
583 | int receive, count = 5; | 602 | int receive = 0, count = 5; |
584 | struct bzfifo *bz; | 603 | struct bzfifo *bz; |
585 | u_char *bdata; | 604 | u_char *bdata; |
586 | struct zt *zp; | 605 | struct zt *zp; |
587 | 606 | ||
588 | |||
589 | if ((bch->nr & 2) && (!hc->hw.bswapped)) { | 607 | if ((bch->nr & 2) && (!hc->hw.bswapped)) { |
590 | bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2; | 608 | bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2; |
591 | bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2; | 609 | bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2; |
@@ -625,9 +643,10 @@ Begin: | |||
625 | receive = 1; | 643 | receive = 1; |
626 | else | 644 | else |
627 | receive = 0; | 645 | receive = 0; |
628 | } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) | 646 | } else if (test_bit(FLG_TRANSPARENT, &bch->Flags)) { |
629 | receive = hfcpci_empty_fifo_trans(bch, bz, bdata); | 647 | hfcpci_empty_fifo_trans(bch, bz, bdata); |
630 | else | 648 | return; |
649 | } else | ||
631 | receive = 0; | 650 | receive = 0; |
632 | if (count && receive) | 651 | if (count && receive) |
633 | goto Begin; | 652 | goto Begin; |
@@ -751,11 +770,41 @@ hfcpci_fill_fifo(struct bchannel *bch) | |||
751 | /* fcnt contains available bytes in fifo */ | 770 | /* fcnt contains available bytes in fifo */ |
752 | fcnt = B_FIFO_SIZE - fcnt; | 771 | fcnt = B_FIFO_SIZE - fcnt; |
753 | /* remaining bytes to send (bytes in fifo) */ | 772 | /* remaining bytes to send (bytes in fifo) */ |
773 | |||
774 | /* "fill fifo if empty" feature */ | ||
775 | if (test_bit(FLG_FILLEMPTY, &bch->Flags) && !fcnt) { | ||
776 | /* printk(KERN_DEBUG "%s: buffer empty, so we have " | ||
777 | "underrun\n", __func__); */ | ||
778 | /* fill buffer, to prevent future underrun */ | ||
779 | count = HFCPCI_FILLEMPTY; | ||
780 | new_z1 = le16_to_cpu(*z1t) + count; | ||
781 | /* new buffer Position */ | ||
782 | if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL)) | ||
783 | new_z1 -= B_FIFO_SIZE; /* buffer wrap */ | ||
784 | dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL); | ||
785 | maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t); | ||
786 | /* end of fifo */ | ||
787 | if (bch->debug & DEBUG_HW_BFIFO) | ||
788 | printk(KERN_DEBUG "hfcpci_FFt fillempty " | ||
789 | "fcnt(%d) maxl(%d) nz1(%x) dst(%p)\n", | ||
790 | fcnt, maxlen, new_z1, dst); | ||
791 | fcnt += count; | ||
792 | if (maxlen > count) | ||
793 | maxlen = count; /* limit size */ | ||
794 | memset(dst, 0x2a, maxlen); /* first copy */ | ||
795 | count -= maxlen; /* remaining bytes */ | ||
796 | if (count) { | ||
797 | dst = bdata; /* start of buffer */ | ||
798 | memset(dst, 0x2a, count); | ||
799 | } | ||
800 | *z1t = cpu_to_le16(new_z1); /* now send data */ | ||
801 | } | ||
802 | |||
754 | next_t_frame: | 803 | next_t_frame: |
755 | count = bch->tx_skb->len - bch->tx_idx; | 804 | count = bch->tx_skb->len - bch->tx_idx; |
756 | /* maximum fill shall be HFCPCI_BTRANS_MAX */ | 805 | /* maximum fill shall be poll*2 */ |
757 | if (count > HFCPCI_BTRANS_MAX - fcnt) | 806 | if (count > (poll << 1) - fcnt) |
758 | count = HFCPCI_BTRANS_MAX - fcnt; | 807 | count = (poll << 1) - fcnt; |
759 | if (count <= 0) | 808 | if (count <= 0) |
760 | return; | 809 | return; |
761 | /* data is suitable for fifo */ | 810 | /* data is suitable for fifo */ |
@@ -1135,37 +1184,37 @@ hfcpci_int(int intno, void *dev_id) | |||
1135 | val &= ~0x80; | 1184 | val &= ~0x80; |
1136 | Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); | 1185 | Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER); |
1137 | } | 1186 | } |
1138 | if (val & 0x08) { | 1187 | if (val & 0x08) { /* B1 rx */ |
1139 | bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); | 1188 | bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); |
1140 | if (bch) | 1189 | if (bch) |
1141 | main_rec_hfcpci(bch); | 1190 | main_rec_hfcpci(bch); |
1142 | else if (hc->dch.debug) | 1191 | else if (hc->dch.debug) |
1143 | printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n"); | 1192 | printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n"); |
1144 | } | 1193 | } |
1145 | if (val & 0x10) { | 1194 | if (val & 0x10) { /* B2 rx */ |
1146 | bch = Sel_BCS(hc, 2); | 1195 | bch = Sel_BCS(hc, 2); |
1147 | if (bch) | 1196 | if (bch) |
1148 | main_rec_hfcpci(bch); | 1197 | main_rec_hfcpci(bch); |
1149 | else if (hc->dch.debug) | 1198 | else if (hc->dch.debug) |
1150 | printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n"); | 1199 | printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n"); |
1151 | } | 1200 | } |
1152 | if (val & 0x01) { | 1201 | if (val & 0x01) { /* B1 tx */ |
1153 | bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); | 1202 | bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); |
1154 | if (bch) | 1203 | if (bch) |
1155 | tx_birq(bch); | 1204 | tx_birq(bch); |
1156 | else if (hc->dch.debug) | 1205 | else if (hc->dch.debug) |
1157 | printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n"); | 1206 | printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n"); |
1158 | } | 1207 | } |
1159 | if (val & 0x02) { | 1208 | if (val & 0x02) { /* B2 tx */ |
1160 | bch = Sel_BCS(hc, 2); | 1209 | bch = Sel_BCS(hc, 2); |
1161 | if (bch) | 1210 | if (bch) |
1162 | tx_birq(bch); | 1211 | tx_birq(bch); |
1163 | else if (hc->dch.debug) | 1212 | else if (hc->dch.debug) |
1164 | printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n"); | 1213 | printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n"); |
1165 | } | 1214 | } |
1166 | if (val & 0x20) | 1215 | if (val & 0x20) /* D rx */ |
1167 | receive_dmsg(hc); | 1216 | receive_dmsg(hc); |
1168 | if (val & 0x04) { /* dframe transmitted */ | 1217 | if (val & 0x04) { /* D tx */ |
1169 | if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) | 1218 | if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags)) |
1170 | del_timer(&hc->dch.timer); | 1219 | del_timer(&hc->dch.timer); |
1171 | tx_dirq(&hc->dch); | 1220 | tx_dirq(&hc->dch); |
@@ -1283,14 +1332,16 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol) | |||
1283 | } | 1332 | } |
1284 | if (fifo2 & 2) { | 1333 | if (fifo2 & 2) { |
1285 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; | 1334 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B2; |
1286 | hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + | 1335 | if (!tics) |
1287 | HFCPCI_INTS_B2REC); | 1336 | hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS + |
1337 | HFCPCI_INTS_B2REC); | ||
1288 | hc->hw.ctmt |= 2; | 1338 | hc->hw.ctmt |= 2; |
1289 | hc->hw.conn &= ~0x18; | 1339 | hc->hw.conn &= ~0x18; |
1290 | } else { | 1340 | } else { |
1291 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; | 1341 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B1; |
1292 | hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + | 1342 | if (!tics) |
1293 | HFCPCI_INTS_B1REC); | 1343 | hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS + |
1344 | HFCPCI_INTS_B1REC); | ||
1294 | hc->hw.ctmt |= 1; | 1345 | hc->hw.ctmt |= 1; |
1295 | hc->hw.conn &= ~0x03; | 1346 | hc->hw.conn &= ~0x03; |
1296 | } | 1347 | } |
@@ -1398,7 +1449,8 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan) | |||
1398 | if (chan & 2) { | 1449 | if (chan & 2) { |
1399 | hc->hw.sctrl_r |= SCTRL_B2_ENA; | 1450 | hc->hw.sctrl_r |= SCTRL_B2_ENA; |
1400 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; | 1451 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX; |
1401 | hc->hw.int_m1 |= HFCPCI_INTS_B2REC; | 1452 | if (!tics) |
1453 | hc->hw.int_m1 |= HFCPCI_INTS_B2REC; | ||
1402 | hc->hw.ctmt |= 2; | 1454 | hc->hw.ctmt |= 2; |
1403 | hc->hw.conn &= ~0x18; | 1455 | hc->hw.conn &= ~0x18; |
1404 | #ifdef REVERSE_BITORDER | 1456 | #ifdef REVERSE_BITORDER |
@@ -1407,7 +1459,8 @@ set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan) | |||
1407 | } else { | 1459 | } else { |
1408 | hc->hw.sctrl_r |= SCTRL_B1_ENA; | 1460 | hc->hw.sctrl_r |= SCTRL_B1_ENA; |
1409 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; | 1461 | hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX; |
1410 | hc->hw.int_m1 |= HFCPCI_INTS_B1REC; | 1462 | if (!tics) |
1463 | hc->hw.int_m1 |= HFCPCI_INTS_B1REC; | ||
1411 | hc->hw.ctmt |= 1; | 1464 | hc->hw.ctmt |= 1; |
1412 | hc->hw.conn &= ~0x03; | 1465 | hc->hw.conn &= ~0x03; |
1413 | #ifdef REVERSE_BITORDER | 1466 | #ifdef REVERSE_BITORDER |
@@ -1481,11 +1534,17 @@ deactivate_bchannel(struct bchannel *bch) | |||
1481 | static int | 1534 | static int |
1482 | channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) | 1535 | channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) |
1483 | { | 1536 | { |
1484 | int ret = 0; | 1537 | int ret = 0; |
1485 | 1538 | ||
1486 | switch (cq->op) { | 1539 | switch (cq->op) { |
1487 | case MISDN_CTRL_GETOP: | 1540 | case MISDN_CTRL_GETOP: |
1488 | cq->op = 0; | 1541 | cq->op = MISDN_CTRL_FILL_EMPTY; |
1542 | break; | ||
1543 | case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ | ||
1544 | test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); | ||
1545 | if (debug & DEBUG_HW_OPEN) | ||
1546 | printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " | ||
1547 | "off=%d)\n", __func__, bch->nr, !!cq->p1); | ||
1489 | break; | 1548 | break; |
1490 | default: | 1549 | default: |
1491 | printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); | 1550 | printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); |
@@ -1859,6 +1918,10 @@ open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch, | |||
1859 | hc->dch.dev.id, __builtin_return_address(0)); | 1918 | hc->dch.dev.id, __builtin_return_address(0)); |
1860 | if (rq->protocol == ISDN_P_NONE) | 1919 | if (rq->protocol == ISDN_P_NONE) |
1861 | return -EINVAL; | 1920 | return -EINVAL; |
1921 | if (rq->adr.channel == 1) { | ||
1922 | /* TODO: E-Channel */ | ||
1923 | return -EINVAL; | ||
1924 | } | ||
1862 | if (!hc->initdone) { | 1925 | if (!hc->initdone) { |
1863 | if (rq->protocol == ISDN_P_TE_S0) { | 1926 | if (rq->protocol == ISDN_P_TE_S0) { |
1864 | err = create_l1(&hc->dch, hfc_l1callback); | 1927 | err = create_l1(&hc->dch, hfc_l1callback); |
@@ -1874,6 +1937,11 @@ open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch, | |||
1874 | if (rq->protocol != ch->protocol) { | 1937 | if (rq->protocol != ch->protocol) { |
1875 | if (hc->hw.protocol == ISDN_P_TE_S0) | 1938 | if (hc->hw.protocol == ISDN_P_TE_S0) |
1876 | l1_event(hc->dch.l1, CLOSE_CHANNEL); | 1939 | l1_event(hc->dch.l1, CLOSE_CHANNEL); |
1940 | if (rq->protocol == ISDN_P_TE_S0) { | ||
1941 | err = create_l1(&hc->dch, hfc_l1callback); | ||
1942 | if (err) | ||
1943 | return err; | ||
1944 | } | ||
1877 | hc->hw.protocol = rq->protocol; | 1945 | hc->hw.protocol = rq->protocol; |
1878 | ch->protocol = rq->protocol; | 1946 | ch->protocol = rq->protocol; |
1879 | hfcpci_setmode(hc); | 1947 | hfcpci_setmode(hc); |
@@ -1903,6 +1971,7 @@ open_bchannel(struct hfc_pci *hc, struct channel_req *rq) | |||
1903 | bch = &hc->bch[rq->adr.channel - 1]; | 1971 | bch = &hc->bch[rq->adr.channel - 1]; |
1904 | if (test_and_set_bit(FLG_OPEN, &bch->Flags)) | 1972 | if (test_and_set_bit(FLG_OPEN, &bch->Flags)) |
1905 | return -EBUSY; /* b-channel can be only open once */ | 1973 | return -EBUSY; /* b-channel can be only open once */ |
1974 | test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); | ||
1906 | bch->ch.protocol = rq->protocol; | 1975 | bch->ch.protocol = rq->protocol; |
1907 | rq->ch = &bch->ch; /* TODO: E-channel */ | 1976 | rq->ch = &bch->ch; /* TODO: E-channel */ |
1908 | if (!try_module_get(THIS_MODULE)) | 1977 | if (!try_module_get(THIS_MODULE)) |
@@ -1928,7 +1997,8 @@ hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg) | |||
1928 | switch (cmd) { | 1997 | switch (cmd) { |
1929 | case OPEN_CHANNEL: | 1998 | case OPEN_CHANNEL: |
1930 | rq = arg; | 1999 | rq = arg; |
1931 | if (rq->adr.channel == 0) | 2000 | if ((rq->protocol == ISDN_P_TE_S0) || |
2001 | (rq->protocol == ISDN_P_NT_S0)) | ||
1932 | err = open_dchannel(hc, ch, rq); | 2002 | err = open_dchannel(hc, ch, rq); |
1933 | else | 2003 | else |
1934 | err = open_bchannel(hc, rq); | 2004 | err = open_bchannel(hc, rq); |
@@ -2027,7 +2097,6 @@ release_card(struct hfc_pci *hc) { | |||
2027 | mISDN_freebchannel(&hc->bch[1]); | 2097 | mISDN_freebchannel(&hc->bch[1]); |
2028 | mISDN_freebchannel(&hc->bch[0]); | 2098 | mISDN_freebchannel(&hc->bch[0]); |
2029 | mISDN_freedchannel(&hc->dch); | 2099 | mISDN_freedchannel(&hc->dch); |
2030 | list_del(&hc->list); | ||
2031 | pci_set_drvdata(hc->pdev, NULL); | 2100 | pci_set_drvdata(hc->pdev, NULL); |
2032 | kfree(hc); | 2101 | kfree(hc); |
2033 | } | 2102 | } |
@@ -2037,12 +2106,8 @@ setup_card(struct hfc_pci *card) | |||
2037 | { | 2106 | { |
2038 | int err = -EINVAL; | 2107 | int err = -EINVAL; |
2039 | u_int i; | 2108 | u_int i; |
2040 | u_long flags; | ||
2041 | char name[MISDN_MAX_IDLEN]; | 2109 | char name[MISDN_MAX_IDLEN]; |
2042 | 2110 | ||
2043 | if (HFC_cnt >= MAX_CARDS) | ||
2044 | return -EINVAL; /* maybe better value */ | ||
2045 | |||
2046 | card->dch.debug = debug; | 2111 | card->dch.debug = debug; |
2047 | spin_lock_init(&card->lock); | 2112 | spin_lock_init(&card->lock); |
2048 | mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state); | 2113 | mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state); |
@@ -2068,13 +2133,10 @@ setup_card(struct hfc_pci *card) | |||
2068 | if (err) | 2133 | if (err) |
2069 | goto error; | 2134 | goto error; |
2070 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1); | 2135 | snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1); |
2071 | err = mISDN_register_device(&card->dch.dev, name); | 2136 | err = mISDN_register_device(&card->dch.dev, &card->pdev->dev, name); |
2072 | if (err) | 2137 | if (err) |
2073 | goto error; | 2138 | goto error; |
2074 | HFC_cnt++; | 2139 | HFC_cnt++; |
2075 | write_lock_irqsave(&HFClock, flags); | ||
2076 | list_add_tail(&card->list, &HFClist); | ||
2077 | write_unlock_irqrestore(&HFClock, flags); | ||
2078 | printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt); | 2140 | printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt); |
2079 | return 0; | 2141 | return 0; |
2080 | error: | 2142 | error: |
@@ -2210,15 +2272,12 @@ static void __devexit | |||
2210 | hfc_remove_pci(struct pci_dev *pdev) | 2272 | hfc_remove_pci(struct pci_dev *pdev) |
2211 | { | 2273 | { |
2212 | struct hfc_pci *card = pci_get_drvdata(pdev); | 2274 | struct hfc_pci *card = pci_get_drvdata(pdev); |
2213 | u_long flags; | ||
2214 | 2275 | ||
2215 | if (card) { | 2276 | if (card) |
2216 | write_lock_irqsave(&HFClock, flags); | ||
2217 | release_card(card); | 2277 | release_card(card); |
2218 | write_unlock_irqrestore(&HFClock, flags); | 2278 | else |
2219 | } else | ||
2220 | if (debug) | 2279 | if (debug) |
2221 | printk(KERN_WARNING "%s: drvdata allready removed\n", | 2280 | printk(KERN_WARNING "%s: drvdata already removed\n", |
2222 | __func__); | 2281 | __func__); |
2223 | } | 2282 | } |
2224 | 2283 | ||
@@ -2230,25 +2289,97 @@ static struct pci_driver hfc_driver = { | |||
2230 | .id_table = hfc_ids, | 2289 | .id_table = hfc_ids, |
2231 | }; | 2290 | }; |
2232 | 2291 | ||
2292 | static int | ||
2293 | _hfcpci_softirq(struct device *dev, void *arg) | ||
2294 | { | ||
2295 | struct hfc_pci *hc = dev_get_drvdata(dev); | ||
2296 | struct bchannel *bch; | ||
2297 | if (hc == NULL) | ||
2298 | return 0; | ||
2299 | |||
2300 | if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) { | ||
2301 | spin_lock(&hc->lock); | ||
2302 | bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1); | ||
2303 | if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */ | ||
2304 | main_rec_hfcpci(bch); | ||
2305 | tx_birq(bch); | ||
2306 | } | ||
2307 | bch = Sel_BCS(hc, hc->hw.bswapped ? 1 : 2); | ||
2308 | if (bch && bch->state == ISDN_P_B_RAW) { /* B2 rx&tx */ | ||
2309 | main_rec_hfcpci(bch); | ||
2310 | tx_birq(bch); | ||
2311 | } | ||
2312 | spin_unlock(&hc->lock); | ||
2313 | } | ||
2314 | return 0; | ||
2315 | } | ||
2316 | |||
2317 | static void | ||
2318 | hfcpci_softirq(void *arg) | ||
2319 | { | ||
2320 | (void) driver_for_each_device(&hfc_driver.driver, NULL, arg, | ||
2321 | _hfcpci_softirq); | ||
2322 | |||
2323 | /* if next event would be in the past ... */ | ||
2324 | if ((s32)(hfc_jiffies + tics - jiffies) <= 0) | ||
2325 | hfc_jiffies = jiffies + 1; | ||
2326 | else | ||
2327 | hfc_jiffies += tics; | ||
2328 | hfc_tl.expires = hfc_jiffies; | ||
2329 | add_timer(&hfc_tl); | ||
2330 | } | ||
2331 | |||
2233 | static int __init | 2332 | static int __init |
2234 | HFC_init(void) | 2333 | HFC_init(void) |
2235 | { | 2334 | { |
2236 | int err; | 2335 | int err; |
2237 | 2336 | ||
2337 | if (!poll) | ||
2338 | poll = HFCPCI_BTRANS_THRESHOLD; | ||
2339 | |||
2340 | if (poll != HFCPCI_BTRANS_THRESHOLD) { | ||
2341 | tics = (poll * HZ) / 8000; | ||
2342 | if (tics < 1) | ||
2343 | tics = 1; | ||
2344 | poll = (tics * 8000) / HZ; | ||
2345 | if (poll > 256 || poll < 8) { | ||
2346 | printk(KERN_ERR "%s: Wrong poll value %d not in range " | ||
2347 | "of 8..256.\n", __func__, poll); | ||
2348 | err = -EINVAL; | ||
2349 | return err; | ||
2350 | } | ||
2351 | } | ||
2352 | if (poll != HFCPCI_BTRANS_THRESHOLD) { | ||
2353 | printk(KERN_INFO "%s: Using alternative poll value of %d\n", | ||
2354 | __func__, poll); | ||
2355 | hfc_tl.function = (void *)hfcpci_softirq; | ||
2356 | hfc_tl.data = 0; | ||
2357 | init_timer(&hfc_tl); | ||
2358 | hfc_tl.expires = jiffies + tics; | ||
2359 | hfc_jiffies = hfc_tl.expires; | ||
2360 | add_timer(&hfc_tl); | ||
2361 | } else | ||
2362 | tics = 0; /* indicate the use of controller's timer */ | ||
2363 | |||
2238 | err = pci_register_driver(&hfc_driver); | 2364 | err = pci_register_driver(&hfc_driver); |
2365 | if (err) { | ||
2366 | if (timer_pending(&hfc_tl)) | ||
2367 | del_timer(&hfc_tl); | ||
2368 | } | ||
2369 | |||
2239 | return err; | 2370 | return err; |
2240 | } | 2371 | } |
2241 | 2372 | ||
2242 | static void __exit | 2373 | static void __exit |
2243 | HFC_cleanup(void) | 2374 | HFC_cleanup(void) |
2244 | { | 2375 | { |
2245 | struct hfc_pci *card, *next; | 2376 | if (timer_pending(&hfc_tl)) |
2377 | del_timer(&hfc_tl); | ||
2246 | 2378 | ||
2247 | list_for_each_entry_safe(card, next, &HFClist, list) { | ||
2248 | release_card(card); | ||
2249 | } | ||
2250 | pci_unregister_driver(&hfc_driver); | 2379 | pci_unregister_driver(&hfc_driver); |
2251 | } | 2380 | } |
2252 | 2381 | ||
2253 | module_init(HFC_init); | 2382 | module_init(HFC_init); |
2254 | module_exit(HFC_cleanup); | 2383 | module_exit(HFC_cleanup); |
2384 | |||
2385 | MODULE_DEVICE_TABLE(pci, hfc_ids); | ||
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c new file mode 100644 index 000000000000..ba6925fbf38a --- /dev/null +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c | |||
@@ -0,0 +1,2196 @@ | |||
1 | /* hfcsusb.c | ||
2 | * mISDN driver for Colognechip HFC-S USB chip | ||
3 | * | ||
4 | * Copyright 2001 by Peter Sprenger (sprenger@moving-bytes.de) | ||
5 | * Copyright 2008 by Martin Bachem (info@bachem-it.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | * | ||
21 | * | ||
22 | * module params | ||
23 | * debug=<n>, default=0, with n=0xHHHHGGGG | ||
24 | * H - l1 driver flags described in hfcsusb.h | ||
25 | * G - common mISDN debug flags described at mISDNhw.h | ||
26 | * | ||
27 | * poll=<n>, default 128 | ||
28 | * n : burst size of PH_DATA_IND at transparent rx data | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/usb.h> | ||
35 | #include <linux/mISDNhw.h> | ||
36 | #include "hfcsusb.h" | ||
37 | |||
38 | const char *hfcsusb_rev = "Revision: 0.3.3 (socket), 2008-11-05"; | ||
39 | |||
40 | static unsigned int debug; | ||
41 | static int poll = DEFAULT_TRANSP_BURST_SZ; | ||
42 | |||
43 | static LIST_HEAD(HFClist); | ||
44 | static DEFINE_RWLOCK(HFClock); | ||
45 | |||
46 | |||
47 | MODULE_AUTHOR("Martin Bachem"); | ||
48 | MODULE_LICENSE("GPL"); | ||
49 | module_param(debug, uint, S_IRUGO | S_IWUSR); | ||
50 | module_param(poll, int, 0); | ||
51 | |||
52 | static int hfcsusb_cnt; | ||
53 | |||
54 | /* some function prototypes */ | ||
55 | static void hfcsusb_ph_command(struct hfcsusb *hw, u_char command); | ||
56 | static void release_hw(struct hfcsusb *hw); | ||
57 | static void reset_hfcsusb(struct hfcsusb *hw); | ||
58 | static void setPortMode(struct hfcsusb *hw); | ||
59 | static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel); | ||
60 | static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel); | ||
61 | static int hfcsusb_setup_bch(struct bchannel *bch, int protocol); | ||
62 | static void deactivate_bchannel(struct bchannel *bch); | ||
63 | static void hfcsusb_ph_info(struct hfcsusb *hw); | ||
64 | |||
65 | /* start next background transfer for control channel */ | ||
66 | static void | ||
67 | ctrl_start_transfer(struct hfcsusb *hw) | ||
68 | { | ||
69 | if (debug & DBG_HFC_CALL_TRACE) | ||
70 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); | ||
71 | |||
72 | if (hw->ctrl_cnt) { | ||
73 | hw->ctrl_urb->pipe = hw->ctrl_out_pipe; | ||
74 | hw->ctrl_urb->setup_packet = (u_char *)&hw->ctrl_write; | ||
75 | hw->ctrl_urb->transfer_buffer = NULL; | ||
76 | hw->ctrl_urb->transfer_buffer_length = 0; | ||
77 | hw->ctrl_write.wIndex = | ||
78 | cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].hfcs_reg); | ||
79 | hw->ctrl_write.wValue = | ||
80 | cpu_to_le16(hw->ctrl_buff[hw->ctrl_out_idx].reg_val); | ||
81 | |||
82 | usb_submit_urb(hw->ctrl_urb, GFP_ATOMIC); | ||
83 | } | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * queue a control transfer request to write HFC-S USB | ||
88 | * chip register using CTRL resuest queue | ||
89 | */ | ||
90 | static int write_reg(struct hfcsusb *hw, __u8 reg, __u8 val) | ||
91 | { | ||
92 | struct ctrl_buf *buf; | ||
93 | |||
94 | if (debug & DBG_HFC_CALL_TRACE) | ||
95 | printk(KERN_DEBUG "%s: %s reg(0x%02x) val(0x%02x)\n", | ||
96 | hw->name, __func__, reg, val); | ||
97 | |||
98 | spin_lock(&hw->ctrl_lock); | ||
99 | if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) | ||
100 | return 1; | ||
101 | buf = &hw->ctrl_buff[hw->ctrl_in_idx]; | ||
102 | buf->hfcs_reg = reg; | ||
103 | buf->reg_val = val; | ||
104 | if (++hw->ctrl_in_idx >= HFC_CTRL_BUFSIZE) | ||
105 | hw->ctrl_in_idx = 0; | ||
106 | if (++hw->ctrl_cnt == 1) | ||
107 | ctrl_start_transfer(hw); | ||
108 | spin_unlock(&hw->ctrl_lock); | ||
109 | |||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | /* control completion routine handling background control cmds */ | ||
114 | static void | ||
115 | ctrl_complete(struct urb *urb) | ||
116 | { | ||
117 | struct hfcsusb *hw = (struct hfcsusb *) urb->context; | ||
118 | struct ctrl_buf *buf; | ||
119 | |||
120 | if (debug & DBG_HFC_CALL_TRACE) | ||
121 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); | ||
122 | |||
123 | urb->dev = hw->dev; | ||
124 | if (hw->ctrl_cnt) { | ||
125 | buf = &hw->ctrl_buff[hw->ctrl_out_idx]; | ||
126 | hw->ctrl_cnt--; /* decrement actual count */ | ||
127 | if (++hw->ctrl_out_idx >= HFC_CTRL_BUFSIZE) | ||
128 | hw->ctrl_out_idx = 0; /* pointer wrap */ | ||
129 | |||
130 | ctrl_start_transfer(hw); /* start next transfer */ | ||
131 | } | ||
132 | } | ||
133 | |||
134 | /* handle LED bits */ | ||
135 | static void | ||
136 | set_led_bit(struct hfcsusb *hw, signed short led_bits, int set_on) | ||
137 | { | ||
138 | if (set_on) { | ||
139 | if (led_bits < 0) | ||
140 | hw->led_state &= ~abs(led_bits); | ||
141 | else | ||
142 | hw->led_state |= led_bits; | ||
143 | } else { | ||
144 | if (led_bits < 0) | ||
145 | hw->led_state |= abs(led_bits); | ||
146 | else | ||
147 | hw->led_state &= ~led_bits; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | /* handle LED requests */ | ||
152 | static void | ||
153 | handle_led(struct hfcsusb *hw, int event) | ||
154 | { | ||
155 | struct hfcsusb_vdata *driver_info = (struct hfcsusb_vdata *) | ||
156 | hfcsusb_idtab[hw->vend_idx].driver_info; | ||
157 | __u8 tmpled; | ||
158 | |||
159 | if (driver_info->led_scheme == LED_OFF) | ||
160 | return; | ||
161 | tmpled = hw->led_state; | ||
162 | |||
163 | switch (event) { | ||
164 | case LED_POWER_ON: | ||
165 | set_led_bit(hw, driver_info->led_bits[0], 1); | ||
166 | set_led_bit(hw, driver_info->led_bits[1], 0); | ||
167 | set_led_bit(hw, driver_info->led_bits[2], 0); | ||
168 | set_led_bit(hw, driver_info->led_bits[3], 0); | ||
169 | break; | ||
170 | case LED_POWER_OFF: | ||
171 | set_led_bit(hw, driver_info->led_bits[0], 0); | ||
172 | set_led_bit(hw, driver_info->led_bits[1], 0); | ||
173 | set_led_bit(hw, driver_info->led_bits[2], 0); | ||
174 | set_led_bit(hw, driver_info->led_bits[3], 0); | ||
175 | break; | ||
176 | case LED_S0_ON: | ||
177 | set_led_bit(hw, driver_info->led_bits[1], 1); | ||
178 | break; | ||
179 | case LED_S0_OFF: | ||
180 | set_led_bit(hw, driver_info->led_bits[1], 0); | ||
181 | break; | ||
182 | case LED_B1_ON: | ||
183 | set_led_bit(hw, driver_info->led_bits[2], 1); | ||
184 | break; | ||
185 | case LED_B1_OFF: | ||
186 | set_led_bit(hw, driver_info->led_bits[2], 0); | ||
187 | break; | ||
188 | case LED_B2_ON: | ||
189 | set_led_bit(hw, driver_info->led_bits[3], 1); | ||
190 | break; | ||
191 | case LED_B2_OFF: | ||
192 | set_led_bit(hw, driver_info->led_bits[3], 0); | ||
193 | break; | ||
194 | } | ||
195 | |||
196 | if (hw->led_state != tmpled) { | ||
197 | if (debug & DBG_HFC_CALL_TRACE) | ||
198 | printk(KERN_DEBUG "%s: %s reg(0x%02x) val(x%02x)\n", | ||
199 | hw->name, __func__, | ||
200 | HFCUSB_P_DATA, hw->led_state); | ||
201 | |||
202 | write_reg(hw, HFCUSB_P_DATA, hw->led_state); | ||
203 | } | ||
204 | } | ||
205 | |||
/*
 * Layer2 -> Layer 1 Bchannel data
 *
 * mISDN send callback for a B-channel: queues PH_DATA_REQ payloads for
 * transmission and handles (de)activation requests.  When 0 is
 * returned the skb has been consumed (freed or handed to the TX path).
 */
static int
hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct hfcsusb *hw = bch->hw;
	int ret = -EINVAL;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	u_long flags;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&hw->lock, flags);
		ret = bchannel_senddata(bch, skb);
		spin_unlock_irqrestore(&hw->lock, flags);
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s PH_DATA_REQ ret(%i)\n",
			       hw->name, __func__, ret);
		if (ret > 0) {
			/*
			 * other l1 drivers don't send early confirms on
			 * transp data, but hfcsusb does because tx_next
			 * skb is needed in tx_iso_complete()
			 */
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
			ret = 0;
		}
		/* early return: skb was handed to bchannel_senddata() */
		return ret;
	case PH_ACTIVATE_REQ:
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
			/* first activation: bring up the USB endpoint,
			 * then program the channel protocol */
			hfcsusb_start_endpoint(hw, bch->nr);
			ret = hfcsusb_setup_bch(bch, ch->protocol);
		} else
			ret = 0;	/* already active: just confirm */
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
				    0, NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		deactivate_bchannel(bch);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	/* success paths of the activate/deactivate cases consume the skb */
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
260 | |||
261 | /* | ||
262 | * send full D/B channel status information | ||
263 | * as MPH_INFORMATION_IND | ||
264 | */ | ||
265 | static void | ||
266 | hfcsusb_ph_info(struct hfcsusb *hw) | ||
267 | { | ||
268 | struct ph_info *phi; | ||
269 | struct dchannel *dch = &hw->dch; | ||
270 | int i; | ||
271 | |||
272 | phi = kzalloc(sizeof(struct ph_info) + | ||
273 | dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC); | ||
274 | phi->dch.ch.protocol = hw->protocol; | ||
275 | phi->dch.ch.Flags = dch->Flags; | ||
276 | phi->dch.state = dch->state; | ||
277 | phi->dch.num_bch = dch->dev.nrbchan; | ||
278 | for (i = 0; i < dch->dev.nrbchan; i++) { | ||
279 | phi->bch[i].protocol = hw->bch[i].ch.protocol; | ||
280 | phi->bch[i].Flags = hw->bch[i].Flags; | ||
281 | } | ||
282 | _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY, | ||
283 | sizeof(struct ph_info_dch) + dch->dev.nrbchan * | ||
284 | sizeof(struct ph_info_ch), phi, GFP_ATOMIC); | ||
285 | } | ||
286 | |||
/*
 * Layer2 -> Layer 1 Dchannel data
 *
 * mISDN send callback for the D-channel: transmits PH_DATA_REQ frames
 * and drives layer-1 activation/deactivation, with separate paths for
 * NT mode (direct chip commands) and TE mode (via the l1 state machine).
 */
static int
hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct hfcsusb *hw = dch->hw;
	int ret = -EINVAL;
	u_long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_DATA_REQ\n",
			       hw->name, __func__);

		spin_lock_irqsave(&hw->lock, flags);
		ret = dchannel_senddata(dch, skb);
		spin_unlock_irqrestore(&hw->lock, flags);
		if (ret > 0) {
			/* frame accepted for transmission: confirm early */
			ret = 0;
			queue_ch_frame(ch, PH_DATA_CNF, hh->id, NULL);
		}
		break;

	case PH_ACTIVATE_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_ACTIVATE_REQ %s\n",
			       hw->name, __func__,
			       (hw->protocol == ISDN_P_NT_S0) ? "NT" : "TE");

		if (hw->protocol == ISDN_P_NT_S0) {
			ret = 0;
			if (test_bit(FLG_ACTIVE, &dch->Flags)) {
				/* already active: answer immediately */
				_queue_data(&dch->dev.D,
					    PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
					    NULL, GFP_ATOMIC);
			} else {
				/* start NT-side activation on the chip and
				 * remember that L2 requested it */
				hfcsusb_ph_command(hw,
						   HFC_L1_ACTIVATE_NT);
				test_and_set_bit(FLG_L2_ACTIVATED,
						 &dch->Flags);
			}
		} else {
			/* TE mode: kick the chip, then let the mISDN l1
			 * state machine track the request */
			hfcsusb_ph_command(hw, HFC_L1_ACTIVATE_TE);
			ret = l1_event(dch->l1, hh->prim);
		}
		break;

	case PH_DEACTIVATE_REQ:
		if (debug & DBG_HFC_CALL_TRACE)
			printk(KERN_DEBUG "%s: %s: PH_DEACTIVATE_REQ\n",
			       hw->name, __func__);
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);

		if (hw->protocol == ISDN_P_NT_S0) {
			hfcsusb_ph_command(hw, HFC_L1_DEACTIVATE_NT);
			/* drop all queued and in-flight D-channel data */
			spin_lock_irqsave(&hw->lock, flags);
			skb_queue_purge(&dch->squeue);
			if (dch->tx_skb) {
				dev_kfree_skb(dch->tx_skb);
				dch->tx_skb = NULL;
			}
			dch->tx_idx = 0;
			if (dch->rx_skb) {
				dev_kfree_skb(dch->rx_skb);
				dch->rx_skb = NULL;
			}
			test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
			spin_unlock_irqrestore(&hw->lock, flags);
#ifdef FIXME
			if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
				dchannel_sched_event(&hc->dch, D_CLEARBUSY);
#endif
			ret = 0;
		} else
			ret = l1_event(dch->l1, hh->prim);
		break;
	case MPH_INFORMATION_REQ:
		hfcsusb_ph_info(hw);
		ret = 0;
		break;
	}

	return ret;
}
376 | |||
377 | /* | ||
378 | * Layer 1 callback function | ||
379 | */ | ||
380 | static int | ||
381 | hfc_l1callback(struct dchannel *dch, u_int cmd) | ||
382 | { | ||
383 | struct hfcsusb *hw = dch->hw; | ||
384 | |||
385 | if (debug & DBG_HFC_CALL_TRACE) | ||
386 | printk(KERN_DEBUG "%s: %s cmd 0x%x\n", | ||
387 | hw->name, __func__, cmd); | ||
388 | |||
389 | switch (cmd) { | ||
390 | case INFO3_P8: | ||
391 | case INFO3_P10: | ||
392 | case HW_RESET_REQ: | ||
393 | case HW_POWERUP_REQ: | ||
394 | break; | ||
395 | |||
396 | case HW_DEACT_REQ: | ||
397 | skb_queue_purge(&dch->squeue); | ||
398 | if (dch->tx_skb) { | ||
399 | dev_kfree_skb(dch->tx_skb); | ||
400 | dch->tx_skb = NULL; | ||
401 | } | ||
402 | dch->tx_idx = 0; | ||
403 | if (dch->rx_skb) { | ||
404 | dev_kfree_skb(dch->rx_skb); | ||
405 | dch->rx_skb = NULL; | ||
406 | } | ||
407 | test_and_clear_bit(FLG_TX_BUSY, &dch->Flags); | ||
408 | break; | ||
409 | case PH_ACTIVATE_IND: | ||
410 | test_and_set_bit(FLG_ACTIVE, &dch->Flags); | ||
411 | _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, | ||
412 | GFP_ATOMIC); | ||
413 | break; | ||
414 | case PH_DEACTIVATE_IND: | ||
415 | test_and_clear_bit(FLG_ACTIVE, &dch->Flags); | ||
416 | _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL, | ||
417 | GFP_ATOMIC); | ||
418 | break; | ||
419 | default: | ||
420 | if (dch->debug & DEBUG_HW) | ||
421 | printk(KERN_DEBUG "%s: %s: unknown cmd %x\n", | ||
422 | hw->name, __func__, cmd); | ||
423 | return -1; | ||
424 | } | ||
425 | hfcsusb_ph_info(hw); | ||
426 | return 0; | ||
427 | } | ||
428 | |||
/*
 * Open the D-channel (and optionally the E-channel logger).
 *
 * On the first open the port protocol (TE/NT) is fixed and the layer-1
 * state machine is created for TE mode; later opens must request the
 * same protocol.  rq->adr.channel == 1 additionally enables E-channel
 * logging when the hardware provides the PCM RX pipe.
 */
static int
open_dchannel(struct hfcsusb *hw, struct mISDNchannel *ch,
	      struct channel_req *rq)
{
	int err = 0;

	if (debug & DEBUG_HW_OPEN)
		printk(KERN_DEBUG "%s: %s: dev(%d) open addr(%i) from %p\n",
		       hw->name, __func__, hw->dch.dev.id, rq->adr.channel,
		       __builtin_return_address(0));
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;

	test_and_clear_bit(FLG_ACTIVE, &hw->dch.Flags);
	test_and_clear_bit(FLG_ACTIVE, &hw->ech.Flags);
	hfcsusb_start_endpoint(hw, HFC_CHAN_D);

	/* E-Channel logging */
	if (rq->adr.channel == 1) {
		if (hw->fifos[HFCUSB_PCM_RX].pipe) {
			hfcsusb_start_endpoint(hw, HFC_CHAN_E);
			set_bit(FLG_ACTIVE, &hw->ech.Flags);
			_queue_data(&hw->ech.dev.D, PH_ACTIVATE_IND,
				    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		} else
			return -EINVAL;
	}

	if (!hw->initdone) {
		/* first open decides TE vs NT mode for this port */
		hw->protocol = rq->protocol;
		if (rq->protocol == ISDN_P_TE_S0) {
			err = create_l1(&hw->dch, hfc_l1callback);
			if (err)
				return err;
		}
		setPortMode(hw);
		ch->protocol = rq->protocol;
		hw->initdone = 1;
	} else {
		/* subsequent opens must match the established protocol */
		if (rq->protocol != ch->protocol)
			return -EPROTONOSUPPORT;
	}

	/* if layer 1 is already up (NT G3 / TE F7), tell the opener now */
	if (((ch->protocol == ISDN_P_NT_S0) && (hw->dch.state == 3)) ||
	    ((ch->protocol == ISDN_P_TE_S0) && (hw->dch.state == 7)))
		_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
			    0, NULL, GFP_KERNEL);
	rq->ch = ch;
	if (!try_module_get(THIS_MODULE))
		printk(KERN_WARNING "%s: %s: cannot get module\n",
		       hw->name, __func__);
	return 0;
}
482 | |||
483 | static int | ||
484 | open_bchannel(struct hfcsusb *hw, struct channel_req *rq) | ||
485 | { | ||
486 | struct bchannel *bch; | ||
487 | |||
488 | if (rq->adr.channel > 2) | ||
489 | return -EINVAL; | ||
490 | if (rq->protocol == ISDN_P_NONE) | ||
491 | return -EINVAL; | ||
492 | |||
493 | if (debug & DBG_HFC_CALL_TRACE) | ||
494 | printk(KERN_DEBUG "%s: %s B%i\n", | ||
495 | hw->name, __func__, rq->adr.channel); | ||
496 | |||
497 | bch = &hw->bch[rq->adr.channel - 1]; | ||
498 | if (test_and_set_bit(FLG_OPEN, &bch->Flags)) | ||
499 | return -EBUSY; /* b-channel can be only open once */ | ||
500 | test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags); | ||
501 | bch->ch.protocol = rq->protocol; | ||
502 | rq->ch = &bch->ch; | ||
503 | |||
504 | /* start USB endpoint for bchannel */ | ||
505 | if (rq->adr.channel == 1) | ||
506 | hfcsusb_start_endpoint(hw, HFC_CHAN_B1); | ||
507 | else | ||
508 | hfcsusb_start_endpoint(hw, HFC_CHAN_B2); | ||
509 | |||
510 | if (!try_module_get(THIS_MODULE)) | ||
511 | printk(KERN_WARNING "%s: %s:cannot get module\n", | ||
512 | hw->name, __func__); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | static int | ||
517 | channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq) | ||
518 | { | ||
519 | int ret = 0; | ||
520 | |||
521 | if (debug & DBG_HFC_CALL_TRACE) | ||
522 | printk(KERN_DEBUG "%s: %s op(0x%x) channel(0x%x)\n", | ||
523 | hw->name, __func__, (cq->op), (cq->channel)); | ||
524 | |||
525 | switch (cq->op) { | ||
526 | case MISDN_CTRL_GETOP: | ||
527 | cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | | ||
528 | MISDN_CTRL_DISCONNECT; | ||
529 | break; | ||
530 | default: | ||
531 | printk(KERN_WARNING "%s: %s: unknown Op %x\n", | ||
532 | hw->name, __func__, cq->op); | ||
533 | ret = -EINVAL; | ||
534 | break; | ||
535 | } | ||
536 | return ret; | ||
537 | } | ||
538 | |||
/*
 * device control function
 *
 * mISDN D-device ctrl entry point: opens/closes D- and B-channels and
 * dispatches CONTROL_CHANNEL requests.  hw->open counts every
 * successfully opened channel; the last close shuts the endpoints down.
 */
static int
hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct hfcsusb *hw = dch->hw;
	struct channel_req *rq;
	int err = 0;

	if (dch->debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: cmd:%x %p\n",
		       hw->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		/* TE/NT protocols address the D-channel, others a B-channel */
		if ((rq->protocol == ISDN_P_TE_S0) ||
		    (rq->protocol == ISDN_P_NT_S0))
			err = open_dchannel(hw, ch, rq);
		else
			err = open_bchannel(hw, rq);
		if (!err)
			hw->open++;
		break;
	case CLOSE_CHANNEL:
		hw->open--;
		if (debug & DEBUG_HW_OPEN)
			printk(KERN_DEBUG
			       "%s: %s: dev(%d) close from %p (open %d)\n",
			       hw->name, __func__, hw->dch.dev.id,
			       __builtin_return_address(0), hw->open);
		if (!hw->open) {
			/* last user gone: stop D/E endpoints, LEDs idle */
			hfcsusb_stop_endpoint(hw, HFC_CHAN_D);
			if (hw->fifos[HFCUSB_PCM_RX].pipe)
				hfcsusb_stop_endpoint(hw, HFC_CHAN_E);
			handle_led(hw, LED_POWER_ON);
		}
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(hw, arg);
		break;
	default:
		if (dch->debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: unknown command %x\n",
			       hw->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
591 | |||
592 | /* | ||
593 | * S0 TE state change event handler | ||
594 | */ | ||
595 | static void | ||
596 | ph_state_te(struct dchannel *dch) | ||
597 | { | ||
598 | struct hfcsusb *hw = dch->hw; | ||
599 | |||
600 | if (debug & DEBUG_HW) { | ||
601 | if (dch->state <= HFC_MAX_TE_LAYER1_STATE) | ||
602 | printk(KERN_DEBUG "%s: %s: %s\n", hw->name, __func__, | ||
603 | HFC_TE_LAYER1_STATES[dch->state]); | ||
604 | else | ||
605 | printk(KERN_DEBUG "%s: %s: TE F%d\n", | ||
606 | hw->name, __func__, dch->state); | ||
607 | } | ||
608 | |||
609 | switch (dch->state) { | ||
610 | case 0: | ||
611 | l1_event(dch->l1, HW_RESET_IND); | ||
612 | break; | ||
613 | case 3: | ||
614 | l1_event(dch->l1, HW_DEACT_IND); | ||
615 | break; | ||
616 | case 5: | ||
617 | case 8: | ||
618 | l1_event(dch->l1, ANYSIGNAL); | ||
619 | break; | ||
620 | case 6: | ||
621 | l1_event(dch->l1, INFO2); | ||
622 | break; | ||
623 | case 7: | ||
624 | l1_event(dch->l1, INFO4_P8); | ||
625 | break; | ||
626 | } | ||
627 | if (dch->state == 7) | ||
628 | handle_led(hw, LED_S0_ON); | ||
629 | else | ||
630 | handle_led(hw, LED_S0_OFF); | ||
631 | } | ||
632 | |||
/*
 * S0 NT state change event handler
 *
 * Reacts to NT-side layer-1 state changes (G1..G4): manages the NT
 * activation timer, signals PH_ACTIVATE_IND on G3, and finally reports
 * the new status via hfcsusb_ph_info().
 */
static void
ph_state_nt(struct dchannel *dch)
{
	struct hfcsusb *hw = dch->hw;

	if (debug & DEBUG_HW) {
		if (dch->state <= HFC_MAX_NT_LAYER1_STATE)
			printk(KERN_DEBUG "%s: %s: %s\n",
			       hw->name, __func__,
			       HFC_NT_LAYER1_STATES[dch->state]);

		else
			/* NOTE(review): KERN_INFO + DRIVER_NAME prefix is
			 * inconsistent with the other traces here */
			printk(KERN_INFO DRIVER_NAME "%s: %s: NT G%d\n",
			       hw->name, __func__, dch->state);
	}

	switch (dch->state) {
	case (1):
		/* G1 deactivated: clear activation state and timer */
		test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
		test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		handle_led(hw, LED_S0_OFF);
		break;

	case (2):
		/* G2 pending activation */
		if (hw->nt_timer < 0) {
			/* activation timed out: force deactivation */
			hw->nt_timer = 0;
			hw->timers &= ~NT_ACTIVATION_TIMER;
			hfcsusb_ph_command(dch->hw, HFC_L1_DEACTIVATE_NT);
		} else {
			/* (re)arm the T1 activation timer */
			hw->timers |= NT_ACTIVATION_TIMER;
			hw->nt_timer = NT_T1_COUNT;
			/* allow G2 -> G3 transition */
			write_reg(hw, HFCUSB_STATES, 2 | HFCUSB_NT_G2_G3);
		}
		break;
	case (3):
		/* G3 active: announce activation upward */
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		test_and_set_bit(FLG_ACTIVE, &dch->Flags);
		_queue_data(&dch->dev.D, PH_ACTIVATE_IND,
			    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		handle_led(hw, LED_S0_ON);
		break;
	case (4):
		/* G4 pending deactivation: just stop the timer */
		hw->nt_timer = 0;
		hw->timers &= ~NT_ACTIVATION_TIMER;
		break;
	default:
		break;
	}
	hfcsusb_ph_info(hw);
}
690 | |||
691 | static void | ||
692 | ph_state(struct dchannel *dch) | ||
693 | { | ||
694 | struct hfcsusb *hw = dch->hw; | ||
695 | |||
696 | if (hw->protocol == ISDN_P_NT_S0) | ||
697 | ph_state_nt(dch); | ||
698 | else if (hw->protocol == ISDN_P_TE_S0) | ||
699 | ph_state_te(dch); | ||
700 | } | ||
701 | |||
/*
 * disable/enable BChannel for desired protocol
 *
 * Programs the chip's FIFO/CON_HDLC/SCTRL registers for the requested
 * B-channel protocol (off, transparent or HDLC) and updates the
 * B-channel LED.  Returns 0 on success, -ENOPROTOOPT for an unknown
 * protocol.
 */
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
{
	struct hfcsusb *hw = bch->hw;
	__u8 conhdlc, sctrl, sctrl_r;

	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: protocol %x-->%x B%d\n",
		       hw->name, __func__, bch->state, protocol,
		       bch->nr);

	/* setup val for CON_HDLC */
	conhdlc = 0;
	if (protocol > ISDN_P_NONE)
		conhdlc = 8;	/* enable FIFO */

	switch (protocol) {
	case (-1):	/* used for init */
		bch->state = -1;
		/* fall through */
	case (ISDN_P_NONE):
		if (bch->state == ISDN_P_NONE)
			return 0; /* already in idle state */
		bch->state = ISDN_P_NONE;
		clear_bit(FLG_HDLC, &bch->Flags);
		clear_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_RAW):
		conhdlc |= 2;	/* transparent mode */
		bch->state = protocol;
		set_bit(FLG_TRANSPARENT, &bch->Flags);
		break;
	case (ISDN_P_B_HDLC):
		bch->state = protocol;
		set_bit(FLG_HDLC, &bch->Flags);
		break;
	default:
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: prot not known %x\n",
			       hw->name, __func__, protocol);
		return -ENOPROTOOPT;
	}

	if (protocol >= ISDN_P_NONE) {
		/* configure TX (even) and RX (odd) FIFO of this channel,
		 * resetting each FIFO via INC_RES_F */
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 0 : 2);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);
		write_reg(hw, HFCUSB_FIFO, (bch->nr == 1) ? 1 : 3);
		write_reg(hw, HFCUSB_CON_HDLC, conhdlc);
		write_reg(hw, HFCUSB_INC_RES_F, 2);

		/* rebuild SCTRL/SCTRL_R from the current activation state
		 * of both B-channels */
		sctrl = 0x40 + ((hw->protocol == ISDN_P_TE_S0) ? 0x00 : 0x04);
		sctrl_r = 0x0;
		if (test_bit(FLG_ACTIVE, &hw->bch[0].Flags)) {
			sctrl |= 1;
			sctrl_r |= 1;
		}
		if (test_bit(FLG_ACTIVE, &hw->bch[1].Flags)) {
			sctrl |= 2;
			sctrl_r |= 2;
		}
		write_reg(hw, HFCUSB_SCTRL, sctrl);
		write_reg(hw, HFCUSB_SCTRL_R, sctrl_r);

		if (protocol > ISDN_P_NONE)
			handle_led(hw, (bch->nr == 1) ? LED_B1_ON : LED_B2_ON);
		else
			handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
				   LED_B2_OFF);
	}
	hfcsusb_ph_info(hw);
	return 0;
}
778 | |||
/*
 * Issue a layer-1 command to the chip by writing the STATES register
 * (activation/deactivation for TE and NT mode).
 */
static void
hfcsusb_ph_command(struct hfcsusb *hw, u_char command)
{
	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s: %x\n",
		       hw->name, __func__, command);

	switch (command) {
	case HFC_L1_ACTIVATE_TE:
		/* force sending INFO1 */
		write_reg(hw, HFCUSB_STATES, 0x14);
		/* start l1 activation */
		write_reg(hw, HFCUSB_STATES, 0x04);
		break;

	case HFC_L1_FORCE_DEACTIVATE_TE:
		/* load state F3 (deactivated), then release the load bit */
		write_reg(hw, HFCUSB_STATES, 0x10);
		write_reg(hw, HFCUSB_STATES, 0x03);
		break;

	case HFC_L1_ACTIVATE_NT:
		if (hw->dch.state == 3)
			/* already in G3: just report activation */
			_queue_data(&hw->dch.dev.D, PH_ACTIVATE_IND,
				    MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
		else
			write_reg(hw, HFCUSB_STATES, HFCUSB_ACTIVATE |
				  HFCUSB_DO_ACTION | HFCUSB_NT_G2_G3);
		break;

	case HFC_L1_DEACTIVATE_NT:
		write_reg(hw, HFCUSB_STATES,
			  HFCUSB_DO_ACTION);
		break;
	}
}
814 | |||
815 | /* | ||
816 | * Layer 1 B-channel hardware access | ||
817 | */ | ||
818 | static int | ||
819 | channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) | ||
820 | { | ||
821 | int ret = 0; | ||
822 | |||
823 | switch (cq->op) { | ||
824 | case MISDN_CTRL_GETOP: | ||
825 | cq->op = MISDN_CTRL_FILL_EMPTY; | ||
826 | break; | ||
827 | case MISDN_CTRL_FILL_EMPTY: /* fill fifo, if empty */ | ||
828 | test_and_set_bit(FLG_FILLEMPTY, &bch->Flags); | ||
829 | if (debug & DEBUG_HW_OPEN) | ||
830 | printk(KERN_DEBUG "%s: FILL_EMPTY request (nr=%d " | ||
831 | "off=%d)\n", __func__, bch->nr, !!cq->p1); | ||
832 | break; | ||
833 | default: | ||
834 | printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op); | ||
835 | ret = -EINVAL; | ||
836 | break; | ||
837 | } | ||
838 | return ret; | ||
839 | } | ||
840 | |||
/*
 * collect data from incoming interrupt or isochron USB data
 *
 * Appends len bytes to the channel's rx_skb (allocating one if needed)
 * and, for HDLC channels, delivers the frame upward once 'finish'
 * marks its end and the trailing STAT byte reports no CRC error.
 * Transparent B-channel data is delivered in 'poll'-sized chunks.
 * Exactly one of fifo->dch / fifo->bch / fifo->ech must be set.
 */
static void
hfcsusb_rx_frame(struct usb_fifo *fifo, __u8 *data, unsigned int len,
		 int finish)
{
	struct hfcsusb *hw = fifo->hw;
	struct sk_buff *rx_skb = NULL;
	int maxlen = 0;
	int fifon = fifo->fifonum;
	int i;
	int hdlc = 0;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s: fifo(%i) len(%i) "
		       "dch(%p) bch(%p) ech(%p)\n",
		       hw->name, __func__, fifon, len,
		       fifo->dch, fifo->bch, fifo->ech);

	if (!len)
		return;

	/* exactly one channel pointer must be assigned to this fifo */
	if ((!!fifo->dch + !!fifo->bch + !!fifo->ech) != 1) {
		printk(KERN_DEBUG "%s: %s: undefined channel\n",
		       hw->name, __func__);
		return;
	}

	spin_lock(&hw->lock);
	/* D- and E-channels are always HDLC; B depends on FLG_HDLC */
	if (fifo->dch) {
		rx_skb = fifo->dch->rx_skb;
		maxlen = fifo->dch->maxlen;
		hdlc = 1;
	}
	if (fifo->bch) {
		rx_skb = fifo->bch->rx_skb;
		maxlen = fifo->bch->maxlen;
		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
	}
	if (fifo->ech) {
		rx_skb = fifo->ech->rx_skb;
		maxlen = fifo->ech->maxlen;
		hdlc = 1;
	}

	if (!rx_skb) {
		/* no partial frame pending: start a fresh rx_skb */
		rx_skb = mI_alloc_skb(maxlen, GFP_ATOMIC);
		if (rx_skb) {
			if (fifo->dch)
				fifo->dch->rx_skb = rx_skb;
			if (fifo->bch)
				fifo->bch->rx_skb = rx_skb;
			if (fifo->ech)
				fifo->ech->rx_skb = rx_skb;
			skb_trim(rx_skb, 0);
		} else {
			printk(KERN_DEBUG "%s: %s: No mem for rx_skb\n",
			       hw->name, __func__);
			spin_unlock(&hw->lock);
			return;
		}
	}

	if (fifo->dch || fifo->ech) {
		/* D/E-Channel SKB range check */
		if ((rx_skb->len + len) >= MAX_DFRAME_LEN_L1) {
			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
			       "for fifo(%d) HFCUSB_D_RX\n",
			       hw->name, __func__, fifon);
			skb_trim(rx_skb, 0);	/* drop the partial frame */
			spin_unlock(&hw->lock);
			return;
		}
	} else if (fifo->bch) {
		/* B-Channel SKB range check */
		if ((rx_skb->len + len) >= (MAX_BCH_SIZE + 3)) {
			printk(KERN_DEBUG "%s: %s: sbk mem exceeded "
			       "for fifo(%d) HFCUSB_B_RX\n",
			       hw->name, __func__, fifon);
			skb_trim(rx_skb, 0);	/* drop the partial frame */
			spin_unlock(&hw->lock);
			return;
		}
	}

	memcpy(skb_put(rx_skb, len), data, len);

	if (hdlc) {
		/* we have a complete hdlc packet */
		if (finish) {
			/* last byte is the STAT byte: 0 means CRC ok;
			 * frames of <= 3 bytes carry no payload */
			if ((rx_skb->len > 3) &&
			    (!(rx_skb->data[rx_skb->len - 1]))) {
				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG "%s: %s: fifon(%i)"
					       " new RX len(%i): ",
					       hw->name, __func__, fifon,
					       rx_skb->len);
					i = 0;
					while (i < rx_skb->len)
						printk("%02x ",
						       rx_skb->data[i++]);
					printk("\n");
				}

				/* remove CRC & status */
				skb_trim(rx_skb, rx_skb->len - 3);

				if (fifo->dch)
					recv_Dchannel(fifo->dch);
				if (fifo->bch)
					recv_Bchannel(fifo->bch);
				if (fifo->ech)
					recv_Echannel(fifo->ech,
						      &hw->dch);
			} else {
				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG
					       "%s: CRC or minlen ERROR fifon(%i) "
					       "RX len(%i): ",
					       hw->name, fifon, rx_skb->len);
					i = 0;
					while (i < rx_skb->len)
						printk("%02x ",
						       rx_skb->data[i++]);
					printk("\n");
				}
				/* bad frame: discard and reuse the skb */
				skb_trim(rx_skb, 0);
			}
		}
	} else {
		/* deliver transparent data to layer2 */
		if (rx_skb->len >= poll)
			recv_Bchannel(fifo->bch);
	}
	spin_unlock(&hw->lock);
}
976 | |||
/*
 * Initialize an isochronous URB whose buffer is partitioned into
 * num_packets frames of packet_size bytes each.
 *
 * NOTE(review): usb_fill_bulk_urb() is reused here only to set
 * dev/pipe/buffer/length/complete/context; the ISO-specific fields
 * (number_of_packets, transfer_flags, interval, frame descriptors)
 * are filled in afterwards -- confirm this matches the USB core's
 * expectations for ISO pipes.
 */
void
fill_isoc_urb(struct urb *urb, struct usb_device *dev, unsigned int pipe,
	      void *buf, int num_packets, int packet_size, int interval,
	      usb_complete_t complete, void *context)
{
	int k;

	usb_fill_bulk_urb(urb, dev, pipe, buf, packet_size * num_packets,
			  complete, context);

	urb->number_of_packets = num_packets;
	urb->transfer_flags = URB_ISO_ASAP;
	urb->actual_length = 0;
	urb->interval = interval;

	/* evenly partition the buffer into per-frame descriptors */
	for (k = 0; k < num_packets; k++) {
		urb->iso_frame_desc[k].offset = packet_size * k;
		urb->iso_frame_desc[k].length = packet_size;
		urb->iso_frame_desc[k].actual_length = 0;
	}
}
998 | |||
/* receive completion routine for all ISO rx fifos */
static void
rx_iso_complete(struct urb *urb)
{
	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
	struct hfcsusb *hw = fifo->hw;
	int k, len, errcode, offset, num_isoc_packets, fifon, maxlen,
		status, iso_status, i;
	__u8 *buf;
	/* NOTE(review): function-static, so this end-of-frame state is
	 * shared between all device instances -- verify single-device
	 * assumption */
	static __u8 eof[8];
	__u8 s0_state;

	fifon = fifo->fifonum;
	status = urb->status;

	spin_lock(&hw->lock);
	if (fifo->stop_gracefull) {
		/* endpoint shutdown requested: don't resubmit */
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}
	spin_unlock(&hw->lock);

	/*
	 * ISO transfer only partially completed,
	 * look at individual frame status for details
	 */
	if (status == -EXDEV) {
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: with -EXDEV "
			       "urb->status %d, fifonum %d\n",
			       hw->name, __func__, status, fifon);

		/* clear status, so go on with ISO transfers */
		status = 0;
	}

	s0_state = 0;
	if (fifo->active && !status) {
		num_isoc_packets = iso_packets[fifon];
		maxlen = fifo->usb_packet_maxlen;

		/* walk every ISO frame of this URB */
		for (k = 0; k < num_isoc_packets; ++k) {
			len = urb->iso_frame_desc[k].actual_length;
			offset = urb->iso_frame_desc[k].offset;
			buf = context_iso_urb->buffer + offset;
			iso_status = urb->iso_frame_desc[k].status;

			if (iso_status && (debug & DBG_HFC_FIFO_VERBOSE)) {
				printk(KERN_DEBUG "%s: %s: "
				       "ISO packet %i, status: %i\n",
				       hw->name, __func__, k, iso_status);
			}

			/* USB data log for every D ISO in */
			if ((fifon == HFCUSB_D_RX) &&
			    (debug & DBG_HFC_USB_VERBOSE)) {
				printk(KERN_DEBUG
				       "%s: %s: %d (%d/%d) len(%d) ",
				       hw->name, __func__, urb->start_frame,
				       k, num_isoc_packets-1,
				       len);
				for (i = 0; i < len; i++)
					printk("%x ", buf[i]);
				printk("\n");
			}

			if (!iso_status) {
				/* a full-size previous packet means this one
				 * starts with the 2-byte chip status header */
				if (fifo->last_urblen != maxlen) {
					/*
					 * save fifo fill-level threshold bits
					 * to use them later in TX ISO URB
					 * completions
					 */
					hw->threshold_mask = buf[1];

					if (fifon == HFCUSB_D_RX)
						s0_state = (buf[0] >> 4);

					eof[fifon] = buf[0] & 1;
					if (len > 2)
						hfcsusb_rx_frame(fifo, buf + 2,
								 len - 2, (len < maxlen)
								 ? eof[fifon] : 0);
				} else
					hfcsusb_rx_frame(fifo, buf, len,
							 (len < maxlen) ?
							 eof[fifon] : 0);
				fifo->last_urblen = len;
			}
		}

		/* signal S0 layer1 state change */
		if ((s0_state) && (hw->initdone) &&
		    (s0_state != hw->dch.state)) {
			hw->dch.state = s0_state;
			schedule_event(&hw->dch, FLG_PHCHANGE);
		}

		/* re-arm and resubmit the URB for the next interval */
		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      (usb_complete_t)rx_iso_complete, urb->context);
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			if (debug & DEBUG_HW)
				printk(KERN_DEBUG "%s: %s: error submitting "
				       "ISO URB: %d\n",
				       hw->name, __func__, errcode);
		}
	} else {
		/* fifo inactive or fatal URB status: stop this stream */
		if (status && (debug & DBG_HFC_URB_INFO))
			printk(KERN_DEBUG "%s: %s: rx_iso_complete : "
			       "urb->status %d, fifonum %d\n",
			       hw->name, __func__, status, fifon);
	}
}
1118 | |||
/* receive completion routine for all interrupt rx fifos */
static void
rx_int_complete(struct urb *urb)
{
	int len, status, i;
	__u8 *buf, maxlen, fifon;
	struct usb_fifo *fifo = (struct usb_fifo *) urb->context;
	struct hfcsusb *hw = fifo->hw;
	/* per-fifo end-of-frame flag, indexed by fifo number; static so it
	 * survives between URB completions of the same fifo */
	static __u8 eof[8];

	spin_lock(&hw->lock);
	/* graceful shutdown requested: acknowledge and do NOT resubmit */
	if (fifo->stop_gracefull) {
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}
	spin_unlock(&hw->lock);

	fifon = fifo->fifonum;
	/* inactive fifo or transfer error: stop the INT chain here */
	if ((!fifo->active) || (urb->status)) {
		if (debug & DBG_HFC_URB_ERROR)
			printk(KERN_DEBUG
			       "%s: %s: RX-Fifo %i is going down (%i)\n",
			       hw->name, __func__, fifon, urb->status);

		fifo->urb->interval = 0; /* cancel automatic rescheduling */
		return;
	}
	len = urb->actual_length;
	buf = fifo->buffer;
	maxlen = fifo->usb_packet_maxlen;

	/* USB data log for every D INT in */
	if ((fifon == HFCUSB_D_RX) && (debug & DBG_HFC_USB_VERBOSE)) {
		printk(KERN_DEBUG "%s: %s: D RX INT len(%d) ",
		       hw->name, __func__, len);
		for (i = 0; i < len; i++)
			printk("%02x ", buf[i]);
		printk("\n");
	}

	/* a short previous transfer means this packet begins a new HFC
	 * frame carrying 2 leading status bytes (assumption per the
	 * buf[0]/buf[1] use below — TODO confirm against chip docs) */
	if (fifo->last_urblen != fifo->usb_packet_maxlen) {
		/* the threshold mask is in the 2nd status byte */
		hw->threshold_mask = buf[1];

		/* signal S0 layer1 state change */
		if (hw->initdone && ((buf[0] >> 4) != hw->dch.state)) {
			hw->dch.state = (buf[0] >> 4);
			schedule_event(&hw->dch, FLG_PHCHANGE);
		}

		eof[fifon] = buf[0] & 1;
		/* if we have more than the 2 status bytes -> collect data */
		if (len > 2)
			hfcsusb_rx_frame(fifo, buf + 2,
					 urb->actual_length - 2,
					 (len < maxlen) ? eof[fifon] : 0);
	} else {
		/* continuation packet: payload only, no status bytes */
		hfcsusb_rx_frame(fifo, buf, urb->actual_length,
				 (len < maxlen) ? eof[fifon] : 0);
	}
	fifo->last_urblen = urb->actual_length;

	/* re-arm the interrupt URB for the next transfer */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		if (debug & DEBUG_HW)
			printk(KERN_DEBUG "%s: %s: error resubmitting USB\n",
			       hw->name, __func__);
	}
}
1190 | |||
/* transmit completion routine for all ISO tx fifos */
static void
tx_iso_complete(struct urb *urb)
{
	struct iso_urb *context_iso_urb = (struct iso_urb *) urb->context;
	struct usb_fifo *fifo = context_iso_urb->owner_fifo;
	struct hfcsusb *hw = fifo->hw;
	struct sk_buff *tx_skb;
	int k, tx_offset, num_isoc_packets, sink, remain, current_len,
		errcode, hdlc, i;
	int *tx_idx;
	int frame_complete, fifon, status;
	__u8 threshbit;

	spin_lock(&hw->lock);
	/* graceful shutdown requested: acknowledge and do NOT resubmit */
	if (fifo->stop_gracefull) {
		fifo->stop_gracefull = 0;
		fifo->active = 0;
		spin_unlock(&hw->lock);
		return;
	}

	/* pick the channel this fifo serves: D-channel is always HDLC,
	 * B-channel HDLC depends on its FLG_HDLC flag */
	if (fifo->dch) {
		tx_skb = fifo->dch->tx_skb;
		tx_idx = &fifo->dch->tx_idx;
		hdlc = 1;
	} else if (fifo->bch) {
		tx_skb = fifo->bch->tx_skb;
		tx_idx = &fifo->bch->tx_idx;
		hdlc = test_bit(FLG_HDLC, &fifo->bch->Flags);
	} else {
		printk(KERN_DEBUG "%s: %s: neither BCH nor DCH\n",
		       hw->name, __func__);
		spin_unlock(&hw->lock);
		return;
	}

	fifon = fifo->fifonum;
	status = urb->status;

	tx_offset = 0;

	/*
	 * ISO transfer only partially completed,
	 * look at individual frame status for details
	 */
	if (status == -EXDEV) {
		if (debug & DBG_HFC_URB_ERROR)
			printk(KERN_DEBUG "%s: %s: "
			       "-EXDEV (%i) fifon (%d)\n",
			       hw->name, __func__, status, fifon);

		/* clear status, so go on with ISO transfers */
		status = 0;
	}

	if (fifo->active && !status) {
		/* is FifoFull-threshold set for our channel? */
		threshbit = (hw->threshold_mask & (1 << fifon));
		num_isoc_packets = iso_packets[fifon];

		/* predict dataflow to avoid fifo overflow */
		if (fifon >= HFCUSB_D_TX)
			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
		else
			sink = (threshbit) ? SINK_MIN : SINK_MAX;
		/* re-arm the same URB for the next ISO round before
		 * filling its buffer below */
		fill_isoc_urb(urb, fifo->hw->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      (usb_complete_t)tx_iso_complete, urb->context);
		memset(context_iso_urb->buffer, 0,
		       sizeof(context_iso_urb->buffer));
		frame_complete = 0;

		for (k = 0; k < num_isoc_packets; ++k) {
			/* analyze tx success of previous ISO packets */
			if (debug & DBG_HFC_URB_ERROR) {
				errcode = urb->iso_frame_desc[k].status;
				if (errcode) {
					printk(KERN_DEBUG "%s: %s: "
					       "ISO packet %i, status: %i\n",
					       hw->name, __func__, k, errcode);
				}
			}

			/* Generate next ISO Packets */
			if (tx_skb)
				remain = tx_skb->len - *tx_idx;
			else
				remain = 0;

			if (remain > 0) {
				/* bit_line is a data-rate credit counter:
				 * going negative means the line can accept
				 * more bytes this interval */
				fifo->bit_line -= sink;
				current_len = (0 - fifo->bit_line) / 8;
				/* clamp payload to 14 bytes per ISO packet */
				if (current_len > 14)
					current_len = 14;
				if (current_len < 0)
					current_len = 0;
				if (remain < current_len)
					current_len = remain;

				/* how much bit do we put on the line? */
				fifo->bit_line += current_len * 8;

				/* first buffer byte is a control byte:
				 * 0 = frame continues, 1 = frame ends */
				context_iso_urb->buffer[tx_offset] = 0;
				if (current_len == remain) {
					if (hdlc) {
						/* signal frame completion */
						context_iso_urb->
							buffer[tx_offset] = 1;
						/* add 2 byte flags and 16bit
						 * CRC at end of ISDN frame */
						fifo->bit_line += 32;
					}
					frame_complete = 1;
				}

				/* copy tx data to iso-urb buffer */
				memcpy(context_iso_urb->buffer + tx_offset + 1,
				       (tx_skb->data + *tx_idx), current_len);
				*tx_idx += current_len;

				urb->iso_frame_desc[k].offset = tx_offset;
				urb->iso_frame_desc[k].length = current_len + 1;

				/* USB data log for every D ISO out */
				if ((fifon == HFCUSB_D_RX) &&
				    (debug & DBG_HFC_USB_VERBOSE)) {
					printk(KERN_DEBUG
					       "%s: %s (%d/%d) offs(%d) len(%d) ",
					       hw->name, __func__,
					       k, num_isoc_packets-1,
					       urb->iso_frame_desc[k].offset,
					       urb->iso_frame_desc[k].length);

					for (i = urb->iso_frame_desc[k].offset;
					     i < (urb->iso_frame_desc[k].offset
						  + urb->iso_frame_desc[k].length);
					     i++)
						printk("%x ",
						       context_iso_urb->buffer[i]);

					printk(" skb->len(%i) tx-idx(%d)\n",
					       tx_skb->len, *tx_idx);
				}

				tx_offset += (current_len + 1);
			} else {
				/* nothing to send: emit a 1-byte filler
				 * packet (control byte only) */
				urb->iso_frame_desc[k].offset = tx_offset++;
				urb->iso_frame_desc[k].length = 1;
				/* we lower data margin every msec */
				fifo->bit_line -= sink;
				if (fifo->bit_line < BITLINE_INF)
					fifo->bit_line = BITLINE_INF;
			}

			if (frame_complete) {
				frame_complete = 0;

				if (debug & DBG_HFC_FIFO_VERBOSE) {
					printk(KERN_DEBUG "%s: %s: "
					       "fifon(%i) new TX len(%i): ",
					       hw->name, __func__,
					       fifon, tx_skb->len);
					i = 0;
					while (i < tx_skb->len)
						printk("%02x ",
						       tx_skb->data[i++]);
					printk("\n");
				}

				/* frame fully queued: free it and fetch the
				 * next pending frame for this channel */
				dev_kfree_skb(tx_skb);
				tx_skb = NULL;
				if (fifo->dch && get_next_dframe(fifo->dch))
					tx_skb = fifo->dch->tx_skb;
				else if (fifo->bch &&
					 get_next_bframe(fifo->bch)) {
					if (test_bit(FLG_TRANSPARENT,
						     &fifo->bch->Flags))
						confirm_Bsend(fifo->bch);
					tx_skb = fifo->bch->tx_skb;
				}
			}
		}
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			if (debug & DEBUG_HW)
				printk(KERN_DEBUG
				       "%s: %s: error submitting ISO URB: %d \n",
				       hw->name, __func__, errcode);
		}

		/*
		 * abuse DChannel tx iso completion to trigger NT mode state
		 * changes tx_iso_complete is assumed to be called every
		 * fifo->intervall (ms)
		 */
		if ((fifon == HFCUSB_D_TX) && (hw->protocol == ISDN_P_NT_S0)
		    && (hw->timers & NT_ACTIVATION_TIMER)) {
			if ((--hw->nt_timer) < 0)
				schedule_event(&hw->dch, FLG_PHCHANGE);
		}

	} else {
		if (status && (debug & DBG_HFC_URB_ERROR))
			printk(KERN_DEBUG "%s: %s: urb->status %s (%i)"
			       "fifonum=%d\n",
			       hw->name, __func__,
			       symbolic(urb_errlist, status), status, fifon);
	}
	spin_unlock(&hw->lock);
}
1403 | |||
1404 | /* | ||
1405 | * allocs urbs and start isoc transfer with two pending urbs to avoid | ||
1406 | * gaps in the transfer chain | ||
1407 | */ | ||
1408 | static int | ||
1409 | start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb, | ||
1410 | usb_complete_t complete, int packet_size) | ||
1411 | { | ||
1412 | struct hfcsusb *hw = fifo->hw; | ||
1413 | int i, k, errcode; | ||
1414 | |||
1415 | if (debug) | ||
1416 | printk(KERN_DEBUG "%s: %s: fifo %i\n", | ||
1417 | hw->name, __func__, fifo->fifonum); | ||
1418 | |||
1419 | /* allocate Memory for Iso out Urbs */ | ||
1420 | for (i = 0; i < 2; i++) { | ||
1421 | if (!(fifo->iso[i].urb)) { | ||
1422 | fifo->iso[i].urb = | ||
1423 | usb_alloc_urb(num_packets_per_urb, GFP_KERNEL); | ||
1424 | if (!(fifo->iso[i].urb)) { | ||
1425 | printk(KERN_DEBUG | ||
1426 | "%s: %s: alloc urb for fifo %i failed", | ||
1427 | hw->name, __func__, fifo->fifonum); | ||
1428 | } | ||
1429 | fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; | ||
1430 | fifo->iso[i].indx = i; | ||
1431 | |||
1432 | /* Init the first iso */ | ||
1433 | if (ISO_BUFFER_SIZE >= | ||
1434 | (fifo->usb_packet_maxlen * | ||
1435 | num_packets_per_urb)) { | ||
1436 | fill_isoc_urb(fifo->iso[i].urb, | ||
1437 | fifo->hw->dev, fifo->pipe, | ||
1438 | fifo->iso[i].buffer, | ||
1439 | num_packets_per_urb, | ||
1440 | fifo->usb_packet_maxlen, | ||
1441 | fifo->intervall, complete, | ||
1442 | &fifo->iso[i]); | ||
1443 | memset(fifo->iso[i].buffer, 0, | ||
1444 | sizeof(fifo->iso[i].buffer)); | ||
1445 | |||
1446 | for (k = 0; k < num_packets_per_urb; k++) { | ||
1447 | fifo->iso[i].urb-> | ||
1448 | iso_frame_desc[k].offset = | ||
1449 | k * packet_size; | ||
1450 | fifo->iso[i].urb-> | ||
1451 | iso_frame_desc[k].length = | ||
1452 | packet_size; | ||
1453 | } | ||
1454 | } else { | ||
1455 | printk(KERN_DEBUG | ||
1456 | "%s: %s: ISO Buffer size to small!\n", | ||
1457 | hw->name, __func__); | ||
1458 | } | ||
1459 | } | ||
1460 | fifo->bit_line = BITLINE_INF; | ||
1461 | |||
1462 | errcode = usb_submit_urb(fifo->iso[i].urb, GFP_KERNEL); | ||
1463 | fifo->active = (errcode >= 0) ? 1 : 0; | ||
1464 | fifo->stop_gracefull = 0; | ||
1465 | if (errcode < 0) { | ||
1466 | printk(KERN_DEBUG "%s: %s: %s URB nr:%d\n", | ||
1467 | hw->name, __func__, | ||
1468 | symbolic(urb_errlist, errcode), i); | ||
1469 | } | ||
1470 | } | ||
1471 | return fifo->active; | ||
1472 | } | ||
1473 | |||
1474 | static void | ||
1475 | stop_iso_gracefull(struct usb_fifo *fifo) | ||
1476 | { | ||
1477 | struct hfcsusb *hw = fifo->hw; | ||
1478 | int i, timeout; | ||
1479 | u_long flags; | ||
1480 | |||
1481 | for (i = 0; i < 2; i++) { | ||
1482 | spin_lock_irqsave(&hw->lock, flags); | ||
1483 | if (debug) | ||
1484 | printk(KERN_DEBUG "%s: %s for fifo %i.%i\n", | ||
1485 | hw->name, __func__, fifo->fifonum, i); | ||
1486 | fifo->stop_gracefull = 1; | ||
1487 | spin_unlock_irqrestore(&hw->lock, flags); | ||
1488 | } | ||
1489 | |||
1490 | for (i = 0; i < 2; i++) { | ||
1491 | timeout = 3; | ||
1492 | while (fifo->stop_gracefull && timeout--) | ||
1493 | schedule_timeout_interruptible((HZ/1000)*16); | ||
1494 | if (debug && fifo->stop_gracefull) | ||
1495 | printk(KERN_DEBUG "%s: ERROR %s for fifo %i.%i\n", | ||
1496 | hw->name, __func__, fifo->fifonum, i); | ||
1497 | } | ||
1498 | } | ||
1499 | |||
1500 | static void | ||
1501 | stop_int_gracefull(struct usb_fifo *fifo) | ||
1502 | { | ||
1503 | struct hfcsusb *hw = fifo->hw; | ||
1504 | int timeout; | ||
1505 | u_long flags; | ||
1506 | |||
1507 | spin_lock_irqsave(&hw->lock, flags); | ||
1508 | if (debug) | ||
1509 | printk(KERN_DEBUG "%s: %s for fifo %i\n", | ||
1510 | hw->name, __func__, fifo->fifonum); | ||
1511 | fifo->stop_gracefull = 1; | ||
1512 | spin_unlock_irqrestore(&hw->lock, flags); | ||
1513 | |||
1514 | timeout = 3; | ||
1515 | while (fifo->stop_gracefull && timeout--) | ||
1516 | schedule_timeout_interruptible((HZ/1000)*3); | ||
1517 | if (debug && fifo->stop_gracefull) | ||
1518 | printk(KERN_DEBUG "%s: ERROR %s for fifo %i\n", | ||
1519 | hw->name, __func__, fifo->fifonum); | ||
1520 | } | ||
1521 | |||
1522 | /* start the interrupt transfer for the given fifo */ | ||
1523 | static void | ||
1524 | start_int_fifo(struct usb_fifo *fifo) | ||
1525 | { | ||
1526 | struct hfcsusb *hw = fifo->hw; | ||
1527 | int errcode; | ||
1528 | |||
1529 | if (debug) | ||
1530 | printk(KERN_DEBUG "%s: %s: INT IN fifo:%d\n", | ||
1531 | hw->name, __func__, fifo->fifonum); | ||
1532 | |||
1533 | if (!fifo->urb) { | ||
1534 | fifo->urb = usb_alloc_urb(0, GFP_KERNEL); | ||
1535 | if (!fifo->urb) | ||
1536 | return; | ||
1537 | } | ||
1538 | usb_fill_int_urb(fifo->urb, fifo->hw->dev, fifo->pipe, | ||
1539 | fifo->buffer, fifo->usb_packet_maxlen, | ||
1540 | (usb_complete_t)rx_int_complete, fifo, fifo->intervall); | ||
1541 | fifo->active = 1; | ||
1542 | fifo->stop_gracefull = 0; | ||
1543 | errcode = usb_submit_urb(fifo->urb, GFP_KERNEL); | ||
1544 | if (errcode) { | ||
1545 | printk(KERN_DEBUG "%s: %s: submit URB: status:%i\n", | ||
1546 | hw->name, __func__, errcode); | ||
1547 | fifo->active = 0; | ||
1548 | } | ||
1549 | } | ||
1550 | |||
1551 | static void | ||
1552 | setPortMode(struct hfcsusb *hw) | ||
1553 | { | ||
1554 | if (debug & DEBUG_HW) | ||
1555 | printk(KERN_DEBUG "%s: %s %s\n", hw->name, __func__, | ||
1556 | (hw->protocol == ISDN_P_TE_S0) ? "TE" : "NT"); | ||
1557 | |||
1558 | if (hw->protocol == ISDN_P_TE_S0) { | ||
1559 | write_reg(hw, HFCUSB_SCTRL, 0x40); | ||
1560 | write_reg(hw, HFCUSB_SCTRL_E, 0x00); | ||
1561 | write_reg(hw, HFCUSB_CLKDEL, CLKDEL_TE); | ||
1562 | write_reg(hw, HFCUSB_STATES, 3 | 0x10); | ||
1563 | write_reg(hw, HFCUSB_STATES, 3); | ||
1564 | } else { | ||
1565 | write_reg(hw, HFCUSB_SCTRL, 0x44); | ||
1566 | write_reg(hw, HFCUSB_SCTRL_E, 0x09); | ||
1567 | write_reg(hw, HFCUSB_CLKDEL, CLKDEL_NT); | ||
1568 | write_reg(hw, HFCUSB_STATES, 1 | 0x10); | ||
1569 | write_reg(hw, HFCUSB_STATES, 1); | ||
1570 | } | ||
1571 | } | ||
1572 | |||
/* full chip reset and (re)initialization of registers and fifos */
static void
reset_hfcsusb(struct hfcsusb *hw)
{
	struct usb_fifo *fifo;
	int i;

	if (debug & DEBUG_HW)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	/* do Chip reset */
	write_reg(hw, HFCUSB_CIRM, 8);

	/* aux = output, reset off */
	write_reg(hw, HFCUSB_CIRM, 0x10);

	/* set USB_SIZE to match the wMaxPacketSize for INT or BULK transfers */
	write_reg(hw, HFCUSB_USB_SIZE, (hw->packet_size / 8) |
		  ((hw->packet_size / 8) << 4));

	/* set USB_SIZE_I to match the wMaxPacketSize for ISO transfers */
	write_reg(hw, HFCUSB_USB_SIZE_I, hw->iso_packet_size);

	/* enable PCM/GCI master mode */
	write_reg(hw, HFCUSB_MST_MODE1, 0);	/* set default values */
	write_reg(hw, HFCUSB_MST_MODE0, 1);	/* enable master mode */

	/* init the fifos */
	write_reg(hw, HFCUSB_F_THRES,
		  (HFCUSB_TX_THRESHOLD / 8) | ((HFCUSB_RX_THRESHOLD / 8) << 4));

	fifo = hw->fifos;
	for (i = 0; i < HFCUSB_NUM_FIFOS; i++) {
		write_reg(hw, HFCUSB_FIFO, i);	/* select the desired fifo */
		/* B-channel fifos (indexes up to B2_RX) are larger than the
		 * D/E-channel ones */
		fifo[i].max_size =
			(i <= HFCUSB_B2_RX) ? MAX_BCH_SIZE : MAX_DFRAME_LEN;
		fifo[i].last_urblen = 0;

		/* set 2 bit for D- & E-channel */
		write_reg(hw, HFCUSB_HDLC_PAR, ((i <= HFCUSB_B2_RX) ? 0 : 2));

		/* enable all fifos */
		if (i == HFCUSB_D_TX)
			/* D_TX: 0x08 in NT mode, 0x09 in TE mode */
			write_reg(hw, HFCUSB_CON_HDLC,
				  (hw->protocol == ISDN_P_NT_S0) ? 0x08 : 0x09);
		else
			write_reg(hw, HFCUSB_CON_HDLC, 0x08);
		write_reg(hw, HFCUSB_INC_RES_F, 2);	/* reset the fifo */
	}

	write_reg(hw, HFCUSB_SCTRL_R, 0);	/* disable both B receivers */
	handle_led(hw, LED_POWER_ON);
}
1625 | |||
/* start USB data pipes depending on device's endpoint configuration */
static void
hfcsusb_start_endpoint(struct hfcsusb *hw, int channel)
{
	/* quick check if endpoint already running */
	if ((channel == HFC_CHAN_D) && (hw->fifos[HFCUSB_D_RX].active))
		return;
	if ((channel == HFC_CHAN_B1) && (hw->fifos[HFCUSB_B1_RX].active))
		return;
	if ((channel == HFC_CHAN_B2) && (hw->fifos[HFCUSB_B2_RX].active))
		return;
	if ((channel == HFC_CHAN_E) && (hw->fifos[HFCUSB_PCM_RX].active))
		return;

	/* start rx endpoints using USB INT IN method */
	if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO)
		/* RX fifo of a channel sits at index channel*2 + 1 */
		start_int_fifo(hw->fifos + channel*2 + 1);

	/* start rx endpoints using USB ISO IN method */
	if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) {
		switch (channel) {
		case HFC_CHAN_D:
			start_isoc_chain(hw->fifos + HFCUSB_D_RX,
					 ISOC_PACKETS_D,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_E:
			start_isoc_chain(hw->fifos + HFCUSB_PCM_RX,
					 ISOC_PACKETS_D,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_B1:
			start_isoc_chain(hw->fifos + HFCUSB_B1_RX,
					 ISOC_PACKETS_B,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		case HFC_CHAN_B2:
			start_isoc_chain(hw->fifos + HFCUSB_B2_RX,
					 ISOC_PACKETS_B,
					 (usb_complete_t)rx_iso_complete,
					 16);
			break;
		}
	}

	/* start tx endpoints using USB ISO OUT method */
	/* NOTE(review): D_TX uses ISOC_PACKETS_B while B1_TX uses
	 * ISOC_PACKETS_D — this looks swapped and is harmless only if the
	 * two constants have the same value; verify against the header */
	switch (channel) {
	case HFC_CHAN_D:
		start_isoc_chain(hw->fifos + HFCUSB_D_TX,
				 ISOC_PACKETS_B,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	case HFC_CHAN_B1:
		start_isoc_chain(hw->fifos + HFCUSB_B1_TX,
				 ISOC_PACKETS_D,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	case HFC_CHAN_B2:
		start_isoc_chain(hw->fifos + HFCUSB_B2_TX,
				 ISOC_PACKETS_B,
				 (usb_complete_t)tx_iso_complete, 1);
		break;
	}
}
1693 | |||
1694 | /* stop USB data pipes dependand on device's endpoint configuration */ | ||
1695 | static void | ||
1696 | hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel) | ||
1697 | { | ||
1698 | /* quick check if endpoint currently running */ | ||
1699 | if ((channel == HFC_CHAN_D) && (!hw->fifos[HFCUSB_D_RX].active)) | ||
1700 | return; | ||
1701 | if ((channel == HFC_CHAN_B1) && (!hw->fifos[HFCUSB_B1_RX].active)) | ||
1702 | return; | ||
1703 | if ((channel == HFC_CHAN_B2) && (!hw->fifos[HFCUSB_B2_RX].active)) | ||
1704 | return; | ||
1705 | if ((channel == HFC_CHAN_E) && (!hw->fifos[HFCUSB_PCM_RX].active)) | ||
1706 | return; | ||
1707 | |||
1708 | /* rx endpoints using USB INT IN method */ | ||
1709 | if (hw->cfg_used == CNF_3INT3ISO || hw->cfg_used == CNF_4INT3ISO) | ||
1710 | stop_int_gracefull(hw->fifos + channel*2 + 1); | ||
1711 | |||
1712 | /* rx endpoints using USB ISO IN method */ | ||
1713 | if (hw->cfg_used == CNF_3ISO3ISO || hw->cfg_used == CNF_4ISO3ISO) | ||
1714 | stop_iso_gracefull(hw->fifos + channel*2 + 1); | ||
1715 | |||
1716 | /* tx endpoints using USB ISO OUT method */ | ||
1717 | if (channel != HFC_CHAN_E) | ||
1718 | stop_iso_gracefull(hw->fifos + channel*2); | ||
1719 | } | ||
1720 | |||
1721 | |||
1722 | /* Hardware Initialization */ | ||
1723 | int | ||
1724 | setup_hfcsusb(struct hfcsusb *hw) | ||
1725 | { | ||
1726 | int err; | ||
1727 | u_char b; | ||
1728 | |||
1729 | if (debug & DBG_HFC_CALL_TRACE) | ||
1730 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); | ||
1731 | |||
1732 | /* check the chip id */ | ||
1733 | if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) { | ||
1734 | printk(KERN_DEBUG "%s: %s: cannot read chip id\n", | ||
1735 | hw->name, __func__); | ||
1736 | return 1; | ||
1737 | } | ||
1738 | if (b != HFCUSB_CHIPID) { | ||
1739 | printk(KERN_DEBUG "%s: %s: Invalid chip id 0x%02x\n", | ||
1740 | hw->name, __func__, b); | ||
1741 | return 1; | ||
1742 | } | ||
1743 | |||
1744 | /* first set the needed config, interface and alternate */ | ||
1745 | err = usb_set_interface(hw->dev, hw->if_used, hw->alt_used); | ||
1746 | |||
1747 | hw->led_state = 0; | ||
1748 | |||
1749 | /* init the background machinery for control requests */ | ||
1750 | hw->ctrl_read.bRequestType = 0xc0; | ||
1751 | hw->ctrl_read.bRequest = 1; | ||
1752 | hw->ctrl_read.wLength = cpu_to_le16(1); | ||
1753 | hw->ctrl_write.bRequestType = 0x40; | ||
1754 | hw->ctrl_write.bRequest = 0; | ||
1755 | hw->ctrl_write.wLength = 0; | ||
1756 | usb_fill_control_urb(hw->ctrl_urb, hw->dev, hw->ctrl_out_pipe, | ||
1757 | (u_char *)&hw->ctrl_write, NULL, 0, | ||
1758 | (usb_complete_t)ctrl_complete, hw); | ||
1759 | |||
1760 | reset_hfcsusb(hw); | ||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | static void | ||
1765 | release_hw(struct hfcsusb *hw) | ||
1766 | { | ||
1767 | if (debug & DBG_HFC_CALL_TRACE) | ||
1768 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); | ||
1769 | |||
1770 | /* | ||
1771 | * stop all endpoints gracefully | ||
1772 | * TODO: mISDN_core should generate CLOSE_CHANNEL | ||
1773 | * signals after calling mISDN_unregister_device() | ||
1774 | */ | ||
1775 | hfcsusb_stop_endpoint(hw, HFC_CHAN_D); | ||
1776 | hfcsusb_stop_endpoint(hw, HFC_CHAN_B1); | ||
1777 | hfcsusb_stop_endpoint(hw, HFC_CHAN_B2); | ||
1778 | if (hw->fifos[HFCUSB_PCM_RX].pipe) | ||
1779 | hfcsusb_stop_endpoint(hw, HFC_CHAN_E); | ||
1780 | if (hw->protocol == ISDN_P_TE_S0) | ||
1781 | l1_event(hw->dch.l1, CLOSE_CHANNEL); | ||
1782 | |||
1783 | mISDN_unregister_device(&hw->dch.dev); | ||
1784 | mISDN_freebchannel(&hw->bch[1]); | ||
1785 | mISDN_freebchannel(&hw->bch[0]); | ||
1786 | mISDN_freedchannel(&hw->dch); | ||
1787 | |||
1788 | if (hw->ctrl_urb) { | ||
1789 | usb_kill_urb(hw->ctrl_urb); | ||
1790 | usb_free_urb(hw->ctrl_urb); | ||
1791 | hw->ctrl_urb = NULL; | ||
1792 | } | ||
1793 | |||
1794 | if (hw->intf) | ||
1795 | usb_set_intfdata(hw->intf, NULL); | ||
1796 | list_del(&hw->list); | ||
1797 | kfree(hw); | ||
1798 | hw = NULL; | ||
1799 | } | ||
1800 | |||
1801 | static void | ||
1802 | deactivate_bchannel(struct bchannel *bch) | ||
1803 | { | ||
1804 | struct hfcsusb *hw = bch->hw; | ||
1805 | u_long flags; | ||
1806 | |||
1807 | if (bch->debug & DEBUG_HW) | ||
1808 | printk(KERN_DEBUG "%s: %s: bch->nr(%i)\n", | ||
1809 | hw->name, __func__, bch->nr); | ||
1810 | |||
1811 | spin_lock_irqsave(&hw->lock, flags); | ||
1812 | if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) { | ||
1813 | dev_kfree_skb(bch->next_skb); | ||
1814 | bch->next_skb = NULL; | ||
1815 | } | ||
1816 | if (bch->tx_skb) { | ||
1817 | dev_kfree_skb(bch->tx_skb); | ||
1818 | bch->tx_skb = NULL; | ||
1819 | } | ||
1820 | bch->tx_idx = 0; | ||
1821 | if (bch->rx_skb) { | ||
1822 | dev_kfree_skb(bch->rx_skb); | ||
1823 | bch->rx_skb = NULL; | ||
1824 | } | ||
1825 | clear_bit(FLG_ACTIVE, &bch->Flags); | ||
1826 | clear_bit(FLG_TX_BUSY, &bch->Flags); | ||
1827 | spin_unlock_irqrestore(&hw->lock, flags); | ||
1828 | hfcsusb_setup_bch(bch, ISDN_P_NONE); | ||
1829 | hfcsusb_stop_endpoint(hw, bch->nr); | ||
1830 | } | ||
1831 | |||
1832 | /* | ||
1833 | * Layer 1 B-channel hardware access | ||
1834 | */ | ||
1835 | static int | ||
1836 | hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg) | ||
1837 | { | ||
1838 | struct bchannel *bch = container_of(ch, struct bchannel, ch); | ||
1839 | int ret = -EINVAL; | ||
1840 | |||
1841 | if (bch->debug & DEBUG_HW) | ||
1842 | printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg); | ||
1843 | |||
1844 | switch (cmd) { | ||
1845 | case HW_TESTRX_RAW: | ||
1846 | case HW_TESTRX_HDLC: | ||
1847 | case HW_TESTRX_OFF: | ||
1848 | ret = -EINVAL; | ||
1849 | break; | ||
1850 | |||
1851 | case CLOSE_CHANNEL: | ||
1852 | test_and_clear_bit(FLG_OPEN, &bch->Flags); | ||
1853 | if (test_bit(FLG_ACTIVE, &bch->Flags)) | ||
1854 | deactivate_bchannel(bch); | ||
1855 | ch->protocol = ISDN_P_NONE; | ||
1856 | ch->peer = NULL; | ||
1857 | module_put(THIS_MODULE); | ||
1858 | ret = 0; | ||
1859 | break; | ||
1860 | case CONTROL_CHANNEL: | ||
1861 | ret = channel_bctrl(bch, arg); | ||
1862 | break; | ||
1863 | default: | ||
1864 | printk(KERN_WARNING "%s: unknown prim(%x)\n", | ||
1865 | __func__, cmd); | ||
1866 | } | ||
1867 | return ret; | ||
1868 | } | ||
1869 | |||
/* wire up one hfcsusb device instance: init D/B/E channels, probe the
 * hardware, and register the device with the mISDN core.
 * Returns 0 on success or a non-zero error; on failure the channels are
 * freed and hw is kfree'd (caller must not touch hw afterwards). */
static int
setup_instance(struct hfcsusb *hw, struct device *parent)
{
	u_long flags;
	int err, i;

	if (debug & DBG_HFC_CALL_TRACE)
		printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);

	spin_lock_init(&hw->ctrl_lock);
	spin_lock_init(&hw->lock);

	/* D-channel setup: supports both TE and NT S0 protocols */
	mISDN_initdchannel(&hw->dch, MAX_DFRAME_LEN_L1, ph_state);
	hw->dch.debug = debug & 0xFFFF;
	hw->dch.hw = hw;
	hw->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
	hw->dch.dev.D.send = hfcusb_l2l1D;
	hw->dch.dev.D.ctrl = hfc_dctrl;

	/* enable E-Channel logging */
	if (hw->fifos[HFCUSB_PCM_RX].pipe)
		mISDN_initdchannel(&hw->ech, MAX_DFRAME_LEN_L1, NULL);

	/* two B-channels, RAW and HDLC capable */
	hw->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	hw->dch.dev.nrbchan = 2;
	for (i = 0; i < 2; i++) {
		hw->bch[i].nr = i + 1;
		set_channelmap(i + 1, hw->dch.dev.channelmap);
		hw->bch[i].debug = debug;
		mISDN_initbchannel(&hw->bch[i], MAX_DATA_MEM);
		hw->bch[i].hw = hw;
		hw->bch[i].ch.send = hfcusb_l2l1B;
		hw->bch[i].ch.ctrl = hfc_bctrl;
		hw->bch[i].ch.nr = i + 1;
		list_add(&hw->bch[i].ch.list, &hw->dch.dev.bchannels);
	}

	/* map each USB fifo to the channel it serves */
	hw->fifos[HFCUSB_B1_TX].bch = &hw->bch[0];
	hw->fifos[HFCUSB_B1_RX].bch = &hw->bch[0];
	hw->fifos[HFCUSB_B2_TX].bch = &hw->bch[1];
	hw->fifos[HFCUSB_B2_RX].bch = &hw->bch[1];
	hw->fifos[HFCUSB_D_TX].dch = &hw->dch;
	hw->fifos[HFCUSB_D_RX].dch = &hw->dch;
	hw->fifos[HFCUSB_PCM_RX].ech = &hw->ech;
	hw->fifos[HFCUSB_PCM_TX].ech = &hw->ech;

	err = setup_hfcsusb(hw);
	if (err)
		goto out;

	snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s.%d", DRIVER_NAME,
		 hfcsusb_cnt + 1);
	printk(KERN_INFO "%s: registered as '%s'\n",
	       DRIVER_NAME, hw->name);

	err = mISDN_register_device(&hw->dch.dev, parent, hw->name);
	if (err)
		goto out;

	/* registration succeeded: add instance to the global device list */
	hfcsusb_cnt++;
	write_lock_irqsave(&HFClock, flags);
	list_add_tail(&hw->list, &HFClist);
	write_unlock_irqrestore(&HFClock, flags);
	return 0;

out:
	/* error path: undo channel init and free the instance */
	mISDN_freebchannel(&hw->bch[1]);
	mISDN_freebchannel(&hw->bch[0]);
	mISDN_freedchannel(&hw->dch);
	kfree(hw);
	return err;
}
1943 | |||
1944 | static int | ||
1945 | hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id) | ||
1946 | { | ||
1947 | struct hfcsusb *hw; | ||
1948 | struct usb_device *dev = interface_to_usbdev(intf); | ||
1949 | struct usb_host_interface *iface = intf->cur_altsetting; | ||
1950 | struct usb_host_interface *iface_used = NULL; | ||
1951 | struct usb_host_endpoint *ep; | ||
1952 | struct hfcsusb_vdata *driver_info; | ||
1953 | int ifnum = iface->desc.bInterfaceNumber, i, idx, alt_idx, | ||
1954 | probe_alt_setting, vend_idx, cfg_used, *vcf, attr, cfg_found, | ||
1955 | ep_addr, cmptbl[16], small_match, iso_packet_size, packet_size, | ||
1956 | alt_used = 0; | ||
1957 | |||
1958 | vend_idx = 0xffff; | ||
1959 | for (i = 0; hfcsusb_idtab[i].idVendor; i++) { | ||
1960 | if ((le16_to_cpu(dev->descriptor.idVendor) | ||
1961 | == hfcsusb_idtab[i].idVendor) && | ||
1962 | (le16_to_cpu(dev->descriptor.idProduct) | ||
1963 | == hfcsusb_idtab[i].idProduct)) { | ||
1964 | vend_idx = i; | ||
1965 | continue; | ||
1966 | } | ||
1967 | } | ||
1968 | |||
1969 | printk(KERN_DEBUG | ||
1970 | "%s: interface(%d) actalt(%d) minor(%d) vend_idx(%d)\n", | ||
1971 | __func__, ifnum, iface->desc.bAlternateSetting, | ||
1972 | intf->minor, vend_idx); | ||
1973 | |||
1974 | if (vend_idx == 0xffff) { | ||
1975 | printk(KERN_WARNING | ||
1976 | "%s: no valid vendor found in USB descriptor\n", | ||
1977 | __func__); | ||
1978 | return -EIO; | ||
1979 | } | ||
1980 | /* if vendor and product ID is OK, start probing alternate settings */ | ||
1981 | alt_idx = 0; | ||
1982 | small_match = -1; | ||
1983 | |||
1984 | /* default settings */ | ||
1985 | iso_packet_size = 16; | ||
1986 | packet_size = 64; | ||
1987 | |||
1988 | while (alt_idx < intf->num_altsetting) { | ||
1989 | iface = intf->altsetting + alt_idx; | ||
1990 | probe_alt_setting = iface->desc.bAlternateSetting; | ||
1991 | cfg_used = 0; | ||
1992 | |||
1993 | while (validconf[cfg_used][0]) { | ||
1994 | cfg_found = 1; | ||
1995 | vcf = validconf[cfg_used]; | ||
1996 | ep = iface->endpoint; | ||
1997 | memcpy(cmptbl, vcf, 16 * sizeof(int)); | ||
1998 | |||
1999 | /* check for all endpoints in this alternate setting */ | ||
2000 | for (i = 0; i < iface->desc.bNumEndpoints; i++) { | ||
2001 | ep_addr = ep->desc.bEndpointAddress; | ||
2002 | |||
2003 | /* get endpoint base */ | ||
2004 | idx = ((ep_addr & 0x7f) - 1) * 2; | ||
2005 | if (ep_addr & 0x80) | ||
2006 | idx++; | ||
2007 | attr = ep->desc.bmAttributes; | ||
2008 | |||
2009 | if (cmptbl[idx] != EP_NOP) { | ||
2010 | if (cmptbl[idx] == EP_NUL) | ||
2011 | cfg_found = 0; | ||
2012 | if (attr == USB_ENDPOINT_XFER_INT | ||
2013 | && cmptbl[idx] == EP_INT) | ||
2014 | cmptbl[idx] = EP_NUL; | ||
2015 | if (attr == USB_ENDPOINT_XFER_BULK | ||
2016 | && cmptbl[idx] == EP_BLK) | ||
2017 | cmptbl[idx] = EP_NUL; | ||
2018 | if (attr == USB_ENDPOINT_XFER_ISOC | ||
2019 | && cmptbl[idx] == EP_ISO) | ||
2020 | cmptbl[idx] = EP_NUL; | ||
2021 | |||
2022 | if (attr == USB_ENDPOINT_XFER_INT && | ||
2023 | ep->desc.bInterval < vcf[17]) { | ||
2024 | cfg_found = 0; | ||
2025 | } | ||
2026 | } | ||
2027 | ep++; | ||
2028 | } | ||
2029 | |||
2030 | for (i = 0; i < 16; i++) | ||
2031 | if (cmptbl[i] != EP_NOP && cmptbl[i] != EP_NUL) | ||
2032 | cfg_found = 0; | ||
2033 | |||
2034 | if (cfg_found) { | ||
2035 | if (small_match < cfg_used) { | ||
2036 | small_match = cfg_used; | ||
2037 | alt_used = probe_alt_setting; | ||
2038 | iface_used = iface; | ||
2039 | } | ||
2040 | } | ||
2041 | cfg_used++; | ||
2042 | } | ||
2043 | alt_idx++; | ||
2044 | } /* (alt_idx < intf->num_altsetting) */ | ||
2045 | |||
2046 | /* not found a valid USB Ta Endpoint config */ | ||
2047 | if (small_match == -1) | ||
2048 | return -EIO; | ||
2049 | |||
2050 | iface = iface_used; | ||
2051 | hw = kzalloc(sizeof(struct hfcsusb), GFP_KERNEL); | ||
2052 | if (!hw) | ||
2053 | return -ENOMEM; /* got no mem */ | ||
2054 | snprintf(hw->name, MISDN_MAX_IDLEN - 1, "%s", DRIVER_NAME); | ||
2055 | |||
2056 | ep = iface->endpoint; | ||
2057 | vcf = validconf[small_match]; | ||
2058 | |||
2059 | for (i = 0; i < iface->desc.bNumEndpoints; i++) { | ||
2060 | struct usb_fifo *f; | ||
2061 | |||
2062 | ep_addr = ep->desc.bEndpointAddress; | ||
2063 | /* get endpoint base */ | ||
2064 | idx = ((ep_addr & 0x7f) - 1) * 2; | ||
2065 | if (ep_addr & 0x80) | ||
2066 | idx++; | ||
2067 | f = &hw->fifos[idx & 7]; | ||
2068 | |||
2069 | /* init Endpoints */ | ||
2070 | if (vcf[idx] == EP_NOP || vcf[idx] == EP_NUL) { | ||
2071 | ep++; | ||
2072 | continue; | ||
2073 | } | ||
2074 | switch (ep->desc.bmAttributes) { | ||
2075 | case USB_ENDPOINT_XFER_INT: | ||
2076 | f->pipe = usb_rcvintpipe(dev, | ||
2077 | ep->desc.bEndpointAddress); | ||
2078 | f->usb_transfer_mode = USB_INT; | ||
2079 | packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); | ||
2080 | break; | ||
2081 | case USB_ENDPOINT_XFER_BULK: | ||
2082 | if (ep_addr & 0x80) | ||
2083 | f->pipe = usb_rcvbulkpipe(dev, | ||
2084 | ep->desc.bEndpointAddress); | ||
2085 | else | ||
2086 | f->pipe = usb_sndbulkpipe(dev, | ||
2087 | ep->desc.bEndpointAddress); | ||
2088 | f->usb_transfer_mode = USB_BULK; | ||
2089 | packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); | ||
2090 | break; | ||
2091 | case USB_ENDPOINT_XFER_ISOC: | ||
2092 | if (ep_addr & 0x80) | ||
2093 | f->pipe = usb_rcvisocpipe(dev, | ||
2094 | ep->desc.bEndpointAddress); | ||
2095 | else | ||
2096 | f->pipe = usb_sndisocpipe(dev, | ||
2097 | ep->desc.bEndpointAddress); | ||
2098 | f->usb_transfer_mode = USB_ISOC; | ||
2099 | iso_packet_size = le16_to_cpu(ep->desc.wMaxPacketSize); | ||
2100 | break; | ||
2101 | default: | ||
2102 | f->pipe = 0; | ||
2103 | } | ||
2104 | |||
2105 | if (f->pipe) { | ||
2106 | f->fifonum = idx & 7; | ||
2107 | f->hw = hw; | ||
2108 | f->usb_packet_maxlen = | ||
2109 | le16_to_cpu(ep->desc.wMaxPacketSize); | ||
2110 | f->intervall = ep->desc.bInterval; | ||
2111 | } | ||
2112 | ep++; | ||
2113 | } | ||
2114 | hw->dev = dev; /* save device */ | ||
2115 | hw->if_used = ifnum; /* save used interface */ | ||
2116 | hw->alt_used = alt_used; /* and alternate config */ | ||
2117 | hw->ctrl_paksize = dev->descriptor.bMaxPacketSize0; /* control size */ | ||
2118 | hw->cfg_used = vcf[16]; /* store used config */ | ||
2119 | hw->vend_idx = vend_idx; /* store found vendor */ | ||
2120 | hw->packet_size = packet_size; | ||
2121 | hw->iso_packet_size = iso_packet_size; | ||
2122 | |||
2123 | /* create the control pipes needed for register access */ | ||
2124 | hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0); | ||
2125 | hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0); | ||
2126 | hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); | ||
2127 | |||
2128 | driver_info = | ||
2129 | (struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info; | ||
2130 | printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n", | ||
2131 | hw->name, __func__, driver_info->vend_name, | ||
2132 | conf_str[small_match], ifnum, alt_used); | ||
2133 | |||
2134 | if (setup_instance(hw, dev->dev.parent)) | ||
2135 | return -EIO; | ||
2136 | |||
2137 | hw->intf = intf; | ||
2138 | usb_set_intfdata(hw->intf, hw); | ||
2139 | return 0; | ||
2140 | } | ||
2141 | |||
2142 | /* function called when an active device is removed */ | ||
2143 | static void | ||
2144 | hfcsusb_disconnect(struct usb_interface *intf) | ||
2145 | { | ||
2146 | struct hfcsusb *hw = usb_get_intfdata(intf); | ||
2147 | struct hfcsusb *next; | ||
2148 | int cnt = 0; | ||
2149 | |||
2150 | printk(KERN_INFO "%s: device disconnected\n", hw->name); | ||
2151 | |||
2152 | handle_led(hw, LED_POWER_OFF); | ||
2153 | release_hw(hw); | ||
2154 | |||
2155 | list_for_each_entry_safe(hw, next, &HFClist, list) | ||
2156 | cnt++; | ||
2157 | if (!cnt) | ||
2158 | hfcsusb_cnt = 0; | ||
2159 | |||
2160 | usb_set_intfdata(intf, NULL); | ||
2161 | } | ||
2162 | |||
/* USB driver glue: dispatches probe/disconnect for all ids in hfcsusb_idtab */
static struct usb_driver hfcsusb_drv = {
	.name = DRIVER_NAME,
	.id_table = hfcsusb_idtab,
	.probe = hfcsusb_probe,
	.disconnect = hfcsusb_disconnect,
};
2169 | |||
2170 | static int __init | ||
2171 | hfcsusb_init(void) | ||
2172 | { | ||
2173 | printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n", | ||
2174 | hfcsusb_rev, debug, poll); | ||
2175 | |||
2176 | if (usb_register(&hfcsusb_drv)) { | ||
2177 | printk(KERN_INFO DRIVER_NAME | ||
2178 | ": Unable to register hfcsusb module at usb stack\n"); | ||
2179 | return -ENODEV; | ||
2180 | } | ||
2181 | |||
2182 | return 0; | ||
2183 | } | ||
2184 | |||
2185 | static void __exit | ||
2186 | hfcsusb_cleanup(void) | ||
2187 | { | ||
2188 | if (debug & DBG_HFC_CALL_TRACE) | ||
2189 | printk(KERN_INFO DRIVER_NAME ": %s\n", __func__); | ||
2190 | |||
2191 | /* unregister Hardware */ | ||
2192 | usb_deregister(&hfcsusb_drv); /* release our driver */ | ||
2193 | } | ||
2194 | |||
2195 | module_init(hfcsusb_init); | ||
2196 | module_exit(hfcsusb_cleanup); | ||
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h new file mode 100644 index 000000000000..098486b8e8d2 --- /dev/null +++ b/drivers/isdn/hardware/mISDN/hfcsusb.h | |||
@@ -0,0 +1,418 @@ | |||
1 | /* | ||
2 | * hfcsusb.h, HFC-S USB mISDN driver | ||
3 | */ | ||
4 | |||
5 | #ifndef __HFCSUSB_H__ | ||
6 | #define __HFCSUSB_H__ | ||
7 | |||
8 | |||
9 | #define DRIVER_NAME "HFC-S_USB" | ||
10 | |||
11 | #define DBG_HFC_CALL_TRACE 0x00010000 | ||
12 | #define DBG_HFC_FIFO_VERBOSE 0x00020000 | ||
13 | #define DBG_HFC_USB_VERBOSE 0x00100000 | ||
14 | #define DBG_HFC_URB_INFO 0x00200000 | ||
15 | #define DBG_HFC_URB_ERROR 0x00400000 | ||
16 | |||
17 | #define DEFAULT_TRANSP_BURST_SZ 128 | ||
18 | |||
19 | #define HFC_CTRL_TIMEOUT 20 /* 5ms timeout writing/reading regs */ | ||
20 | #define CLKDEL_TE 0x0f /* CLKDEL in TE mode */ | ||
21 | #define CLKDEL_NT 0x6c /* CLKDEL in NT mode */ | ||
22 | |||
23 | /* hfcsusb Layer1 commands */ | ||
24 | #define HFC_L1_ACTIVATE_TE 1 | ||
25 | #define HFC_L1_ACTIVATE_NT 2 | ||
26 | #define HFC_L1_DEACTIVATE_NT 3 | ||
27 | #define HFC_L1_FORCE_DEACTIVATE_TE 4 | ||
28 | |||
29 | /* cmd FLAGS in HFCUSB_STATES register */ | ||
30 | #define HFCUSB_LOAD_STATE 0x10 | ||
31 | #define HFCUSB_ACTIVATE 0x20 | ||
32 | #define HFCUSB_DO_ACTION 0x40 | ||
33 | #define HFCUSB_NT_G2_G3 0x80 | ||
34 | |||
35 | /* timers */ | ||
36 | #define NT_ACTIVATION_TIMER 0x01 /* enables NT mode activation Timer */ | ||
37 | #define NT_T1_COUNT 10 | ||
38 | |||
39 | #define MAX_BCH_SIZE 2048 /* allowed B-channel packet size */ | ||
40 | |||
41 | #define HFCUSB_RX_THRESHOLD 64 /* threshold for fifo report bit rx */ | ||
42 | #define HFCUSB_TX_THRESHOLD 96 /* threshold for fifo report bit tx */ | ||
43 | |||
44 | #define HFCUSB_CHIP_ID 0x16 /* Chip ID register index */ | ||
45 | #define HFCUSB_CIRM 0x00 /* cirm register index */ | ||
46 | #define HFCUSB_USB_SIZE 0x07 /* int length register */ | ||
47 | #define HFCUSB_USB_SIZE_I 0x06 /* iso length register */ | ||
48 | #define HFCUSB_F_CROSS 0x0b /* bit order register */ | ||
49 | #define HFCUSB_CLKDEL 0x37 /* bit delay register */ | ||
50 | #define HFCUSB_CON_HDLC 0xfa /* channel connect register */ | ||
51 | #define HFCUSB_HDLC_PAR 0xfb | ||
52 | #define HFCUSB_SCTRL 0x31 /* S-bus control register (tx) */ | ||
53 | #define HFCUSB_SCTRL_E 0x32 /* same for E and special funcs */ | ||
54 | #define HFCUSB_SCTRL_R 0x33 /* S-bus control register (rx) */ | ||
55 | #define HFCUSB_F_THRES 0x0c /* threshold register */ | ||
56 | #define HFCUSB_FIFO 0x0f /* fifo select register */ | ||
57 | #define HFCUSB_F_USAGE 0x1a /* fifo usage register */ | ||
58 | #define HFCUSB_MST_MODE0 0x14 | ||
59 | #define HFCUSB_MST_MODE1 0x15 | ||
60 | #define HFCUSB_P_DATA 0x1f | ||
61 | #define HFCUSB_INC_RES_F 0x0e | ||
62 | #define HFCUSB_B1_SSL 0x20 | ||
63 | #define HFCUSB_B2_SSL 0x21 | ||
64 | #define HFCUSB_B1_RSL 0x24 | ||
65 | #define HFCUSB_B2_RSL 0x25 | ||
66 | #define HFCUSB_STATES 0x30 | ||
67 | |||
68 | |||
69 | #define HFCUSB_CHIPID 0x40 /* ID value of HFC-S USB */ | ||
70 | |||
71 | /* fifo registers */ | ||
72 | #define HFCUSB_NUM_FIFOS 8 /* maximum number of fifos */ | ||
73 | #define HFCUSB_B1_TX 0 /* index for B1 transmit bulk/int */ | ||
74 | #define HFCUSB_B1_RX 1 /* index for B1 receive bulk/int */ | ||
75 | #define HFCUSB_B2_TX 2 | ||
76 | #define HFCUSB_B2_RX 3 | ||
77 | #define HFCUSB_D_TX 4 | ||
78 | #define HFCUSB_D_RX 5 | ||
79 | #define HFCUSB_PCM_TX 6 | ||
80 | #define HFCUSB_PCM_RX 7 | ||
81 | |||
82 | |||
83 | #define USB_INT 0 | ||
84 | #define USB_BULK 1 | ||
85 | #define USB_ISOC 2 | ||
86 | |||
87 | #define ISOC_PACKETS_D 8 | ||
88 | #define ISOC_PACKETS_B 8 | ||
89 | #define ISO_BUFFER_SIZE 128 | ||
90 | |||
/* defines how much ISO packets are handled in one URB;
 * indexed by fifo number (HFCUSB_B1_TX..HFCUSB_PCM_RX, see above) */
static int iso_packets[8] =
{ ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B, ISOC_PACKETS_B,
  ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D, ISOC_PACKETS_D
};
96 | |||
97 | |||
98 | /* Fifo flow Control for TX ISO */ | ||
99 | #define SINK_MAX 68 | ||
100 | #define SINK_MIN 48 | ||
101 | #define SINK_DMIN 12 | ||
102 | #define SINK_DMAX 18 | ||
103 | #define BITLINE_INF (-96*8) | ||
104 | |||
/* HFC-S USB register access by control URBs */
106 | #define write_reg_atomic(a, b, c) \ | ||
107 | usb_control_msg((a)->dev, (a)->ctrl_out_pipe, 0, 0x40, (c), (b), \ | ||
108 | 0, 0, HFC_CTRL_TIMEOUT) | ||
109 | #define read_reg_atomic(a, b, c) \ | ||
110 | usb_control_msg((a)->dev, (a)->ctrl_in_pipe, 1, 0xC0, 0, (b), (c), \ | ||
111 | 1, HFC_CTRL_TIMEOUT) | ||
112 | #define HFC_CTRL_BUFSIZE 64 | ||
113 | |||
114 | struct ctrl_buf { | ||
115 | __u8 hfcs_reg; /* register number */ | ||
116 | __u8 reg_val; /* value to be written (or read) */ | ||
117 | }; | ||
118 | |||
/*
 * URB error codes
 * Used to represent a list of values and their respective symbolic names
 */
struct hfcusb_symbolic_list {
	const int num;
	const char *name;
};

static struct hfcusb_symbolic_list urb_errlist[] = {
	{-ENOMEM, "No memory for allocation of internal structures"},
	{-ENOSPC, "The host controller's bandwidth is already consumed"},
	{-ENOENT, "URB was canceled by unlink_urb"},
	{-EXDEV, "ISO transfer only partially completed"},
	{-EAGAIN, "Too much scheduled for the future"},
	{-ENXIO, "URB already queued"},
	{-EFBIG, "Too many ISO frames requested"},
	{-ENOSR, "Buffer error (overrun)"},
	{-EPIPE, "Specified endpoint is stalled (device not responding)"},
	{-EOVERFLOW, "Babble (bad cable?)"},
	{-EPROTO, "Bit-stuff error (bad cable?)"},
	{-EILSEQ, "CRC/Timeout"},
	{-ETIMEDOUT, "NAK (device does not respond)"},
	{-ESHUTDOWN, "Device unplugged"},
	{-1, NULL}
};

/*
 * Map a numeric code to its symbolic name from the given list,
 * used to turn URB status codes into readable log output.
 */
static inline const char *
symbolic(struct hfcusb_symbolic_list list[], const int num)
{
	int i;

	for (i = 0; list[i].name != NULL; i++)
		if (list[i].num == num)
			return list[i].name;
	return "<unknown USB Error>";
}
155 | |||
156 | /* USB descriptor need to contain one of the following EndPoint combination: */ | ||
157 | #define CNF_4INT3ISO 1 /* 4 INT IN, 3 ISO OUT */ | ||
158 | #define CNF_3INT3ISO 2 /* 3 INT IN, 3 ISO OUT */ | ||
159 | #define CNF_4ISO3ISO 3 /* 4 ISO IN, 3 ISO OUT */ | ||
160 | #define CNF_3ISO3ISO 4 /* 3 ISO IN, 3 ISO OUT */ | ||
161 | |||
162 | #define EP_NUL 1 /* Endpoint at this position not allowed */ | ||
163 | #define EP_NOP 2 /* all type of endpoints allowed at this position */ | ||
164 | #define EP_ISO 3 /* Isochron endpoint mandatory at this position */ | ||
165 | #define EP_BLK 4 /* Bulk endpoint mandatory at this position */ | ||
166 | #define EP_INT 5 /* Interrupt endpoint mandatory at this position */ | ||
167 | |||
168 | #define HFC_CHAN_B1 0 | ||
169 | #define HFC_CHAN_B2 1 | ||
170 | #define HFC_CHAN_D 2 | ||
171 | #define HFC_CHAN_E 3 | ||
172 | |||
173 | |||
174 | /* | ||
 * List of all supported endpoint configuration sets, used to find the
 * best matching endpoint configuration within a device's USB descriptor.
177 | * We need at least 3 RX endpoints, and 3 TX endpoints, either | ||
178 | * INT-in and ISO-out, or ISO-in and ISO-out) | ||
179 | * with 4 RX endpoints even E-Channel logging is possible | ||
180 | */ | ||
/*
 * Each row: slots [0..15] hold per-endpoint expectations (EP_NUL/EP_NOP/
 * EP_ISO/EP_BLK/EP_INT), slot [16] is the CNF_* config id stored in
 * hw->cfg_used, slot [17] is the minimal bInterval accepted for INT
 * endpoints, slot [18] is an extra flag (presumably E-channel
 * capability of the 4-IN layouts -- TODO confirm against users).
 */
static int
validconf[][19] = {
	/* INT in, ISO out config */
	{EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NOP, EP_INT,
	 EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
	 CNF_4INT3ISO, 2, 1},
	{EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_INT, EP_NUL, EP_NUL,
	 EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_ISO, EP_NUL, EP_NUL, EP_NUL,
	 CNF_3INT3ISO, 2, 0},
	/* ISO in, ISO out config */
	{EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP, EP_NOP,
	 EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NOP, EP_ISO,
	 CNF_4ISO3ISO, 2, 1},
	{EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL, EP_NUL,
	 EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_ISO, EP_NUL, EP_NUL,
	 CNF_3ISO3ISO, 2, 0},
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} /* EOL element */
};
199 | |||
/* string description of chosen config; indexed by the matching
 * validconf[] row number (small_match in hfcsusb_probe) */
char *conf_str[] = {
	"4 Interrupt IN + 3 Isochron OUT",
	"3 Interrupt IN + 3 Isochron OUT",
	"4 Isochron IN + 3 Isochron OUT",
	"3 Isochron IN + 3 Isochron OUT"
};
207 | |||
208 | |||
209 | #define LED_OFF 0 /* no LED support */ | ||
210 | #define LED_SCHEME1 1 /* LED standard scheme */ | ||
211 | #define LED_SCHEME2 2 /* not used yet... */ | ||
212 | |||
213 | #define LED_POWER_ON 1 | ||
214 | #define LED_POWER_OFF 2 | ||
215 | #define LED_S0_ON 3 | ||
216 | #define LED_S0_OFF 4 | ||
217 | #define LED_B1_ON 5 | ||
218 | #define LED_B1_OFF 6 | ||
219 | #define LED_B1_DATA 7 | ||
220 | #define LED_B2_ON 8 | ||
221 | #define LED_B2_OFF 9 | ||
222 | #define LED_B2_DATA 10 | ||
223 | |||
224 | #define LED_NORMAL 0 /* LEDs are normal */ | ||
225 | #define LED_INVERTED 1 /* LEDs are inverted */ | ||
226 | |||
227 | /* time in ms to perform a Flashing LED when B-Channel has traffic */ | ||
228 | #define LED_TIME 250 | ||
229 | |||
230 | |||
231 | |||
232 | struct hfcsusb; | ||
233 | struct usb_fifo; | ||
234 | |||
/* one half of a fifo's ISO double buffer; each usb_fifo keeps two of
 * these (iso[2]) so that one URB is always pending */
struct iso_urb {
	struct urb *urb;
	__u8 buffer[ISO_BUFFER_SIZE];	/* buffer rx/tx USB URB data */
	struct usb_fifo *owner_fifo;	/* pointer to owner fifo */
	__u8 indx;			/* Fifo's ISO double buffer 0 or 1 ? */
#ifdef ISO_FRAME_START_DEBUG
	int start_frames[ISO_FRAME_START_RING_COUNT];
	__u8 iso_frm_strt_pos;		/* index in start_frame[] */
#endif
};
246 | |||
/* per-endpoint fifo state (interrupt, bulk or ISO transfer mode) */
struct usb_fifo {
	int fifonum;		/* fifo index attached to this structure */
	int active;		/* fifo is currently active */
	struct hfcsusb *hw;	/* pointer to main structure */
	int pipe;		/* address of endpoint */
	__u8 usb_packet_maxlen;	/* maximum length for usb transfer */
	unsigned int max_size;	/* maximum size of receive/send packet */
	__u8 intervall;		/* interrupt interval */
	struct urb *urb;	/* transfer structure for usb routines */
	__u8 buffer[128];	/* buffer USB INT OUT URB data */
	int bit_line;		/* how much bits are in the fifo? */

	__u8 usb_transfer_mode;	/* switched between ISO and INT */
	struct iso_urb	iso[2];	/* two urbs to have one always
				   one pending */

	struct dchannel *dch;	/* link to hfcsusb_t->dch */
	struct bchannel *bch;	/* link to hfcsusb_t->bch */
	struct dchannel *ech;	/* link to hfcsusb_t->ech, TODO: E-CHANNEL */
	int last_urblen;	/* remember length of last packet */
	__u8 stop_gracefull;	/* stops URB retransmission */
};
269 | |||
/* main per-device driver instance, allocated in hfcsusb_probe() */
struct hfcsusb {
	struct list_head list;
	struct dchannel dch;
	struct bchannel bch[2];
	struct dchannel ech; /* TODO : wait for struct echannel ;) */

	struct usb_device *dev;		/* our device */
	struct usb_interface *intf;	/* used interface */
	int if_used;			/* used interface number */
	int alt_used;			/* used alternate config */
	int cfg_used;			/* configuration index used */
	int vend_idx;			/* index in hfcsusb_idtab */
	int packet_size;		/* wMaxPacketSize picked at probe */
	int iso_packet_size;		/* ISO wMaxPacketSize from probe */
	struct usb_fifo fifos[HFCUSB_NUM_FIFOS];

	/* control pipe background handling */
	struct ctrl_buf ctrl_buff[HFC_CTRL_BUFSIZE];
	int ctrl_in_idx, ctrl_out_idx, ctrl_cnt;
	struct urb *ctrl_urb;
	struct usb_ctrlrequest ctrl_write;
	struct usb_ctrlrequest ctrl_read;
	int ctrl_paksize;
	int ctrl_in_pipe, ctrl_out_pipe;
	spinlock_t ctrl_lock; /* lock for ctrl */
	spinlock_t lock;

	__u8 threshold_mask;
	__u8 led_state;

	__u8 protocol;
	int nt_timer;
	int open;
	__u8 timers;
	__u8 initdone;
	char name[MISDN_MAX_IDLEN];	/* instance name used in log output */
};
307 | |||
/* private vendor specific data, referenced from the driver_info
 * member of each hfcsusb_idtab[] entry */
struct hfcsusb_vdata {
	__u8 led_scheme;	/* led display scheme */
	signed short led_bits[8];	/* array of 8 possible LED bitmask */
	char *vend_name;	/* device name */
};
314 | |||
315 | |||
316 | #define HFC_MAX_TE_LAYER1_STATE 8 | ||
317 | #define HFC_MAX_NT_LAYER1_STATE 4 | ||
318 | |||
/* human readable layer-1 state names, indexed by state number */
const char *HFC_TE_LAYER1_STATES[HFC_MAX_TE_LAYER1_STATE + 1] = {
	"TE F0 - Reset",
	"TE F1 - Reset",
	"TE F2 - Sensing",
	"TE F3 - Deactivated",
	"TE F4 - Awaiting signal",
	"TE F5 - Identifying input",
	"TE F6 - Synchronized",
	"TE F7 - Activated",
	"TE F8 - Lost framing",
};

/* same for NT mode */
const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = {
	"NT G0 - Reset",
	"NT G1 - Deactive",
	"NT G2 - Pending activation",
	"NT G3 - Active",
	"NT G4 - Pending deactivation",
};
338 | |||
/*
 * Supported devices.  Each driver_info points at a struct hfcsusb_vdata
 * carrying the LED scheme, the LED bitmasks and the product name that
 * hfcsusb_probe() prints on detection.
 */
static struct usb_device_id hfcsusb_idtab[] = {
	{
		USB_DEVICE(0x0959, 0x2bd0),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_OFF, {4, 0, 2, 1},
			 "ISDN USB TA (Cologne Chip HFC-S USB based)"}),
	},
	{
		USB_DEVICE(0x0675, 0x1688),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {1, 2, 0, 0},
			 "DrayTek miniVigor 128 USB ISDN TA"}),
	},
	{
		USB_DEVICE(0x07b0, 0x0007),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x80, -64, -32, -16},
			 "Billion tiny USB ISDN TA 128"}),
	},
	{
		USB_DEVICE(0x0742, 0x2008),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {4, 0, 2, 1},
			 "Stollmann USB TA"}),
	},
	{
		USB_DEVICE(0x0742, 0x2009),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {4, 0, 2, 1},
			 "Aceex USB ISDN TA"}),
	},
	{
		USB_DEVICE(0x0742, 0x200A),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {4, 0, 2, 1},
			 "OEM USB ISDN TA"}),
	},
	{
		USB_DEVICE(0x08e3, 0x0301),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {2, 0, 1, 4},
			 "Olitec USB RNIS"}),
	},
	{
		USB_DEVICE(0x07fa, 0x0846),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x80, -64, -32, -16},
			 "Bewan Modem RNIS USB"}),
	},
	{
		USB_DEVICE(0x07fa, 0x0847),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x80, -64, -32, -16},
			 "Djinn Numeris USB"}),
	},
	{
		USB_DEVICE(0x07b0, 0x0006),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x80, -64, -32, -16},
			 "Twister ISDN TA"}),
	},
	{
		USB_DEVICE(0x071d, 0x1005),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x02, 0, 0x01, 0x04},
			 "Eicon DIVA USB 4.0"}),
	},
	{
		USB_DEVICE(0x0586, 0x0102),
		.driver_info = (unsigned long) &((struct hfcsusb_vdata)
			{LED_SCHEME1, {0x88, -64, -32, -16},
			 "ZyXEL OMNI.NET USB II"}),
	},
	{ }
};
415 | |||
416 | MODULE_DEVICE_TABLE(usb, hfcsusb_idtab); | ||
417 | |||
418 | #endif /* __HFCSUSB_H__ */ | ||
diff --git a/drivers/isdn/mISDN/Makefile b/drivers/isdn/mISDN/Makefile index 1cb5e633cf75..0a6bd2a9e730 100644 --- a/drivers/isdn/mISDN/Makefile +++ b/drivers/isdn/mISDN/Makefile | |||
@@ -8,6 +8,6 @@ obj-$(CONFIG_MISDN_L1OIP) += l1oip.o | |||
8 | 8 | ||
9 | # multi objects | 9 | # multi objects |
10 | 10 | ||
11 | mISDN_core-objs := core.o fsm.o socket.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o | 11 | mISDN_core-objs := core.o fsm.o socket.o clock.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o |
12 | mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o | 12 | mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o |
13 | l1oip-objs := l1oip_core.o l1oip_codec.o | 13 | l1oip-objs := l1oip_core.o l1oip_codec.o |
diff --git a/drivers/isdn/mISDN/clock.c b/drivers/isdn/mISDN/clock.c new file mode 100644 index 000000000000..44d9c3d5d33d --- /dev/null +++ b/drivers/isdn/mISDN/clock.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | * Copyright 2008 by Andreas Eversberg <andreas@eversberg.eu> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * Quick API description: | ||
14 | * | ||
15 | * A clock source registers using mISDN_register_clock: | ||
16 | * name = text string to name clock source | ||
17 | * priority = value to priorize clock sources (0 = default) | ||
18 | * ctl = callback function to enable/disable clock source | ||
19 | * priv = private pointer of clock source | ||
20 | * return = pointer to clock source structure; | ||
21 | * | ||
22 | * Note: Callback 'ctl' can be called before mISDN_register_clock returns! | ||
23 | * Also it can be called during mISDN_unregister_clock. | ||
24 | * | ||
25 | * A clock source calls mISDN_clock_update with given samples elapsed, if | ||
26 | * enabled. If function call is delayed, tv must be set with the timestamp | ||
27 | * of the actual event. | ||
28 | * | ||
29 | * A clock source unregisters using mISDN_unregister_clock. | ||
30 | * | ||
31 | * To get current clock, call mISDN_clock_get. The signed short value | ||
32 | * counts the number of samples since. Time since last clock event is added. | ||
33 | * | ||
34 | */ | ||
35 | |||
36 | #include <linux/types.h> | ||
37 | #include <linux/stddef.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/mISDNif.h> | ||
40 | #include "core.h" | ||
41 | |||
42 | static u_int *debug; | ||
43 | static LIST_HEAD(iclock_list); | ||
44 | DEFINE_RWLOCK(iclock_lock); | ||
45 | u16 iclock_count; /* counter of last clock */ | ||
46 | struct timeval iclock_tv; /* time stamp of last clock */ | ||
47 | int iclock_tv_valid; /* already received one timestamp */ | ||
48 | struct mISDNclock *iclock_current; | ||
49 | |||
/*
 * Remember the core's debug bitmask pointer and take an initial
 * timestamp so later clock arithmetic has a valid reference point.
 */
void
mISDN_init_clock(u_int *dp)
{
	debug = dp;
	do_gettimeofday(&iclock_tv);
}
56 | |||
57 | static void | ||
58 | select_iclock(void) | ||
59 | { | ||
60 | struct mISDNclock *iclock, *bestclock = NULL, *lastclock = NULL; | ||
61 | int pri = -128; | ||
62 | |||
63 | list_for_each_entry(iclock, &iclock_list, list) { | ||
64 | if (iclock->pri > pri) { | ||
65 | pri = iclock->pri; | ||
66 | bestclock = iclock; | ||
67 | } | ||
68 | if (iclock_current == iclock) | ||
69 | lastclock = iclock; | ||
70 | } | ||
71 | if (lastclock && bestclock != lastclock) { | ||
72 | /* last used clock source still exists but changes, disable */ | ||
73 | if (*debug & DEBUG_CLOCK) | ||
74 | printk(KERN_DEBUG "Old clock source '%s' disable.\n", | ||
75 | lastclock->name); | ||
76 | lastclock->ctl(lastclock->priv, 0); | ||
77 | } | ||
78 | if (bestclock && bestclock != iclock_current) { | ||
79 | /* new clock source selected, enable */ | ||
80 | if (*debug & DEBUG_CLOCK) | ||
81 | printk(KERN_DEBUG "New clock source '%s' enable.\n", | ||
82 | bestclock->name); | ||
83 | bestclock->ctl(bestclock->priv, 1); | ||
84 | } | ||
85 | if (bestclock != iclock_current) { | ||
86 | /* no clock received yet */ | ||
87 | iclock_tv_valid = 0; | ||
88 | } | ||
89 | iclock_current = bestclock; | ||
90 | } | ||
91 | |||
92 | struct mISDNclock | ||
93 | *mISDN_register_clock(char *name, int pri, clockctl_func_t *ctl, void *priv) | ||
94 | { | ||
95 | u_long flags; | ||
96 | struct mISDNclock *iclock; | ||
97 | |||
98 | if (*debug & (DEBUG_CORE | DEBUG_CLOCK)) | ||
99 | printk(KERN_DEBUG "%s: %s %d\n", __func__, name, pri); | ||
100 | iclock = kzalloc(sizeof(struct mISDNclock), GFP_ATOMIC); | ||
101 | if (!iclock) { | ||
102 | printk(KERN_ERR "%s: No memory for clock entry.\n", __func__); | ||
103 | return NULL; | ||
104 | } | ||
105 | strncpy(iclock->name, name, sizeof(iclock->name)-1); | ||
106 | iclock->pri = pri; | ||
107 | iclock->priv = priv; | ||
108 | iclock->ctl = ctl; | ||
109 | write_lock_irqsave(&iclock_lock, flags); | ||
110 | list_add_tail(&iclock->list, &iclock_list); | ||
111 | select_iclock(); | ||
112 | write_unlock_irqrestore(&iclock_lock, flags); | ||
113 | return iclock; | ||
114 | } | ||
115 | EXPORT_SYMBOL(mISDN_register_clock); | ||
116 | |||
/*
 * Remove a clock source from the list.  If it is the currently active
 * source it is disabled first; select_iclock() then falls back to the
 * best remaining source.  May invoke the source's ctl callback.
 * NOTE(review): the mISDNclock allocated in mISDN_register_clock() is
 * not freed here -- verify that callers own and free it.
 */
void
mISDN_unregister_clock(struct mISDNclock *iclock)
{
	u_long flags;

	if (*debug & (DEBUG_CORE | DEBUG_CLOCK))
		printk(KERN_DEBUG "%s: %s %d\n", __func__, iclock->name,
		       iclock->pri);
	write_lock_irqsave(&iclock_lock, flags);
	if (iclock_current == iclock) {
		if (*debug & DEBUG_CLOCK)
			printk(KERN_DEBUG
			       "Current clock source '%s' unregisters.\n",
			       iclock->name);
		iclock->ctl(iclock->priv, 0);	/* switch the source off */
	}
	list_del(&iclock->list);
	select_iclock();	/* pick the next best remaining source */
	write_unlock_irqrestore(&iclock_lock, flags);
}
EXPORT_SYMBOL(mISDN_unregister_clock);
138 | |||
139 | void | ||
140 | mISDN_clock_update(struct mISDNclock *iclock, int samples, struct timeval *tv) | ||
141 | { | ||
142 | u_long flags; | ||
143 | struct timeval tv_now; | ||
144 | time_t elapsed_sec; | ||
145 | int elapsed_8000th; | ||
146 | |||
147 | write_lock_irqsave(&iclock_lock, flags); | ||
148 | if (iclock_current != iclock) { | ||
149 | printk(KERN_ERR "%s: '%s' sends us clock updates, but we do " | ||
150 | "listen to '%s'. This is a bug!\n", __func__, | ||
151 | iclock->name, | ||
152 | iclock_current ? iclock_current->name : "nothing"); | ||
153 | iclock->ctl(iclock->priv, 0); | ||
154 | write_unlock_irqrestore(&iclock_lock, flags); | ||
155 | return; | ||
156 | } | ||
157 | if (iclock_tv_valid) { | ||
158 | /* increment sample counter by given samples */ | ||
159 | iclock_count += samples; | ||
160 | if (tv) { /* tv must be set, if function call is delayed */ | ||
161 | iclock_tv.tv_sec = tv->tv_sec; | ||
162 | iclock_tv.tv_usec = tv->tv_usec; | ||
163 | } else | ||
164 | do_gettimeofday(&iclock_tv); | ||
165 | } else { | ||
166 | /* calc elapsed time by system clock */ | ||
167 | if (tv) { /* tv must be set, if function call is delayed */ | ||
168 | tv_now.tv_sec = tv->tv_sec; | ||
169 | tv_now.tv_usec = tv->tv_usec; | ||
170 | } else | ||
171 | do_gettimeofday(&tv_now); | ||
172 | elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec; | ||
173 | elapsed_8000th = (tv_now.tv_usec / 125) | ||
174 | - (iclock_tv.tv_usec / 125); | ||
175 | if (elapsed_8000th < 0) { | ||
176 | elapsed_sec -= 1; | ||
177 | elapsed_8000th += 8000; | ||
178 | } | ||
179 | /* add elapsed time to counter and set new timestamp */ | ||
180 | iclock_count += elapsed_sec * 8000 + elapsed_8000th; | ||
181 | iclock_tv.tv_sec = tv_now.tv_sec; | ||
182 | iclock_tv.tv_usec = tv_now.tv_usec; | ||
183 | iclock_tv_valid = 1; | ||
184 | if (*debug & DEBUG_CLOCK) | ||
185 | printk("Received first clock from source '%s'.\n", | ||
186 | iclock_current ? iclock_current->name : "nothing"); | ||
187 | } | ||
188 | write_unlock_irqrestore(&iclock_lock, flags); | ||
189 | } | ||
190 | EXPORT_SYMBOL(mISDN_clock_update); | ||
191 | |||
192 | unsigned short | ||
193 | mISDN_clock_get(void) | ||
194 | { | ||
195 | u_long flags; | ||
196 | struct timeval tv_now; | ||
197 | time_t elapsed_sec; | ||
198 | int elapsed_8000th; | ||
199 | u16 count; | ||
200 | |||
201 | read_lock_irqsave(&iclock_lock, flags); | ||
202 | /* calc elapsed time by system clock */ | ||
203 | do_gettimeofday(&tv_now); | ||
204 | elapsed_sec = tv_now.tv_sec - iclock_tv.tv_sec; | ||
205 | elapsed_8000th = (tv_now.tv_usec / 125) - (iclock_tv.tv_usec / 125); | ||
206 | if (elapsed_8000th < 0) { | ||
207 | elapsed_sec -= 1; | ||
208 | elapsed_8000th += 8000; | ||
209 | } | ||
210 | /* add elapsed time to counter */ | ||
211 | count = iclock_count + elapsed_sec * 8000 + elapsed_8000th; | ||
212 | read_unlock_irqrestore(&iclock_lock, flags); | ||
213 | return count; | ||
214 | } | ||
215 | EXPORT_SYMBOL(mISDN_clock_get); | ||
216 | |||
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index 751665c448d0..9426c9827e47 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c | |||
@@ -25,39 +25,183 @@ MODULE_AUTHOR("Karsten Keil"); | |||
25 | MODULE_LICENSE("GPL"); | 25 | MODULE_LICENSE("GPL"); |
26 | module_param(debug, uint, S_IRUGO | S_IWUSR); | 26 | module_param(debug, uint, S_IRUGO | S_IWUSR); |
27 | 27 | ||
28 | static LIST_HEAD(devices); | ||
29 | static DEFINE_RWLOCK(device_lock); | ||
30 | static u64 device_ids; | 28 | static u64 device_ids; |
31 | #define MAX_DEVICE_ID 63 | 29 | #define MAX_DEVICE_ID 63 |
32 | 30 | ||
33 | static LIST_HEAD(Bprotocols); | 31 | static LIST_HEAD(Bprotocols); |
34 | static DEFINE_RWLOCK(bp_lock); | 32 | static DEFINE_RWLOCK(bp_lock); |
35 | 33 | ||
34 | static void mISDN_dev_release(struct device *dev) | ||
35 | { | ||
36 | /* nothing to do: the device is part of its parent's data structure */ | ||
37 | } | ||
38 | |||
39 | static ssize_t _show_id(struct device *dev, | ||
40 | struct device_attribute *attr, char *buf) | ||
41 | { | ||
42 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
43 | |||
44 | if (!mdev) | ||
45 | return -ENODEV; | ||
46 | return sprintf(buf, "%d\n", mdev->id); | ||
47 | } | ||
48 | |||
49 | static ssize_t _show_nrbchan(struct device *dev, | ||
50 | struct device_attribute *attr, char *buf) | ||
51 | { | ||
52 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
53 | |||
54 | if (!mdev) | ||
55 | return -ENODEV; | ||
56 | return sprintf(buf, "%d\n", mdev->nrbchan); | ||
57 | } | ||
58 | |||
59 | static ssize_t _show_d_protocols(struct device *dev, | ||
60 | struct device_attribute *attr, char *buf) | ||
61 | { | ||
62 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
63 | |||
64 | if (!mdev) | ||
65 | return -ENODEV; | ||
66 | return sprintf(buf, "%d\n", mdev->Dprotocols); | ||
67 | } | ||
68 | |||
69 | static ssize_t _show_b_protocols(struct device *dev, | ||
70 | struct device_attribute *attr, char *buf) | ||
71 | { | ||
72 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
73 | |||
74 | if (!mdev) | ||
75 | return -ENODEV; | ||
76 | return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols()); | ||
77 | } | ||
78 | |||
79 | static ssize_t _show_protocol(struct device *dev, | ||
80 | struct device_attribute *attr, char *buf) | ||
81 | { | ||
82 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
83 | |||
84 | if (!mdev) | ||
85 | return -ENODEV; | ||
86 | return sprintf(buf, "%d\n", mdev->D.protocol); | ||
87 | } | ||
88 | |||
89 | static ssize_t _show_name(struct device *dev, | ||
90 | struct device_attribute *attr, char *buf) | ||
91 | { | ||
92 | strcpy(buf, dev_name(dev)); | ||
93 | return strlen(buf); | ||
94 | } | ||
95 | |||
96 | #if 0 /* hangs */ | ||
97 | static ssize_t _set_name(struct device *dev, struct device_attribute *attr, | ||
98 | const char *buf, size_t count) | ||
99 | { | ||
100 | int err = 0; | ||
101 | char *out = kmalloc(count + 1, GFP_KERNEL); | ||
102 | |||
103 | if (!out) | ||
104 | return -ENOMEM; | ||
105 | |||
106 | memcpy(out, buf, count); | ||
107 | if (count && out[count - 1] == '\n') | ||
108 | out[--count] = 0; | ||
109 | if (count) | ||
110 | err = device_rename(dev, out); | ||
111 | kfree(out); | ||
112 | |||
113 | return (err < 0) ? err : count; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | static ssize_t _show_channelmap(struct device *dev, | ||
118 | struct device_attribute *attr, char *buf) | ||
119 | { | ||
120 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
121 | char *bp = buf; | ||
122 | int i; | ||
123 | |||
124 | for (i = 0; i <= mdev->nrbchan; i++) | ||
125 | *bp++ = test_channelmap(i, mdev->channelmap) ? '1' : '0'; | ||
126 | |||
127 | return bp - buf; | ||
128 | } | ||
129 | |||
130 | static struct device_attribute mISDN_dev_attrs[] = { | ||
131 | __ATTR(id, S_IRUGO, _show_id, NULL), | ||
132 | __ATTR(d_protocols, S_IRUGO, _show_d_protocols, NULL), | ||
133 | __ATTR(b_protocols, S_IRUGO, _show_b_protocols, NULL), | ||
134 | __ATTR(protocol, S_IRUGO, _show_protocol, NULL), | ||
135 | __ATTR(channelmap, S_IRUGO, _show_channelmap, NULL), | ||
136 | __ATTR(nrbchan, S_IRUGO, _show_nrbchan, NULL), | ||
137 | __ATTR(name, S_IRUGO, _show_name, NULL), | ||
138 | /* __ATTR(name, S_IRUGO|S_IWUSR, _show_name, _set_name), */ | ||
139 | {} | ||
140 | }; | ||
141 | |||
142 | #ifdef CONFIG_HOTPLUG | ||
143 | static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
144 | { | ||
145 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
146 | |||
147 | if (!mdev) | ||
148 | return 0; | ||
149 | |||
150 | if (add_uevent_var(env, "nchans=%d", mdev->nrbchan)) | ||
151 | return -ENOMEM; | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | #endif | ||
156 | |||
157 | static void mISDN_class_release(struct class *cls) | ||
158 | { | ||
159 | /* do nothing, it's static */ | ||
160 | } | ||
161 | |||
162 | static struct class mISDN_class = { | ||
163 | .name = "mISDN", | ||
164 | .owner = THIS_MODULE, | ||
165 | #ifdef CONFIG_HOTPLUG | ||
166 | .dev_uevent = mISDN_uevent, | ||
167 | #endif | ||
168 | .dev_attrs = mISDN_dev_attrs, | ||
169 | .dev_release = mISDN_dev_release, | ||
170 | .class_release = mISDN_class_release, | ||
171 | }; | ||
172 | |||
173 | static int | ||
174 | _get_mdevice(struct device *dev, void *id) | ||
175 | { | ||
176 | struct mISDNdevice *mdev = dev_to_mISDN(dev); | ||
177 | |||
178 | if (!mdev) | ||
179 | return 0; | ||
180 | if (mdev->id != *(u_int *)id) | ||
181 | return 0; | ||
182 | return 1; | ||
183 | } | ||
184 | |||
36 | struct mISDNdevice | 185 | struct mISDNdevice |
37 | *get_mdevice(u_int id) | 186 | *get_mdevice(u_int id) |
38 | { | 187 | { |
39 | struct mISDNdevice *dev; | 188 | return dev_to_mISDN(class_find_device(&mISDN_class, NULL, &id, |
189 | _get_mdevice)); | ||
190 | } | ||
40 | 191 | ||
41 | read_lock(&device_lock); | 192 | static int |
42 | list_for_each_entry(dev, &devices, D.list) | 193 | _get_mdevice_count(struct device *dev, void *cnt) |
43 | if (dev->id == id) { | 194 | { |
44 | read_unlock(&device_lock); | 195 | *(int *)cnt += 1; |
45 | return dev; | 196 | return 0; |
46 | } | ||
47 | read_unlock(&device_lock); | ||
48 | return NULL; | ||
49 | } | 197 | } |
50 | 198 | ||
51 | int | 199 | int |
52 | get_mdevice_count(void) | 200 | get_mdevice_count(void) |
53 | { | 201 | { |
54 | struct mISDNdevice *dev; | 202 | int cnt = 0; |
55 | int cnt = 0; | ||
56 | 203 | ||
57 | read_lock(&device_lock); | 204 | class_for_each_device(&mISDN_class, NULL, &cnt, _get_mdevice_count); |
58 | list_for_each_entry(dev, &devices, D.list) | ||
59 | cnt++; | ||
60 | read_unlock(&device_lock); | ||
61 | return cnt; | 205 | return cnt; |
62 | } | 206 | } |
63 | 207 | ||
@@ -68,48 +212,66 @@ get_free_devid(void) | |||
68 | 212 | ||
69 | for (i = 0; i <= MAX_DEVICE_ID; i++) | 213 | for (i = 0; i <= MAX_DEVICE_ID; i++) |
70 | if (!test_and_set_bit(i, (u_long *)&device_ids)) | 214 | if (!test_and_set_bit(i, (u_long *)&device_ids)) |
71 | return i; | 215 | break; |
72 | return -1; | 216 | if (i > MAX_DEVICE_ID) |
217 | return -1; | ||
218 | return i; | ||
73 | } | 219 | } |
74 | 220 | ||
75 | int | 221 | int |
76 | mISDN_register_device(struct mISDNdevice *dev, char *name) | 222 | mISDN_register_device(struct mISDNdevice *dev, |
223 | struct device *parent, char *name) | ||
77 | { | 224 | { |
78 | u_long flags; | ||
79 | int err; | 225 | int err; |
80 | 226 | ||
81 | dev->id = get_free_devid(); | 227 | dev->id = get_free_devid(); |
228 | err = -EBUSY; | ||
82 | if (dev->id < 0) | 229 | if (dev->id < 0) |
83 | return -EBUSY; | 230 | goto error1; |
231 | |||
232 | device_initialize(&dev->dev); | ||
84 | if (name && name[0]) | 233 | if (name && name[0]) |
85 | strcpy(dev->name, name); | 234 | dev_set_name(&dev->dev, "%s", name); |
86 | else | 235 | else |
87 | sprintf(dev->name, "mISDN%d", dev->id); | 236 | dev_set_name(&dev->dev, "mISDN%d", dev->id); |
88 | if (debug & DEBUG_CORE) | 237 | if (debug & DEBUG_CORE) |
89 | printk(KERN_DEBUG "mISDN_register %s %d\n", | 238 | printk(KERN_DEBUG "mISDN_register %s %d\n", |
90 | dev->name, dev->id); | 239 | dev_name(&dev->dev), dev->id); |
91 | err = create_stack(dev); | 240 | err = create_stack(dev); |
92 | if (err) | 241 | if (err) |
93 | return err; | 242 | goto error1; |
94 | write_lock_irqsave(&device_lock, flags); | 243 | |
95 | list_add_tail(&dev->D.list, &devices); | 244 | dev->dev.class = &mISDN_class; |
96 | write_unlock_irqrestore(&device_lock, flags); | 245 | dev->dev.platform_data = dev; |
246 | dev->dev.parent = parent; | ||
247 | dev_set_drvdata(&dev->dev, dev); | ||
248 | |||
249 | err = device_add(&dev->dev); | ||
250 | if (err) | ||
251 | goto error3; | ||
97 | return 0; | 252 | return 0; |
253 | |||
254 | error3: | ||
255 | delete_stack(dev); | ||
256 | return err; | ||
257 | error1: | ||
258 | return err; | ||
259 | |||
98 | } | 260 | } |
99 | EXPORT_SYMBOL(mISDN_register_device); | 261 | EXPORT_SYMBOL(mISDN_register_device); |
100 | 262 | ||
101 | void | 263 | void |
102 | mISDN_unregister_device(struct mISDNdevice *dev) { | 264 | mISDN_unregister_device(struct mISDNdevice *dev) { |
103 | u_long flags; | ||
104 | |||
105 | if (debug & DEBUG_CORE) | 265 | if (debug & DEBUG_CORE) |
106 | printk(KERN_DEBUG "mISDN_unregister %s %d\n", | 266 | printk(KERN_DEBUG "mISDN_unregister %s %d\n", |
107 | dev->name, dev->id); | 267 | dev_name(&dev->dev), dev->id); |
108 | write_lock_irqsave(&device_lock, flags); | 268 | /* sysfs_remove_link(&dev->dev.kobj, "device"); */ |
109 | list_del(&dev->D.list); | 269 | device_del(&dev->dev); |
110 | write_unlock_irqrestore(&device_lock, flags); | 270 | dev_set_drvdata(&dev->dev, NULL); |
271 | |||
111 | test_and_clear_bit(dev->id, (u_long *)&device_ids); | 272 | test_and_clear_bit(dev->id, (u_long *)&device_ids); |
112 | delete_stack(dev); | 273 | delete_stack(dev); |
274 | put_device(&dev->dev); | ||
113 | } | 275 | } |
114 | EXPORT_SYMBOL(mISDN_unregister_device); | 276 | EXPORT_SYMBOL(mISDN_unregister_device); |
115 | 277 | ||
@@ -199,43 +361,45 @@ mISDNInit(void) | |||
199 | 361 | ||
200 | printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n", | 362 | printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n", |
201 | MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE); | 363 | MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE); |
364 | mISDN_init_clock(&debug); | ||
202 | mISDN_initstack(&debug); | 365 | mISDN_initstack(&debug); |
366 | err = class_register(&mISDN_class); | ||
367 | if (err) | ||
368 | goto error1; | ||
203 | err = mISDN_inittimer(&debug); | 369 | err = mISDN_inittimer(&debug); |
204 | if (err) | 370 | if (err) |
205 | goto error; | 371 | goto error2; |
206 | err = l1_init(&debug); | 372 | err = l1_init(&debug); |
207 | if (err) { | 373 | if (err) |
208 | mISDN_timer_cleanup(); | 374 | goto error3; |
209 | goto error; | ||
210 | } | ||
211 | err = Isdnl2_Init(&debug); | 375 | err = Isdnl2_Init(&debug); |
212 | if (err) { | 376 | if (err) |
213 | mISDN_timer_cleanup(); | 377 | goto error4; |
214 | l1_cleanup(); | ||
215 | goto error; | ||
216 | } | ||
217 | err = misdn_sock_init(&debug); | 378 | err = misdn_sock_init(&debug); |
218 | if (err) { | 379 | if (err) |
219 | mISDN_timer_cleanup(); | 380 | goto error5; |
220 | l1_cleanup(); | 381 | return 0; |
221 | Isdnl2_cleanup(); | 382 | |
222 | } | 383 | error5: |
223 | error: | 384 | Isdnl2_cleanup(); |
385 | error4: | ||
386 | l1_cleanup(); | ||
387 | error3: | ||
388 | mISDN_timer_cleanup(); | ||
389 | error2: | ||
390 | class_unregister(&mISDN_class); | ||
391 | error1: | ||
224 | return err; | 392 | return err; |
225 | } | 393 | } |
226 | 394 | ||
227 | static void mISDN_cleanup(void) | 395 | static void mISDN_cleanup(void) |
228 | { | 396 | { |
229 | misdn_sock_cleanup(); | 397 | misdn_sock_cleanup(); |
230 | mISDN_timer_cleanup(); | ||
231 | l1_cleanup(); | ||
232 | Isdnl2_cleanup(); | 398 | Isdnl2_cleanup(); |
399 | l1_cleanup(); | ||
400 | mISDN_timer_cleanup(); | ||
401 | class_unregister(&mISDN_class); | ||
233 | 402 | ||
234 | if (!list_empty(&devices)) | ||
235 | printk(KERN_ERR "%s devices still registered\n", __func__); | ||
236 | |||
237 | if (!list_empty(&Bprotocols)) | ||
238 | printk(KERN_ERR "%s Bprotocols still registered\n", __func__); | ||
239 | printk(KERN_DEBUG "mISDNcore unloaded\n"); | 403 | printk(KERN_DEBUG "mISDNcore unloaded\n"); |
240 | } | 404 | } |
241 | 405 | ||
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h index 7da7233b4c1a..7ac2f81a812b 100644 --- a/drivers/isdn/mISDN/core.h +++ b/drivers/isdn/mISDN/core.h | |||
@@ -74,4 +74,6 @@ extern void l1_cleanup(void); | |||
74 | extern int Isdnl2_Init(u_int *); | 74 | extern int Isdnl2_Init(u_int *); |
75 | extern void Isdnl2_cleanup(void); | 75 | extern void Isdnl2_cleanup(void); |
76 | 76 | ||
77 | extern void mISDN_init_clock(u_int *); | ||
78 | |||
77 | #endif | 79 | #endif |
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index 6c3fed6b8d4f..98a33c58f091 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define DEBUG_DSP_TONE 0x0020 | 15 | #define DEBUG_DSP_TONE 0x0020 |
16 | #define DEBUG_DSP_BLOWFISH 0x0040 | 16 | #define DEBUG_DSP_BLOWFISH 0x0040 |
17 | #define DEBUG_DSP_DELAY 0x0100 | 17 | #define DEBUG_DSP_DELAY 0x0100 |
18 | #define DEBUG_DSP_CLOCK 0x0200 | ||
18 | #define DEBUG_DSP_DTMFCOEFF 0x8000 /* heavy output */ | 19 | #define DEBUG_DSP_DTMFCOEFF 0x8000 /* heavy output */ |
19 | 20 | ||
20 | /* options may be: | 21 | /* options may be: |
@@ -198,6 +199,7 @@ struct dsp { | |||
198 | /* hardware stuff */ | 199 | /* hardware stuff */ |
199 | struct dsp_features features; | 200 | struct dsp_features features; |
200 | int features_rx_off; /* set if rx_off is featured */ | 201 | int features_rx_off; /* set if rx_off is featured */ |
202 | int features_fill_empty; /* set if fill_empty is featured */ | ||
201 | int pcm_slot_rx; /* current PCM slot (or -1) */ | 203 | int pcm_slot_rx; /* current PCM slot (or -1) */ |
202 | int pcm_bank_rx; | 204 | int pcm_bank_rx; |
203 | int pcm_slot_tx; | 205 | int pcm_slot_tx; |
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index c884511e2d49..0ac67bff303a 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c | |||
@@ -137,6 +137,7 @@ | |||
137 | /* #define CMX_CONF_DEBUG */ | 137 | /* #define CMX_CONF_DEBUG */ |
138 | 138 | ||
139 | /*#define CMX_DEBUG * massive read/write pointer output */ | 139 | /*#define CMX_DEBUG * massive read/write pointer output */ |
140 | /*#define CMX_DELAY_DEBUG * gives rx-buffer delay overview */ | ||
140 | /*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */ | 141 | /*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */ |
141 | 142 | ||
142 | static inline int | 143 | static inline int |
@@ -744,11 +745,11 @@ conf_software: | |||
744 | if (dsp->pcm_slot_rx >= 0 && | 745 | if (dsp->pcm_slot_rx >= 0 && |
745 | dsp->pcm_slot_rx < | 746 | dsp->pcm_slot_rx < |
746 | sizeof(freeslots)) | 747 | sizeof(freeslots)) |
747 | freeslots[dsp->pcm_slot_tx] = 0; | 748 | freeslots[dsp->pcm_slot_rx] = 0; |
748 | if (dsp->pcm_slot_tx >= 0 && | 749 | if (dsp->pcm_slot_tx >= 0 && |
749 | dsp->pcm_slot_tx < | 750 | dsp->pcm_slot_tx < |
750 | sizeof(freeslots)) | 751 | sizeof(freeslots)) |
751 | freeslots[dsp->pcm_slot_rx] = 0; | 752 | freeslots[dsp->pcm_slot_tx] = 0; |
752 | } | 753 | } |
753 | } | 754 | } |
754 | i = 0; | 755 | i = 0; |
@@ -836,11 +837,11 @@ conf_software: | |||
836 | if (dsp->pcm_slot_rx >= 0 && | 837 | if (dsp->pcm_slot_rx >= 0 && |
837 | dsp->pcm_slot_rx < | 838 | dsp->pcm_slot_rx < |
838 | sizeof(freeslots)) | 839 | sizeof(freeslots)) |
839 | freeslots[dsp->pcm_slot_tx] = 0; | 840 | freeslots[dsp->pcm_slot_rx] = 0; |
840 | if (dsp->pcm_slot_tx >= 0 && | 841 | if (dsp->pcm_slot_tx >= 0 && |
841 | dsp->pcm_slot_tx < | 842 | dsp->pcm_slot_tx < |
842 | sizeof(freeslots)) | 843 | sizeof(freeslots)) |
843 | freeslots[dsp->pcm_slot_rx] = 0; | 844 | freeslots[dsp->pcm_slot_tx] = 0; |
844 | } | 845 | } |
845 | } | 846 | } |
846 | i1 = 0; | 847 | i1 = 0; |
@@ -926,10 +927,6 @@ conf_software: | |||
926 | 927 | ||
927 | /* for more than two members.. */ | 928 | /* for more than two members.. */ |
928 | 929 | ||
929 | /* in case of hdlc, we change to software */ | ||
930 | if (dsp->hdlc) | ||
931 | goto conf_software; | ||
932 | |||
933 | /* if all members already have the same conference */ | 930 | /* if all members already have the same conference */ |
934 | if (all_conf) | 931 | if (all_conf) |
935 | return; | 932 | return; |
@@ -940,6 +937,9 @@ conf_software: | |||
940 | if (current_conf >= 0) { | 937 | if (current_conf >= 0) { |
941 | join_members: | 938 | join_members: |
942 | list_for_each_entry(member, &conf->mlist, list) { | 939 | list_for_each_entry(member, &conf->mlist, list) { |
940 | /* in case of hdlc, change to software */ | ||
941 | if (member->dsp->hdlc) | ||
942 | goto conf_software; | ||
943 | /* join to current conference */ | 943 | /* join to current conference */ |
944 | if (member->dsp->hfc_conf == current_conf) | 944 | if (member->dsp->hfc_conf == current_conf) |
945 | continue; | 945 | continue; |
@@ -1135,6 +1135,25 @@ dsp_cmx_conf(struct dsp *dsp, u32 conf_id) | |||
1135 | return 0; | 1135 | return 0; |
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | #ifdef CMX_DELAY_DEBUG | ||
1139 | int delaycount; | ||
1140 | static void | ||
1141 | showdelay(struct dsp *dsp, int samples, int delay) | ||
1142 | { | ||
1143 | char bar[] = "--------------------------------------------------|"; | ||
1144 | int sdelay; | ||
1145 | |||
1146 | delaycount += samples; | ||
1147 | if (delaycount < 8000) | ||
1148 | return; | ||
1149 | delaycount = 0; | ||
1150 | |||
1151 | sdelay = delay * 50 / (dsp_poll << 2); | ||
1152 | |||
1153 | printk(KERN_DEBUG "DELAY (%s) %3d >%s\n", dsp->name, delay, | ||
1154 | sdelay > 50 ? "..." : bar + 50 - sdelay); | ||
1155 | } | ||
1156 | #endif | ||
1138 | 1157 | ||
1139 | /* | 1158 | /* |
1140 | * audio data is received from card | 1159 | * audio data is received from card |
@@ -1168,11 +1187,18 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb) | |||
1168 | dsp->rx_init = 0; | 1187 | dsp->rx_init = 0; |
1169 | if (dsp->features.unordered) { | 1188 | if (dsp->features.unordered) { |
1170 | dsp->rx_R = (hh->id & CMX_BUFF_MASK); | 1189 | dsp->rx_R = (hh->id & CMX_BUFF_MASK); |
1171 | dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) | 1190 | if (dsp->cmx_delay) |
1172 | & CMX_BUFF_MASK; | 1191 | dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) |
1192 | & CMX_BUFF_MASK; | ||
1193 | else | ||
1194 | dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1)) | ||
1195 | & CMX_BUFF_MASK; | ||
1173 | } else { | 1196 | } else { |
1174 | dsp->rx_R = 0; | 1197 | dsp->rx_R = 0; |
1175 | dsp->rx_W = dsp->cmx_delay; | 1198 | if (dsp->cmx_delay) |
1199 | dsp->rx_W = dsp->cmx_delay; | ||
1200 | else | ||
1201 | dsp->rx_W = dsp_poll >> 1; | ||
1176 | } | 1202 | } |
1177 | } | 1203 | } |
1178 | /* if frame contains time code, write directly */ | 1204 | /* if frame contains time code, write directly */ |
@@ -1185,19 +1211,25 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb) | |||
1185 | * we set our new read pointer, and write silence to buffer | 1211 | * we set our new read pointer, and write silence to buffer |
1186 | */ | 1212 | */ |
1187 | if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) { | 1213 | if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) { |
1188 | if (dsp_debug & DEBUG_DSP_CMX) | 1214 | if (dsp_debug & DEBUG_DSP_CLOCK) |
1189 | printk(KERN_DEBUG | 1215 | printk(KERN_DEBUG |
1190 | "cmx_receive(dsp=%lx): UNDERRUN (or overrun the " | 1216 | "cmx_receive(dsp=%lx): UNDERRUN (or overrun the " |
1191 | "maximum delay), adjusting read pointer! " | 1217 | "maximum delay), adjusting read pointer! " |
1192 | "(inst %s)\n", (u_long)dsp, dsp->name); | 1218 | "(inst %s)\n", (u_long)dsp, dsp->name); |
1193 | /* flush buffer */ | 1219 | /* flush rx buffer and set delay to dsp_poll / 2 */ |
1194 | if (dsp->features.unordered) { | 1220 | if (dsp->features.unordered) { |
1195 | dsp->rx_R = (hh->id & CMX_BUFF_MASK); | 1221 | dsp->rx_R = (hh->id & CMX_BUFF_MASK); |
1196 | dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) | 1222 | if (dsp->cmx_delay) |
1197 | & CMX_BUFF_MASK; | 1223 | dsp->rx_W = (dsp->rx_R + dsp->cmx_delay) |
1224 | & CMX_BUFF_MASK; | ||
1225 | dsp->rx_W = (dsp->rx_R + (dsp_poll >> 1)) | ||
1226 | & CMX_BUFF_MASK; | ||
1198 | } else { | 1227 | } else { |
1199 | dsp->rx_R = 0; | 1228 | dsp->rx_R = 0; |
1200 | dsp->rx_W = dsp->cmx_delay; | 1229 | if (dsp->cmx_delay) |
1230 | dsp->rx_W = dsp->cmx_delay; | ||
1231 | else | ||
1232 | dsp->rx_W = dsp_poll >> 1; | ||
1201 | } | 1233 | } |
1202 | memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff)); | 1234 | memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff)); |
1203 | } | 1235 | } |
@@ -1205,7 +1237,7 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb) | |||
1205 | if (dsp->cmx_delay) | 1237 | if (dsp->cmx_delay) |
1206 | if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >= | 1238 | if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >= |
1207 | (dsp->cmx_delay << 1)) { | 1239 | (dsp->cmx_delay << 1)) { |
1208 | if (dsp_debug & DEBUG_DSP_CMX) | 1240 | if (dsp_debug & DEBUG_DSP_CLOCK) |
1209 | printk(KERN_DEBUG | 1241 | printk(KERN_DEBUG |
1210 | "cmx_receive(dsp=%lx): OVERRUN (because " | 1242 | "cmx_receive(dsp=%lx): OVERRUN (because " |
1211 | "twice the delay is reached), adjusting " | 1243 | "twice the delay is reached), adjusting " |
@@ -1243,6 +1275,9 @@ dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb) | |||
1243 | 1275 | ||
1244 | /* increase write-pointer */ | 1276 | /* increase write-pointer */ |
1245 | dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK); | 1277 | dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK); |
1278 | #ifdef CMX_DELAY_DEBUG | ||
1279 | showdelay(dsp, len, (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK); | ||
1280 | #endif | ||
1246 | } | 1281 | } |
1247 | 1282 | ||
1248 | 1283 | ||
@@ -1360,8 +1395,12 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members) | |||
1360 | t = (t+1) & CMX_BUFF_MASK; | 1395 | t = (t+1) & CMX_BUFF_MASK; |
1361 | r = (r+1) & CMX_BUFF_MASK; | 1396 | r = (r+1) & CMX_BUFF_MASK; |
1362 | } | 1397 | } |
1363 | if (r != rr) | 1398 | if (r != rr) { |
1399 | if (dsp_debug & DEBUG_DSP_CLOCK) | ||
1400 | printk(KERN_DEBUG "%s: RX empty\n", | ||
1401 | __func__); | ||
1364 | memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK); | 1402 | memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK); |
1403 | } | ||
1365 | /* -> if echo is enabled */ | 1404 | /* -> if echo is enabled */ |
1366 | } else { | 1405 | } else { |
1367 | /* | 1406 | /* |
@@ -1540,13 +1579,11 @@ send_packet: | |||
1540 | schedule_work(&dsp->workq); | 1579 | schedule_work(&dsp->workq); |
1541 | } | 1580 | } |
1542 | 1581 | ||
1543 | static u32 samplecount; | 1582 | static u32 jittercount; /* counter for jitter check */; |
1544 | struct timer_list dsp_spl_tl; | 1583 | struct timer_list dsp_spl_tl; |
1545 | u32 dsp_spl_jiffies; /* calculate the next time to fire */ | 1584 | u32 dsp_spl_jiffies; /* calculate the next time to fire */ |
1546 | #ifdef UNUSED | 1585 | static u16 dsp_count; /* last sample count */ |
1547 | static u32 dsp_start_jiffies; /* jiffies at the time, the calculation begins */ | 1586 | static int dsp_count_valid ; /* if we have last sample count */ |
1548 | #endif /* UNUSED */ | ||
1549 | static struct timeval dsp_start_tv; /* time at start of calculation */ | ||
1550 | 1587 | ||
1551 | void | 1588 | void |
1552 | dsp_cmx_send(void *arg) | 1589 | dsp_cmx_send(void *arg) |
@@ -1560,38 +1597,32 @@ dsp_cmx_send(void *arg) | |||
1560 | int r, rr; | 1597 | int r, rr; |
1561 | int jittercheck = 0, delay, i; | 1598 | int jittercheck = 0, delay, i; |
1562 | u_long flags; | 1599 | u_long flags; |
1563 | struct timeval tv; | 1600 | u16 length, count; |
1564 | u32 elapsed; | ||
1565 | s16 length; | ||
1566 | 1601 | ||
1567 | /* lock */ | 1602 | /* lock */ |
1568 | spin_lock_irqsave(&dsp_lock, flags); | 1603 | spin_lock_irqsave(&dsp_lock, flags); |
1569 | 1604 | ||
1570 | if (!dsp_start_tv.tv_sec) { | 1605 | if (!dsp_count_valid) { |
1571 | do_gettimeofday(&dsp_start_tv); | 1606 | dsp_count = mISDN_clock_get(); |
1572 | length = dsp_poll; | 1607 | length = dsp_poll; |
1608 | dsp_count_valid = 1; | ||
1573 | } else { | 1609 | } else { |
1574 | do_gettimeofday(&tv); | 1610 | count = mISDN_clock_get(); |
1575 | elapsed = ((tv.tv_sec - dsp_start_tv.tv_sec) * 8000) | 1611 | length = count - dsp_count; |
1576 | + ((s32)(tv.tv_usec / 125) - (dsp_start_tv.tv_usec / 125)); | 1612 | dsp_count = count; |
1577 | dsp_start_tv.tv_sec = tv.tv_sec; | ||
1578 | dsp_start_tv.tv_usec = tv.tv_usec; | ||
1579 | length = elapsed; | ||
1580 | } | 1613 | } |
1581 | if (length > MAX_POLL + 100) | 1614 | if (length > MAX_POLL + 100) |
1582 | length = MAX_POLL + 100; | 1615 | length = MAX_POLL + 100; |
1583 | /* printk(KERN_DEBUG "len=%d dsp_count=0x%x.%04x dsp_poll_diff=0x%x.%04x\n", | 1616 | /* printk(KERN_DEBUG "len=%d dsp_count=0x%x\n", length, dsp_count); */ |
1584 | length, dsp_count >> 16, dsp_count & 0xffff, dsp_poll_diff >> 16, | ||
1585 | dsp_poll_diff & 0xffff); | ||
1586 | */ | ||
1587 | 1617 | ||
1588 | /* | 1618 | /* |
1589 | * check if jitter needs to be checked | 1619 | * check if jitter needs to be checked (this is every second) |
1590 | * (this is about every second = 8192 samples) | ||
1591 | */ | 1620 | */ |
1592 | samplecount += length; | 1621 | jittercount += length; |
1593 | if ((samplecount & 8191) < length) | 1622 | if (jittercount >= 8000) { |
1623 | jittercount -= 8000; | ||
1594 | jittercheck = 1; | 1624 | jittercheck = 1; |
1625 | } | ||
1595 | 1626 | ||
1596 | /* loop all members that do not require conference mixing */ | 1627 | /* loop all members that do not require conference mixing */ |
1597 | list_for_each_entry(dsp, &dsp_ilist, list) { | 1628 | list_for_each_entry(dsp, &dsp_ilist, list) { |
@@ -1704,17 +1735,19 @@ dsp_cmx_send(void *arg) | |||
1704 | } | 1735 | } |
1705 | /* | 1736 | /* |
1706 | * remove rx_delay only if we have delay AND we | 1737 | * remove rx_delay only if we have delay AND we |
1707 | * have not preset cmx_delay | 1738 | * have not preset cmx_delay AND |
1739 | * the delay is greater dsp_poll | ||
1708 | */ | 1740 | */ |
1709 | if (delay && !dsp->cmx_delay) { | 1741 | if (delay > dsp_poll && !dsp->cmx_delay) { |
1710 | if (dsp_debug & DEBUG_DSP_CMX) | 1742 | if (dsp_debug & DEBUG_DSP_CLOCK) |
1711 | printk(KERN_DEBUG | 1743 | printk(KERN_DEBUG |
1712 | "%s lowest rx_delay of %d bytes for" | 1744 | "%s lowest rx_delay of %d bytes for" |
1713 | " dsp %s are now removed.\n", | 1745 | " dsp %s are now removed.\n", |
1714 | __func__, delay, | 1746 | __func__, delay, |
1715 | dsp->name); | 1747 | dsp->name); |
1716 | r = dsp->rx_R; | 1748 | r = dsp->rx_R; |
1717 | rr = (r + delay) & CMX_BUFF_MASK; | 1749 | rr = (r + delay - (dsp_poll >> 1)) |
1750 | & CMX_BUFF_MASK; | ||
1718 | /* delete rx-data */ | 1751 | /* delete rx-data */ |
1719 | while (r != rr) { | 1752 | while (r != rr) { |
1720 | p[r] = dsp_silence; | 1753 | p[r] = dsp_silence; |
@@ -1736,15 +1769,16 @@ dsp_cmx_send(void *arg) | |||
1736 | * remove delay only if we have delay AND we | 1769 | * remove delay only if we have delay AND we |
1737 | * have enabled tx_dejitter | 1770 | * have enabled tx_dejitter |
1738 | */ | 1771 | */ |
1739 | if (delay && dsp->tx_dejitter) { | 1772 | if (delay > dsp_poll && dsp->tx_dejitter) { |
1740 | if (dsp_debug & DEBUG_DSP_CMX) | 1773 | if (dsp_debug & DEBUG_DSP_CLOCK) |
1741 | printk(KERN_DEBUG | 1774 | printk(KERN_DEBUG |
1742 | "%s lowest tx_delay of %d bytes for" | 1775 | "%s lowest tx_delay of %d bytes for" |
1743 | " dsp %s are now removed.\n", | 1776 | " dsp %s are now removed.\n", |
1744 | __func__, delay, | 1777 | __func__, delay, |
1745 | dsp->name); | 1778 | dsp->name); |
1746 | r = dsp->tx_R; | 1779 | r = dsp->tx_R; |
1747 | rr = (r + delay) & CMX_BUFF_MASK; | 1780 | rr = (r + delay - (dsp_poll >> 1)) |
1781 | & CMX_BUFF_MASK; | ||
1748 | /* delete tx-data */ | 1782 | /* delete tx-data */ |
1749 | while (r != rr) { | 1783 | while (r != rr) { |
1750 | q[r] = dsp_silence; | 1784 | q[r] = dsp_silence; |
@@ -1797,14 +1831,16 @@ dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb) | |||
1797 | ww = dsp->tx_R; | 1831 | ww = dsp->tx_R; |
1798 | p = dsp->tx_buff; | 1832 | p = dsp->tx_buff; |
1799 | d = skb->data; | 1833 | d = skb->data; |
1800 | space = ww-w; | 1834 | space = (ww - w - 1) & CMX_BUFF_MASK; |
1801 | if (space <= 0) | ||
1802 | space += CMX_BUFF_SIZE; | ||
1803 | /* write-pointer should not overrun nor reach read pointer */ | 1835 | /* write-pointer should not overrun nor reach read pointer */ |
1804 | if (space-1 < skb->len) | 1836 | if (space < skb->len) { |
1805 | /* write to the space we have left */ | 1837 | /* write to the space we have left */ |
1806 | ww = (ww - 1) & CMX_BUFF_MASK; | 1838 | ww = (ww - 1) & CMX_BUFF_MASK; /* end one byte prior tx_R */ |
1807 | else | 1839 | if (dsp_debug & DEBUG_DSP_CLOCK) |
1840 | printk(KERN_DEBUG "%s: TX overflow space=%d skb->len=" | ||
1841 | "%d, w=0x%04x, ww=0x%04x\n", __func__, space, | ||
1842 | skb->len, w, ww); | ||
1843 | } else | ||
1808 | /* write until all byte are copied */ | 1844 | /* write until all byte are copied */ |
1809 | ww = (w + skb->len) & CMX_BUFF_MASK; | 1845 | ww = (w + skb->len) & CMX_BUFF_MASK; |
1810 | dsp->tx_W = ww; | 1846 | dsp->tx_W = ww; |
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 1dc21d803410..3083338716b2 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c | |||
@@ -191,6 +191,8 @@ dsp_rx_off_member(struct dsp *dsp) | |||
191 | struct mISDN_ctrl_req cq; | 191 | struct mISDN_ctrl_req cq; |
192 | int rx_off = 1; | 192 | int rx_off = 1; |
193 | 193 | ||
194 | memset(&cq, 0, sizeof(cq)); | ||
195 | |||
194 | if (!dsp->features_rx_off) | 196 | if (!dsp->features_rx_off) |
195 | return; | 197 | return; |
196 | 198 | ||
@@ -249,6 +251,32 @@ dsp_rx_off(struct dsp *dsp) | |||
249 | } | 251 | } |
250 | } | 252 | } |
251 | 253 | ||
254 | /* enable "fill empty" feature */ | ||
255 | static void | ||
256 | dsp_fill_empty(struct dsp *dsp) | ||
257 | { | ||
258 | struct mISDN_ctrl_req cq; | ||
259 | |||
260 | memset(&cq, 0, sizeof(cq)); | ||
261 | |||
262 | if (!dsp->ch.peer) { | ||
263 | if (dsp_debug & DEBUG_DSP_CORE) | ||
264 | printk(KERN_DEBUG "%s: no peer, no fill_empty\n", | ||
265 | __func__); | ||
266 | return; | ||
267 | } | ||
268 | cq.op = MISDN_CTRL_FILL_EMPTY; | ||
269 | cq.p1 = 1; | ||
270 | if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { | ||
271 | printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n", | ||
272 | __func__); | ||
273 | return; | ||
274 | } | ||
275 | if (dsp_debug & DEBUG_DSP_CORE) | ||
276 | printk(KERN_DEBUG "%s: %s set fill_empty = 1\n", | ||
277 | __func__, dsp->name); | ||
278 | } | ||
279 | |||
252 | static int | 280 | static int |
253 | dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) | 281 | dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) |
254 | { | 282 | { |
@@ -273,8 +301,9 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb) | |||
273 | if (dsp_debug & DEBUG_DSP_CORE) | 301 | if (dsp_debug & DEBUG_DSP_CORE) |
274 | printk(KERN_DEBUG "%s: start dtmf\n", __func__); | 302 | printk(KERN_DEBUG "%s: start dtmf\n", __func__); |
275 | if (len == sizeof(int)) { | 303 | if (len == sizeof(int)) { |
276 | printk(KERN_NOTICE "changing DTMF Threshold " | 304 | if (dsp_debug & DEBUG_DSP_CORE) |
277 | "to %d\n", *((int *)data)); | 305 | printk(KERN_NOTICE "changing DTMF Threshold " |
306 | "to %d\n", *((int *)data)); | ||
278 | dsp->dtmf.treshold = (*(int *)data) * 10000; | 307 | dsp->dtmf.treshold = (*(int *)data) * 10000; |
279 | } | 308 | } |
280 | /* init goertzel */ | 309 | /* init goertzel */ |
@@ -593,8 +622,6 @@ get_features(struct mISDNchannel *ch) | |||
593 | struct dsp *dsp = container_of(ch, struct dsp, ch); | 622 | struct dsp *dsp = container_of(ch, struct dsp, ch); |
594 | struct mISDN_ctrl_req cq; | 623 | struct mISDN_ctrl_req cq; |
595 | 624 | ||
596 | if (dsp_options & DSP_OPT_NOHARDWARE) | ||
597 | return; | ||
598 | if (!ch->peer) { | 625 | if (!ch->peer) { |
599 | if (dsp_debug & DEBUG_DSP_CORE) | 626 | if (dsp_debug & DEBUG_DSP_CORE) |
600 | printk(KERN_DEBUG "%s: no peer, no features\n", | 627 | printk(KERN_DEBUG "%s: no peer, no features\n", |
@@ -610,6 +637,10 @@ get_features(struct mISDNchannel *ch) | |||
610 | } | 637 | } |
611 | if (cq.op & MISDN_CTRL_RX_OFF) | 638 | if (cq.op & MISDN_CTRL_RX_OFF) |
612 | dsp->features_rx_off = 1; | 639 | dsp->features_rx_off = 1; |
640 | if (cq.op & MISDN_CTRL_FILL_EMPTY) | ||
641 | dsp->features_fill_empty = 1; | ||
642 | if (dsp_options & DSP_OPT_NOHARDWARE) | ||
643 | return; | ||
613 | if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) { | 644 | if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) { |
614 | cq.op = MISDN_CTRL_HW_FEATURES; | 645 | cq.op = MISDN_CTRL_HW_FEATURES; |
615 | *((u_long *)&cq.p1) = (u_long)&dsp->features; | 646 | *((u_long *)&cq.p1) = (u_long)&dsp->features; |
@@ -837,11 +868,14 @@ dsp_function(struct mISDNchannel *ch, struct sk_buff *skb) | |||
837 | } | 868 | } |
838 | if (dsp->hdlc) { | 869 | if (dsp->hdlc) { |
839 | /* hdlc */ | 870 | /* hdlc */ |
840 | spin_lock_irqsave(&dsp_lock, flags); | 871 | if (!dsp->b_active) { |
841 | if (dsp->b_active) { | 872 | ret = -EIO; |
842 | skb_queue_tail(&dsp->sendq, skb); | 873 | break; |
843 | schedule_work(&dsp->workq); | ||
844 | } | 874 | } |
875 | hh->prim = PH_DATA_REQ; | ||
876 | spin_lock_irqsave(&dsp_lock, flags); | ||
877 | skb_queue_tail(&dsp->sendq, skb); | ||
878 | schedule_work(&dsp->workq); | ||
845 | spin_unlock_irqrestore(&dsp_lock, flags); | 879 | spin_unlock_irqrestore(&dsp_lock, flags); |
846 | return 0; | 880 | return 0; |
847 | } | 881 | } |
@@ -865,6 +899,9 @@ dsp_function(struct mISDNchannel *ch, struct sk_buff *skb) | |||
865 | if (dsp->dtmf.hardware || dsp->dtmf.software) | 899 | if (dsp->dtmf.hardware || dsp->dtmf.software) |
866 | dsp_dtmf_goertzel_init(dsp); | 900 | dsp_dtmf_goertzel_init(dsp); |
867 | get_features(ch); | 901 | get_features(ch); |
902 | /* enable fill_empty feature */ | ||
903 | if (dsp->features_fill_empty) | ||
904 | dsp_fill_empty(dsp); | ||
868 | /* send ph_activate */ | 905 | /* send ph_activate */ |
869 | hh->prim = PH_ACTIVATE_REQ; | 906 | hh->prim = PH_ACTIVATE_REQ; |
870 | if (ch->peer) | 907 | if (ch->peer) |
@@ -1105,7 +1142,7 @@ static int dsp_init(void) | |||
1105 | } else { | 1142 | } else { |
1106 | poll = 8; | 1143 | poll = 8; |
1107 | while (poll <= MAX_POLL) { | 1144 | while (poll <= MAX_POLL) { |
1108 | tics = poll * HZ / 8000; | 1145 | tics = (poll * HZ) / 8000; |
1109 | if (tics * 8000 == poll * HZ) { | 1146 | if (tics * 8000 == poll * HZ) { |
1110 | dsp_tics = tics; | 1147 | dsp_tics = tics; |
1111 | dsp_poll = poll; | 1148 | dsp_poll = poll; |
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index 83639be7f7ad..bf999bdc41c3 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c | |||
@@ -75,6 +75,15 @@ static struct device_attribute element_attributes[] = { | |||
75 | __ATTR(args, 0444, attr_show_args, NULL), | 75 | __ATTR(args, 0444, attr_show_args, NULL), |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static void | ||
79 | mISDN_dsp_dev_release(struct device *dev) | ||
80 | { | ||
81 | struct dsp_element_entry *entry = | ||
82 | container_of(dev, struct dsp_element_entry, dev); | ||
83 | list_del(&entry->list); | ||
84 | kfree(entry); | ||
85 | } | ||
86 | |||
78 | int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) | 87 | int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) |
79 | { | 88 | { |
80 | struct dsp_element_entry *entry; | 89 | struct dsp_element_entry *entry; |
@@ -83,13 +92,14 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) | |||
83 | if (!elem) | 92 | if (!elem) |
84 | return -EINVAL; | 93 | return -EINVAL; |
85 | 94 | ||
86 | entry = kzalloc(sizeof(struct dsp_element_entry), GFP_KERNEL); | 95 | entry = kzalloc(sizeof(struct dsp_element_entry), GFP_ATOMIC); |
87 | if (!entry) | 96 | if (!entry) |
88 | return -ENOMEM; | 97 | return -ENOMEM; |
89 | 98 | ||
90 | entry->elem = elem; | 99 | entry->elem = elem; |
91 | 100 | ||
92 | entry->dev.class = elements_class; | 101 | entry->dev.class = elements_class; |
102 | entry->dev.release = mISDN_dsp_dev_release; | ||
93 | dev_set_drvdata(&entry->dev, elem); | 103 | dev_set_drvdata(&entry->dev, elem); |
94 | dev_set_name(&entry->dev, elem->name); | 104 | dev_set_name(&entry->dev, elem->name); |
95 | ret = device_register(&entry->dev); | 105 | ret = device_register(&entry->dev); |
@@ -98,6 +108,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) | |||
98 | __func__, elem->name); | 108 | __func__, elem->name); |
99 | goto err1; | 109 | goto err1; |
100 | } | 110 | } |
111 | list_add_tail(&entry->list, &dsp_elements); | ||
101 | 112 | ||
102 | for (i = 0; i < (sizeof(element_attributes) | 113 | for (i = 0; i < (sizeof(element_attributes) |
103 | / sizeof(struct device_attribute)); ++i) | 114 | / sizeof(struct device_attribute)); ++i) |
@@ -109,14 +120,15 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) | |||
109 | goto err2; | 120 | goto err2; |
110 | } | 121 | } |
111 | 122 | ||
112 | list_add_tail(&entry->list, &dsp_elements); | 123 | #ifdef PIPELINE_DEBUG |
113 | |||
114 | printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name); | 124 | printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name); |
125 | #endif | ||
115 | 126 | ||
116 | return 0; | 127 | return 0; |
117 | 128 | ||
118 | err2: | 129 | err2: |
119 | device_unregister(&entry->dev); | 130 | device_unregister(&entry->dev); |
131 | return ret; | ||
120 | err1: | 132 | err1: |
121 | kfree(entry); | 133 | kfree(entry); |
122 | return ret; | 134 | return ret; |
@@ -132,11 +144,11 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem) | |||
132 | 144 | ||
133 | list_for_each_entry_safe(entry, n, &dsp_elements, list) | 145 | list_for_each_entry_safe(entry, n, &dsp_elements, list) |
134 | if (entry->elem == elem) { | 146 | if (entry->elem == elem) { |
135 | list_del(&entry->list); | ||
136 | device_unregister(&entry->dev); | 147 | device_unregister(&entry->dev); |
137 | kfree(entry); | 148 | #ifdef PIPELINE_DEBUG |
138 | printk(KERN_DEBUG "%s: %s unregistered\n", | 149 | printk(KERN_DEBUG "%s: %s unregistered\n", |
139 | __func__, elem->name); | 150 | __func__, elem->name); |
151 | #endif | ||
140 | return; | 152 | return; |
141 | } | 153 | } |
142 | printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name); | 154 | printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name); |
@@ -173,7 +185,9 @@ void dsp_pipeline_module_exit(void) | |||
173 | kfree(entry); | 185 | kfree(entry); |
174 | } | 186 | } |
175 | 187 | ||
188 | #ifdef PIPELINE_DEBUG | ||
176 | printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__); | 189 | printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__); |
190 | #endif | ||
177 | } | 191 | } |
178 | 192 | ||
179 | int dsp_pipeline_init(struct dsp_pipeline *pipeline) | 193 | int dsp_pipeline_init(struct dsp_pipeline *pipeline) |
@@ -239,7 +253,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) | |||
239 | if (!len) | 253 | if (!len) |
240 | return 0; | 254 | return 0; |
241 | 255 | ||
242 | dup = kmalloc(len + 1, GFP_KERNEL); | 256 | dup = kmalloc(len + 1, GFP_ATOMIC); |
243 | if (!dup) | 257 | if (!dup) |
244 | return 0; | 258 | return 0; |
245 | strcpy(dup, cfg); | 259 | strcpy(dup, cfg); |
@@ -256,9 +270,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) | |||
256 | elem = entry->elem; | 270 | elem = entry->elem; |
257 | 271 | ||
258 | pipeline_entry = kmalloc(sizeof(struct | 272 | pipeline_entry = kmalloc(sizeof(struct |
259 | dsp_pipeline_entry), GFP_KERNEL); | 273 | dsp_pipeline_entry), GFP_ATOMIC); |
260 | if (!pipeline_entry) { | 274 | if (!pipeline_entry) { |
261 | printk(KERN_DEBUG "%s: failed to add " | 275 | printk(KERN_ERR "%s: failed to add " |
262 | "entry to pipeline: %s (out of " | 276 | "entry to pipeline: %s (out of " |
263 | "memory)\n", __func__, elem->name); | 277 | "memory)\n", __func__, elem->name); |
264 | incomplete = 1; | 278 | incomplete = 1; |
@@ -286,7 +300,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) | |||
286 | args : ""); | 300 | args : ""); |
287 | #endif | 301 | #endif |
288 | } else { | 302 | } else { |
289 | printk(KERN_DEBUG "%s: failed " | 303 | printk(KERN_ERR "%s: failed " |
290 | "to add entry to pipeline: " | 304 | "to add entry to pipeline: " |
291 | "%s (new() returned NULL)\n", | 305 | "%s (new() returned NULL)\n", |
292 | __func__, elem->name); | 306 | __func__, elem->name); |
@@ -301,7 +315,7 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) | |||
301 | if (found) | 315 | if (found) |
302 | found = 0; | 316 | found = 0; |
303 | else { | 317 | else { |
304 | printk(KERN_DEBUG "%s: element not found, skipping: " | 318 | printk(KERN_ERR "%s: element not found, skipping: " |
305 | "%s\n", __func__, name); | 319 | "%s\n", __func__, name); |
306 | incomplete = 1; | 320 | incomplete = 1; |
307 | } | 321 | } |
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c index 2596fba4e614..ab1168a110ae 100644 --- a/drivers/isdn/mISDN/hwchannel.c +++ b/drivers/isdn/mISDN/hwchannel.c | |||
@@ -50,9 +50,6 @@ bchannel_bh(struct work_struct *ws) | |||
50 | 50 | ||
51 | if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) { | 51 | if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) { |
52 | while ((skb = skb_dequeue(&bch->rqueue))) { | 52 | while ((skb = skb_dequeue(&bch->rqueue))) { |
53 | if (bch->rcount >= 64) | ||
54 | printk(KERN_WARNING "B-channel %p receive " | ||
55 | "queue if full, but empties...\n", bch); | ||
56 | bch->rcount--; | 53 | bch->rcount--; |
57 | if (likely(bch->ch.peer)) { | 54 | if (likely(bch->ch.peer)) { |
58 | err = bch->ch.recv(bch->ch.peer, skb); | 55 | err = bch->ch.recv(bch->ch.peer, skb); |
@@ -169,6 +166,25 @@ recv_Dchannel(struct dchannel *dch) | |||
169 | EXPORT_SYMBOL(recv_Dchannel); | 166 | EXPORT_SYMBOL(recv_Dchannel); |
170 | 167 | ||
171 | void | 168 | void |
169 | recv_Echannel(struct dchannel *ech, struct dchannel *dch) | ||
170 | { | ||
171 | struct mISDNhead *hh; | ||
172 | |||
173 | if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */ | ||
174 | dev_kfree_skb(ech->rx_skb); | ||
175 | ech->rx_skb = NULL; | ||
176 | return; | ||
177 | } | ||
178 | hh = mISDN_HEAD_P(ech->rx_skb); | ||
179 | hh->prim = PH_DATA_E_IND; | ||
180 | hh->id = get_sapi_tei(ech->rx_skb->data); | ||
181 | skb_queue_tail(&dch->rqueue, ech->rx_skb); | ||
182 | ech->rx_skb = NULL; | ||
183 | schedule_event(dch, FLG_RECVQUEUE); | ||
184 | } | ||
185 | EXPORT_SYMBOL(recv_Echannel); | ||
186 | |||
187 | void | ||
172 | recv_Bchannel(struct bchannel *bch) | 188 | recv_Bchannel(struct bchannel *bch) |
173 | { | 189 | { |
174 | struct mISDNhead *hh; | 190 | struct mISDNhead *hh; |
@@ -177,8 +193,10 @@ recv_Bchannel(struct bchannel *bch) | |||
177 | hh->prim = PH_DATA_IND; | 193 | hh->prim = PH_DATA_IND; |
178 | hh->id = MISDN_ID_ANY; | 194 | hh->id = MISDN_ID_ANY; |
179 | if (bch->rcount >= 64) { | 195 | if (bch->rcount >= 64) { |
180 | dev_kfree_skb(bch->rx_skb); | 196 | printk(KERN_WARNING "B-channel %p receive queue overflow, " |
181 | bch->rx_skb = NULL; | 197 | "fushing!\n", bch); |
198 | skb_queue_purge(&bch->rqueue); | ||
199 | bch->rcount = 0; | ||
182 | return; | 200 | return; |
183 | } | 201 | } |
184 | bch->rcount++; | 202 | bch->rcount++; |
@@ -200,8 +218,10 @@ void | |||
200 | recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb) | 218 | recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb) |
201 | { | 219 | { |
202 | if (bch->rcount >= 64) { | 220 | if (bch->rcount >= 64) { |
203 | dev_kfree_skb(skb); | 221 | printk(KERN_WARNING "B-channel %p receive queue overflow, " |
204 | return; | 222 | "fushing!\n", bch); |
223 | skb_queue_purge(&bch->rqueue); | ||
224 | bch->rcount = 0; | ||
205 | } | 225 | } |
206 | bch->rcount++; | 226 | bch->rcount++; |
207 | skb_queue_tail(&bch->rqueue, skb); | 227 | skb_queue_tail(&bch->rqueue, skb); |
@@ -245,8 +265,12 @@ confirm_Bsend(struct bchannel *bch) | |||
245 | { | 265 | { |
246 | struct sk_buff *skb; | 266 | struct sk_buff *skb; |
247 | 267 | ||
248 | if (bch->rcount >= 64) | 268 | if (bch->rcount >= 64) { |
249 | return; | 269 | printk(KERN_WARNING "B-channel %p receive queue overflow, " |
270 | "fushing!\n", bch); | ||
271 | skb_queue_purge(&bch->rqueue); | ||
272 | bch->rcount = 0; | ||
273 | } | ||
250 | skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb), | 274 | skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb), |
251 | 0, NULL, GFP_ATOMIC); | 275 | 0, NULL, GFP_ATOMIC); |
252 | if (!skb) { | 276 | if (!skb) { |
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index 0884dd6892f8..abe574989572 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c | |||
@@ -777,6 +777,8 @@ fail: | |||
777 | static void | 777 | static void |
778 | l1oip_socket_close(struct l1oip *hc) | 778 | l1oip_socket_close(struct l1oip *hc) |
779 | { | 779 | { |
780 | struct dchannel *dch = hc->chan[hc->d_idx].dch; | ||
781 | |||
780 | /* kill thread */ | 782 | /* kill thread */ |
781 | if (hc->socket_thread) { | 783 | if (hc->socket_thread) { |
782 | if (debug & DEBUG_L1OIP_SOCKET) | 784 | if (debug & DEBUG_L1OIP_SOCKET) |
@@ -785,6 +787,16 @@ l1oip_socket_close(struct l1oip *hc) | |||
785 | send_sig(SIGTERM, hc->socket_thread, 0); | 787 | send_sig(SIGTERM, hc->socket_thread, 0); |
786 | wait_for_completion(&hc->socket_complete); | 788 | wait_for_completion(&hc->socket_complete); |
787 | } | 789 | } |
790 | |||
791 | /* if active, we send up a PH_DEACTIVATE and deactivate */ | ||
792 | if (test_bit(FLG_ACTIVE, &dch->Flags)) { | ||
793 | if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET)) | ||
794 | printk(KERN_DEBUG "%s: interface become deactivated " | ||
795 | "due to timeout\n", __func__); | ||
796 | test_and_clear_bit(FLG_ACTIVE, &dch->Flags); | ||
797 | _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, | ||
798 | NULL, GFP_ATOMIC); | ||
799 | } | ||
788 | } | 800 | } |
789 | 801 | ||
790 | static int | 802 | static int |
@@ -944,7 +956,8 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) | |||
944 | 956 | ||
945 | switch (cq->op) { | 957 | switch (cq->op) { |
946 | case MISDN_CTRL_GETOP: | 958 | case MISDN_CTRL_GETOP: |
947 | cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER; | 959 | cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER |
960 | | MISDN_CTRL_GETPEER; | ||
948 | break; | 961 | break; |
949 | case MISDN_CTRL_SETPEER: | 962 | case MISDN_CTRL_SETPEER: |
950 | hc->remoteip = (u32)cq->p1; | 963 | hc->remoteip = (u32)cq->p1; |
@@ -964,6 +977,13 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) | |||
964 | hc->remoteip = 0; | 977 | hc->remoteip = 0; |
965 | l1oip_socket_open(hc); | 978 | l1oip_socket_open(hc); |
966 | break; | 979 | break; |
980 | case MISDN_CTRL_GETPEER: | ||
981 | if (debug & DEBUG_L1OIP_SOCKET) | ||
982 | printk(KERN_DEBUG "%s: getting ip address.\n", | ||
983 | __func__); | ||
984 | cq->p1 = hc->remoteip; | ||
985 | cq->p2 = hc->remoteport | (hc->localport << 16); | ||
986 | break; | ||
967 | default: | 987 | default: |
968 | printk(KERN_WARNING "%s: unknown Op %x\n", | 988 | printk(KERN_WARNING "%s: unknown Op %x\n", |
969 | __func__, cq->op); | 989 | __func__, cq->op); |
@@ -1413,7 +1433,8 @@ init_card(struct l1oip *hc, int pri, int bundle) | |||
1413 | hc->chan[i + ch].bch = bch; | 1433 | hc->chan[i + ch].bch = bch; |
1414 | set_channelmap(bch->nr, dch->dev.channelmap); | 1434 | set_channelmap(bch->nr, dch->dev.channelmap); |
1415 | } | 1435 | } |
1416 | ret = mISDN_register_device(&dch->dev, hc->name); | 1436 | /* TODO: create a parent device for this driver */ |
1437 | ret = mISDN_register_device(&dch->dev, NULL, hc->name); | ||
1417 | if (ret) | 1438 | if (ret) |
1418 | return ret; | 1439 | return ret; |
1419 | hc->registered = 1; | 1440 | hc->registered = 1; |
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index b73e952d12cf..e826eeb1ecec 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c | |||
@@ -101,7 +101,7 @@ l1m_debug(struct FsmInst *fi, char *fmt, ...) | |||
101 | va_list va; | 101 | va_list va; |
102 | 102 | ||
103 | va_start(va, fmt); | 103 | va_start(va, fmt); |
104 | printk(KERN_DEBUG "%s: ", l1->dch->dev.name); | 104 | printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev)); |
105 | vprintk(fmt, va); | 105 | vprintk(fmt, va); |
106 | printk("\n"); | 106 | printk("\n"); |
107 | va_end(va); | 107 | va_end(va); |
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 37a2de18cfd0..508945d1b9c1 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c | |||
@@ -381,7 +381,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
381 | memcpy(di.channelmap, dev->channelmap, | 381 | memcpy(di.channelmap, dev->channelmap, |
382 | sizeof(di.channelmap)); | 382 | sizeof(di.channelmap)); |
383 | di.nrbchan = dev->nrbchan; | 383 | di.nrbchan = dev->nrbchan; |
384 | strcpy(di.name, dev->name); | 384 | strcpy(di.name, dev_name(&dev->dev)); |
385 | if (copy_to_user((void __user *)arg, &di, sizeof(di))) | 385 | if (copy_to_user((void __user *)arg, &di, sizeof(di))) |
386 | err = -EFAULT; | 386 | err = -EFAULT; |
387 | } else | 387 | } else |
@@ -460,6 +460,8 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
460 | { | 460 | { |
461 | struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; | 461 | struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr; |
462 | struct sock *sk = sock->sk; | 462 | struct sock *sk = sock->sk; |
463 | struct hlist_node *node; | ||
464 | struct sock *csk; | ||
463 | int err = 0; | 465 | int err = 0; |
464 | 466 | ||
465 | if (*debug & DEBUG_SOCKET) | 467 | if (*debug & DEBUG_SOCKET) |
@@ -480,6 +482,26 @@ data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
480 | err = -ENODEV; | 482 | err = -ENODEV; |
481 | goto done; | 483 | goto done; |
482 | } | 484 | } |
485 | |||
486 | if (sk->sk_protocol < ISDN_P_B_START) { | ||
487 | read_lock_bh(&data_sockets.lock); | ||
488 | sk_for_each(csk, node, &data_sockets.head) { | ||
489 | if (sk == csk) | ||
490 | continue; | ||
491 | if (_pms(csk)->dev != _pms(sk)->dev) | ||
492 | continue; | ||
493 | if (csk->sk_protocol >= ISDN_P_B_START) | ||
494 | continue; | ||
495 | if (IS_ISDN_P_TE(csk->sk_protocol) | ||
496 | == IS_ISDN_P_TE(sk->sk_protocol)) | ||
497 | continue; | ||
498 | read_unlock_bh(&data_sockets.lock); | ||
499 | err = -EBUSY; | ||
500 | goto done; | ||
501 | } | ||
502 | read_unlock_bh(&data_sockets.lock); | ||
503 | } | ||
504 | |||
483 | _pms(sk)->ch.send = mISDN_send; | 505 | _pms(sk)->ch.send = mISDN_send; |
484 | _pms(sk)->ch.ctrl = mISDN_ctrl; | 506 | _pms(sk)->ch.ctrl = mISDN_ctrl; |
485 | 507 | ||
@@ -639,12 +661,27 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
639 | memcpy(di.channelmap, dev->channelmap, | 661 | memcpy(di.channelmap, dev->channelmap, |
640 | sizeof(di.channelmap)); | 662 | sizeof(di.channelmap)); |
641 | di.nrbchan = dev->nrbchan; | 663 | di.nrbchan = dev->nrbchan; |
642 | strcpy(di.name, dev->name); | 664 | strcpy(di.name, dev_name(&dev->dev)); |
643 | if (copy_to_user((void __user *)arg, &di, sizeof(di))) | 665 | if (copy_to_user((void __user *)arg, &di, sizeof(di))) |
644 | err = -EFAULT; | 666 | err = -EFAULT; |
645 | } else | 667 | } else |
646 | err = -ENODEV; | 668 | err = -ENODEV; |
647 | break; | 669 | break; |
670 | case IMSETDEVNAME: | ||
671 | { | ||
672 | struct mISDN_devrename dn; | ||
673 | if (copy_from_user(&dn, (void __user *)arg, | ||
674 | sizeof(dn))) { | ||
675 | err = -EFAULT; | ||
676 | break; | ||
677 | } | ||
678 | dev = get_mdevice(dn.id); | ||
679 | if (dev) | ||
680 | err = device_rename(&dev->dev, dn.name); | ||
681 | else | ||
682 | err = -ENODEV; | ||
683 | } | ||
684 | break; | ||
648 | default: | 685 | default: |
649 | err = -EINVAL; | 686 | err = -EINVAL; |
650 | } | 687 | } |
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index d55b14ae4e99..e2f45019ebf0 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c | |||
@@ -172,7 +172,8 @@ send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb) | |||
172 | else | 172 | else |
173 | printk(KERN_WARNING | 173 | printk(KERN_WARNING |
174 | "%s: dev(%s) prim(%x) id(%x) no channel\n", | 174 | "%s: dev(%s) prim(%x) id(%x) no channel\n", |
175 | __func__, st->dev->name, hh->prim, hh->id); | 175 | __func__, dev_name(&st->dev->dev), hh->prim, |
176 | hh->id); | ||
176 | } else if (lm == 0x8) { | 177 | } else if (lm == 0x8) { |
177 | WARN_ON(lm == 0x8); | 178 | WARN_ON(lm == 0x8); |
178 | ch = get_channel4id(st, hh->id); | 179 | ch = get_channel4id(st, hh->id); |
@@ -181,11 +182,12 @@ send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb) | |||
181 | else | 182 | else |
182 | printk(KERN_WARNING | 183 | printk(KERN_WARNING |
183 | "%s: dev(%s) prim(%x) id(%x) no channel\n", | 184 | "%s: dev(%s) prim(%x) id(%x) no channel\n", |
184 | __func__, st->dev->name, hh->prim, hh->id); | 185 | __func__, dev_name(&st->dev->dev), hh->prim, |
186 | hh->id); | ||
185 | } else { | 187 | } else { |
186 | /* broadcast not handled yet */ | 188 | /* broadcast not handled yet */ |
187 | printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n", | 189 | printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n", |
188 | __func__, st->dev->name, hh->prim); | 190 | __func__, dev_name(&st->dev->dev), hh->prim); |
189 | } | 191 | } |
190 | return -ESRCH; | 192 | return -ESRCH; |
191 | } | 193 | } |
@@ -209,7 +211,8 @@ mISDNStackd(void *data) | |||
209 | unlock_kernel(); | 211 | unlock_kernel(); |
210 | #endif | 212 | #endif |
211 | if (*debug & DEBUG_MSG_THREAD) | 213 | if (*debug & DEBUG_MSG_THREAD) |
212 | printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name); | 214 | printk(KERN_DEBUG "mISDNStackd %s started\n", |
215 | dev_name(&st->dev->dev)); | ||
213 | 216 | ||
214 | if (st->notify != NULL) { | 217 | if (st->notify != NULL) { |
215 | complete(st->notify); | 218 | complete(st->notify); |
@@ -245,7 +248,7 @@ mISDNStackd(void *data) | |||
245 | printk(KERN_DEBUG | 248 | printk(KERN_DEBUG |
246 | "%s: %s prim(%x) id(%x) " | 249 | "%s: %s prim(%x) id(%x) " |
247 | "send call(%d)\n", | 250 | "send call(%d)\n", |
248 | __func__, st->dev->name, | 251 | __func__, dev_name(&st->dev->dev), |
249 | mISDN_HEAD_PRIM(skb), | 252 | mISDN_HEAD_PRIM(skb), |
250 | mISDN_HEAD_ID(skb), err); | 253 | mISDN_HEAD_ID(skb), err); |
251 | dev_kfree_skb(skb); | 254 | dev_kfree_skb(skb); |
@@ -288,7 +291,7 @@ mISDNStackd(void *data) | |||
288 | mISDN_STACK_ACTION_MASK)); | 291 | mISDN_STACK_ACTION_MASK)); |
289 | if (*debug & DEBUG_MSG_THREAD) | 292 | if (*debug & DEBUG_MSG_THREAD) |
290 | printk(KERN_DEBUG "%s: %s wake status %08lx\n", | 293 | printk(KERN_DEBUG "%s: %s wake status %08lx\n", |
291 | __func__, st->dev->name, st->status); | 294 | __func__, dev_name(&st->dev->dev), st->status); |
292 | test_and_set_bit(mISDN_STACK_ACTIVE, &st->status); | 295 | test_and_set_bit(mISDN_STACK_ACTIVE, &st->status); |
293 | 296 | ||
294 | test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status); | 297 | test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status); |
@@ -303,15 +306,16 @@ mISDNStackd(void *data) | |||
303 | #ifdef MISDN_MSG_STATS | 306 | #ifdef MISDN_MSG_STATS |
304 | printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d " | 307 | printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d " |
305 | "msg %d sleep %d stopped\n", | 308 | "msg %d sleep %d stopped\n", |
306 | st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt); | 309 | dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, |
310 | st->stopped_cnt); | ||
307 | printk(KERN_DEBUG | 311 | printk(KERN_DEBUG |
308 | "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", | 312 | "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", |
309 | st->dev->name, st->thread->utime, st->thread->stime); | 313 | dev_name(&st->dev->dev), st->thread->utime, st->thread->stime); |
310 | printk(KERN_DEBUG | 314 | printk(KERN_DEBUG |
311 | "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", | 315 | "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", |
312 | st->dev->name, st->thread->nvcsw, st->thread->nivcsw); | 316 | dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw); |
313 | printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n", | 317 | printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n", |
314 | st->dev->name); | 318 | dev_name(&st->dev->dev)); |
315 | #endif | 319 | #endif |
316 | test_and_set_bit(mISDN_STACK_KILLED, &st->status); | 320 | test_and_set_bit(mISDN_STACK_KILLED, &st->status); |
317 | test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); | 321 | test_and_clear_bit(mISDN_STACK_RUNNING, &st->status); |
@@ -401,15 +405,16 @@ create_stack(struct mISDNdevice *dev) | |||
401 | newst->own.send = mISDN_queue_message; | 405 | newst->own.send = mISDN_queue_message; |
402 | newst->own.recv = mISDN_queue_message; | 406 | newst->own.recv = mISDN_queue_message; |
403 | if (*debug & DEBUG_CORE_FUNC) | 407 | if (*debug & DEBUG_CORE_FUNC) |
404 | printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name); | 408 | printk(KERN_DEBUG "%s: st(%s)\n", __func__, |
409 | dev_name(&newst->dev->dev)); | ||
405 | newst->notify = &done; | 410 | newst->notify = &done; |
406 | newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s", | 411 | newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s", |
407 | newst->dev->name); | 412 | dev_name(&newst->dev->dev)); |
408 | if (IS_ERR(newst->thread)) { | 413 | if (IS_ERR(newst->thread)) { |
409 | err = PTR_ERR(newst->thread); | 414 | err = PTR_ERR(newst->thread); |
410 | printk(KERN_ERR | 415 | printk(KERN_ERR |
411 | "mISDN:cannot create kernel thread for %s (%d)\n", | 416 | "mISDN:cannot create kernel thread for %s (%d)\n", |
412 | newst->dev->name, err); | 417 | dev_name(&newst->dev->dev), err); |
413 | delete_teimanager(dev->teimgr); | 418 | delete_teimanager(dev->teimgr); |
414 | kfree(newst); | 419 | kfree(newst); |
415 | } else | 420 | } else |
@@ -428,29 +433,21 @@ connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch, | |||
428 | 433 | ||
429 | if (*debug & DEBUG_CORE_FUNC) | 434 | if (*debug & DEBUG_CORE_FUNC) |
430 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", | 435 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", |
431 | __func__, dev->name, protocol, adr->dev, adr->channel, | 436 | __func__, dev_name(&dev->dev), protocol, adr->dev, |
432 | adr->sapi, adr->tei); | 437 | adr->channel, adr->sapi, adr->tei); |
433 | switch (protocol) { | 438 | switch (protocol) { |
434 | case ISDN_P_NT_S0: | 439 | case ISDN_P_NT_S0: |
435 | case ISDN_P_NT_E1: | 440 | case ISDN_P_NT_E1: |
436 | case ISDN_P_TE_S0: | 441 | case ISDN_P_TE_S0: |
437 | case ISDN_P_TE_E1: | 442 | case ISDN_P_TE_E1: |
438 | #ifdef PROTOCOL_CHECK | ||
439 | /* this should be enhanced */ | ||
440 | if (!list_empty(&dev->D.st->layer2) | ||
441 | && dev->D.protocol != protocol) | ||
442 | return -EBUSY; | ||
443 | if (!hlist_empty(&dev->D.st->l1sock.head) | ||
444 | && dev->D.protocol != protocol) | ||
445 | return -EBUSY; | ||
446 | #endif | ||
447 | ch->recv = mISDN_queue_message; | 443 | ch->recv = mISDN_queue_message; |
448 | ch->peer = &dev->D.st->own; | 444 | ch->peer = &dev->D.st->own; |
449 | ch->st = dev->D.st; | 445 | ch->st = dev->D.st; |
450 | rq.protocol = protocol; | 446 | rq.protocol = protocol; |
451 | rq.adr.channel = 0; | 447 | rq.adr.channel = adr->channel; |
452 | err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); | 448 | err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); |
453 | printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err); | 449 | printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err, |
450 | dev->id); | ||
454 | if (err) | 451 | if (err) |
455 | return err; | 452 | return err; |
456 | write_lock_bh(&dev->D.st->l1sock.lock); | 453 | write_lock_bh(&dev->D.st->l1sock.lock); |
@@ -473,7 +470,7 @@ connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch, | |||
473 | 470 | ||
474 | if (*debug & DEBUG_CORE_FUNC) | 471 | if (*debug & DEBUG_CORE_FUNC) |
475 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", | 472 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", |
476 | __func__, dev->name, protocol, | 473 | __func__, dev_name(&dev->dev), protocol, |
477 | adr->dev, adr->channel, adr->sapi, | 474 | adr->dev, adr->channel, adr->sapi, |
478 | adr->tei); | 475 | adr->tei); |
479 | ch->st = dev->D.st; | 476 | ch->st = dev->D.st; |
@@ -529,7 +526,7 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, | |||
529 | 526 | ||
530 | if (*debug & DEBUG_CORE_FUNC) | 527 | if (*debug & DEBUG_CORE_FUNC) |
531 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", | 528 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", |
532 | __func__, dev->name, protocol, | 529 | __func__, dev_name(&dev->dev), protocol, |
533 | adr->dev, adr->channel, adr->sapi, | 530 | adr->dev, adr->channel, adr->sapi, |
534 | adr->tei); | 531 | adr->tei); |
535 | rq.protocol = ISDN_P_TE_S0; | 532 | rq.protocol = ISDN_P_TE_S0; |
@@ -541,15 +538,6 @@ create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch, | |||
541 | if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) | 538 | if (dev->Dprotocols & (1 << ISDN_P_NT_E1)) |
542 | rq.protocol = ISDN_P_NT_E1; | 539 | rq.protocol = ISDN_P_NT_E1; |
543 | case ISDN_P_LAPD_TE: | 540 | case ISDN_P_LAPD_TE: |
544 | #ifdef PROTOCOL_CHECK | ||
545 | /* this should be enhanced */ | ||
546 | if (!list_empty(&dev->D.st->layer2) | ||
547 | && dev->D.protocol != protocol) | ||
548 | return -EBUSY; | ||
549 | if (!hlist_empty(&dev->D.st->l1sock.head) | ||
550 | && dev->D.protocol != protocol) | ||
551 | return -EBUSY; | ||
552 | #endif | ||
553 | ch->recv = mISDN_queue_message; | 541 | ch->recv = mISDN_queue_message; |
554 | ch->peer = &dev->D.st->own; | 542 | ch->peer = &dev->D.st->own; |
555 | ch->st = dev->D.st; | 543 | ch->st = dev->D.st; |
@@ -590,7 +578,7 @@ delete_channel(struct mISDNchannel *ch) | |||
590 | } | 578 | } |
591 | if (*debug & DEBUG_CORE_FUNC) | 579 | if (*debug & DEBUG_CORE_FUNC) |
592 | printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__, | 580 | printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__, |
593 | ch->st->dev->name, ch->protocol); | 581 | dev_name(&ch->st->dev->dev), ch->protocol); |
594 | if (ch->protocol >= ISDN_P_B_START) { | 582 | if (ch->protocol >= ISDN_P_B_START) { |
595 | if (ch->peer) { | 583 | if (ch->peer) { |
596 | ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL); | 584 | ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL); |
@@ -643,7 +631,7 @@ delete_stack(struct mISDNdevice *dev) | |||
643 | 631 | ||
644 | if (*debug & DEBUG_CORE_FUNC) | 632 | if (*debug & DEBUG_CORE_FUNC) |
645 | printk(KERN_DEBUG "%s: st(%s)\n", __func__, | 633 | printk(KERN_DEBUG "%s: st(%s)\n", __func__, |
646 | st->dev->name); | 634 | dev_name(&st->dev->dev)); |
647 | if (dev->teimgr) | 635 | if (dev->teimgr) |
648 | delete_teimanager(dev->teimgr); | 636 | delete_teimanager(dev->teimgr); |
649 | if (st->thread) { | 637 | if (st->thread) { |
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 5c43d19e7c11..b452dead8fd0 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c | |||
@@ -968,9 +968,9 @@ create_teimgr(struct manager *mgr, struct channel_req *crq) | |||
968 | 968 | ||
969 | if (*debug & DEBUG_L2_TEI) | 969 | if (*debug & DEBUG_L2_TEI) |
970 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", | 970 | printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", |
971 | __func__, mgr->ch.st->dev->name, crq->protocol, | 971 | __func__, dev_name(&mgr->ch.st->dev->dev), |
972 | crq->adr.dev, crq->adr.channel, crq->adr.sapi, | 972 | crq->protocol, crq->adr.dev, crq->adr.channel, |
973 | crq->adr.tei); | 973 | crq->adr.sapi, crq->adr.tei); |
974 | if (crq->adr.sapi != 0) /* not supported yet */ | 974 | if (crq->adr.sapi != 0) /* not supported yet */ |
975 | return -EINVAL; | 975 | return -EINVAL; |
976 | if (crq->adr.tei > GROUP_TEI) | 976 | if (crq->adr.tei > GROUP_TEI) |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index e7fb7d2fcbfc..a4a1ae214630 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -63,6 +63,12 @@ config LEDS_WRAP | |||
63 | help | 63 | help |
64 | This option enables support for the PCEngines WRAP programmable LEDs. | 64 | This option enables support for the PCEngines WRAP programmable LEDs. |
65 | 65 | ||
66 | config LEDS_ALIX2 | ||
67 | tristate "LED Support for ALIX.2 and ALIX.3 series" | ||
68 | depends on LEDS_CLASS && X86 && EXPERIMENTAL | ||
69 | help | ||
70 | This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. | ||
71 | |||
66 | config LEDS_H1940 | 72 | config LEDS_H1940 |
67 | tristate "LED Support for iPAQ H1940 device" | 73 | tristate "LED Support for iPAQ H1940 device" |
68 | depends on LEDS_CLASS && ARCH_H1940 | 74 | depends on LEDS_CLASS && ARCH_H1940 |
@@ -77,7 +83,7 @@ config LEDS_COBALT_QUBE | |||
77 | 83 | ||
78 | config LEDS_COBALT_RAQ | 84 | config LEDS_COBALT_RAQ |
79 | bool "LED Support for the Cobalt Raq series" | 85 | bool "LED Support for the Cobalt Raq series" |
80 | depends on LEDS_CLASS && MIPS_COBALT | 86 | depends on LEDS_CLASS=y && MIPS_COBALT |
81 | select LEDS_TRIGGERS | 87 | select LEDS_TRIGGERS |
82 | help | 88 | help |
83 | This option enables support for the Cobalt Raq series LEDs. | 89 | This option enables support for the Cobalt Raq series LEDs. |
@@ -158,6 +164,13 @@ config LEDS_PCA955X | |||
158 | LED driver chips accessed via the I2C bus. Supported | 164 | LED driver chips accessed via the I2C bus. Supported |
159 | devices include PCA9550, PCA9551, PCA9552, and PCA9553. | 165 | devices include PCA9550, PCA9551, PCA9552, and PCA9553. |
160 | 166 | ||
167 | config LEDS_WM8350 | ||
168 | tristate "LED Support for WM8350 AudioPlus PMIC" | ||
169 | depends on LEDS_CLASS && MFD_WM8350 | ||
170 | help | ||
171 | This option enables support for LEDs driven by the Wolfson | ||
172 | Microelectronics WM8350 AudioPlus PMIC. | ||
173 | |||
161 | config LEDS_DA903X | 174 | config LEDS_DA903X |
162 | tristate "LED Support for DA9030/DA9034 PMIC" | 175 | tristate "LED Support for DA9030/DA9034 PMIC" |
163 | depends on LEDS_CLASS && PMIC_DA903X | 176 | depends on LEDS_CLASS && PMIC_DA903X |
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index e1967a29850e..bc247cb02e82 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile | |||
@@ -11,6 +11,7 @@ obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o | |||
11 | obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o | 11 | obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o |
12 | obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o | 12 | obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o |
13 | obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o | 13 | obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o |
14 | obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o | ||
14 | obj-$(CONFIG_LEDS_H1940) += leds-h1940.o | 15 | obj-$(CONFIG_LEDS_H1940) += leds-h1940.o |
15 | obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o | 16 | obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o |
16 | obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o | 17 | obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o |
@@ -23,6 +24,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o | |||
23 | obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o | 24 | obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o |
24 | obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o | 25 | obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o |
25 | obj-$(CONFIG_LEDS_HP_DISK) += leds-hp-disk.o | 26 | obj-$(CONFIG_LEDS_HP_DISK) += leds-hp-disk.o |
27 | obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o | ||
26 | 28 | ||
27 | # LED Triggers | 29 | # LED Triggers |
28 | obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o | 30 | obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 6c4a326176d7..52f82e3ea13a 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -91,9 +91,29 @@ void led_classdev_resume(struct led_classdev *led_cdev) | |||
91 | } | 91 | } |
92 | EXPORT_SYMBOL_GPL(led_classdev_resume); | 92 | EXPORT_SYMBOL_GPL(led_classdev_resume); |
93 | 93 | ||
94 | static int led_suspend(struct device *dev, pm_message_t state) | ||
95 | { | ||
96 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
97 | |||
98 | if (led_cdev->flags & LED_CORE_SUSPENDRESUME) | ||
99 | led_classdev_suspend(led_cdev); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int led_resume(struct device *dev) | ||
105 | { | ||
106 | struct led_classdev *led_cdev = dev_get_drvdata(dev); | ||
107 | |||
108 | if (led_cdev->flags & LED_CORE_SUSPENDRESUME) | ||
109 | led_classdev_resume(led_cdev); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
94 | /** | 114 | /** |
95 | * led_classdev_register - register a new object of led_classdev class. | 115 | * led_classdev_register - register a new object of led_classdev class. |
96 | * @dev: The device to register. | 116 | * @parent: The device to register. |
97 | * @led_cdev: the led_classdev structure for this device. | 117 | * @led_cdev: the led_classdev structure for this device. |
98 | */ | 118 | */ |
99 | int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | 119 | int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) |
@@ -174,6 +194,8 @@ static int __init leds_init(void) | |||
174 | leds_class = class_create(THIS_MODULE, "leds"); | 194 | leds_class = class_create(THIS_MODULE, "leds"); |
175 | if (IS_ERR(leds_class)) | 195 | if (IS_ERR(leds_class)) |
176 | return PTR_ERR(leds_class); | 196 | return PTR_ERR(leds_class); |
197 | leds_class->suspend = led_suspend; | ||
198 | leds_class->resume = led_resume; | ||
177 | return 0; | 199 | return 0; |
178 | } | 200 | } |
179 | 201 | ||
diff --git a/drivers/leds/leds-alix2.c b/drivers/leds/leds-alix2.c new file mode 100644 index 000000000000..ddbd7730dfc8 --- /dev/null +++ b/drivers/leds/leds-alix2.c | |||
@@ -0,0 +1,181 @@ | |||
1 | /* | ||
2 | * LEDs driver for PCEngines ALIX.2 and ALIX.3 | ||
3 | * | ||
4 | * Copyright (C) 2008 Constantin Baranov <const@mimas.ru> | ||
5 | */ | ||
6 | |||
7 | #include <linux/err.h> | ||
8 | #include <linux/io.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/leds.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <linux/string.h> | ||
14 | |||
15 | static int force = 0; | ||
16 | module_param(force, bool, 0444); | ||
17 | MODULE_PARM_DESC(force, "Assume system has ALIX.2 style LEDs"); | ||
18 | |||
19 | struct alix_led { | ||
20 | struct led_classdev cdev; | ||
21 | unsigned short port; | ||
22 | unsigned int on_value; | ||
23 | unsigned int off_value; | ||
24 | }; | ||
25 | |||
26 | static void alix_led_set(struct led_classdev *led_cdev, | ||
27 | enum led_brightness brightness) | ||
28 | { | ||
29 | struct alix_led *led_dev = | ||
30 | container_of(led_cdev, struct alix_led, cdev); | ||
31 | |||
32 | if (brightness) | ||
33 | outl(led_dev->on_value, led_dev->port); | ||
34 | else | ||
35 | outl(led_dev->off_value, led_dev->port); | ||
36 | } | ||
37 | |||
38 | static struct alix_led alix_leds[] = { | ||
39 | { | ||
40 | .cdev = { | ||
41 | .name = "alix:1", | ||
42 | .brightness_set = alix_led_set, | ||
43 | }, | ||
44 | .port = 0x6100, | ||
45 | .on_value = 1 << 22, | ||
46 | .off_value = 1 << 6, | ||
47 | }, | ||
48 | { | ||
49 | .cdev = { | ||
50 | .name = "alix:2", | ||
51 | .brightness_set = alix_led_set, | ||
52 | }, | ||
53 | .port = 0x6180, | ||
54 | .on_value = 1 << 25, | ||
55 | .off_value = 1 << 9, | ||
56 | }, | ||
57 | { | ||
58 | .cdev = { | ||
59 | .name = "alix:3", | ||
60 | .brightness_set = alix_led_set, | ||
61 | }, | ||
62 | .port = 0x6180, | ||
63 | .on_value = 1 << 27, | ||
64 | .off_value = 1 << 11, | ||
65 | }, | ||
66 | }; | ||
67 | |||
68 | static int __init alix_led_probe(struct platform_device *pdev) | ||
69 | { | ||
70 | int i; | ||
71 | int ret; | ||
72 | |||
73 | for (i = 0; i < ARRAY_SIZE(alix_leds); i++) { | ||
74 | alix_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME; | ||
75 | ret = led_classdev_register(&pdev->dev, &alix_leds[i].cdev); | ||
76 | if (ret < 0) | ||
77 | goto fail; | ||
78 | } | ||
79 | return 0; | ||
80 | |||
81 | fail: | ||
82 | while (--i >= 0) | ||
83 | led_classdev_unregister(&alix_leds[i].cdev); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | static int alix_led_remove(struct platform_device *pdev) | ||
88 | { | ||
89 | int i; | ||
90 | |||
91 | for (i = 0; i < ARRAY_SIZE(alix_leds); i++) | ||
92 | led_classdev_unregister(&alix_leds[i].cdev); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static struct platform_driver alix_led_driver = { | ||
97 | .remove = alix_led_remove, | ||
98 | .driver = { | ||
99 | .name = KBUILD_MODNAME, | ||
100 | .owner = THIS_MODULE, | ||
101 | }, | ||
102 | }; | ||
103 | |||
104 | static int __init alix_present(void) | ||
105 | { | ||
106 | const unsigned long bios_phys = 0x000f0000; | ||
107 | const size_t bios_len = 0x00010000; | ||
108 | const char alix_sig[] = "PC Engines ALIX."; | ||
109 | const size_t alix_sig_len = sizeof(alix_sig) - 1; | ||
110 | |||
111 | const char *bios_virt; | ||
112 | const char *scan_end; | ||
113 | const char *p; | ||
114 | int ret = 0; | ||
115 | |||
116 | if (force) { | ||
117 | printk(KERN_NOTICE "%s: forced to skip BIOS test, " | ||
118 | "assume system has ALIX.2 style LEDs\n", | ||
119 | KBUILD_MODNAME); | ||
120 | ret = 1; | ||
121 | goto out; | ||
122 | } | ||
123 | |||
124 | bios_virt = phys_to_virt(bios_phys); | ||
125 | scan_end = bios_virt + bios_len - (alix_sig_len + 2); | ||
126 | for (p = bios_virt; p < scan_end; p++) { | ||
127 | const char *tail; | ||
128 | |||
129 | if (memcmp(p, alix_sig, alix_sig_len) != 0) { | ||
130 | continue; | ||
131 | } | ||
132 | |||
133 | tail = p + alix_sig_len; | ||
134 | if ((tail[0] == '2' || tail[0] == '3') && tail[1] == '\0') { | ||
135 | printk(KERN_INFO | ||
136 | "%s: system is recognized as \"%s\"\n", | ||
137 | KBUILD_MODNAME, p); | ||
138 | ret = 1; | ||
139 | break; | ||
140 | } | ||
141 | } | ||
142 | |||
143 | out: | ||
144 | return ret; | ||
145 | } | ||
146 | |||
147 | static struct platform_device *pdev; | ||
148 | |||
149 | static int __init alix_led_init(void) | ||
150 | { | ||
151 | int ret; | ||
152 | |||
153 | if (!alix_present()) { | ||
154 | ret = -ENODEV; | ||
155 | goto out; | ||
156 | } | ||
157 | |||
158 | pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0); | ||
159 | if (!IS_ERR(pdev)) { | ||
160 | ret = platform_driver_probe(&alix_led_driver, alix_led_probe); | ||
161 | if (ret) | ||
162 | platform_device_unregister(pdev); | ||
163 | } else | ||
164 | ret = PTR_ERR(pdev); | ||
165 | |||
166 | out: | ||
167 | return ret; | ||
168 | } | ||
169 | |||
170 | static void __exit alix_led_exit(void) | ||
171 | { | ||
172 | platform_device_unregister(pdev); | ||
173 | platform_driver_unregister(&alix_led_driver); | ||
174 | } | ||
175 | |||
176 | module_init(alix_led_init); | ||
177 | module_exit(alix_led_exit); | ||
178 | |||
179 | MODULE_AUTHOR("Constantin Baranov <const@mimas.ru>"); | ||
180 | MODULE_DESCRIPTION("PCEngines ALIX.2 and ALIX.3 LED driver"); | ||
181 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c index 1bd590bb3a6e..446050759b4d 100644 --- a/drivers/leds/leds-ams-delta.c +++ b/drivers/leds/leds-ams-delta.c | |||
@@ -79,37 +79,12 @@ static struct ams_delta_led ams_delta_leds[] = { | |||
79 | }, | 79 | }, |
80 | }; | 80 | }; |
81 | 81 | ||
82 | #ifdef CONFIG_PM | ||
83 | static int ams_delta_led_suspend(struct platform_device *dev, | ||
84 | pm_message_t state) | ||
85 | { | ||
86 | int i; | ||
87 | |||
88 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) | ||
89 | led_classdev_suspend(&ams_delta_leds[i].cdev); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static int ams_delta_led_resume(struct platform_device *dev) | ||
95 | { | ||
96 | int i; | ||
97 | |||
98 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) | ||
99 | led_classdev_resume(&ams_delta_leds[i].cdev); | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | #else | ||
104 | #define ams_delta_led_suspend NULL | ||
105 | #define ams_delta_led_resume NULL | ||
106 | #endif | ||
107 | |||
108 | static int ams_delta_led_probe(struct platform_device *pdev) | 82 | static int ams_delta_led_probe(struct platform_device *pdev) |
109 | { | 83 | { |
110 | int i, ret; | 84 | int i, ret; |
111 | 85 | ||
112 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) { | 86 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) { |
87 | ams_delta_leds[i].cdev.flags |= LED_CORE_SUSPENDRESUME; | ||
113 | ret = led_classdev_register(&pdev->dev, | 88 | ret = led_classdev_register(&pdev->dev, |
114 | &ams_delta_leds[i].cdev); | 89 | &ams_delta_leds[i].cdev); |
115 | if (ret < 0) | 90 | if (ret < 0) |
@@ -127,7 +102,7 @@ static int ams_delta_led_remove(struct platform_device *pdev) | |||
127 | { | 102 | { |
128 | int i; | 103 | int i; |
129 | 104 | ||
130 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i--) | 105 | for (i = 0; i < ARRAY_SIZE(ams_delta_leds); i++) |
131 | led_classdev_unregister(&ams_delta_leds[i].cdev); | 106 | led_classdev_unregister(&ams_delta_leds[i].cdev); |
132 | 107 | ||
133 | return 0; | 108 | return 0; |
@@ -136,8 +111,6 @@ static int ams_delta_led_remove(struct platform_device *pdev) | |||
136 | static struct platform_driver ams_delta_led_driver = { | 111 | static struct platform_driver ams_delta_led_driver = { |
137 | .probe = ams_delta_led_probe, | 112 | .probe = ams_delta_led_probe, |
138 | .remove = ams_delta_led_remove, | 113 | .remove = ams_delta_led_remove, |
139 | .suspend = ams_delta_led_suspend, | ||
140 | .resume = ams_delta_led_resume, | ||
141 | .driver = { | 114 | .driver = { |
142 | .name = "ams-delta-led", | 115 | .name = "ams-delta-led", |
143 | .owner = THIS_MODULE, | 116 | .owner = THIS_MODULE, |
@@ -151,7 +124,7 @@ static int __init ams_delta_led_init(void) | |||
151 | 124 | ||
152 | static void __exit ams_delta_led_exit(void) | 125 | static void __exit ams_delta_led_exit(void) |
153 | { | 126 | { |
154 | return platform_driver_unregister(&ams_delta_led_driver); | 127 | platform_driver_unregister(&ams_delta_led_driver); |
155 | } | 128 | } |
156 | 129 | ||
157 | module_init(ams_delta_led_init); | 130 | module_init(ams_delta_led_init); |
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index eb3415e88f43..1813c84ea5fc 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c | |||
@@ -142,6 +142,7 @@ static struct led_classdev clevo_mail_led = { | |||
142 | .name = "clevo::mail", | 142 | .name = "clevo::mail", |
143 | .brightness_set = clevo_mail_led_set, | 143 | .brightness_set = clevo_mail_led_set, |
144 | .blink_set = clevo_mail_led_blink, | 144 | .blink_set = clevo_mail_led_blink, |
145 | .flags = LED_CORE_SUSPENDRESUME, | ||
145 | }; | 146 | }; |
146 | 147 | ||
147 | static int __init clevo_mail_led_probe(struct platform_device *pdev) | 148 | static int __init clevo_mail_led_probe(struct platform_device *pdev) |
@@ -155,29 +156,9 @@ static int clevo_mail_led_remove(struct platform_device *pdev) | |||
155 | return 0; | 156 | return 0; |
156 | } | 157 | } |
157 | 158 | ||
158 | #ifdef CONFIG_PM | ||
159 | static int clevo_mail_led_suspend(struct platform_device *dev, | ||
160 | pm_message_t state) | ||
161 | { | ||
162 | led_classdev_suspend(&clevo_mail_led); | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | static int clevo_mail_led_resume(struct platform_device *dev) | ||
167 | { | ||
168 | led_classdev_resume(&clevo_mail_led); | ||
169 | return 0; | ||
170 | } | ||
171 | #else | ||
172 | #define clevo_mail_led_suspend NULL | ||
173 | #define clevo_mail_led_resume NULL | ||
174 | #endif | ||
175 | |||
176 | static struct platform_driver clevo_mail_led_driver = { | 159 | static struct platform_driver clevo_mail_led_driver = { |
177 | .probe = clevo_mail_led_probe, | 160 | .probe = clevo_mail_led_probe, |
178 | .remove = clevo_mail_led_remove, | 161 | .remove = clevo_mail_led_remove, |
179 | .suspend = clevo_mail_led_suspend, | ||
180 | .resume = clevo_mail_led_resume, | ||
181 | .driver = { | 162 | .driver = { |
182 | .name = KBUILD_MODNAME, | 163 | .name = KBUILD_MODNAME, |
183 | .owner = THIS_MODULE, | 164 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c index 34935155c1c0..5f7c9c5c09b1 100644 --- a/drivers/leds/leds-fsg.c +++ b/drivers/leds/leds-fsg.c | |||
@@ -99,64 +99,43 @@ static void fsg_led_ring_set(struct led_classdev *led_cdev, | |||
99 | } | 99 | } |
100 | 100 | ||
101 | 101 | ||
102 | |||
103 | static struct led_classdev fsg_wlan_led = { | 102 | static struct led_classdev fsg_wlan_led = { |
104 | .name = "fsg:blue:wlan", | 103 | .name = "fsg:blue:wlan", |
105 | .brightness_set = fsg_led_wlan_set, | 104 | .brightness_set = fsg_led_wlan_set, |
105 | .flags = LED_CORE_SUSPENDRESUME, | ||
106 | }; | 106 | }; |
107 | 107 | ||
108 | static struct led_classdev fsg_wan_led = { | 108 | static struct led_classdev fsg_wan_led = { |
109 | .name = "fsg:blue:wan", | 109 | .name = "fsg:blue:wan", |
110 | .brightness_set = fsg_led_wan_set, | 110 | .brightness_set = fsg_led_wan_set, |
111 | .flags = LED_CORE_SUSPENDRESUME, | ||
111 | }; | 112 | }; |
112 | 113 | ||
113 | static struct led_classdev fsg_sata_led = { | 114 | static struct led_classdev fsg_sata_led = { |
114 | .name = "fsg:blue:sata", | 115 | .name = "fsg:blue:sata", |
115 | .brightness_set = fsg_led_sata_set, | 116 | .brightness_set = fsg_led_sata_set, |
117 | .flags = LED_CORE_SUSPENDRESUME, | ||
116 | }; | 118 | }; |
117 | 119 | ||
118 | static struct led_classdev fsg_usb_led = { | 120 | static struct led_classdev fsg_usb_led = { |
119 | .name = "fsg:blue:usb", | 121 | .name = "fsg:blue:usb", |
120 | .brightness_set = fsg_led_usb_set, | 122 | .brightness_set = fsg_led_usb_set, |
123 | .flags = LED_CORE_SUSPENDRESUME, | ||
121 | }; | 124 | }; |
122 | 125 | ||
123 | static struct led_classdev fsg_sync_led = { | 126 | static struct led_classdev fsg_sync_led = { |
124 | .name = "fsg:blue:sync", | 127 | .name = "fsg:blue:sync", |
125 | .brightness_set = fsg_led_sync_set, | 128 | .brightness_set = fsg_led_sync_set, |
129 | .flags = LED_CORE_SUSPENDRESUME, | ||
126 | }; | 130 | }; |
127 | 131 | ||
128 | static struct led_classdev fsg_ring_led = { | 132 | static struct led_classdev fsg_ring_led = { |
129 | .name = "fsg:blue:ring", | 133 | .name = "fsg:blue:ring", |
130 | .brightness_set = fsg_led_ring_set, | 134 | .brightness_set = fsg_led_ring_set, |
135 | .flags = LED_CORE_SUSPENDRESUME, | ||
131 | }; | 136 | }; |
132 | 137 | ||
133 | 138 | ||
134 | |||
135 | #ifdef CONFIG_PM | ||
136 | static int fsg_led_suspend(struct platform_device *dev, pm_message_t state) | ||
137 | { | ||
138 | led_classdev_suspend(&fsg_wlan_led); | ||
139 | led_classdev_suspend(&fsg_wan_led); | ||
140 | led_classdev_suspend(&fsg_sata_led); | ||
141 | led_classdev_suspend(&fsg_usb_led); | ||
142 | led_classdev_suspend(&fsg_sync_led); | ||
143 | led_classdev_suspend(&fsg_ring_led); | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int fsg_led_resume(struct platform_device *dev) | ||
148 | { | ||
149 | led_classdev_resume(&fsg_wlan_led); | ||
150 | led_classdev_resume(&fsg_wan_led); | ||
151 | led_classdev_resume(&fsg_sata_led); | ||
152 | led_classdev_resume(&fsg_usb_led); | ||
153 | led_classdev_resume(&fsg_sync_led); | ||
154 | led_classdev_resume(&fsg_ring_led); | ||
155 | return 0; | ||
156 | } | ||
157 | #endif | ||
158 | |||
159 | |||
160 | static int fsg_led_probe(struct platform_device *pdev) | 139 | static int fsg_led_probe(struct platform_device *pdev) |
161 | { | 140 | { |
162 | int ret; | 141 | int ret; |
@@ -232,10 +211,6 @@ static int fsg_led_remove(struct platform_device *pdev) | |||
232 | static struct platform_driver fsg_led_driver = { | 211 | static struct platform_driver fsg_led_driver = { |
233 | .probe = fsg_led_probe, | 212 | .probe = fsg_led_probe, |
234 | .remove = fsg_led_remove, | 213 | .remove = fsg_led_remove, |
235 | #ifdef CONFIG_PM | ||
236 | .suspend = fsg_led_suspend, | ||
237 | .resume = fsg_led_resume, | ||
238 | #endif | ||
239 | .driver = { | 214 | .driver = { |
240 | .name = "fsg-led", | 215 | .name = "fsg-led", |
241 | }, | 216 | }, |
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index b13bd2950e95..2e3df08b649b 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c | |||
@@ -105,6 +105,7 @@ static int gpio_led_probe(struct platform_device *pdev) | |||
105 | } | 105 | } |
106 | led_dat->cdev.brightness_set = gpio_led_set; | 106 | led_dat->cdev.brightness_set = gpio_led_set; |
107 | led_dat->cdev.brightness = LED_OFF; | 107 | led_dat->cdev.brightness = LED_OFF; |
108 | led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; | ||
108 | 109 | ||
109 | gpio_direction_output(led_dat->gpio, led_dat->active_low); | 110 | gpio_direction_output(led_dat->gpio, led_dat->active_low); |
110 | 111 | ||
@@ -154,44 +155,9 @@ static int __devexit gpio_led_remove(struct platform_device *pdev) | |||
154 | return 0; | 155 | return 0; |
155 | } | 156 | } |
156 | 157 | ||
157 | #ifdef CONFIG_PM | ||
158 | static int gpio_led_suspend(struct platform_device *pdev, pm_message_t state) | ||
159 | { | ||
160 | struct gpio_led_platform_data *pdata = pdev->dev.platform_data; | ||
161 | struct gpio_led_data *leds_data; | ||
162 | int i; | ||
163 | |||
164 | leds_data = platform_get_drvdata(pdev); | ||
165 | |||
166 | for (i = 0; i < pdata->num_leds; i++) | ||
167 | led_classdev_suspend(&leds_data[i].cdev); | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static int gpio_led_resume(struct platform_device *pdev) | ||
173 | { | ||
174 | struct gpio_led_platform_data *pdata = pdev->dev.platform_data; | ||
175 | struct gpio_led_data *leds_data; | ||
176 | int i; | ||
177 | |||
178 | leds_data = platform_get_drvdata(pdev); | ||
179 | |||
180 | for (i = 0; i < pdata->num_leds; i++) | ||
181 | led_classdev_resume(&leds_data[i].cdev); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | #else | ||
186 | #define gpio_led_suspend NULL | ||
187 | #define gpio_led_resume NULL | ||
188 | #endif | ||
189 | |||
190 | static struct platform_driver gpio_led_driver = { | 158 | static struct platform_driver gpio_led_driver = { |
191 | .probe = gpio_led_probe, | 159 | .probe = gpio_led_probe, |
192 | .remove = __devexit_p(gpio_led_remove), | 160 | .remove = __devexit_p(gpio_led_remove), |
193 | .suspend = gpio_led_suspend, | ||
194 | .resume = gpio_led_resume, | ||
195 | .driver = { | 161 | .driver = { |
196 | .name = "leds-gpio", | 162 | .name = "leds-gpio", |
197 | .owner = THIS_MODULE, | 163 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/leds-hp-disk.c b/drivers/leds/leds-hp-disk.c index 44fa757d8254..d786adc8c5e3 100644 --- a/drivers/leds/leds-hp-disk.c +++ b/drivers/leds/leds-hp-disk.c | |||
@@ -68,25 +68,9 @@ static struct led_classdev hpled_led = { | |||
68 | .name = "hp:red:hddprotection", | 68 | .name = "hp:red:hddprotection", |
69 | .default_trigger = "heartbeat", | 69 | .default_trigger = "heartbeat", |
70 | .brightness_set = hpled_set, | 70 | .brightness_set = hpled_set, |
71 | .flags = LED_CORE_SUSPENDRESUME, | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | #ifdef CONFIG_PM | ||
74 | static int hpled_suspend(struct acpi_device *dev, pm_message_t state) | ||
75 | { | ||
76 | led_classdev_suspend(&hpled_led); | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int hpled_resume(struct acpi_device *dev) | ||
81 | { | ||
82 | led_classdev_resume(&hpled_led); | ||
83 | return 0; | ||
84 | } | ||
85 | #else | ||
86 | #define hpled_suspend NULL | ||
87 | #define hpled_resume NULL | ||
88 | #endif | ||
89 | |||
90 | static int hpled_add(struct acpi_device *device) | 74 | static int hpled_add(struct acpi_device *device) |
91 | { | 75 | { |
92 | int ret; | 76 | int ret; |
@@ -121,8 +105,6 @@ static struct acpi_driver leds_hp_driver = { | |||
121 | .ops = { | 105 | .ops = { |
122 | .add = hpled_add, | 106 | .add = hpled_add, |
123 | .remove = hpled_remove, | 107 | .remove = hpled_remove, |
124 | .suspend = hpled_suspend, | ||
125 | .resume = hpled_resume, | ||
126 | } | 108 | } |
127 | }; | 109 | }; |
128 | 110 | ||
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c index e8fb1baf8a50..e4ce1fd46338 100644 --- a/drivers/leds/leds-hp6xx.c +++ b/drivers/leds/leds-hp6xx.c | |||
@@ -45,30 +45,16 @@ static struct led_classdev hp6xx_red_led = { | |||
45 | .name = "hp6xx:red", | 45 | .name = "hp6xx:red", |
46 | .default_trigger = "hp6xx-charge", | 46 | .default_trigger = "hp6xx-charge", |
47 | .brightness_set = hp6xxled_red_set, | 47 | .brightness_set = hp6xxled_red_set, |
48 | .flags = LED_CORE_SUSPENDRESUME, | ||
48 | }; | 49 | }; |
49 | 50 | ||
50 | static struct led_classdev hp6xx_green_led = { | 51 | static struct led_classdev hp6xx_green_led = { |
51 | .name = "hp6xx:green", | 52 | .name = "hp6xx:green", |
52 | .default_trigger = "ide-disk", | 53 | .default_trigger = "ide-disk", |
53 | .brightness_set = hp6xxled_green_set, | 54 | .brightness_set = hp6xxled_green_set, |
55 | .flags = LED_CORE_SUSPENDRESUME, | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | #ifdef CONFIG_PM | ||
57 | static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state) | ||
58 | { | ||
59 | led_classdev_suspend(&hp6xx_red_led); | ||
60 | led_classdev_suspend(&hp6xx_green_led); | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int hp6xxled_resume(struct platform_device *dev) | ||
65 | { | ||
66 | led_classdev_resume(&hp6xx_red_led); | ||
67 | led_classdev_resume(&hp6xx_green_led); | ||
68 | return 0; | ||
69 | } | ||
70 | #endif | ||
71 | |||
72 | static int hp6xxled_probe(struct platform_device *pdev) | 58 | static int hp6xxled_probe(struct platform_device *pdev) |
73 | { | 59 | { |
74 | int ret; | 60 | int ret; |
@@ -98,10 +84,6 @@ MODULE_ALIAS("platform:hp6xx-led"); | |||
98 | static struct platform_driver hp6xxled_driver = { | 84 | static struct platform_driver hp6xxled_driver = { |
99 | .probe = hp6xxled_probe, | 85 | .probe = hp6xxled_probe, |
100 | .remove = hp6xxled_remove, | 86 | .remove = hp6xxled_remove, |
101 | #ifdef CONFIG_PM | ||
102 | .suspend = hp6xxled_suspend, | ||
103 | .resume = hp6xxled_resume, | ||
104 | #endif | ||
105 | .driver = { | 87 | .driver = { |
106 | .name = "hp6xx-led", | 88 | .name = "hp6xx-led", |
107 | .owner = THIS_MODULE, | 89 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c index 054360473c94..93987a12da49 100644 --- a/drivers/leds/leds-net48xx.c +++ b/drivers/leds/leds-net48xx.c | |||
@@ -33,26 +33,9 @@ static void net48xx_error_led_set(struct led_classdev *led_cdev, | |||
33 | static struct led_classdev net48xx_error_led = { | 33 | static struct led_classdev net48xx_error_led = { |
34 | .name = "net48xx::error", | 34 | .name = "net48xx::error", |
35 | .brightness_set = net48xx_error_led_set, | 35 | .brightness_set = net48xx_error_led_set, |
36 | .flags = LED_CORE_SUSPENDRESUME, | ||
36 | }; | 37 | }; |
37 | 38 | ||
38 | #ifdef CONFIG_PM | ||
39 | static int net48xx_led_suspend(struct platform_device *dev, | ||
40 | pm_message_t state) | ||
41 | { | ||
42 | led_classdev_suspend(&net48xx_error_led); | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static int net48xx_led_resume(struct platform_device *dev) | ||
47 | { | ||
48 | led_classdev_resume(&net48xx_error_led); | ||
49 | return 0; | ||
50 | } | ||
51 | #else | ||
52 | #define net48xx_led_suspend NULL | ||
53 | #define net48xx_led_resume NULL | ||
54 | #endif | ||
55 | |||
56 | static int net48xx_led_probe(struct platform_device *pdev) | 39 | static int net48xx_led_probe(struct platform_device *pdev) |
57 | { | 40 | { |
58 | return led_classdev_register(&pdev->dev, &net48xx_error_led); | 41 | return led_classdev_register(&pdev->dev, &net48xx_error_led); |
@@ -67,8 +50,6 @@ static int net48xx_led_remove(struct platform_device *pdev) | |||
67 | static struct platform_driver net48xx_led_driver = { | 50 | static struct platform_driver net48xx_led_driver = { |
68 | .probe = net48xx_led_probe, | 51 | .probe = net48xx_led_probe, |
69 | .remove = net48xx_led_remove, | 52 | .remove = net48xx_led_remove, |
70 | .suspend = net48xx_led_suspend, | ||
71 | .resume = net48xx_led_resume, | ||
72 | .driver = { | 53 | .driver = { |
73 | .name = DRVNAME, | 54 | .name = DRVNAME, |
74 | .owner = THIS_MODULE, | 55 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 4064d4f6b33b..76ec7498e2d5 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/leds.h> | 16 | #include <linux/leds.h> |
17 | #include <linux/input.h> | 17 | #include <linux/input.h> |
18 | #include <linux/mutex.h> | 18 | #include <linux/mutex.h> |
19 | #include <linux/workqueue.h> | ||
19 | #include <linux/leds-pca9532.h> | 20 | #include <linux/leds-pca9532.h> |
20 | 21 | ||
21 | static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END}; | 22 | static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END}; |
@@ -34,6 +35,7 @@ struct pca9532_data { | |||
34 | struct pca9532_led leds[16]; | 35 | struct pca9532_led leds[16]; |
35 | struct mutex update_lock; | 36 | struct mutex update_lock; |
36 | struct input_dev *idev; | 37 | struct input_dev *idev; |
38 | struct work_struct work; | ||
37 | u8 pwm[2]; | 39 | u8 pwm[2]; |
38 | u8 psc[2]; | 40 | u8 psc[2]; |
39 | }; | 41 | }; |
@@ -63,7 +65,7 @@ static struct i2c_driver pca9532_driver = { | |||
63 | * as a compromise we average one pwm to the values requested by all | 65 | * as a compromise we average one pwm to the values requested by all |
64 | * leds that are not ON/OFF. | 66 | * leds that are not ON/OFF. |
65 | * */ | 67 | * */ |
66 | static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink, | 68 | static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink, |
67 | enum led_brightness value) | 69 | enum led_brightness value) |
68 | { | 70 | { |
69 | int a = 0, b = 0, i = 0; | 71 | int a = 0, b = 0, i = 0; |
@@ -84,11 +86,17 @@ static int pca9532_setpwm(struct i2c_client *client, int pwm, int blink, | |||
84 | b = b/a; | 86 | b = b/a; |
85 | if (b > 0xFF) | 87 | if (b > 0xFF) |
86 | return -EINVAL; | 88 | return -EINVAL; |
87 | mutex_lock(&data->update_lock); | ||
88 | data->pwm[pwm] = b; | 89 | data->pwm[pwm] = b; |
90 | data->psc[pwm] = blink; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static int pca9532_setpwm(struct i2c_client *client, int pwm) | ||
95 | { | ||
96 | struct pca9532_data *data = i2c_get_clientdata(client); | ||
97 | mutex_lock(&data->update_lock); | ||
89 | i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), | 98 | i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), |
90 | data->pwm[pwm]); | 99 | data->pwm[pwm]); |
91 | data->psc[pwm] = blink; | ||
92 | i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), | 100 | i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), |
93 | data->psc[pwm]); | 101 | data->psc[pwm]); |
94 | mutex_unlock(&data->update_lock); | 102 | mutex_unlock(&data->update_lock); |
@@ -124,11 +132,11 @@ static void pca9532_set_brightness(struct led_classdev *led_cdev, | |||
124 | led->state = PCA9532_ON; | 132 | led->state = PCA9532_ON; |
125 | else { | 133 | else { |
126 | led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ | 134 | led->state = PCA9532_PWM0; /* Thecus: hardcode one pwm */ |
127 | err = pca9532_setpwm(led->client, 0, 0, value); | 135 | err = pca9532_calcpwm(led->client, 0, 0, value); |
128 | if (err) | 136 | if (err) |
129 | return; /* XXX: led api doesn't allow error code? */ | 137 | return; /* XXX: led api doesn't allow error code? */ |
130 | } | 138 | } |
131 | pca9532_setled(led); | 139 | schedule_work(&led->work); |
132 | } | 140 | } |
133 | 141 | ||
134 | static int pca9532_set_blink(struct led_classdev *led_cdev, | 142 | static int pca9532_set_blink(struct led_classdev *led_cdev, |
@@ -137,6 +145,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, | |||
137 | struct pca9532_led *led = ldev_to_led(led_cdev); | 145 | struct pca9532_led *led = ldev_to_led(led_cdev); |
138 | struct i2c_client *client = led->client; | 146 | struct i2c_client *client = led->client; |
139 | int psc; | 147 | int psc; |
148 | int err = 0; | ||
140 | 149 | ||
141 | if (*delay_on == 0 && *delay_off == 0) { | 150 | if (*delay_on == 0 && *delay_off == 0) { |
142 | /* led subsystem ask us for a blink rate */ | 151 | /* led subsystem ask us for a blink rate */ |
@@ -148,11 +157,15 @@ static int pca9532_set_blink(struct led_classdev *led_cdev, | |||
148 | 157 | ||
149 | /* Thecus specific: only use PSC/PWM 0 */ | 158 | /* Thecus specific: only use PSC/PWM 0 */ |
150 | psc = (*delay_on * 152-1)/1000; | 159 | psc = (*delay_on * 152-1)/1000; |
151 | return pca9532_setpwm(client, 0, psc, led_cdev->brightness); | 160 | err = pca9532_calcpwm(client, 0, psc, led_cdev->brightness); |
161 | if (err) | ||
162 | return err; | ||
163 | schedule_work(&led->work); | ||
164 | return 0; | ||
152 | } | 165 | } |
153 | 166 | ||
154 | int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code, | 167 | static int pca9532_event(struct input_dev *dev, unsigned int type, |
155 | int value) | 168 | unsigned int code, int value) |
156 | { | 169 | { |
157 | struct pca9532_data *data = input_get_drvdata(dev); | 170 | struct pca9532_data *data = input_get_drvdata(dev); |
158 | 171 | ||
@@ -165,13 +178,28 @@ int pca9532_event(struct input_dev *dev, unsigned int type, unsigned int code, | |||
165 | else | 178 | else |
166 | data->pwm[1] = 0; | 179 | data->pwm[1] = 0; |
167 | 180 | ||
168 | dev_info(&dev->dev, "setting beep to %d \n", data->pwm[1]); | 181 | schedule_work(&data->work); |
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static void pca9532_input_work(struct work_struct *work) | ||
187 | { | ||
188 | struct pca9532_data *data; | ||
189 | data = container_of(work, struct pca9532_data, work); | ||
169 | mutex_lock(&data->update_lock); | 190 | mutex_lock(&data->update_lock); |
170 | i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), | 191 | i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), |
171 | data->pwm[1]); | 192 | data->pwm[1]); |
172 | mutex_unlock(&data->update_lock); | 193 | mutex_unlock(&data->update_lock); |
194 | } | ||
173 | 195 | ||
174 | return 0; | 196 | static void pca9532_led_work(struct work_struct *work) |
197 | { | ||
198 | struct pca9532_led *led; | ||
199 | led = container_of(work, struct pca9532_led, work); | ||
200 | if (led->state == PCA9532_PWM0) | ||
201 | pca9532_setpwm(led->client, 0); | ||
202 | pca9532_setled(led); | ||
175 | } | 203 | } |
176 | 204 | ||
177 | static int pca9532_configure(struct i2c_client *client, | 205 | static int pca9532_configure(struct i2c_client *client, |
@@ -204,8 +232,9 @@ static int pca9532_configure(struct i2c_client *client, | |||
204 | led->ldev.brightness = LED_OFF; | 232 | led->ldev.brightness = LED_OFF; |
205 | led->ldev.brightness_set = pca9532_set_brightness; | 233 | led->ldev.brightness_set = pca9532_set_brightness; |
206 | led->ldev.blink_set = pca9532_set_blink; | 234 | led->ldev.blink_set = pca9532_set_blink; |
207 | if (led_classdev_register(&client->dev, | 235 | INIT_WORK(&led->work, pca9532_led_work); |
208 | &led->ldev) < 0) { | 236 | err = led_classdev_register(&client->dev, &led->ldev); |
237 | if (err < 0) { | ||
209 | dev_err(&client->dev, | 238 | dev_err(&client->dev, |
210 | "couldn't register LED %s\n", | 239 | "couldn't register LED %s\n", |
211 | led->name); | 240 | led->name); |
@@ -233,9 +262,11 @@ static int pca9532_configure(struct i2c_client *client, | |||
233 | BIT_MASK(SND_TONE); | 262 | BIT_MASK(SND_TONE); |
234 | data->idev->event = pca9532_event; | 263 | data->idev->event = pca9532_event; |
235 | input_set_drvdata(data->idev, data); | 264 | input_set_drvdata(data->idev, data); |
265 | INIT_WORK(&data->work, pca9532_input_work); | ||
236 | err = input_register_device(data->idev); | 266 | err = input_register_device(data->idev); |
237 | if (err) { | 267 | if (err) { |
238 | input_free_device(data->idev); | 268 | input_free_device(data->idev); |
269 | cancel_work_sync(&data->work); | ||
239 | data->idev = NULL; | 270 | data->idev = NULL; |
240 | goto exit; | 271 | goto exit; |
241 | } | 272 | } |
@@ -252,18 +283,19 @@ exit: | |||
252 | break; | 283 | break; |
253 | case PCA9532_TYPE_LED: | 284 | case PCA9532_TYPE_LED: |
254 | led_classdev_unregister(&data->leds[i].ldev); | 285 | led_classdev_unregister(&data->leds[i].ldev); |
286 | cancel_work_sync(&data->leds[i].work); | ||
255 | break; | 287 | break; |
256 | case PCA9532_TYPE_N2100_BEEP: | 288 | case PCA9532_TYPE_N2100_BEEP: |
257 | if (data->idev != NULL) { | 289 | if (data->idev != NULL) { |
258 | input_unregister_device(data->idev); | 290 | input_unregister_device(data->idev); |
259 | input_free_device(data->idev); | 291 | input_free_device(data->idev); |
292 | cancel_work_sync(&data->work); | ||
260 | data->idev = NULL; | 293 | data->idev = NULL; |
261 | } | 294 | } |
262 | break; | 295 | break; |
263 | } | 296 | } |
264 | 297 | ||
265 | return err; | 298 | return err; |
266 | |||
267 | } | 299 | } |
268 | 300 | ||
269 | static int pca9532_probe(struct i2c_client *client, | 301 | static int pca9532_probe(struct i2c_client *client, |
@@ -271,12 +303,16 @@ static int pca9532_probe(struct i2c_client *client, | |||
271 | { | 303 | { |
272 | struct pca9532_data *data = i2c_get_clientdata(client); | 304 | struct pca9532_data *data = i2c_get_clientdata(client); |
273 | struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data; | 305 | struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data; |
306 | int err; | ||
307 | |||
308 | if (!pca9532_pdata) | ||
309 | return -EIO; | ||
274 | 310 | ||
275 | if (!i2c_check_functionality(client->adapter, | 311 | if (!i2c_check_functionality(client->adapter, |
276 | I2C_FUNC_SMBUS_BYTE_DATA)) | 312 | I2C_FUNC_SMBUS_BYTE_DATA)) |
277 | return -EIO; | 313 | return -EIO; |
278 | 314 | ||
279 | data = kzalloc(sizeof(struct pca9532_data), GFP_KERNEL); | 315 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
280 | if (!data) | 316 | if (!data) |
281 | return -ENOMEM; | 317 | return -ENOMEM; |
282 | 318 | ||
@@ -285,12 +321,13 @@ static int pca9532_probe(struct i2c_client *client, | |||
285 | data->client = client; | 321 | data->client = client; |
286 | mutex_init(&data->update_lock); | 322 | mutex_init(&data->update_lock); |
287 | 323 | ||
288 | if (pca9532_pdata == NULL) | 324 | err = pca9532_configure(client, data, pca9532_pdata); |
289 | return -EIO; | 325 | if (err) { |
290 | 326 | kfree(data); | |
291 | pca9532_configure(client, data, pca9532_pdata); | 327 | i2c_set_clientdata(client, NULL); |
292 | return 0; | 328 | } |
293 | 329 | ||
330 | return err; | ||
294 | } | 331 | } |
295 | 332 | ||
296 | static int pca9532_remove(struct i2c_client *client) | 333 | static int pca9532_remove(struct i2c_client *client) |
@@ -303,11 +340,13 @@ static int pca9532_remove(struct i2c_client *client) | |||
303 | break; | 340 | break; |
304 | case PCA9532_TYPE_LED: | 341 | case PCA9532_TYPE_LED: |
305 | led_classdev_unregister(&data->leds[i].ldev); | 342 | led_classdev_unregister(&data->leds[i].ldev); |
343 | cancel_work_sync(&data->leds[i].work); | ||
306 | break; | 344 | break; |
307 | case PCA9532_TYPE_N2100_BEEP: | 345 | case PCA9532_TYPE_N2100_BEEP: |
308 | if (data->idev != NULL) { | 346 | if (data->idev != NULL) { |
309 | input_unregister_device(data->idev); | 347 | input_unregister_device(data->idev); |
310 | input_free_device(data->idev); | 348 | input_free_device(data->idev); |
349 | cancel_work_sync(&data->work); | ||
311 | data->idev = NULL; | 350 | data->idev = NULL; |
312 | } | 351 | } |
313 | break; | 352 | break; |
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c index 25a07f2643ad..4d81131542ae 100644 --- a/drivers/leds/leds-s3c24xx.c +++ b/drivers/leds/leds-s3c24xx.c | |||
@@ -82,6 +82,7 @@ static int s3c24xx_led_probe(struct platform_device *dev) | |||
82 | led->cdev.brightness_set = s3c24xx_led_set; | 82 | led->cdev.brightness_set = s3c24xx_led_set; |
83 | led->cdev.default_trigger = pdata->def_trigger; | 83 | led->cdev.default_trigger = pdata->def_trigger; |
84 | led->cdev.name = pdata->name; | 84 | led->cdev.name = pdata->name; |
85 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; | ||
85 | 86 | ||
86 | led->pdata = pdata; | 87 | led->pdata = pdata; |
87 | 88 | ||
@@ -111,33 +112,9 @@ static int s3c24xx_led_probe(struct platform_device *dev) | |||
111 | return ret; | 112 | return ret; |
112 | } | 113 | } |
113 | 114 | ||
114 | |||
115 | #ifdef CONFIG_PM | ||
116 | static int s3c24xx_led_suspend(struct platform_device *dev, pm_message_t state) | ||
117 | { | ||
118 | struct s3c24xx_gpio_led *led = pdev_to_gpio(dev); | ||
119 | |||
120 | led_classdev_suspend(&led->cdev); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int s3c24xx_led_resume(struct platform_device *dev) | ||
125 | { | ||
126 | struct s3c24xx_gpio_led *led = pdev_to_gpio(dev); | ||
127 | |||
128 | led_classdev_resume(&led->cdev); | ||
129 | return 0; | ||
130 | } | ||
131 | #else | ||
132 | #define s3c24xx_led_suspend NULL | ||
133 | #define s3c24xx_led_resume NULL | ||
134 | #endif | ||
135 | |||
136 | static struct platform_driver s3c24xx_led_driver = { | 115 | static struct platform_driver s3c24xx_led_driver = { |
137 | .probe = s3c24xx_led_probe, | 116 | .probe = s3c24xx_led_probe, |
138 | .remove = s3c24xx_led_remove, | 117 | .remove = s3c24xx_led_remove, |
139 | .suspend = s3c24xx_led_suspend, | ||
140 | .resume = s3c24xx_led_resume, | ||
141 | .driver = { | 118 | .driver = { |
142 | .name = "s3c24xx_led", | 119 | .name = "s3c24xx_led", |
143 | .owner = THIS_MODULE, | 120 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c new file mode 100644 index 000000000000..38c6bcb07e6c --- /dev/null +++ b/drivers/leds/leds-wm8350.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /* | ||
2 | * LED driver for WM8350 driven LEDS. | ||
3 | * | ||
4 | * Copyright(C) 2007, 2008 Wolfson Microelectronics PLC. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/leds.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/mfd/wm8350/pmic.h> | ||
18 | #include <linux/regulator/consumer.h> | ||
19 | |||
20 | /* Microamps */ | ||
21 | static const int isink_cur[] = { | ||
22 | 4, | ||
23 | 5, | ||
24 | 6, | ||
25 | 7, | ||
26 | 8, | ||
27 | 10, | ||
28 | 11, | ||
29 | 14, | ||
30 | 16, | ||
31 | 19, | ||
32 | 23, | ||
33 | 27, | ||
34 | 32, | ||
35 | 39, | ||
36 | 46, | ||
37 | 54, | ||
38 | 65, | ||
39 | 77, | ||
40 | 92, | ||
41 | 109, | ||
42 | 130, | ||
43 | 154, | ||
44 | 183, | ||
45 | 218, | ||
46 | 259, | ||
47 | 308, | ||
48 | 367, | ||
49 | 436, | ||
50 | 518, | ||
51 | 616, | ||
52 | 733, | ||
53 | 872, | ||
54 | 1037, | ||
55 | 1233, | ||
56 | 1466, | ||
57 | 1744, | ||
58 | 2073, | ||
59 | 2466, | ||
60 | 2933, | ||
61 | 3487, | ||
62 | 4147, | ||
63 | 4932, | ||
64 | 5865, | ||
65 | 6975, | ||
66 | 8294, | ||
67 | 9864, | ||
68 | 11730, | ||
69 | 13949, | ||
70 | 16589, | ||
71 | 19728, | ||
72 | 23460, | ||
73 | 27899, | ||
74 | 33178, | ||
75 | 39455, | ||
76 | 46920, | ||
77 | 55798, | ||
78 | 66355, | ||
79 | 78910, | ||
80 | 93840, | ||
81 | 111596, | ||
82 | 132710, | ||
83 | 157820, | ||
84 | 187681, | ||
85 | 223191 | ||
86 | }; | ||
87 | |||
88 | #define to_wm8350_led(led_cdev) \ | ||
89 | container_of(led_cdev, struct wm8350_led, cdev) | ||
90 | |||
91 | static void wm8350_led_enable(struct wm8350_led *led) | ||
92 | { | ||
93 | int ret; | ||
94 | |||
95 | if (led->enabled) | ||
96 | return; | ||
97 | |||
98 | ret = regulator_enable(led->isink); | ||
99 | if (ret != 0) { | ||
100 | dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret); | ||
101 | return; | ||
102 | } | ||
103 | |||
104 | ret = regulator_enable(led->dcdc); | ||
105 | if (ret != 0) { | ||
106 | dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret); | ||
107 | regulator_disable(led->isink); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | led->enabled = 1; | ||
112 | } | ||
113 | |||
114 | static void wm8350_led_disable(struct wm8350_led *led) | ||
115 | { | ||
116 | int ret; | ||
117 | |||
118 | if (!led->enabled) | ||
119 | return; | ||
120 | |||
121 | ret = regulator_disable(led->dcdc); | ||
122 | if (ret != 0) { | ||
123 | dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret); | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | ret = regulator_disable(led->isink); | ||
128 | if (ret != 0) { | ||
129 | dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret); | ||
130 | regulator_enable(led->dcdc); | ||
131 | return; | ||
132 | } | ||
133 | |||
134 | led->enabled = 0; | ||
135 | } | ||
136 | |||
137 | static void led_work(struct work_struct *work) | ||
138 | { | ||
139 | struct wm8350_led *led = container_of(work, struct wm8350_led, work); | ||
140 | int ret; | ||
141 | int uA; | ||
142 | unsigned long flags; | ||
143 | |||
144 | mutex_lock(&led->mutex); | ||
145 | |||
146 | spin_lock_irqsave(&led->value_lock, flags); | ||
147 | |||
148 | if (led->value == LED_OFF) { | ||
149 | spin_unlock_irqrestore(&led->value_lock, flags); | ||
150 | wm8350_led_disable(led); | ||
151 | goto out; | ||
152 | } | ||
153 | |||
154 | /* This scales linearly into the index of valid current | ||
155 | * settings which results in a linear scaling of perceived | ||
156 | * brightness due to the non-linear current settings provided | ||
157 | * by the hardware. | ||
158 | */ | ||
159 | uA = (led->max_uA_index * led->value) / LED_FULL; | ||
160 | spin_unlock_irqrestore(&led->value_lock, flags); | ||
161 | BUG_ON(uA >= ARRAY_SIZE(isink_cur)); | ||
162 | |||
163 | ret = regulator_set_current_limit(led->isink, isink_cur[uA], | ||
164 | isink_cur[uA]); | ||
165 | if (ret != 0) | ||
166 | dev_err(led->cdev.dev, "Failed to set %duA: %d\n", | ||
167 | isink_cur[uA], ret); | ||
168 | |||
169 | wm8350_led_enable(led); | ||
170 | |||
171 | out: | ||
172 | mutex_unlock(&led->mutex); | ||
173 | } | ||
174 | |||
175 | static void wm8350_led_set(struct led_classdev *led_cdev, | ||
176 | enum led_brightness value) | ||
177 | { | ||
178 | struct wm8350_led *led = to_wm8350_led(led_cdev); | ||
179 | unsigned long flags; | ||
180 | |||
181 | spin_lock_irqsave(&led->value_lock, flags); | ||
182 | led->value = value; | ||
183 | schedule_work(&led->work); | ||
184 | spin_unlock_irqrestore(&led->value_lock, flags); | ||
185 | } | ||
186 | |||
187 | static void wm8350_led_shutdown(struct platform_device *pdev) | ||
188 | { | ||
189 | struct wm8350_led *led = platform_get_drvdata(pdev); | ||
190 | |||
191 | mutex_lock(&led->mutex); | ||
192 | led->value = LED_OFF; | ||
193 | wm8350_led_disable(led); | ||
194 | mutex_unlock(&led->mutex); | ||
195 | } | ||
196 | |||
197 | static int wm8350_led_probe(struct platform_device *pdev) | ||
198 | { | ||
199 | struct regulator *isink, *dcdc; | ||
200 | struct wm8350_led *led; | ||
201 | struct wm8350_led_platform_data *pdata = pdev->dev.platform_data; | ||
202 | int ret, i; | ||
203 | |||
204 | if (pdata == NULL) { | ||
205 | dev_err(&pdev->dev, "no platform data\n"); | ||
206 | return -ENODEV; | ||
207 | } | ||
208 | |||
209 | if (pdata->max_uA < isink_cur[0]) { | ||
210 | dev_err(&pdev->dev, "Invalid maximum current %duA\n", | ||
211 | pdata->max_uA); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | |||
215 | isink = regulator_get(&pdev->dev, "led_isink"); | ||
216 | if (IS_ERR(isink)) { | ||
217 | printk(KERN_ERR "%s: cant get ISINK\n", __func__); | ||
218 | return PTR_ERR(isink); | ||
219 | } | ||
220 | |||
221 | dcdc = regulator_get(&pdev->dev, "led_vcc"); | ||
222 | if (IS_ERR(dcdc)) { | ||
223 | printk(KERN_ERR "%s: cant get DCDC\n", __func__); | ||
224 | ret = PTR_ERR(dcdc); | ||
225 | goto err_isink; | ||
226 | } | ||
227 | |||
228 | led = kzalloc(sizeof(*led), GFP_KERNEL); | ||
229 | if (led == NULL) { | ||
230 | ret = -ENOMEM; | ||
231 | goto err_dcdc; | ||
232 | } | ||
233 | |||
234 | led->cdev.brightness_set = wm8350_led_set; | ||
235 | led->cdev.default_trigger = pdata->default_trigger; | ||
236 | led->cdev.name = pdata->name; | ||
237 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; | ||
238 | led->enabled = regulator_is_enabled(isink); | ||
239 | led->isink = isink; | ||
240 | led->dcdc = dcdc; | ||
241 | |||
242 | for (i = 0; i < ARRAY_SIZE(isink_cur) - 1; i++) | ||
243 | if (isink_cur[i] >= pdata->max_uA) | ||
244 | break; | ||
245 | led->max_uA_index = i; | ||
246 | if (pdata->max_uA != isink_cur[i]) | ||
247 | dev_warn(&pdev->dev, | ||
248 | "Maximum current %duA is not directly supported," | ||
249 | " check platform data\n", | ||
250 | pdata->max_uA); | ||
251 | |||
252 | spin_lock_init(&led->value_lock); | ||
253 | mutex_init(&led->mutex); | ||
254 | INIT_WORK(&led->work, led_work); | ||
255 | led->value = LED_OFF; | ||
256 | platform_set_drvdata(pdev, led); | ||
257 | |||
258 | ret = led_classdev_register(&pdev->dev, &led->cdev); | ||
259 | if (ret < 0) | ||
260 | goto err_led; | ||
261 | |||
262 | return 0; | ||
263 | |||
264 | err_led: | ||
265 | kfree(led); | ||
266 | err_dcdc: | ||
267 | regulator_put(dcdc); | ||
268 | err_isink: | ||
269 | regulator_put(isink); | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | static int wm8350_led_remove(struct platform_device *pdev) | ||
274 | { | ||
275 | struct wm8350_led *led = platform_get_drvdata(pdev); | ||
276 | |||
277 | led_classdev_unregister(&led->cdev); | ||
278 | flush_scheduled_work(); | ||
279 | wm8350_led_disable(led); | ||
280 | regulator_put(led->dcdc); | ||
281 | regulator_put(led->isink); | ||
282 | kfree(led); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static struct platform_driver wm8350_led_driver = { | ||
287 | .driver = { | ||
288 | .name = "wm8350-led", | ||
289 | .owner = THIS_MODULE, | ||
290 | }, | ||
291 | .probe = wm8350_led_probe, | ||
292 | .remove = wm8350_led_remove, | ||
293 | .shutdown = wm8350_led_shutdown, | ||
294 | }; | ||
295 | |||
296 | static int __devinit wm8350_led_init(void) | ||
297 | { | ||
298 | return platform_driver_register(&wm8350_led_driver); | ||
299 | } | ||
300 | module_init(wm8350_led_init); | ||
301 | |||
302 | static void wm8350_led_exit(void) | ||
303 | { | ||
304 | platform_driver_unregister(&wm8350_led_driver); | ||
305 | } | ||
306 | module_exit(wm8350_led_exit); | ||
307 | |||
308 | MODULE_AUTHOR("Mark Brown"); | ||
309 | MODULE_DESCRIPTION("WM8350 LED driver"); | ||
310 | MODULE_LICENSE("GPL"); | ||
311 | MODULE_ALIAS("platform:wm8350-led"); | ||
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c index 2f3aa87f2a1f..2982c86ac4cf 100644 --- a/drivers/leds/leds-wrap.c +++ b/drivers/leds/leds-wrap.c | |||
@@ -56,40 +56,21 @@ static struct led_classdev wrap_power_led = { | |||
56 | .name = "wrap::power", | 56 | .name = "wrap::power", |
57 | .brightness_set = wrap_power_led_set, | 57 | .brightness_set = wrap_power_led_set, |
58 | .default_trigger = "default-on", | 58 | .default_trigger = "default-on", |
59 | .flags = LED_CORE_SUSPENDRESUME, | ||
59 | }; | 60 | }; |
60 | 61 | ||
61 | static struct led_classdev wrap_error_led = { | 62 | static struct led_classdev wrap_error_led = { |
62 | .name = "wrap::error", | 63 | .name = "wrap::error", |
63 | .brightness_set = wrap_error_led_set, | 64 | .brightness_set = wrap_error_led_set, |
65 | .flags = LED_CORE_SUSPENDRESUME, | ||
64 | }; | 66 | }; |
65 | 67 | ||
66 | static struct led_classdev wrap_extra_led = { | 68 | static struct led_classdev wrap_extra_led = { |
67 | .name = "wrap::extra", | 69 | .name = "wrap::extra", |
68 | .brightness_set = wrap_extra_led_set, | 70 | .brightness_set = wrap_extra_led_set, |
71 | .flags = LED_CORE_SUSPENDRESUME, | ||
69 | }; | 72 | }; |
70 | 73 | ||
71 | #ifdef CONFIG_PM | ||
72 | static int wrap_led_suspend(struct platform_device *dev, | ||
73 | pm_message_t state) | ||
74 | { | ||
75 | led_classdev_suspend(&wrap_power_led); | ||
76 | led_classdev_suspend(&wrap_error_led); | ||
77 | led_classdev_suspend(&wrap_extra_led); | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | static int wrap_led_resume(struct platform_device *dev) | ||
82 | { | ||
83 | led_classdev_resume(&wrap_power_led); | ||
84 | led_classdev_resume(&wrap_error_led); | ||
85 | led_classdev_resume(&wrap_extra_led); | ||
86 | return 0; | ||
87 | } | ||
88 | #else | ||
89 | #define wrap_led_suspend NULL | ||
90 | #define wrap_led_resume NULL | ||
91 | #endif | ||
92 | |||
93 | static int wrap_led_probe(struct platform_device *pdev) | 74 | static int wrap_led_probe(struct platform_device *pdev) |
94 | { | 75 | { |
95 | int ret; | 76 | int ret; |
@@ -127,8 +108,6 @@ static int wrap_led_remove(struct platform_device *pdev) | |||
127 | static struct platform_driver wrap_led_driver = { | 108 | static struct platform_driver wrap_led_driver = { |
128 | .probe = wrap_led_probe, | 109 | .probe = wrap_led_probe, |
129 | .remove = wrap_led_remove, | 110 | .remove = wrap_led_remove, |
130 | .suspend = wrap_led_suspend, | ||
131 | .resume = wrap_led_resume, | ||
132 | .driver = { | 111 | .driver = { |
133 | .name = DRVNAME, | 112 | .name = DRVNAME, |
134 | .owner = THIS_MODULE, | 113 | .owner = THIS_MODULE, |
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index db681962d7bb..3d6531396dda 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c | |||
@@ -199,6 +199,7 @@ err_out: | |||
199 | static void timer_trig_deactivate(struct led_classdev *led_cdev) | 199 | static void timer_trig_deactivate(struct led_classdev *led_cdev) |
200 | { | 200 | { |
201 | struct timer_trig_data *timer_data = led_cdev->trigger_data; | 201 | struct timer_trig_data *timer_data = led_cdev->trigger_data; |
202 | unsigned long on = 0, off = 0; | ||
202 | 203 | ||
203 | if (timer_data) { | 204 | if (timer_data) { |
204 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); | 205 | device_remove_file(led_cdev->dev, &dev_attr_delay_on); |
@@ -206,6 +207,10 @@ static void timer_trig_deactivate(struct led_classdev *led_cdev) | |||
206 | del_timer_sync(&timer_data->timer); | 207 | del_timer_sync(&timer_data->timer); |
207 | kfree(timer_data); | 208 | kfree(timer_data); |
208 | } | 209 | } |
210 | |||
211 | /* If there is hardware support for blinking, stop it */ | ||
212 | if (led_cdev->blink_set) | ||
213 | led_cdev->blink_set(led_cdev, &on, &off); | ||
209 | } | 214 | } |
210 | 215 | ||
211 | static struct led_trigger timer_led_trigger = { | 216 | static struct led_trigger timer_led_trigger = { |
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index 3a273ccef3f2..f92595c8f165 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c | |||
@@ -1453,6 +1453,9 @@ void wm8350_device_exit(struct wm8350 *wm8350) | |||
1453 | { | 1453 | { |
1454 | int i; | 1454 | int i; |
1455 | 1455 | ||
1456 | for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++) | ||
1457 | platform_device_unregister(wm8350->pmic.led[i].pdev); | ||
1458 | |||
1456 | for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++) | 1459 | for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++) |
1457 | platform_device_unregister(wm8350->pmic.pdev[i]); | 1460 | platform_device_unregister(wm8350->pmic.pdev[i]); |
1458 | 1461 | ||
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index c68c496b2c49..7aa35248181b 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c | |||
@@ -1412,6 +1412,97 @@ int wm8350_register_regulator(struct wm8350 *wm8350, int reg, | |||
1412 | } | 1412 | } |
1413 | EXPORT_SYMBOL_GPL(wm8350_register_regulator); | 1413 | EXPORT_SYMBOL_GPL(wm8350_register_regulator); |
1414 | 1414 | ||
1415 | /** | ||
1416 | * wm8350_register_led - Register a WM8350 LED output | ||
1417 | * | ||
1418 | * @param wm8350 The WM8350 device to configure. | ||
1419 | * @param lednum LED device index to create. | ||
1420 | * @param dcdc The DCDC to use for the LED. | ||
1421 | * @param isink The ISINK to use for the LED. | ||
1422 | * @param pdata Configuration for the LED. | ||
1423 | * | ||
1424 | * The WM8350 supports the use of an ISINK together with a DCDC to | ||
1425 | * provide a power-efficient LED driver. This function registers the | ||
1426 | * regulators and instantiates the platform device for a LED. The | ||
1427 | * operating modes for the LED regulators must be configured using | ||
1428 | * wm8350_isink_set_flash(), wm8350_dcdc25_set_mode() and | ||
1429 | * wm8350_dcdc_set_slot() prior to calling this function. | ||
1430 | */ | ||
1431 | int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | ||
1432 | struct wm8350_led_platform_data *pdata) | ||
1433 | { | ||
1434 | struct wm8350_led *led; | ||
1435 | struct platform_device *pdev; | ||
1436 | int ret; | ||
1437 | |||
1438 | if (lednum > ARRAY_SIZE(wm8350->pmic.led) || lednum < 0) { | ||
1439 | dev_err(wm8350->dev, "Invalid LED index %d\n", lednum); | ||
1440 | return -ENODEV; | ||
1441 | } | ||
1442 | |||
1443 | led = &wm8350->pmic.led[lednum]; | ||
1444 | |||
1445 | if (led->pdev) { | ||
1446 | dev_err(wm8350->dev, "LED %d already allocated\n", lednum); | ||
1447 | return -EINVAL; | ||
1448 | } | ||
1449 | |||
1450 | pdev = platform_device_alloc("wm8350-led", lednum); | ||
1451 | if (pdev == NULL) { | ||
1452 | dev_err(wm8350->dev, "Failed to allocate LED %d\n", lednum); | ||
1453 | return -ENOMEM; | ||
1454 | } | ||
1455 | |||
1456 | led->isink_consumer.dev = &pdev->dev; | ||
1457 | led->isink_consumer.supply = "led_isink"; | ||
1458 | led->isink_init.num_consumer_supplies = 1; | ||
1459 | led->isink_init.consumer_supplies = &led->isink_consumer; | ||
1460 | led->isink_init.constraints.min_uA = 0; | ||
1461 | led->isink_init.constraints.max_uA = pdata->max_uA; | ||
1462 | led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT; | ||
1463 | led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | ||
1464 | ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); | ||
1465 | if (ret != 0) { | ||
1466 | platform_device_put(pdev); | ||
1467 | return ret; | ||
1468 | } | ||
1469 | |||
1470 | led->dcdc_consumer.dev = &pdev->dev; | ||
1471 | led->dcdc_consumer.supply = "led_vcc"; | ||
1472 | led->dcdc_init.num_consumer_supplies = 1; | ||
1473 | led->dcdc_init.consumer_supplies = &led->dcdc_consumer; | ||
1474 | led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | ||
1475 | ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); | ||
1476 | if (ret != 0) { | ||
1477 | platform_device_put(pdev); | ||
1478 | return ret; | ||
1479 | } | ||
1480 | |||
1481 | switch (isink) { | ||
1482 | case WM8350_ISINK_A: | ||
1483 | wm8350->pmic.isink_A_dcdc = dcdc; | ||
1484 | break; | ||
1485 | case WM8350_ISINK_B: | ||
1486 | wm8350->pmic.isink_B_dcdc = dcdc; | ||
1487 | break; | ||
1488 | } | ||
1489 | |||
1490 | pdev->dev.platform_data = pdata; | ||
1491 | pdev->dev.parent = wm8350->dev; | ||
1492 | ret = platform_device_add(pdev); | ||
1493 | if (ret != 0) { | ||
1494 | dev_err(wm8350->dev, "Failed to register LED %d: %d\n", | ||
1495 | lednum, ret); | ||
1496 | platform_device_put(pdev); | ||
1497 | return ret; | ||
1498 | } | ||
1499 | |||
1500 | led->pdev = pdev; | ||
1501 | |||
1502 | return 0; | ||
1503 | } | ||
1504 | EXPORT_SYMBOL_GPL(wm8350_register_led); | ||
1505 | |||
1415 | static struct platform_driver wm8350_regulator_driver = { | 1506 | static struct platform_driver wm8350_regulator_driver = { |
1416 | .probe = wm8350_regulator_probe, | 1507 | .probe = wm8350_regulator_probe, |
1417 | .remove = wm8350_regulator_remove, | 1508 | .remove = wm8350_regulator_remove, |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 570ae59c1d5e..bd5914994142 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -336,6 +336,9 @@ static int | |||
336 | dasd_state_ready_to_online(struct dasd_device * device) | 336 | dasd_state_ready_to_online(struct dasd_device * device) |
337 | { | 337 | { |
338 | int rc; | 338 | int rc; |
339 | struct gendisk *disk; | ||
340 | struct disk_part_iter piter; | ||
341 | struct hd_struct *part; | ||
339 | 342 | ||
340 | if (device->discipline->ready_to_online) { | 343 | if (device->discipline->ready_to_online) { |
341 | rc = device->discipline->ready_to_online(device); | 344 | rc = device->discipline->ready_to_online(device); |
@@ -343,8 +346,14 @@ dasd_state_ready_to_online(struct dasd_device * device) | |||
343 | return rc; | 346 | return rc; |
344 | } | 347 | } |
345 | device->state = DASD_STATE_ONLINE; | 348 | device->state = DASD_STATE_ONLINE; |
346 | if (device->block) | 349 | if (device->block) { |
347 | dasd_schedule_block_bh(device->block); | 350 | dasd_schedule_block_bh(device->block); |
351 | disk = device->block->bdev->bd_disk; | ||
352 | disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); | ||
353 | while ((part = disk_part_iter_next(&piter))) | ||
354 | kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); | ||
355 | disk_part_iter_exit(&piter); | ||
356 | } | ||
348 | return 0; | 357 | return 0; |
349 | } | 358 | } |
350 | 359 | ||
@@ -354,6 +363,9 @@ dasd_state_ready_to_online(struct dasd_device * device) | |||
354 | static int dasd_state_online_to_ready(struct dasd_device *device) | 363 | static int dasd_state_online_to_ready(struct dasd_device *device) |
355 | { | 364 | { |
356 | int rc; | 365 | int rc; |
366 | struct gendisk *disk; | ||
367 | struct disk_part_iter piter; | ||
368 | struct hd_struct *part; | ||
357 | 369 | ||
358 | if (device->discipline->online_to_ready) { | 370 | if (device->discipline->online_to_ready) { |
359 | rc = device->discipline->online_to_ready(device); | 371 | rc = device->discipline->online_to_ready(device); |
@@ -361,6 +373,13 @@ static int dasd_state_online_to_ready(struct dasd_device *device) | |||
361 | return rc; | 373 | return rc; |
362 | } | 374 | } |
363 | device->state = DASD_STATE_READY; | 375 | device->state = DASD_STATE_READY; |
376 | if (device->block) { | ||
377 | disk = device->block->bdev->bd_disk; | ||
378 | disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); | ||
379 | while ((part = disk_part_iter_next(&piter))) | ||
380 | kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); | ||
381 | disk_part_iter_exit(&piter); | ||
382 | } | ||
364 | return 0; | 383 | return 0; |
365 | } | 384 | } |
366 | 385 | ||
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2ef25731d197..300e28a531f8 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -206,6 +206,8 @@ dasd_feature_list(char *str, char **endp) | |||
206 | features |= DASD_FEATURE_USEDIAG; | 206 | features |= DASD_FEATURE_USEDIAG; |
207 | else if (len == 6 && !strncmp(str, "erplog", 6)) | 207 | else if (len == 6 && !strncmp(str, "erplog", 6)) |
208 | features |= DASD_FEATURE_ERPLOG; | 208 | features |= DASD_FEATURE_ERPLOG; |
209 | else if (len == 8 && !strncmp(str, "failfast", 8)) | ||
210 | features |= DASD_FEATURE_FAILFAST; | ||
209 | else { | 211 | else { |
210 | MESSAGE(KERN_WARNING, | 212 | MESSAGE(KERN_WARNING, |
211 | "unsupported feature: %*s, " | 213 | "unsupported feature: %*s, " |
@@ -667,6 +669,51 @@ dasd_device_from_cdev(struct ccw_device *cdev) | |||
667 | */ | 669 | */ |
668 | 670 | ||
669 | /* | 671 | /* |
672 | * failfast controls the behaviour, if no path is available | ||
673 | */ | ||
674 | static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr, | ||
675 | char *buf) | ||
676 | { | ||
677 | struct dasd_devmap *devmap; | ||
678 | int ff_flag; | ||
679 | |||
680 | devmap = dasd_find_busid(dev->bus_id); | ||
681 | if (!IS_ERR(devmap)) | ||
682 | ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0; | ||
683 | else | ||
684 | ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0; | ||
685 | return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n"); | ||
686 | } | ||
687 | |||
688 | static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr, | ||
689 | const char *buf, size_t count) | ||
690 | { | ||
691 | struct dasd_devmap *devmap; | ||
692 | int val; | ||
693 | char *endp; | ||
694 | |||
695 | devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); | ||
696 | if (IS_ERR(devmap)) | ||
697 | return PTR_ERR(devmap); | ||
698 | |||
699 | val = simple_strtoul(buf, &endp, 0); | ||
700 | if (((endp + 1) < (buf + count)) || (val > 1)) | ||
701 | return -EINVAL; | ||
702 | |||
703 | spin_lock(&dasd_devmap_lock); | ||
704 | if (val) | ||
705 | devmap->features |= DASD_FEATURE_FAILFAST; | ||
706 | else | ||
707 | devmap->features &= ~DASD_FEATURE_FAILFAST; | ||
708 | if (devmap->device) | ||
709 | devmap->device->features = devmap->features; | ||
710 | spin_unlock(&dasd_devmap_lock); | ||
711 | return count; | ||
712 | } | ||
713 | |||
714 | static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store); | ||
715 | |||
716 | /* | ||
670 | * readonly controls the readonly status of a dasd | 717 | * readonly controls the readonly status of a dasd |
671 | */ | 718 | */ |
672 | static ssize_t | 719 | static ssize_t |
@@ -1020,6 +1067,7 @@ static struct attribute * dasd_attrs[] = { | |||
1020 | &dev_attr_use_diag.attr, | 1067 | &dev_attr_use_diag.attr, |
1021 | &dev_attr_eer_enabled.attr, | 1068 | &dev_attr_eer_enabled.attr, |
1022 | &dev_attr_erplog.attr, | 1069 | &dev_attr_erplog.attr, |
1070 | &dev_attr_failfast.attr, | ||
1023 | NULL, | 1071 | NULL, |
1024 | }; | 1072 | }; |
1025 | 1073 | ||
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 7844461a995b..ef2a56952054 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -544,7 +544,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev, | |||
544 | } | 544 | } |
545 | cqr->retries = DIAG_MAX_RETRIES; | 545 | cqr->retries = DIAG_MAX_RETRIES; |
546 | cqr->buildclk = get_clock(); | 546 | cqr->buildclk = get_clock(); |
547 | if (blk_noretry_request(req)) | 547 | if (blk_noretry_request(req) || |
548 | block->base->features & DASD_FEATURE_FAILFAST) | ||
548 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 549 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
549 | cqr->startdev = memdev; | 550 | cqr->startdev = memdev; |
550 | cqr->memdev = memdev; | 551 | cqr->memdev = memdev; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index bd2c52e20762..bdb87998f364 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1700,7 +1700,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, | |||
1700 | recid++; | 1700 | recid++; |
1701 | } | 1701 | } |
1702 | } | 1702 | } |
1703 | if (blk_noretry_request(req)) | 1703 | if (blk_noretry_request(req) || |
1704 | block->base->features & DASD_FEATURE_FAILFAST) | ||
1704 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1705 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1705 | cqr->startdev = startdev; | 1706 | cqr->startdev = startdev; |
1706 | cqr->memdev = startdev; | 1707 | cqr->memdev = startdev; |
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 7d442aeff3d1..f1d176021694 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -355,7 +355,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev, | |||
355 | recid++; | 355 | recid++; |
356 | } | 356 | } |
357 | } | 357 | } |
358 | if (blk_noretry_request(req)) | 358 | if (blk_noretry_request(req) || |
359 | block->base->features & DASD_FEATURE_FAILFAST) | ||
359 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 360 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
360 | cqr->startdev = memdev; | 361 | cqr->startdev = memdev; |
361 | cqr->memdev = memdev; | 362 | cqr->memdev = memdev; |
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig index 643033890e34..0769ced52dbd 100644 --- a/drivers/s390/char/Kconfig +++ b/drivers/s390/char/Kconfig | |||
@@ -100,7 +100,7 @@ comment "S/390 tape interface support" | |||
100 | 100 | ||
101 | config S390_TAPE_BLOCK | 101 | config S390_TAPE_BLOCK |
102 | bool "Support for tape block devices" | 102 | bool "Support for tape block devices" |
103 | depends on S390_TAPE | 103 | depends on S390_TAPE && BLOCK |
104 | help | 104 | help |
105 | Select this option if you want to access your channel-attached tape | 105 | Select this option if you want to access your channel-attached tape |
106 | devices using the block device interface. This interface is similar | 106 | devices using the block device interface. This interface is similar |
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f8a3b6967f69..da7afb04e71f 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -169,6 +169,8 @@ static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) | |||
169 | q->nr); | 169 | q->nr); |
170 | debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, | 170 | debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, |
171 | debugfs_root, q, &debugfs_fops); | 171 | debugfs_root, q, &debugfs_fops); |
172 | if (IS_ERR(debugfs_queues[i])) | ||
173 | debugfs_queues[i] = NULL; | ||
172 | } | 174 | } |
173 | 175 | ||
174 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) | 176 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) |
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 4a4dd9adc328..72facb9eb7db 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig | |||
@@ -52,11 +52,11 @@ config LCD_ILI9320 | |||
52 | then say y to include a power driver for it. | 52 | then say y to include a power driver for it. |
53 | 53 | ||
54 | config LCD_TDO24M | 54 | config LCD_TDO24M |
55 | tristate "Toppoly TDO24M LCD Panels support" | 55 | tristate "Toppoly TDO24M and TDO35S LCD Panels support" |
56 | depends on LCD_CLASS_DEVICE && SPI_MASTER | 56 | depends on LCD_CLASS_DEVICE && SPI_MASTER |
57 | default n | 57 | default n |
58 | help | 58 | help |
59 | If you have a Toppoly TDO24M series LCD panel, say y here to | 59 | If you have a Toppoly TDO24M/TDO35S series LCD panel, say y here to |
60 | include the support for it. | 60 | include the support for it. |
61 | 61 | ||
62 | config LCD_VGG2432A4 | 62 | config LCD_VGG2432A4 |
@@ -123,17 +123,14 @@ config BACKLIGHT_ATMEL_PWM | |||
123 | To compile this driver as a module, choose M here: the module will be | 123 | To compile this driver as a module, choose M here: the module will be |
124 | called atmel-pwm-bl. | 124 | called atmel-pwm-bl. |
125 | 125 | ||
126 | config BACKLIGHT_CORGI | 126 | config BACKLIGHT_GENERIC |
127 | tristate "Generic (aka Sharp Corgi) Backlight Driver (DEPRECATED)" | 127 | tristate "Generic (aka Sharp Corgi) Backlight Driver" |
128 | depends on BACKLIGHT_CLASS_DEVICE | 128 | depends on BACKLIGHT_CLASS_DEVICE |
129 | default n | 129 | default y |
130 | help | 130 | help |
131 | Say y to enable the generic platform backlight driver previously | 131 | Say y to enable the generic platform backlight driver previously |
132 | known as the Corgi backlight driver. If you have a Sharp Zaurus | 132 | known as the Corgi backlight driver. If you have a Sharp Zaurus |
133 | SL-C7xx, SL-Cxx00 or SL-6000x say y. Most users can say n. | 133 | SL-C7xx, SL-Cxx00 or SL-6000x say y. |
134 | |||
135 | Note: this driver is marked as deprecated, try enable SPI and | ||
136 | use the new corgi_lcd driver with integrated backlight control | ||
137 | 134 | ||
138 | config BACKLIGHT_LOCOMO | 135 | config BACKLIGHT_LOCOMO |
139 | tristate "Sharp LOCOMO LCD/Backlight Driver" | 136 | tristate "Sharp LOCOMO LCD/Backlight Driver" |
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 103427de6703..363b3cb2f01b 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile | |||
@@ -11,7 +11,7 @@ obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o | |||
11 | 11 | ||
12 | obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o | 12 | obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o |
13 | obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o | 13 | obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o |
14 | obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o | 14 | obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o |
15 | obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o | 15 | obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o |
16 | obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o | 16 | obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o |
17 | obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o | 17 | obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 0664fc032235..157057c79ca3 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -40,6 +40,10 @@ static int fb_notifier_callback(struct notifier_block *self, | |||
40 | if (!bd->ops->check_fb || | 40 | if (!bd->ops->check_fb || |
41 | bd->ops->check_fb(evdata->info)) { | 41 | bd->ops->check_fb(evdata->info)) { |
42 | bd->props.fb_blank = *(int *)evdata->data; | 42 | bd->props.fb_blank = *(int *)evdata->data; |
43 | if (bd->props.fb_blank == FB_BLANK_UNBLANK) | ||
44 | bd->props.state &= ~BL_CORE_FBBLANK; | ||
45 | else | ||
46 | bd->props.state |= BL_CORE_FBBLANK; | ||
43 | backlight_update_status(bd); | 47 | backlight_update_status(bd); |
44 | } | 48 | } |
45 | mutex_unlock(&bd->ops_lock); | 49 | mutex_unlock(&bd->ops_lock); |
@@ -80,20 +84,18 @@ static ssize_t backlight_show_power(struct device *dev, | |||
80 | static ssize_t backlight_store_power(struct device *dev, | 84 | static ssize_t backlight_store_power(struct device *dev, |
81 | struct device_attribute *attr, const char *buf, size_t count) | 85 | struct device_attribute *attr, const char *buf, size_t count) |
82 | { | 86 | { |
83 | int rc = -ENXIO; | 87 | int rc; |
84 | char *endp; | ||
85 | struct backlight_device *bd = to_backlight_device(dev); | 88 | struct backlight_device *bd = to_backlight_device(dev); |
86 | int power = simple_strtoul(buf, &endp, 0); | 89 | unsigned long power; |
87 | size_t size = endp - buf; | ||
88 | 90 | ||
89 | if (*endp && isspace(*endp)) | 91 | rc = strict_strtoul(buf, 0, &power); |
90 | size++; | 92 | if (rc) |
91 | if (size != count) | 93 | return rc; |
92 | return -EINVAL; | ||
93 | 94 | ||
95 | rc = -ENXIO; | ||
94 | mutex_lock(&bd->ops_lock); | 96 | mutex_lock(&bd->ops_lock); |
95 | if (bd->ops) { | 97 | if (bd->ops) { |
96 | pr_debug("backlight: set power to %d\n", power); | 98 | pr_debug("backlight: set power to %lu\n", power); |
97 | if (bd->props.power != power) { | 99 | if (bd->props.power != power) { |
98 | bd->props.power = power; | 100 | bd->props.power = power; |
99 | backlight_update_status(bd); | 101 | backlight_update_status(bd); |
@@ -116,28 +118,25 @@ static ssize_t backlight_show_brightness(struct device *dev, | |||
116 | static ssize_t backlight_store_brightness(struct device *dev, | 118 | static ssize_t backlight_store_brightness(struct device *dev, |
117 | struct device_attribute *attr, const char *buf, size_t count) | 119 | struct device_attribute *attr, const char *buf, size_t count) |
118 | { | 120 | { |
119 | int rc = -ENXIO; | 121 | int rc; |
120 | char *endp; | ||
121 | struct backlight_device *bd = to_backlight_device(dev); | 122 | struct backlight_device *bd = to_backlight_device(dev); |
122 | int brightness = simple_strtoul(buf, &endp, 0); | 123 | unsigned long brightness; |
123 | size_t size = endp - buf; | 124 | |
125 | rc = strict_strtoul(buf, 0, &brightness); | ||
126 | if (rc) | ||
127 | return rc; | ||
124 | 128 | ||
125 | if (*endp && isspace(*endp)) | 129 | rc = -ENXIO; |
126 | size++; | ||
127 | if (size != count) | ||
128 | return -EINVAL; | ||
129 | 130 | ||
130 | mutex_lock(&bd->ops_lock); | 131 | mutex_lock(&bd->ops_lock); |
131 | if (bd->ops) { | 132 | if (bd->ops) { |
132 | if (brightness > bd->props.max_brightness) | 133 | if (brightness > bd->props.max_brightness) |
133 | rc = -EINVAL; | 134 | rc = -EINVAL; |
134 | else { | 135 | else { |
135 | pr_debug("backlight: set brightness to %d\n", | 136 | pr_debug("backlight: set brightness to %lu\n", |
136 | brightness); | 137 | brightness); |
137 | if (bd->props.brightness != brightness) { | 138 | bd->props.brightness = brightness; |
138 | bd->props.brightness = brightness; | 139 | backlight_update_status(bd); |
139 | backlight_update_status(bd); | ||
140 | } | ||
141 | rc = count; | 140 | rc = count; |
142 | } | 141 | } |
143 | } | 142 | } |
@@ -170,6 +169,34 @@ static ssize_t backlight_show_actual_brightness(struct device *dev, | |||
170 | 169 | ||
171 | static struct class *backlight_class; | 170 | static struct class *backlight_class; |
172 | 171 | ||
172 | static int backlight_suspend(struct device *dev, pm_message_t state) | ||
173 | { | ||
174 | struct backlight_device *bd = to_backlight_device(dev); | ||
175 | |||
176 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | ||
177 | mutex_lock(&bd->ops_lock); | ||
178 | bd->props.state |= BL_CORE_SUSPENDED; | ||
179 | backlight_update_status(bd); | ||
180 | mutex_unlock(&bd->ops_lock); | ||
181 | } | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int backlight_resume(struct device *dev) | ||
187 | { | ||
188 | struct backlight_device *bd = to_backlight_device(dev); | ||
189 | |||
190 | if (bd->ops->options & BL_CORE_SUSPENDRESUME) { | ||
191 | mutex_lock(&bd->ops_lock); | ||
192 | bd->props.state &= ~BL_CORE_SUSPENDED; | ||
193 | backlight_update_status(bd); | ||
194 | mutex_unlock(&bd->ops_lock); | ||
195 | } | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
173 | static void bl_device_release(struct device *dev) | 200 | static void bl_device_release(struct device *dev) |
174 | { | 201 | { |
175 | struct backlight_device *bd = to_backlight_device(dev); | 202 | struct backlight_device *bd = to_backlight_device(dev); |
@@ -286,6 +313,8 @@ static int __init backlight_class_init(void) | |||
286 | } | 313 | } |
287 | 314 | ||
288 | backlight_class->dev_attrs = bl_device_attributes; | 315 | backlight_class->dev_attrs = bl_device_attributes; |
316 | backlight_class->suspend = backlight_suspend; | ||
317 | backlight_class->resume = backlight_resume; | ||
289 | return 0; | 318 | return 0; |
290 | } | 319 | } |
291 | 320 | ||
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c deleted file mode 100644 index 4d4d037e3ec9..000000000000 --- a/drivers/video/backlight/corgi_bl.c +++ /dev/null | |||
@@ -1,169 +0,0 @@ | |||
1 | /* | ||
2 | * Backlight Driver for Sharp Zaurus Handhelds (various models) | ||
3 | * | ||
4 | * Copyright (c) 2004-2006 Richard Purdie | ||
5 | * | ||
6 | * Based on Sharp's 2.4 Backlight Driver | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/mutex.h> | ||
19 | #include <linux/fb.h> | ||
20 | #include <linux/backlight.h> | ||
21 | |||
22 | static int corgibl_intensity; | ||
23 | static struct backlight_properties corgibl_data; | ||
24 | static struct backlight_device *corgi_backlight_device; | ||
25 | static struct generic_bl_info *bl_machinfo; | ||
26 | |||
27 | static unsigned long corgibl_flags; | ||
28 | #define CORGIBL_SUSPENDED 0x01 | ||
29 | #define CORGIBL_BATTLOW 0x02 | ||
30 | |||
31 | static int corgibl_send_intensity(struct backlight_device *bd) | ||
32 | { | ||
33 | int intensity = bd->props.brightness; | ||
34 | |||
35 | if (bd->props.power != FB_BLANK_UNBLANK) | ||
36 | intensity = 0; | ||
37 | if (bd->props.fb_blank != FB_BLANK_UNBLANK) | ||
38 | intensity = 0; | ||
39 | if (corgibl_flags & CORGIBL_SUSPENDED) | ||
40 | intensity = 0; | ||
41 | if (corgibl_flags & CORGIBL_BATTLOW) | ||
42 | intensity &= bl_machinfo->limit_mask; | ||
43 | |||
44 | bl_machinfo->set_bl_intensity(intensity); | ||
45 | |||
46 | corgibl_intensity = intensity; | ||
47 | |||
48 | if (bl_machinfo->kick_battery) | ||
49 | bl_machinfo->kick_battery(); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | #ifdef CONFIG_PM | ||
55 | static int corgibl_suspend(struct platform_device *pdev, pm_message_t state) | ||
56 | { | ||
57 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
58 | |||
59 | corgibl_flags |= CORGIBL_SUSPENDED; | ||
60 | backlight_update_status(bd); | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int corgibl_resume(struct platform_device *pdev) | ||
65 | { | ||
66 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
67 | |||
68 | corgibl_flags &= ~CORGIBL_SUSPENDED; | ||
69 | backlight_update_status(bd); | ||
70 | return 0; | ||
71 | } | ||
72 | #else | ||
73 | #define corgibl_suspend NULL | ||
74 | #define corgibl_resume NULL | ||
75 | #endif | ||
76 | |||
77 | static int corgibl_get_intensity(struct backlight_device *bd) | ||
78 | { | ||
79 | return corgibl_intensity; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Called when the battery is low to limit the backlight intensity. | ||
84 | * If limit==0 clear any limit, otherwise limit the intensity | ||
85 | */ | ||
86 | void corgibl_limit_intensity(int limit) | ||
87 | { | ||
88 | if (limit) | ||
89 | corgibl_flags |= CORGIBL_BATTLOW; | ||
90 | else | ||
91 | corgibl_flags &= ~CORGIBL_BATTLOW; | ||
92 | backlight_update_status(corgi_backlight_device); | ||
93 | } | ||
94 | EXPORT_SYMBOL(corgibl_limit_intensity); | ||
95 | |||
96 | |||
97 | static struct backlight_ops corgibl_ops = { | ||
98 | .get_brightness = corgibl_get_intensity, | ||
99 | .update_status = corgibl_send_intensity, | ||
100 | }; | ||
101 | |||
102 | static int corgibl_probe(struct platform_device *pdev) | ||
103 | { | ||
104 | struct generic_bl_info *machinfo = pdev->dev.platform_data; | ||
105 | const char *name = "generic-bl"; | ||
106 | |||
107 | bl_machinfo = machinfo; | ||
108 | if (!machinfo->limit_mask) | ||
109 | machinfo->limit_mask = -1; | ||
110 | |||
111 | if (machinfo->name) | ||
112 | name = machinfo->name; | ||
113 | |||
114 | corgi_backlight_device = backlight_device_register (name, | ||
115 | &pdev->dev, NULL, &corgibl_ops); | ||
116 | if (IS_ERR (corgi_backlight_device)) | ||
117 | return PTR_ERR (corgi_backlight_device); | ||
118 | |||
119 | platform_set_drvdata(pdev, corgi_backlight_device); | ||
120 | |||
121 | corgi_backlight_device->props.max_brightness = machinfo->max_intensity; | ||
122 | corgi_backlight_device->props.power = FB_BLANK_UNBLANK; | ||
123 | corgi_backlight_device->props.brightness = machinfo->default_intensity; | ||
124 | backlight_update_status(corgi_backlight_device); | ||
125 | |||
126 | printk("Corgi Backlight Driver Initialized.\n"); | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static int corgibl_remove(struct platform_device *pdev) | ||
131 | { | ||
132 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
133 | |||
134 | corgibl_data.power = 0; | ||
135 | corgibl_data.brightness = 0; | ||
136 | backlight_update_status(bd); | ||
137 | |||
138 | backlight_device_unregister(bd); | ||
139 | |||
140 | printk("Corgi Backlight Driver Unloaded\n"); | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static struct platform_driver corgibl_driver = { | ||
145 | .probe = corgibl_probe, | ||
146 | .remove = corgibl_remove, | ||
147 | .suspend = corgibl_suspend, | ||
148 | .resume = corgibl_resume, | ||
149 | .driver = { | ||
150 | .name = "generic-bl", | ||
151 | }, | ||
152 | }; | ||
153 | |||
154 | static int __init corgibl_init(void) | ||
155 | { | ||
156 | return platform_driver_register(&corgibl_driver); | ||
157 | } | ||
158 | |||
159 | static void __exit corgibl_exit(void) | ||
160 | { | ||
161 | platform_driver_unregister(&corgibl_driver); | ||
162 | } | ||
163 | |||
164 | module_init(corgibl_init); | ||
165 | module_exit(corgibl_exit); | ||
166 | |||
167 | MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); | ||
168 | MODULE_DESCRIPTION("Corgi Backlight Driver"); | ||
169 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c index 26add8898605..b9fe62b475c6 100644 --- a/drivers/video/backlight/cr_bllcd.c +++ b/drivers/video/backlight/cr_bllcd.c | |||
@@ -259,22 +259,18 @@ static int __init cr_backlight_init(void) | |||
259 | { | 259 | { |
260 | int ret = platform_driver_register(&cr_backlight_driver); | 260 | int ret = platform_driver_register(&cr_backlight_driver); |
261 | 261 | ||
262 | if (!ret) { | 262 | if (ret) |
263 | crp = platform_device_alloc("cr_backlight", -1); | 263 | return ret; |
264 | if (!crp) | ||
265 | return -ENOMEM; | ||
266 | 264 | ||
267 | ret = platform_device_add(crp); | 265 | crp = platform_device_register_simple("cr_backlight", -1, NULL, 0); |
268 | 266 | if (IS_ERR(crp)) { | |
269 | if (ret) { | 267 | platform_driver_unregister(&cr_backlight_driver); |
270 | platform_device_put(crp); | 268 | return PTR_ERR(crp); |
271 | platform_driver_unregister(&cr_backlight_driver); | ||
272 | } | ||
273 | } | 269 | } |
274 | 270 | ||
275 | printk("Carillo Ranch Backlight Driver Initialized.\n"); | 271 | printk("Carillo Ranch Backlight Driver Initialized.\n"); |
276 | 272 | ||
277 | return ret; | 273 | return 0; |
278 | } | 274 | } |
279 | 275 | ||
280 | static void __exit cr_backlight_exit(void) | 276 | static void __exit cr_backlight_exit(void) |
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c new file mode 100644 index 000000000000..6d27f62fdcd0 --- /dev/null +++ b/drivers/video/backlight/generic_bl.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * Generic Backlight Driver | ||
3 | * | ||
4 | * Copyright (c) 2004-2008 Richard Purdie | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <linux/fb.h> | ||
18 | #include <linux/backlight.h> | ||
19 | |||
20 | static int genericbl_intensity; | ||
21 | static struct backlight_device *generic_backlight_device; | ||
22 | static struct generic_bl_info *bl_machinfo; | ||
23 | |||
24 | /* Flag to signal when the battery is low */ | ||
25 | #define GENERICBL_BATTLOW BL_CORE_DRIVER1 | ||
26 | |||
27 | static int genericbl_send_intensity(struct backlight_device *bd) | ||
28 | { | ||
29 | int intensity = bd->props.brightness; | ||
30 | |||
31 | if (bd->props.power != FB_BLANK_UNBLANK) | ||
32 | intensity = 0; | ||
33 | if (bd->props.state & BL_CORE_FBBLANK) | ||
34 | intensity = 0; | ||
35 | if (bd->props.state & BL_CORE_SUSPENDED) | ||
36 | intensity = 0; | ||
37 | if (bd->props.state & GENERICBL_BATTLOW) | ||
38 | intensity &= bl_machinfo->limit_mask; | ||
39 | |||
40 | bl_machinfo->set_bl_intensity(intensity); | ||
41 | |||
42 | genericbl_intensity = intensity; | ||
43 | |||
44 | if (bl_machinfo->kick_battery) | ||
45 | bl_machinfo->kick_battery(); | ||
46 | |||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static int genericbl_get_intensity(struct backlight_device *bd) | ||
51 | { | ||
52 | return genericbl_intensity; | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Called when the battery is low to limit the backlight intensity. | ||
57 | * If limit==0 clear any limit, otherwise limit the intensity | ||
58 | */ | ||
59 | void corgibl_limit_intensity(int limit) | ||
60 | { | ||
61 | struct backlight_device *bd = generic_backlight_device; | ||
62 | |||
63 | mutex_lock(&bd->ops_lock); | ||
64 | if (limit) | ||
65 | bd->props.state |= GENERICBL_BATTLOW; | ||
66 | else | ||
67 | bd->props.state &= ~GENERICBL_BATTLOW; | ||
68 | backlight_update_status(generic_backlight_device); | ||
69 | mutex_unlock(&bd->ops_lock); | ||
70 | } | ||
71 | EXPORT_SYMBOL(corgibl_limit_intensity); | ||
72 | |||
73 | static struct backlight_ops genericbl_ops = { | ||
74 | .options = BL_CORE_SUSPENDRESUME, | ||
75 | .get_brightness = genericbl_get_intensity, | ||
76 | .update_status = genericbl_send_intensity, | ||
77 | }; | ||
78 | |||
79 | static int genericbl_probe(struct platform_device *pdev) | ||
80 | { | ||
81 | struct generic_bl_info *machinfo = pdev->dev.platform_data; | ||
82 | const char *name = "generic-bl"; | ||
83 | struct backlight_device *bd; | ||
84 | |||
85 | bl_machinfo = machinfo; | ||
86 | if (!machinfo->limit_mask) | ||
87 | machinfo->limit_mask = -1; | ||
88 | |||
89 | if (machinfo->name) | ||
90 | name = machinfo->name; | ||
91 | |||
92 | bd = backlight_device_register (name, | ||
93 | &pdev->dev, NULL, &genericbl_ops); | ||
94 | if (IS_ERR (bd)) | ||
95 | return PTR_ERR (bd); | ||
96 | |||
97 | platform_set_drvdata(pdev, bd); | ||
98 | |||
99 | bd->props.max_brightness = machinfo->max_intensity; | ||
100 | bd->props.power = FB_BLANK_UNBLANK; | ||
101 | bd->props.brightness = machinfo->default_intensity; | ||
102 | backlight_update_status(bd); | ||
103 | |||
104 | generic_backlight_device = bd; | ||
105 | |||
106 | printk("Generic Backlight Driver Initialized.\n"); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int genericbl_remove(struct platform_device *pdev) | ||
111 | { | ||
112 | struct backlight_device *bd = platform_get_drvdata(pdev); | ||
113 | |||
114 | bd->props.power = 0; | ||
115 | bd->props.brightness = 0; | ||
116 | backlight_update_status(bd); | ||
117 | |||
118 | backlight_device_unregister(bd); | ||
119 | |||
120 | printk("Generic Backlight Driver Unloaded\n"); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static struct platform_driver genericbl_driver = { | ||
125 | .probe = genericbl_probe, | ||
126 | .remove = genericbl_remove, | ||
127 | .driver = { | ||
128 | .name = "generic-bl", | ||
129 | }, | ||
130 | }; | ||
131 | |||
132 | static int __init genericbl_init(void) | ||
133 | { | ||
134 | return platform_driver_register(&genericbl_driver); | ||
135 | } | ||
136 | |||
137 | static void __exit genericbl_exit(void) | ||
138 | { | ||
139 | platform_driver_unregister(&genericbl_driver); | ||
140 | } | ||
141 | |||
142 | module_init(genericbl_init); | ||
143 | module_exit(genericbl_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); | ||
146 | MODULE_DESCRIPTION("Generic Backlight Driver"); | ||
147 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index d4cfed0b26d5..5be55a20d8c7 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c | |||
@@ -151,19 +151,15 @@ static int __init hp680bl_init(void) | |||
151 | int ret; | 151 | int ret; |
152 | 152 | ||
153 | ret = platform_driver_register(&hp680bl_driver); | 153 | ret = platform_driver_register(&hp680bl_driver); |
154 | if (!ret) { | 154 | if (ret) |
155 | hp680bl_device = platform_device_alloc("hp680-bl", -1); | 155 | return ret; |
156 | if (!hp680bl_device) | 156 | hp680bl_device = platform_device_register_simple("hp680-bl", -1, |
157 | return -ENOMEM; | 157 | NULL, 0); |
158 | 158 | if (IS_ERR(hp680bl_device)) { | |
159 | ret = platform_device_add(hp680bl_device); | 159 | platform_driver_unregister(&hp680bl_driver); |
160 | 160 | return PTR_ERR(hp680bl_device); | |
161 | if (ret) { | ||
162 | platform_device_put(hp680bl_device); | ||
163 | platform_driver_unregister(&hp680bl_driver); | ||
164 | } | ||
165 | } | 161 | } |
166 | return ret; | 162 | return 0; |
167 | } | 163 | } |
168 | 164 | ||
169 | static void __exit hp680bl_exit(void) | 165 | static void __exit hp680bl_exit(void) |
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 06964af761c6..65864c500455 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c | |||
@@ -70,6 +70,7 @@ static int mbp_get_intensity(struct backlight_device *bd) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | static struct backlight_ops mbp_ops = { | 72 | static struct backlight_ops mbp_ops = { |
73 | .options = BL_CORE_SUSPENDRESUME, | ||
73 | .get_brightness = mbp_get_intensity, | 74 | .get_brightness = mbp_get_intensity, |
74 | .update_status = mbp_send_intensity, | 75 | .update_status = mbp_send_intensity, |
75 | }; | 76 | }; |
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c index 15fb4d58b5bc..9edaf24fd82d 100644 --- a/drivers/video/backlight/progear_bl.c +++ b/drivers/video/backlight/progear_bl.c | |||
@@ -119,20 +119,16 @@ static int __init progearbl_init(void) | |||
119 | { | 119 | { |
120 | int ret = platform_driver_register(&progearbl_driver); | 120 | int ret = platform_driver_register(&progearbl_driver); |
121 | 121 | ||
122 | if (!ret) { | 122 | if (ret) |
123 | progearbl_device = platform_device_alloc("progear-bl", -1); | 123 | return ret; |
124 | if (!progearbl_device) | 124 | progearbl_device = platform_device_register_simple("progear-bl", -1, |
125 | return -ENOMEM; | 125 | NULL, 0); |
126 | 126 | if (IS_ERR(progearbl_device)) { | |
127 | ret = platform_device_add(progearbl_device); | 127 | platform_driver_unregister(&progearbl_driver); |
128 | 128 | return PTR_ERR(progearbl_device); | |
129 | if (ret) { | ||
130 | platform_device_put(progearbl_device); | ||
131 | platform_driver_unregister(&progearbl_driver); | ||
132 | } | ||
133 | } | 129 | } |
134 | 130 | ||
135 | return ret; | 131 | return 0; |
136 | } | 132 | } |
137 | 133 | ||
138 | static void __exit progearbl_exit(void) | 134 | static void __exit progearbl_exit(void) |
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c index 8427669162ea..1dae7f8f3c6b 100644 --- a/drivers/video/backlight/tdo24m.c +++ b/drivers/video/backlight/tdo24m.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/spi/spi.h> | 16 | #include <linux/spi/spi.h> |
17 | #include <linux/spi/tdo24m.h> | ||
17 | #include <linux/fb.h> | 18 | #include <linux/fb.h> |
18 | #include <linux/lcd.h> | 19 | #include <linux/lcd.h> |
19 | 20 | ||
@@ -31,6 +32,9 @@ struct tdo24m { | |||
31 | struct spi_transfer xfer; | 32 | struct spi_transfer xfer; |
32 | uint8_t *buf; | 33 | uint8_t *buf; |
33 | 34 | ||
35 | int (*adj_mode)(struct tdo24m *lcd, int mode); | ||
36 | int color_invert; | ||
37 | |||
34 | int power; | 38 | int power; |
35 | int mode; | 39 | int mode; |
36 | }; | 40 | }; |
@@ -66,7 +70,7 @@ static uint32_t lcd_panel_off[] = { | |||
66 | CMD_NULL, | 70 | CMD_NULL, |
67 | }; | 71 | }; |
68 | 72 | ||
69 | static uint32_t lcd_vga_pass_through[] = { | 73 | static uint32_t lcd_vga_pass_through_tdo24m[] = { |
70 | CMD1(0xB0, 0x16), | 74 | CMD1(0xB0, 0x16), |
71 | CMD1(0xBC, 0x80), | 75 | CMD1(0xBC, 0x80), |
72 | CMD1(0xE1, 0x00), | 76 | CMD1(0xE1, 0x00), |
@@ -75,7 +79,7 @@ static uint32_t lcd_vga_pass_through[] = { | |||
75 | CMD_NULL, | 79 | CMD_NULL, |
76 | }; | 80 | }; |
77 | 81 | ||
78 | static uint32_t lcd_qvga_pass_through[] = { | 82 | static uint32_t lcd_qvga_pass_through_tdo24m[] = { |
79 | CMD1(0xB0, 0x16), | 83 | CMD1(0xB0, 0x16), |
80 | CMD1(0xBC, 0x81), | 84 | CMD1(0xBC, 0x81), |
81 | CMD1(0xE1, 0x00), | 85 | CMD1(0xE1, 0x00), |
@@ -84,7 +88,7 @@ static uint32_t lcd_qvga_pass_through[] = { | |||
84 | CMD_NULL, | 88 | CMD_NULL, |
85 | }; | 89 | }; |
86 | 90 | ||
87 | static uint32_t lcd_vga_transfer[] = { | 91 | static uint32_t lcd_vga_transfer_tdo24m[] = { |
88 | CMD1(0xcf, 0x02), /* Blanking period control (1) */ | 92 | CMD1(0xcf, 0x02), /* Blanking period control (1) */ |
89 | CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ | 93 | CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ |
90 | CMD1(0xd1, 0x01), /* CKV timing control on/off */ | 94 | CMD1(0xd1, 0x01), /* CKV timing control on/off */ |
@@ -110,6 +114,35 @@ static uint32_t lcd_qvga_transfer[] = { | |||
110 | CMD_NULL, | 114 | CMD_NULL, |
111 | }; | 115 | }; |
112 | 116 | ||
117 | static uint32_t lcd_vga_pass_through_tdo35s[] = { | ||
118 | CMD1(0xB0, 0x16), | ||
119 | CMD1(0xBC, 0x80), | ||
120 | CMD1(0xE1, 0x00), | ||
121 | CMD1(0x3B, 0x00), | ||
122 | CMD_NULL, | ||
123 | }; | ||
124 | |||
125 | static uint32_t lcd_qvga_pass_through_tdo35s[] = { | ||
126 | CMD1(0xB0, 0x16), | ||
127 | CMD1(0xBC, 0x81), | ||
128 | CMD1(0xE1, 0x00), | ||
129 | CMD1(0x3B, 0x22), | ||
130 | CMD_NULL, | ||
131 | }; | ||
132 | |||
133 | static uint32_t lcd_vga_transfer_tdo35s[] = { | ||
134 | CMD1(0xcf, 0x02), /* Blanking period control (1) */ | ||
135 | CMD2(0xd0, 0x08, 0x04), /* Blanking period control (2) */ | ||
136 | CMD1(0xd1, 0x01), /* CKV timing control on/off */ | ||
137 | CMD2(0xd2, 0x00, 0x1e), /* CKV 1,2 timing control */ | ||
138 | CMD2(0xd3, 0x14, 0x28), /* OEV timing control */ | ||
139 | CMD2(0xd4, 0x28, 0x64), /* ASW timing control (1) */ | ||
140 | CMD1(0xd5, 0x28), /* ASW timing control (2) */ | ||
141 | CMD0(0x21), /* Invert for normally black display */ | ||
142 | CMD0(0x29), /* Display on */ | ||
143 | CMD_NULL, | ||
144 | }; | ||
145 | |||
113 | static uint32_t lcd_panel_config[] = { | 146 | static uint32_t lcd_panel_config[] = { |
114 | CMD2(0xb8, 0xff, 0xf9), /* Output control */ | 147 | CMD2(0xb8, 0xff, 0xf9), /* Output control */ |
115 | CMD0(0x11), /* sleep out */ | 148 | CMD0(0x11), /* sleep out */ |
@@ -148,6 +181,8 @@ static int tdo24m_writes(struct tdo24m *lcd, uint32_t *array) | |||
148 | int nparams, err = 0; | 181 | int nparams, err = 0; |
149 | 182 | ||
150 | for (; *p != CMD_NULL; p++) { | 183 | for (; *p != CMD_NULL; p++) { |
184 | if (!lcd->color_invert && *p == CMD0(0x21)) | ||
185 | continue; | ||
151 | 186 | ||
152 | nparams = (*p >> 30) & 0x3; | 187 | nparams = (*p >> 30) & 0x3; |
153 | 188 | ||
@@ -184,12 +219,33 @@ static int tdo24m_adj_mode(struct tdo24m *lcd, int mode) | |||
184 | { | 219 | { |
185 | switch (mode) { | 220 | switch (mode) { |
186 | case MODE_VGA: | 221 | case MODE_VGA: |
187 | tdo24m_writes(lcd, lcd_vga_pass_through); | 222 | tdo24m_writes(lcd, lcd_vga_pass_through_tdo24m); |
188 | tdo24m_writes(lcd, lcd_panel_config); | 223 | tdo24m_writes(lcd, lcd_panel_config); |
189 | tdo24m_writes(lcd, lcd_vga_transfer); | 224 | tdo24m_writes(lcd, lcd_vga_transfer_tdo24m); |
190 | break; | 225 | break; |
191 | case MODE_QVGA: | 226 | case MODE_QVGA: |
192 | tdo24m_writes(lcd, lcd_qvga_pass_through); | 227 | tdo24m_writes(lcd, lcd_qvga_pass_through_tdo24m); |
228 | tdo24m_writes(lcd, lcd_panel_config); | ||
229 | tdo24m_writes(lcd, lcd_qvga_transfer); | ||
230 | break; | ||
231 | default: | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | |||
235 | lcd->mode = mode; | ||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static int tdo35s_adj_mode(struct tdo24m *lcd, int mode) | ||
240 | { | ||
241 | switch (mode) { | ||
242 | case MODE_VGA: | ||
243 | tdo24m_writes(lcd, lcd_vga_pass_through_tdo35s); | ||
244 | tdo24m_writes(lcd, lcd_panel_config); | ||
245 | tdo24m_writes(lcd, lcd_vga_transfer_tdo35s); | ||
246 | break; | ||
247 | case MODE_QVGA: | ||
248 | tdo24m_writes(lcd, lcd_qvga_pass_through_tdo35s); | ||
193 | tdo24m_writes(lcd, lcd_panel_config); | 249 | tdo24m_writes(lcd, lcd_panel_config); |
194 | tdo24m_writes(lcd, lcd_qvga_transfer); | 250 | tdo24m_writes(lcd, lcd_qvga_transfer); |
195 | break; | 251 | break; |
@@ -213,7 +269,7 @@ static int tdo24m_power_on(struct tdo24m *lcd) | |||
213 | if (err) | 269 | if (err) |
214 | goto out; | 270 | goto out; |
215 | 271 | ||
216 | err = tdo24m_adj_mode(lcd, lcd->mode); | 272 | err = lcd->adj_mode(lcd, lcd->mode); |
217 | out: | 273 | out: |
218 | return err; | 274 | return err; |
219 | } | 275 | } |
@@ -262,7 +318,7 @@ static int tdo24m_set_mode(struct lcd_device *ld, struct fb_videomode *m) | |||
262 | if (lcd->mode == mode) | 318 | if (lcd->mode == mode) |
263 | return 0; | 319 | return 0; |
264 | 320 | ||
265 | return tdo24m_adj_mode(lcd, mode); | 321 | return lcd->adj_mode(lcd, mode); |
266 | } | 322 | } |
267 | 323 | ||
268 | static struct lcd_ops tdo24m_ops = { | 324 | static struct lcd_ops tdo24m_ops = { |
@@ -276,8 +332,16 @@ static int __devinit tdo24m_probe(struct spi_device *spi) | |||
276 | struct tdo24m *lcd; | 332 | struct tdo24m *lcd; |
277 | struct spi_message *m; | 333 | struct spi_message *m; |
278 | struct spi_transfer *x; | 334 | struct spi_transfer *x; |
335 | struct tdo24m_platform_data *pdata; | ||
336 | enum tdo24m_model model; | ||
279 | int err; | 337 | int err; |
280 | 338 | ||
339 | pdata = spi->dev.platform_data; | ||
340 | if (pdata) | ||
341 | model = pdata->model; | ||
342 | else | ||
343 | model = TDO24M; | ||
344 | |||
281 | spi->bits_per_word = 8; | 345 | spi->bits_per_word = 8; |
282 | spi->mode = SPI_MODE_3; | 346 | spi->mode = SPI_MODE_3; |
283 | err = spi_setup(spi); | 347 | err = spi_setup(spi); |
@@ -306,6 +370,20 @@ static int __devinit tdo24m_probe(struct spi_device *spi) | |||
306 | x->tx_buf = &lcd->buf[0]; | 370 | x->tx_buf = &lcd->buf[0]; |
307 | spi_message_add_tail(x, m); | 371 | spi_message_add_tail(x, m); |
308 | 372 | ||
373 | switch (model) { | ||
374 | case TDO24M: | ||
375 | lcd->color_invert = 1; | ||
376 | lcd->adj_mode = tdo24m_adj_mode; | ||
377 | break; | ||
378 | case TDO35S: | ||
379 | lcd->adj_mode = tdo35s_adj_mode; | ||
380 | lcd->color_invert = 0; | ||
381 | break; | ||
382 | default: | ||
383 | dev_err(&spi->dev, "Unsupported model"); | ||
384 | goto out_free; | ||
385 | } | ||
386 | |||
309 | lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, | 387 | lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, |
310 | lcd, &tdo24m_ops); | 388 | lcd, &tdo24m_ops); |
311 | if (IS_ERR(lcd->lcd_dev)) { | 389 | if (IS_ERR(lcd->lcd_dev)) { |
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 57a26649f1a5..b7fbc75a62fc 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c | |||
@@ -39,6 +39,7 @@ struct tosa_lcd_data { | |||
39 | struct i2c_client *i2c; | 39 | struct i2c_client *i2c; |
40 | 40 | ||
41 | int lcd_power; | 41 | int lcd_power; |
42 | bool is_vga; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) | 45 | static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) |
@@ -81,8 +82,12 @@ static void tosa_lcd_tg_init(struct tosa_lcd_data *data) | |||
81 | static void tosa_lcd_tg_on(struct tosa_lcd_data *data) | 82 | static void tosa_lcd_tg_on(struct tosa_lcd_data *data) |
82 | { | 83 | { |
83 | struct spi_device *spi = data->spi; | 84 | struct spi_device *spi = data->spi; |
84 | const int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR; | 85 | int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR; |
85 | tosa_tg_send(spi, TG_PNLCTL, value | TG_REG0_VQV); /* this depends on mode */ | 86 | |
87 | if (data->is_vga) | ||
88 | value |= TG_REG0_VQV; | ||
89 | |||
90 | tosa_tg_send(spi, TG_PNLCTL, value); | ||
86 | 91 | ||
87 | /* TG LCD pannel power up */ | 92 | /* TG LCD pannel power up */ |
88 | tosa_tg_send(spi, TG_PINICTL,0x4); | 93 | tosa_tg_send(spi, TG_PINICTL,0x4); |
@@ -142,9 +147,25 @@ static int tosa_lcd_get_power(struct lcd_device *lcd) | |||
142 | return data->lcd_power; | 147 | return data->lcd_power; |
143 | } | 148 | } |
144 | 149 | ||
150 | static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode) | ||
151 | { | ||
152 | struct tosa_lcd_data *data = lcd_get_data(lcd); | ||
153 | |||
154 | if (mode->xres == 320 || mode->yres == 320) | ||
155 | data->is_vga = false; | ||
156 | else | ||
157 | data->is_vga = true; | ||
158 | |||
159 | if (POWER_IS_ON(data->lcd_power)) | ||
160 | tosa_lcd_tg_on(data); | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
145 | static struct lcd_ops tosa_lcd_ops = { | 165 | static struct lcd_ops tosa_lcd_ops = { |
146 | .set_power = tosa_lcd_set_power, | 166 | .set_power = tosa_lcd_set_power, |
147 | .get_power = tosa_lcd_get_power, | 167 | .get_power = tosa_lcd_get_power, |
168 | .set_mode = tosa_lcd_set_mode, | ||
148 | }; | 169 | }; |
149 | 170 | ||
150 | static int __devinit tosa_lcd_probe(struct spi_device *spi) | 171 | static int __devinit tosa_lcd_probe(struct spi_device *spi) |
@@ -156,6 +177,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi) | |||
156 | if (!data) | 177 | if (!data) |
157 | return -ENOMEM; | 178 | return -ENOMEM; |
158 | 179 | ||
180 | data->is_vga = true; /* defaut to VGA mode */ | ||
181 | |||
159 | /* | 182 | /* |
160 | * bits_per_word cannot be configured in platform data | 183 | * bits_per_word cannot be configured in platform data |
161 | */ | 184 | */ |
diff --git a/drivers/video/backlight/vgg2432a4.c b/drivers/video/backlight/vgg2432a4.c index 593c7687d54a..8e653b8a6f17 100644 --- a/drivers/video/backlight/vgg2432a4.c +++ b/drivers/video/backlight/vgg2432a4.c | |||
@@ -137,7 +137,7 @@ static int vgg2432a4_lcd_init(struct ili9320 *lcd, | |||
137 | 137 | ||
138 | ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1); | 138 | ili9320_write(lcd, ILI9320_RGB_IF1, cfg->rgb_if1); |
139 | ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0); | 139 | ili9320_write(lcd, ILI9320_FRAMEMAKER, 0x0); |
140 | ili9320_write(lcd, ILI9320_RGB_IF2, ILI9320_RGBIF2_DPL); | 140 | ili9320_write(lcd, ILI9320_RGB_IF2, cfg->rgb_if2); |
141 | 141 | ||
142 | ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1)); | 142 | ret = ili9320_write_regs(lcd, vgg_init1, ARRAY_SIZE(vgg_init1)); |
143 | if (ret != 0) | 143 | if (ret != 0) |
diff --git a/fs/Kconfig b/fs/Kconfig index 02cff86af1b4..51307b0fdf0f 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -932,6 +932,58 @@ config CRAMFS | |||
932 | 932 | ||
933 | If unsure, say N. | 933 | If unsure, say N. |
934 | 934 | ||
935 | config SQUASHFS | ||
936 | tristate "SquashFS 4.0 - Squashed file system support" | ||
937 | depends on BLOCK | ||
938 | select ZLIB_INFLATE | ||
939 | help | ||
940 | Saying Y here includes support for SquashFS 4.0 (a Compressed | ||
941 | Read-Only File System). Squashfs is a highly compressed read-only | ||
942 | filesystem for Linux. It uses zlib compression to compress both | ||
943 | files, inodes and directories. Inodes in the system are very small | ||
944 | and all blocks are packed to minimise data overhead. Block sizes | ||
945 | greater than 4K are supported up to a maximum of 1 Mbytes (default | ||
946 | block size 128K). SquashFS 4.0 supports 64 bit filesystems and files | ||
947 | (larger than 4GB), full uid/gid information, hard links and | ||
948 | timestamps. | ||
949 | |||
950 | Squashfs is intended for general read-only filesystem use, for | ||
951 | archival use (i.e. in cases where a .tar.gz file may be used), and in | ||
952 | embedded systems where low overhead is needed. Further information | ||
953 | and tools are available from http://squashfs.sourceforge.net. | ||
954 | |||
955 | If you want to compile this as a module ( = code which can be | ||
956 | inserted in and removed from the running kernel whenever you want), | ||
957 | say M here and read <file:Documentation/modules.txt>. The module | ||
958 | will be called squashfs. Note that the root file system (the one | ||
959 | containing the directory /) cannot be compiled as a module. | ||
960 | |||
961 | If unsure, say N. | ||
962 | |||
963 | config SQUASHFS_EMBEDDED | ||
964 | |||
965 | bool "Additional option for memory-constrained systems" | ||
966 | depends on SQUASHFS | ||
967 | default n | ||
968 | help | ||
969 | Saying Y here allows you to specify cache size. | ||
970 | |||
971 | If unsure, say N. | ||
972 | |||
973 | config SQUASHFS_FRAGMENT_CACHE_SIZE | ||
974 | int "Number of fragments cached" if SQUASHFS_EMBEDDED | ||
975 | depends on SQUASHFS | ||
976 | default "3" | ||
977 | help | ||
978 | By default SquashFS caches the last 3 fragments read from | ||
979 | the filesystem. Increasing this amount may mean SquashFS | ||
980 | has to re-read fragments less often from disk, at the expense | ||
981 | of extra system memory. Decreasing this amount will mean | ||
982 | SquashFS uses less memory at the expense of extra reads from disk. | ||
983 | |||
984 | Note there must be at least one cached fragment. Anything | ||
985 | much more than three will probably not make much difference. | ||
986 | |||
935 | config VXFS_FS | 987 | config VXFS_FS |
936 | tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)" | 988 | tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)" |
937 | depends on BLOCK | 989 | depends on BLOCK |
diff --git a/fs/Makefile b/fs/Makefile index bc4e14df1082..38bc735c67ad 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
@@ -74,6 +74,7 @@ obj-$(CONFIG_JBD) += jbd/ | |||
74 | obj-$(CONFIG_JBD2) += jbd2/ | 74 | obj-$(CONFIG_JBD2) += jbd2/ |
75 | obj-$(CONFIG_EXT2_FS) += ext2/ | 75 | obj-$(CONFIG_EXT2_FS) += ext2/ |
76 | obj-$(CONFIG_CRAMFS) += cramfs/ | 76 | obj-$(CONFIG_CRAMFS) += cramfs/ |
77 | obj-$(CONFIG_SQUASHFS) += squashfs/ | ||
77 | obj-y += ramfs/ | 78 | obj-y += ramfs/ |
78 | obj-$(CONFIG_HUGETLBFS) += hugetlbfs/ | 79 | obj-$(CONFIG_HUGETLBFS) += hugetlbfs/ |
79 | obj-$(CONFIG_CODA_FS) += coda/ | 80 | obj-$(CONFIG_CODA_FS) += coda/ |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index aa5b43205e37..f3e72c5c19f5 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -168,9 +168,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | |||
168 | struct elf_fdpic_params exec_params, interp_params; | 168 | struct elf_fdpic_params exec_params, interp_params; |
169 | struct elf_phdr *phdr; | 169 | struct elf_phdr *phdr; |
170 | unsigned long stack_size, entryaddr; | 170 | unsigned long stack_size, entryaddr; |
171 | #ifndef CONFIG_MMU | ||
172 | unsigned long fullsize; | ||
173 | #endif | ||
174 | #ifdef ELF_FDPIC_PLAT_INIT | 171 | #ifdef ELF_FDPIC_PLAT_INIT |
175 | unsigned long dynaddr; | 172 | unsigned long dynaddr; |
176 | #endif | 173 | #endif |
@@ -390,11 +387,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | |||
390 | goto error_kill; | 387 | goto error_kill; |
391 | } | 388 | } |
392 | 389 | ||
393 | /* expand the stack mapping to use up the entire allocation granule */ | ||
394 | fullsize = kobjsize((char *) current->mm->start_brk); | ||
395 | if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size, | ||
396 | fullsize, 0, 0))) | ||
397 | stack_size = fullsize; | ||
398 | up_write(¤t->mm->mmap_sem); | 390 | up_write(¤t->mm->mmap_sem); |
399 | 391 | ||
400 | current->mm->brk = current->mm->start_brk; | 392 | current->mm->brk = current->mm->start_brk; |
@@ -1567,11 +1559,9 @@ end_coredump: | |||
1567 | static int elf_fdpic_dump_segments(struct file *file, size_t *size, | 1559 | static int elf_fdpic_dump_segments(struct file *file, size_t *size, |
1568 | unsigned long *limit, unsigned long mm_flags) | 1560 | unsigned long *limit, unsigned long mm_flags) |
1569 | { | 1561 | { |
1570 | struct vm_list_struct *vml; | 1562 | struct vm_area_struct *vma; |
1571 | |||
1572 | for (vml = current->mm->context.vmlist; vml; vml = vml->next) { | ||
1573 | struct vm_area_struct *vma = vml->vma; | ||
1574 | 1563 | ||
1564 | for (vma = current->mm->mmap; vma; vma = vma->vm_next) { | ||
1575 | if (!maydump(vma, mm_flags)) | 1565 | if (!maydump(vma, mm_flags)) |
1576 | continue; | 1566 | continue; |
1577 | 1567 | ||
@@ -1617,9 +1607,6 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
1617 | elf_fpxregset_t *xfpu = NULL; | 1607 | elf_fpxregset_t *xfpu = NULL; |
1618 | #endif | 1608 | #endif |
1619 | int thread_status_size = 0; | 1609 | int thread_status_size = 0; |
1620 | #ifndef CONFIG_MMU | ||
1621 | struct vm_list_struct *vml; | ||
1622 | #endif | ||
1623 | elf_addr_t *auxv; | 1610 | elf_addr_t *auxv; |
1624 | unsigned long mm_flags; | 1611 | unsigned long mm_flags; |
1625 | 1612 | ||
@@ -1685,13 +1672,7 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
1685 | fill_prstatus(prstatus, current, signr); | 1672 | fill_prstatus(prstatus, current, signr); |
1686 | elf_core_copy_regs(&prstatus->pr_reg, regs); | 1673 | elf_core_copy_regs(&prstatus->pr_reg, regs); |
1687 | 1674 | ||
1688 | #ifdef CONFIG_MMU | ||
1689 | segs = current->mm->map_count; | 1675 | segs = current->mm->map_count; |
1690 | #else | ||
1691 | segs = 0; | ||
1692 | for (vml = current->mm->context.vmlist; vml; vml = vml->next) | ||
1693 | segs++; | ||
1694 | #endif | ||
1695 | #ifdef ELF_CORE_EXTRA_PHDRS | 1676 | #ifdef ELF_CORE_EXTRA_PHDRS |
1696 | segs += ELF_CORE_EXTRA_PHDRS; | 1677 | segs += ELF_CORE_EXTRA_PHDRS; |
1697 | #endif | 1678 | #endif |
@@ -1766,20 +1747,10 @@ static int elf_fdpic_core_dump(long signr, struct pt_regs *regs, | |||
1766 | mm_flags = current->mm->flags; | 1747 | mm_flags = current->mm->flags; |
1767 | 1748 | ||
1768 | /* write program headers for segments dump */ | 1749 | /* write program headers for segments dump */ |
1769 | for ( | 1750 | for (vma = current->mm->mmap; vma; vma = vma->vm_next) { |
1770 | #ifdef CONFIG_MMU | ||
1771 | vma = current->mm->mmap; vma; vma = vma->vm_next | ||
1772 | #else | ||
1773 | vml = current->mm->context.vmlist; vml; vml = vml->next | ||
1774 | #endif | ||
1775 | ) { | ||
1776 | struct elf_phdr phdr; | 1751 | struct elf_phdr phdr; |
1777 | size_t sz; | 1752 | size_t sz; |
1778 | 1753 | ||
1779 | #ifndef CONFIG_MMU | ||
1780 | vma = vml->vma; | ||
1781 | #endif | ||
1782 | |||
1783 | sz = vma->vm_end - vma->vm_start; | 1754 | sz = vma->vm_end - vma->vm_start; |
1784 | 1755 | ||
1785 | phdr.p_type = PT_LOAD; | 1756 | phdr.p_type = PT_LOAD; |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 7bbd5c6b3725..5cebf0b37798 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -417,8 +417,8 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
417 | unsigned long textpos = 0, datapos = 0, result; | 417 | unsigned long textpos = 0, datapos = 0, result; |
418 | unsigned long realdatastart = 0; | 418 | unsigned long realdatastart = 0; |
419 | unsigned long text_len, data_len, bss_len, stack_len, flags; | 419 | unsigned long text_len, data_len, bss_len, stack_len, flags; |
420 | unsigned long len, reallen, memp = 0; | 420 | unsigned long len, memp = 0; |
421 | unsigned long extra, rlim; | 421 | unsigned long memp_size, extra, rlim; |
422 | unsigned long *reloc = 0, *rp; | 422 | unsigned long *reloc = 0, *rp; |
423 | struct inode *inode; | 423 | struct inode *inode; |
424 | int i, rev, relocs = 0; | 424 | int i, rev, relocs = 0; |
@@ -543,17 +543,10 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
543 | } | 543 | } |
544 | 544 | ||
545 | len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); | 545 | len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); |
546 | len = PAGE_ALIGN(len); | ||
546 | down_write(¤t->mm->mmap_sem); | 547 | down_write(¤t->mm->mmap_sem); |
547 | realdatastart = do_mmap(0, 0, len, | 548 | realdatastart = do_mmap(0, 0, len, |
548 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); | 549 | PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); |
549 | /* Remap to use all availabe slack region space */ | ||
550 | if (realdatastart && (realdatastart < (unsigned long)-4096)) { | ||
551 | reallen = kobjsize((void *)realdatastart); | ||
552 | if (reallen > len) { | ||
553 | realdatastart = do_mremap(realdatastart, len, | ||
554 | reallen, MREMAP_FIXED, realdatastart); | ||
555 | } | ||
556 | } | ||
557 | up_write(¤t->mm->mmap_sem); | 550 | up_write(¤t->mm->mmap_sem); |
558 | 551 | ||
559 | if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) { | 552 | if (realdatastart == 0 || realdatastart >= (unsigned long)-4096) { |
@@ -591,21 +584,14 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
591 | 584 | ||
592 | reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len)); | 585 | reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len)); |
593 | memp = realdatastart; | 586 | memp = realdatastart; |
594 | 587 | memp_size = len; | |
595 | } else { | 588 | } else { |
596 | 589 | ||
597 | len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); | 590 | len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); |
591 | len = PAGE_ALIGN(len); | ||
598 | down_write(¤t->mm->mmap_sem); | 592 | down_write(¤t->mm->mmap_sem); |
599 | textpos = do_mmap(0, 0, len, | 593 | textpos = do_mmap(0, 0, len, |
600 | PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); | 594 | PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); |
601 | /* Remap to use all availabe slack region space */ | ||
602 | if (textpos && (textpos < (unsigned long) -4096)) { | ||
603 | reallen = kobjsize((void *)textpos); | ||
604 | if (reallen > len) { | ||
605 | textpos = do_mremap(textpos, len, reallen, | ||
606 | MREMAP_FIXED, textpos); | ||
607 | } | ||
608 | } | ||
609 | up_write(¤t->mm->mmap_sem); | 595 | up_write(¤t->mm->mmap_sem); |
610 | 596 | ||
611 | if (!textpos || textpos >= (unsigned long) -4096) { | 597 | if (!textpos || textpos >= (unsigned long) -4096) { |
@@ -622,7 +608,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
622 | reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) + | 608 | reloc = (unsigned long *) (textpos + ntohl(hdr->reloc_start) + |
623 | MAX_SHARED_LIBS * sizeof(unsigned long)); | 609 | MAX_SHARED_LIBS * sizeof(unsigned long)); |
624 | memp = textpos; | 610 | memp = textpos; |
625 | 611 | memp_size = len; | |
626 | #ifdef CONFIG_BINFMT_ZFLAT | 612 | #ifdef CONFIG_BINFMT_ZFLAT |
627 | /* | 613 | /* |
628 | * load it all in and treat it like a RAM load from now on | 614 | * load it all in and treat it like a RAM load from now on |
@@ -680,10 +666,12 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
680 | * set up the brk stuff, uses any slack left in data/bss/stack | 666 | * set up the brk stuff, uses any slack left in data/bss/stack |
681 | * allocation. We put the brk after the bss (between the bss | 667 | * allocation. We put the brk after the bss (between the bss |
682 | * and stack) like other platforms. | 668 | * and stack) like other platforms. |
669 | * Userspace code relies on the stack pointer starting out at | ||
670 | * an address right at the end of a page. | ||
683 | */ | 671 | */ |
684 | current->mm->start_brk = datapos + data_len + bss_len; | 672 | current->mm->start_brk = datapos + data_len + bss_len; |
685 | current->mm->brk = (current->mm->start_brk + 3) & ~3; | 673 | current->mm->brk = (current->mm->start_brk + 3) & ~3; |
686 | current->mm->context.end_brk = memp + kobjsize((void *) memp) - stack_len; | 674 | current->mm->context.end_brk = memp + memp_size - stack_len; |
687 | } | 675 | } |
688 | 676 | ||
689 | if (flags & FLAT_FLAG_KTRACE) | 677 | if (flags & FLAT_FLAG_KTRACE) |
@@ -790,8 +778,8 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
790 | 778 | ||
791 | /* zero the BSS, BRK and stack areas */ | 779 | /* zero the BSS, BRK and stack areas */ |
792 | memset((void*)(datapos + data_len), 0, bss_len + | 780 | memset((void*)(datapos + data_len), 0, bss_len + |
793 | (memp + kobjsize((void *) memp) - stack_len - /* end brk */ | 781 | (memp + memp_size - stack_len - /* end brk */ |
794 | libinfo->lib_list[id].start_brk) + /* start brk */ | 782 | libinfo->lib_list[id].start_brk) + /* start brk */ |
795 | stack_len); | 783 | stack_len); |
796 | 784 | ||
797 | return 0; | 785 | return 0; |
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index 1750445556c3..507ed6ec1847 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h | |||
@@ -366,9 +366,6 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c); | |||
366 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); | 366 | void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); |
367 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); | 367 | struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); |
368 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); | 368 | void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); |
369 | struct rb_node *rb_next(struct rb_node *); | ||
370 | struct rb_node *rb_prev(struct rb_node *); | ||
371 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); | ||
372 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); | 369 | int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); |
373 | uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); | 370 | uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); |
374 | struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, | 371 | struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 3e8aeb8b61ce..cd53ff838498 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -41,8 +41,6 @@ do { \ | |||
41 | (vmi)->used = 0; \ | 41 | (vmi)->used = 0; \ |
42 | (vmi)->largest_chunk = 0; \ | 42 | (vmi)->largest_chunk = 0; \ |
43 | } while(0) | 43 | } while(0) |
44 | |||
45 | extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *); | ||
46 | #endif | 44 | #endif |
47 | 45 | ||
48 | extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, | 46 | extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, |
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index b1675c4e66da..43d23948384a 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -74,6 +74,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
74 | "LowTotal: %8lu kB\n" | 74 | "LowTotal: %8lu kB\n" |
75 | "LowFree: %8lu kB\n" | 75 | "LowFree: %8lu kB\n" |
76 | #endif | 76 | #endif |
77 | #ifndef CONFIG_MMU | ||
78 | "MmapCopy: %8lu kB\n" | ||
79 | #endif | ||
77 | "SwapTotal: %8lu kB\n" | 80 | "SwapTotal: %8lu kB\n" |
78 | "SwapFree: %8lu kB\n" | 81 | "SwapFree: %8lu kB\n" |
79 | "Dirty: %8lu kB\n" | 82 | "Dirty: %8lu kB\n" |
@@ -116,6 +119,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
116 | K(i.totalram-i.totalhigh), | 119 | K(i.totalram-i.totalhigh), |
117 | K(i.freeram-i.freehigh), | 120 | K(i.freeram-i.freehigh), |
118 | #endif | 121 | #endif |
122 | #ifndef CONFIG_MMU | ||
123 | K((unsigned long) atomic_read(&mmap_pages_allocated)), | ||
124 | #endif | ||
119 | K(i.totalswap), | 125 | K(i.totalswap), |
120 | K(i.freeswap), | 126 | K(i.freeswap), |
121 | K(global_page_state(NR_FILE_DIRTY)), | 127 | K(global_page_state(NR_FILE_DIRTY)), |
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 3f87d2632947..b446d7ad0b0d 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -33,33 +33,33 @@ | |||
33 | #include "internal.h" | 33 | #include "internal.h" |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * display a single VMA to a sequenced file | 36 | * display a single region to a sequenced file |
37 | */ | 37 | */ |
38 | int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | 38 | static int nommu_region_show(struct seq_file *m, struct vm_region *region) |
39 | { | 39 | { |
40 | unsigned long ino = 0; | 40 | unsigned long ino = 0; |
41 | struct file *file; | 41 | struct file *file; |
42 | dev_t dev = 0; | 42 | dev_t dev = 0; |
43 | int flags, len; | 43 | int flags, len; |
44 | 44 | ||
45 | flags = vma->vm_flags; | 45 | flags = region->vm_flags; |
46 | file = vma->vm_file; | 46 | file = region->vm_file; |
47 | 47 | ||
48 | if (file) { | 48 | if (file) { |
49 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 49 | struct inode *inode = region->vm_file->f_path.dentry->d_inode; |
50 | dev = inode->i_sb->s_dev; | 50 | dev = inode->i_sb->s_dev; |
51 | ino = inode->i_ino; | 51 | ino = inode->i_ino; |
52 | } | 52 | } |
53 | 53 | ||
54 | seq_printf(m, | 54 | seq_printf(m, |
55 | "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", | 55 | "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", |
56 | vma->vm_start, | 56 | region->vm_start, |
57 | vma->vm_end, | 57 | region->vm_end, |
58 | flags & VM_READ ? 'r' : '-', | 58 | flags & VM_READ ? 'r' : '-', |
59 | flags & VM_WRITE ? 'w' : '-', | 59 | flags & VM_WRITE ? 'w' : '-', |
60 | flags & VM_EXEC ? 'x' : '-', | 60 | flags & VM_EXEC ? 'x' : '-', |
61 | flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p', | 61 | flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p', |
62 | ((loff_t)vma->vm_pgoff) << PAGE_SHIFT, | 62 | ((loff_t)region->vm_pgoff) << PAGE_SHIFT, |
63 | MAJOR(dev), MINOR(dev), ino, &len); | 63 | MAJOR(dev), MINOR(dev), ino, &len); |
64 | 64 | ||
65 | if (file) { | 65 | if (file) { |
@@ -75,61 +75,54 @@ int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | |||
75 | } | 75 | } |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * display a list of all the VMAs the kernel knows about | 78 | * display a list of all the REGIONs the kernel knows about |
79 | * - nommu kernals have a single flat list | 79 | * - nommu kernals have a single flat list |
80 | */ | 80 | */ |
81 | static int nommu_vma_list_show(struct seq_file *m, void *v) | 81 | static int nommu_region_list_show(struct seq_file *m, void *_p) |
82 | { | 82 | { |
83 | struct vm_area_struct *vma; | 83 | struct rb_node *p = _p; |
84 | 84 | ||
85 | vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb); | 85 | return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb)); |
86 | return nommu_vma_show(m, vma); | ||
87 | } | 86 | } |
88 | 87 | ||
89 | static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos) | 88 | static void *nommu_region_list_start(struct seq_file *m, loff_t *_pos) |
90 | { | 89 | { |
91 | struct rb_node *_rb; | 90 | struct rb_node *p; |
92 | loff_t pos = *_pos; | 91 | loff_t pos = *_pos; |
93 | void *next = NULL; | ||
94 | 92 | ||
95 | down_read(&nommu_vma_sem); | 93 | down_read(&nommu_region_sem); |
96 | 94 | ||
97 | for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) { | 95 | for (p = rb_first(&nommu_region_tree); p; p = rb_next(p)) |
98 | if (pos == 0) { | 96 | if (pos-- == 0) |
99 | next = _rb; | 97 | return p; |
100 | break; | 98 | return NULL; |
101 | } | ||
102 | pos--; | ||
103 | } | ||
104 | |||
105 | return next; | ||
106 | } | 99 | } |
107 | 100 | ||
108 | static void nommu_vma_list_stop(struct seq_file *m, void *v) | 101 | static void nommu_region_list_stop(struct seq_file *m, void *v) |
109 | { | 102 | { |
110 | up_read(&nommu_vma_sem); | 103 | up_read(&nommu_region_sem); |
111 | } | 104 | } |
112 | 105 | ||
113 | static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos) | 106 | static void *nommu_region_list_next(struct seq_file *m, void *v, loff_t *pos) |
114 | { | 107 | { |
115 | (*pos)++; | 108 | (*pos)++; |
116 | return rb_next((struct rb_node *) v); | 109 | return rb_next((struct rb_node *) v); |
117 | } | 110 | } |
118 | 111 | ||
119 | static const struct seq_operations proc_nommu_vma_list_seqop = { | 112 | static struct seq_operations proc_nommu_region_list_seqop = { |
120 | .start = nommu_vma_list_start, | 113 | .start = nommu_region_list_start, |
121 | .next = nommu_vma_list_next, | 114 | .next = nommu_region_list_next, |
122 | .stop = nommu_vma_list_stop, | 115 | .stop = nommu_region_list_stop, |
123 | .show = nommu_vma_list_show | 116 | .show = nommu_region_list_show |
124 | }; | 117 | }; |
125 | 118 | ||
126 | static int proc_nommu_vma_list_open(struct inode *inode, struct file *file) | 119 | static int proc_nommu_region_list_open(struct inode *inode, struct file *file) |
127 | { | 120 | { |
128 | return seq_open(file, &proc_nommu_vma_list_seqop); | 121 | return seq_open(file, &proc_nommu_region_list_seqop); |
129 | } | 122 | } |
130 | 123 | ||
131 | static const struct file_operations proc_nommu_vma_list_operations = { | 124 | static const struct file_operations proc_nommu_region_list_operations = { |
132 | .open = proc_nommu_vma_list_open, | 125 | .open = proc_nommu_region_list_open, |
133 | .read = seq_read, | 126 | .read = seq_read, |
134 | .llseek = seq_lseek, | 127 | .llseek = seq_lseek, |
135 | .release = seq_release, | 128 | .release = seq_release, |
@@ -137,7 +130,7 @@ static const struct file_operations proc_nommu_vma_list_operations = { | |||
137 | 130 | ||
138 | static int __init proc_nommu_init(void) | 131 | static int __init proc_nommu_init(void) |
139 | { | 132 | { |
140 | proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations); | 133 | proc_create("maps", S_IRUGO, NULL, &proc_nommu_region_list_operations); |
141 | return 0; | 134 | return 0; |
142 | } | 135 | } |
143 | 136 | ||
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index d4a8be32b902..343ea1216bc8 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -15,25 +15,32 @@ | |||
15 | */ | 15 | */ |
16 | void task_mem(struct seq_file *m, struct mm_struct *mm) | 16 | void task_mem(struct seq_file *m, struct mm_struct *mm) |
17 | { | 17 | { |
18 | struct vm_list_struct *vml; | 18 | struct vm_area_struct *vma; |
19 | unsigned long bytes = 0, sbytes = 0, slack = 0; | 19 | struct vm_region *region; |
20 | struct rb_node *p; | ||
21 | unsigned long bytes = 0, sbytes = 0, slack = 0, size; | ||
20 | 22 | ||
21 | down_read(&mm->mmap_sem); | 23 | down_read(&mm->mmap_sem); |
22 | for (vml = mm->context.vmlist; vml; vml = vml->next) { | 24 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { |
23 | if (!vml->vma) | 25 | vma = rb_entry(p, struct vm_area_struct, vm_rb); |
24 | continue; | 26 | |
27 | bytes += kobjsize(vma); | ||
28 | |||
29 | region = vma->vm_region; | ||
30 | if (region) { | ||
31 | size = kobjsize(region); | ||
32 | size += region->vm_end - region->vm_start; | ||
33 | } else { | ||
34 | size = vma->vm_end - vma->vm_start; | ||
35 | } | ||
25 | 36 | ||
26 | bytes += kobjsize(vml); | ||
27 | if (atomic_read(&mm->mm_count) > 1 || | 37 | if (atomic_read(&mm->mm_count) > 1 || |
28 | atomic_read(&vml->vma->vm_usage) > 1 | 38 | vma->vm_flags & VM_MAYSHARE) { |
29 | ) { | 39 | sbytes += size; |
30 | sbytes += kobjsize((void *) vml->vma->vm_start); | ||
31 | sbytes += kobjsize(vml->vma); | ||
32 | } else { | 40 | } else { |
33 | bytes += kobjsize((void *) vml->vma->vm_start); | 41 | bytes += size; |
34 | bytes += kobjsize(vml->vma); | 42 | if (region) |
35 | slack += kobjsize((void *) vml->vma->vm_start) - | 43 | slack = region->vm_end - vma->vm_end; |
36 | (vml->vma->vm_end - vml->vma->vm_start); | ||
37 | } | 44 | } |
38 | } | 45 | } |
39 | 46 | ||
@@ -70,13 +77,14 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) | |||
70 | 77 | ||
71 | unsigned long task_vsize(struct mm_struct *mm) | 78 | unsigned long task_vsize(struct mm_struct *mm) |
72 | { | 79 | { |
73 | struct vm_list_struct *tbp; | 80 | struct vm_area_struct *vma; |
81 | struct rb_node *p; | ||
74 | unsigned long vsize = 0; | 82 | unsigned long vsize = 0; |
75 | 83 | ||
76 | down_read(&mm->mmap_sem); | 84 | down_read(&mm->mmap_sem); |
77 | for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) { | 85 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { |
78 | if (tbp->vma) | 86 | vma = rb_entry(p, struct vm_area_struct, vm_rb); |
79 | vsize += kobjsize((void *) tbp->vma->vm_start); | 87 | vsize += vma->vm_end - vma->vm_start; |
80 | } | 88 | } |
81 | up_read(&mm->mmap_sem); | 89 | up_read(&mm->mmap_sem); |
82 | return vsize; | 90 | return vsize; |
@@ -85,15 +93,19 @@ unsigned long task_vsize(struct mm_struct *mm) | |||
85 | int task_statm(struct mm_struct *mm, int *shared, int *text, | 93 | int task_statm(struct mm_struct *mm, int *shared, int *text, |
86 | int *data, int *resident) | 94 | int *data, int *resident) |
87 | { | 95 | { |
88 | struct vm_list_struct *tbp; | 96 | struct vm_area_struct *vma; |
97 | struct vm_region *region; | ||
98 | struct rb_node *p; | ||
89 | int size = kobjsize(mm); | 99 | int size = kobjsize(mm); |
90 | 100 | ||
91 | down_read(&mm->mmap_sem); | 101 | down_read(&mm->mmap_sem); |
92 | for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) { | 102 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { |
93 | size += kobjsize(tbp); | 103 | vma = rb_entry(p, struct vm_area_struct, vm_rb); |
94 | if (tbp->vma) { | 104 | size += kobjsize(vma); |
95 | size += kobjsize(tbp->vma); | 105 | region = vma->vm_region; |
96 | size += kobjsize((void *) tbp->vma->vm_start); | 106 | if (region) { |
107 | size += kobjsize(region); | ||
108 | size += region->vm_end - region->vm_start; | ||
97 | } | 109 | } |
98 | } | 110 | } |
99 | 111 | ||
@@ -105,20 +117,62 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
105 | } | 117 | } |
106 | 118 | ||
107 | /* | 119 | /* |
120 | * display a single VMA to a sequenced file | ||
121 | */ | ||
122 | static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma) | ||
123 | { | ||
124 | unsigned long ino = 0; | ||
125 | struct file *file; | ||
126 | dev_t dev = 0; | ||
127 | int flags, len; | ||
128 | |||
129 | flags = vma->vm_flags; | ||
130 | file = vma->vm_file; | ||
131 | |||
132 | if (file) { | ||
133 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | ||
134 | dev = inode->i_sb->s_dev; | ||
135 | ino = inode->i_ino; | ||
136 | } | ||
137 | |||
138 | seq_printf(m, | ||
139 | "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n", | ||
140 | vma->vm_start, | ||
141 | vma->vm_end, | ||
142 | flags & VM_READ ? 'r' : '-', | ||
143 | flags & VM_WRITE ? 'w' : '-', | ||
144 | flags & VM_EXEC ? 'x' : '-', | ||
145 | flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p', | ||
146 | vma->vm_pgoff << PAGE_SHIFT, | ||
147 | MAJOR(dev), MINOR(dev), ino, &len); | ||
148 | |||
149 | if (file) { | ||
150 | len = 25 + sizeof(void *) * 6 - len; | ||
151 | if (len < 1) | ||
152 | len = 1; | ||
153 | seq_printf(m, "%*c", len, ' '); | ||
154 | seq_path(m, &file->f_path, ""); | ||
155 | } | ||
156 | |||
157 | seq_putc(m, '\n'); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | /* | ||
108 | * display mapping lines for a particular process's /proc/pid/maps | 162 | * display mapping lines for a particular process's /proc/pid/maps |
109 | */ | 163 | */ |
110 | static int show_map(struct seq_file *m, void *_vml) | 164 | static int show_map(struct seq_file *m, void *_p) |
111 | { | 165 | { |
112 | struct vm_list_struct *vml = _vml; | 166 | struct rb_node *p = _p; |
113 | 167 | ||
114 | return nommu_vma_show(m, vml->vma); | 168 | return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb)); |
115 | } | 169 | } |
116 | 170 | ||
117 | static void *m_start(struct seq_file *m, loff_t *pos) | 171 | static void *m_start(struct seq_file *m, loff_t *pos) |
118 | { | 172 | { |
119 | struct proc_maps_private *priv = m->private; | 173 | struct proc_maps_private *priv = m->private; |
120 | struct vm_list_struct *vml; | ||
121 | struct mm_struct *mm; | 174 | struct mm_struct *mm; |
175 | struct rb_node *p; | ||
122 | loff_t n = *pos; | 176 | loff_t n = *pos; |
123 | 177 | ||
124 | /* pin the task and mm whilst we play with them */ | 178 | /* pin the task and mm whilst we play with them */ |
@@ -134,9 +188,9 @@ static void *m_start(struct seq_file *m, loff_t *pos) | |||
134 | } | 188 | } |
135 | 189 | ||
136 | /* start from the Nth VMA */ | 190 | /* start from the Nth VMA */ |
137 | for (vml = mm->context.vmlist; vml; vml = vml->next) | 191 | for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) |
138 | if (n-- == 0) | 192 | if (n-- == 0) |
139 | return vml; | 193 | return p; |
140 | return NULL; | 194 | return NULL; |
141 | } | 195 | } |
142 | 196 | ||
@@ -152,12 +206,12 @@ static void m_stop(struct seq_file *m, void *_vml) | |||
152 | } | 206 | } |
153 | } | 207 | } |
154 | 208 | ||
155 | static void *m_next(struct seq_file *m, void *_vml, loff_t *pos) | 209 | static void *m_next(struct seq_file *m, void *_p, loff_t *pos) |
156 | { | 210 | { |
157 | struct vm_list_struct *vml = _vml; | 211 | struct rb_node *p = _p; |
158 | 212 | ||
159 | (*pos)++; | 213 | (*pos)++; |
160 | return vml ? vml->next : NULL; | 214 | return p ? rb_next(p) : NULL; |
161 | } | 215 | } |
162 | 216 | ||
163 | static const struct seq_operations proc_pid_maps_ops = { | 217 | static const struct seq_operations proc_pid_maps_ops = { |
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 76acdbc34611..b9b567a28376 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -262,11 +262,11 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | |||
262 | ret = -ENOMEM; | 262 | ret = -ENOMEM; |
263 | pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL); | 263 | pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL); |
264 | if (!pages) | 264 | if (!pages) |
265 | goto out; | 265 | goto out_free; |
266 | 266 | ||
267 | nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages); | 267 | nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages); |
268 | if (nr != lpages) | 268 | if (nr != lpages) |
269 | goto out; /* leave if some pages were missing */ | 269 | goto out_free_pages; /* leave if some pages were missing */ |
270 | 270 | ||
271 | /* check the pages for physical adjacency */ | 271 | /* check the pages for physical adjacency */ |
272 | ptr = pages; | 272 | ptr = pages; |
@@ -274,19 +274,18 @@ unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | |||
274 | page++; | 274 | page++; |
275 | for (loop = lpages; loop > 1; loop--) | 275 | for (loop = lpages; loop > 1; loop--) |
276 | if (*ptr++ != page++) | 276 | if (*ptr++ != page++) |
277 | goto out; | 277 | goto out_free_pages; |
278 | 278 | ||
279 | /* okay - all conditions fulfilled */ | 279 | /* okay - all conditions fulfilled */ |
280 | ret = (unsigned long) page_address(pages[0]); | 280 | ret = (unsigned long) page_address(pages[0]); |
281 | 281 | ||
282 | out: | 282 | out_free_pages: |
283 | if (pages) { | 283 | ptr = pages; |
284 | ptr = pages; | 284 | for (loop = nr; loop > 0; loop--) |
285 | for (loop = lpages; loop > 0; loop--) | 285 | put_page(*ptr++); |
286 | put_page(*ptr++); | 286 | out_free: |
287 | kfree(pages); | 287 | kfree(pages); |
288 | } | 288 | out: |
289 | |||
290 | return ret; | 289 | return ret; |
291 | } | 290 | } |
292 | 291 | ||
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile new file mode 100644 index 000000000000..8258cf9a0317 --- /dev/null +++ b/fs/squashfs/Makefile | |||
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for the linux squashfs routines. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_SQUASHFS) += squashfs.o | ||
6 | squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o | ||
7 | squashfs-y += namei.o super.o symlink.o | ||
8 | #squashfs-y += squashfs2_0.o | ||
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c new file mode 100644 index 000000000000..c837dfc2b3c6 --- /dev/null +++ b/fs/squashfs/block.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * block.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements the low-level routines to read and decompress | ||
26 | * datablocks and metadata blocks. | ||
27 | */ | ||
28 | |||
29 | #include <linux/fs.h> | ||
30 | #include <linux/vfs.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/string.h> | ||
34 | #include <linux/buffer_head.h> | ||
35 | #include <linux/zlib.h> | ||
36 | |||
37 | #include "squashfs_fs.h" | ||
38 | #include "squashfs_fs_sb.h" | ||
39 | #include "squashfs_fs_i.h" | ||
40 | #include "squashfs.h" | ||
41 | |||
42 | /* | ||
43 | * Read the metadata block length, this is stored in the first two | ||
44 | * bytes of the metadata block. | ||
45 | */ | ||
46 | static struct buffer_head *get_block_length(struct super_block *sb, | ||
47 | u64 *cur_index, int *offset, int *length) | ||
48 | { | ||
49 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
50 | struct buffer_head *bh; | ||
51 | |||
52 | bh = sb_bread(sb, *cur_index); | ||
53 | if (bh == NULL) | ||
54 | return NULL; | ||
55 | |||
56 | if (msblk->devblksize - *offset == 1) { | ||
57 | *length = (unsigned char) bh->b_data[*offset]; | ||
58 | put_bh(bh); | ||
59 | bh = sb_bread(sb, ++(*cur_index)); | ||
60 | if (bh == NULL) | ||
61 | return NULL; | ||
62 | *length |= (unsigned char) bh->b_data[0] << 8; | ||
63 | *offset = 1; | ||
64 | } else { | ||
65 | *length = (unsigned char) bh->b_data[*offset] | | ||
66 | (unsigned char) bh->b_data[*offset + 1] << 8; | ||
67 | *offset += 2; | ||
68 | } | ||
69 | |||
70 | return bh; | ||
71 | } | ||
72 | |||
73 | |||
74 | /* | ||
75 | * Read and decompress a metadata block or datablock. Length is non-zero | ||
76 | * if a datablock is being read (the size is stored elsewhere in the | ||
77 | * filesystem), otherwise the length is obtained from the first two bytes of | ||
78 | * the metadata block. A bit in the length field indicates if the block | ||
79 | * is stored uncompressed in the filesystem (usually because compression | ||
80 | * generated a larger block - this does occasionally happen with zlib). | ||
81 | */ | ||
82 | int squashfs_read_data(struct super_block *sb, void **buffer, u64 index, | ||
83 | int length, u64 *next_index, int srclength) | ||
84 | { | ||
85 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
86 | struct buffer_head **bh; | ||
87 | int offset = index & ((1 << msblk->devblksize_log2) - 1); | ||
88 | u64 cur_index = index >> msblk->devblksize_log2; | ||
89 | int bytes, compressed, b = 0, k = 0, page = 0, avail; | ||
90 | |||
91 | |||
92 | bh = kcalloc((msblk->block_size >> msblk->devblksize_log2) + 1, | ||
93 | sizeof(*bh), GFP_KERNEL); | ||
94 | if (bh == NULL) | ||
95 | return -ENOMEM; | ||
96 | |||
97 | if (length) { | ||
98 | /* | ||
99 | * Datablock. | ||
100 | */ | ||
101 | bytes = -offset; | ||
102 | compressed = SQUASHFS_COMPRESSED_BLOCK(length); | ||
103 | length = SQUASHFS_COMPRESSED_SIZE_BLOCK(length); | ||
104 | if (next_index) | ||
105 | *next_index = index + length; | ||
106 | |||
107 | TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", | ||
108 | index, compressed ? "" : "un", length, srclength); | ||
109 | |||
110 | if (length < 0 || length > srclength || | ||
111 | (index + length) > msblk->bytes_used) | ||
112 | goto read_failure; | ||
113 | |||
114 | for (b = 0; bytes < length; b++, cur_index++) { | ||
115 | bh[b] = sb_getblk(sb, cur_index); | ||
116 | if (bh[b] == NULL) | ||
117 | goto block_release; | ||
118 | bytes += msblk->devblksize; | ||
119 | } | ||
120 | ll_rw_block(READ, b, bh); | ||
121 | } else { | ||
122 | /* | ||
123 | * Metadata block. | ||
124 | */ | ||
125 | if ((index + 2) > msblk->bytes_used) | ||
126 | goto read_failure; | ||
127 | |||
128 | bh[0] = get_block_length(sb, &cur_index, &offset, &length); | ||
129 | if (bh[0] == NULL) | ||
130 | goto read_failure; | ||
131 | b = 1; | ||
132 | |||
133 | bytes = msblk->devblksize - offset; | ||
134 | compressed = SQUASHFS_COMPRESSED(length); | ||
135 | length = SQUASHFS_COMPRESSED_SIZE(length); | ||
136 | if (next_index) | ||
137 | *next_index = index + length + 2; | ||
138 | |||
139 | TRACE("Block @ 0x%llx, %scompressed size %d\n", index, | ||
140 | compressed ? "" : "un", length); | ||
141 | |||
142 | if (length < 0 || length > srclength || | ||
143 | (index + length) > msblk->bytes_used) | ||
144 | goto block_release; | ||
145 | |||
146 | for (; bytes < length; b++) { | ||
147 | bh[b] = sb_getblk(sb, ++cur_index); | ||
148 | if (bh[b] == NULL) | ||
149 | goto block_release; | ||
150 | bytes += msblk->devblksize; | ||
151 | } | ||
152 | ll_rw_block(READ, b - 1, bh + 1); | ||
153 | } | ||
154 | |||
155 | if (compressed) { | ||
156 | int zlib_err = 0, zlib_init = 0; | ||
157 | |||
158 | /* | ||
159 | * Uncompress block. | ||
160 | */ | ||
161 | |||
162 | mutex_lock(&msblk->read_data_mutex); | ||
163 | |||
164 | msblk->stream.avail_out = 0; | ||
165 | msblk->stream.avail_in = 0; | ||
166 | |||
167 | bytes = length; | ||
168 | do { | ||
169 | if (msblk->stream.avail_in == 0 && k < b) { | ||
170 | avail = min(bytes, msblk->devblksize - offset); | ||
171 | bytes -= avail; | ||
172 | wait_on_buffer(bh[k]); | ||
173 | if (!buffer_uptodate(bh[k])) | ||
174 | goto release_mutex; | ||
175 | |||
176 | if (avail == 0) { | ||
177 | offset = 0; | ||
178 | put_bh(bh[k++]); | ||
179 | continue; | ||
180 | } | ||
181 | |||
182 | msblk->stream.next_in = bh[k]->b_data + offset; | ||
183 | msblk->stream.avail_in = avail; | ||
184 | offset = 0; | ||
185 | } | ||
186 | |||
187 | if (msblk->stream.avail_out == 0) { | ||
188 | msblk->stream.next_out = buffer[page++]; | ||
189 | msblk->stream.avail_out = PAGE_CACHE_SIZE; | ||
190 | } | ||
191 | |||
192 | if (!zlib_init) { | ||
193 | zlib_err = zlib_inflateInit(&msblk->stream); | ||
194 | if (zlib_err != Z_OK) { | ||
195 | ERROR("zlib_inflateInit returned" | ||
196 | " unexpected result 0x%x," | ||
197 | " srclength %d\n", zlib_err, | ||
198 | srclength); | ||
199 | goto release_mutex; | ||
200 | } | ||
201 | zlib_init = 1; | ||
202 | } | ||
203 | |||
204 | zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH); | ||
205 | |||
206 | if (msblk->stream.avail_in == 0 && k < b) | ||
207 | put_bh(bh[k++]); | ||
208 | } while (zlib_err == Z_OK); | ||
209 | |||
210 | if (zlib_err != Z_STREAM_END) { | ||
211 | ERROR("zlib_inflate returned unexpected result" | ||
212 | " 0x%x, srclength %d, avail_in %d," | ||
213 | " avail_out %d\n", zlib_err, srclength, | ||
214 | msblk->stream.avail_in, | ||
215 | msblk->stream.avail_out); | ||
216 | goto release_mutex; | ||
217 | } | ||
218 | |||
219 | zlib_err = zlib_inflateEnd(&msblk->stream); | ||
220 | if (zlib_err != Z_OK) { | ||
221 | ERROR("zlib_inflateEnd returned unexpected result 0x%x," | ||
222 | " srclength %d\n", zlib_err, srclength); | ||
223 | goto release_mutex; | ||
224 | } | ||
225 | length = msblk->stream.total_out; | ||
226 | mutex_unlock(&msblk->read_data_mutex); | ||
227 | } else { | ||
228 | /* | ||
229 | * Block is uncompressed. | ||
230 | */ | ||
231 | int i, in, pg_offset = 0; | ||
232 | |||
233 | for (i = 0; i < b; i++) { | ||
234 | wait_on_buffer(bh[i]); | ||
235 | if (!buffer_uptodate(bh[i])) | ||
236 | goto block_release; | ||
237 | } | ||
238 | |||
239 | for (bytes = length; k < b; k++) { | ||
240 | in = min(bytes, msblk->devblksize - offset); | ||
241 | bytes -= in; | ||
242 | while (in) { | ||
243 | if (pg_offset == PAGE_CACHE_SIZE) { | ||
244 | page++; | ||
245 | pg_offset = 0; | ||
246 | } | ||
247 | avail = min_t(int, in, PAGE_CACHE_SIZE - | ||
248 | pg_offset); | ||
249 | memcpy(buffer[page] + pg_offset, | ||
250 | bh[k]->b_data + offset, avail); | ||
251 | in -= avail; | ||
252 | pg_offset += avail; | ||
253 | offset += avail; | ||
254 | } | ||
255 | offset = 0; | ||
256 | put_bh(bh[k]); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | kfree(bh); | ||
261 | return length; | ||
262 | |||
263 | release_mutex: | ||
264 | mutex_unlock(&msblk->read_data_mutex); | ||
265 | |||
266 | block_release: | ||
267 | for (; k < b; k++) | ||
268 | put_bh(bh[k]); | ||
269 | |||
270 | read_failure: | ||
271 | ERROR("sb_bread failed reading block 0x%llx\n", cur_index); | ||
272 | kfree(bh); | ||
273 | return -EIO; | ||
274 | } | ||
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c new file mode 100644 index 000000000000..f29eda16d25e --- /dev/null +++ b/fs/squashfs/cache.c | |||
@@ -0,0 +1,412 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * cache.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * Blocks in Squashfs are compressed. To avoid repeatedly decompressing | ||
26 | * recently accessed data Squashfs uses two small metadata and fragment caches. | ||
27 | * | ||
28 | * This file implements a generic cache implementation used for both caches, | ||
29 | * plus functions layered ontop of the generic cache implementation to | ||
30 | * access the metadata and fragment caches. | ||
31 | * | ||
32 | * To avoid out of memory and fragmentation isssues with vmalloc the cache | ||
33 | * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. | ||
34 | * | ||
35 | * It should be noted that the cache is not used for file datablocks, these | ||
36 | * are decompressed and cached in the page-cache in the normal way. The | ||
37 | * cache is only used to temporarily cache fragment and metadata blocks | ||
38 | * which have been read as as a result of a metadata (i.e. inode or | ||
39 | * directory) or fragment access. Because metadata and fragments are packed | ||
40 | * together into blocks (to gain greater compression) the read of a particular | ||
41 | * piece of metadata or fragment will retrieve other metadata/fragments which | ||
42 | * have been packed with it, these because of locality-of-reference may be read | ||
43 | * in the near future. Temporarily caching them ensures they are available for | ||
44 | * near future access without requiring an additional read and decompress. | ||
45 | */ | ||
46 | |||
47 | #include <linux/fs.h> | ||
48 | #include <linux/vfs.h> | ||
49 | #include <linux/slab.h> | ||
50 | #include <linux/vmalloc.h> | ||
51 | #include <linux/sched.h> | ||
52 | #include <linux/spinlock.h> | ||
53 | #include <linux/wait.h> | ||
54 | #include <linux/zlib.h> | ||
55 | #include <linux/pagemap.h> | ||
56 | |||
57 | #include "squashfs_fs.h" | ||
58 | #include "squashfs_fs_sb.h" | ||
59 | #include "squashfs_fs_i.h" | ||
60 | #include "squashfs.h" | ||
61 | |||
62 | /* | ||
63 | * Look-up block in cache, and increment usage count. If not in cache, read | ||
64 | * and decompress it from disk. | ||
65 | */ | ||
66 | struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb, | ||
67 | struct squashfs_cache *cache, u64 block, int length) | ||
68 | { | ||
69 | int i, n; | ||
70 | struct squashfs_cache_entry *entry; | ||
71 | |||
72 | spin_lock(&cache->lock); | ||
73 | |||
74 | while (1) { | ||
75 | for (i = 0; i < cache->entries; i++) | ||
76 | if (cache->entry[i].block == block) | ||
77 | break; | ||
78 | |||
79 | if (i == cache->entries) { | ||
80 | /* | ||
81 | * Block not in cache, if all cache entries are used | ||
82 | * go to sleep waiting for one to become available. | ||
83 | */ | ||
84 | if (cache->unused == 0) { | ||
85 | cache->num_waiters++; | ||
86 | spin_unlock(&cache->lock); | ||
87 | wait_event(cache->wait_queue, cache->unused); | ||
88 | spin_lock(&cache->lock); | ||
89 | cache->num_waiters--; | ||
90 | continue; | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * At least one unused cache entry. A simple | ||
95 | * round-robin strategy is used to choose the entry to | ||
96 | * be evicted from the cache. | ||
97 | */ | ||
98 | i = cache->next_blk; | ||
99 | for (n = 0; n < cache->entries; n++) { | ||
100 | if (cache->entry[i].refcount == 0) | ||
101 | break; | ||
102 | i = (i + 1) % cache->entries; | ||
103 | } | ||
104 | |||
105 | cache->next_blk = (i + 1) % cache->entries; | ||
106 | entry = &cache->entry[i]; | ||
107 | |||
108 | /* | ||
109 | * Initialise choosen cache entry, and fill it in from | ||
110 | * disk. | ||
111 | */ | ||
112 | cache->unused--; | ||
113 | entry->block = block; | ||
114 | entry->refcount = 1; | ||
115 | entry->pending = 1; | ||
116 | entry->num_waiters = 0; | ||
117 | entry->error = 0; | ||
118 | spin_unlock(&cache->lock); | ||
119 | |||
120 | entry->length = squashfs_read_data(sb, entry->data, | ||
121 | block, length, &entry->next_index, | ||
122 | cache->block_size); | ||
123 | |||
124 | spin_lock(&cache->lock); | ||
125 | |||
126 | if (entry->length < 0) | ||
127 | entry->error = entry->length; | ||
128 | |||
129 | entry->pending = 0; | ||
130 | |||
131 | /* | ||
132 | * While filling this entry one or more other processes | ||
133 | * have looked it up in the cache, and have slept | ||
134 | * waiting for it to become available. | ||
135 | */ | ||
136 | if (entry->num_waiters) { | ||
137 | spin_unlock(&cache->lock); | ||
138 | wake_up_all(&entry->wait_queue); | ||
139 | } else | ||
140 | spin_unlock(&cache->lock); | ||
141 | |||
142 | goto out; | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * Block already in cache. Increment refcount so it doesn't | ||
147 | * get reused until we're finished with it, if it was | ||
148 | * previously unused there's one less cache entry available | ||
149 | * for reuse. | ||
150 | */ | ||
151 | entry = &cache->entry[i]; | ||
152 | if (entry->refcount == 0) | ||
153 | cache->unused--; | ||
154 | entry->refcount++; | ||
155 | |||
156 | /* | ||
157 | * If the entry is currently being filled in by another process | ||
158 | * go to sleep waiting for it to become available. | ||
159 | */ | ||
160 | if (entry->pending) { | ||
161 | entry->num_waiters++; | ||
162 | spin_unlock(&cache->lock); | ||
163 | wait_event(entry->wait_queue, !entry->pending); | ||
164 | } else | ||
165 | spin_unlock(&cache->lock); | ||
166 | |||
167 | goto out; | ||
168 | } | ||
169 | |||
170 | out: | ||
171 | TRACE("Got %s %d, start block %lld, refcount %d, error %d\n", | ||
172 | cache->name, i, entry->block, entry->refcount, entry->error); | ||
173 | |||
174 | if (entry->error) | ||
175 | ERROR("Unable to read %s cache entry [%llx]\n", cache->name, | ||
176 | block); | ||
177 | return entry; | ||
178 | } | ||
179 | |||
180 | |||
181 | /* | ||
182 | * Release cache entry, once usage count is zero it can be reused. | ||
183 | */ | ||
184 | void squashfs_cache_put(struct squashfs_cache_entry *entry) | ||
185 | { | ||
186 | struct squashfs_cache *cache = entry->cache; | ||
187 | |||
188 | spin_lock(&cache->lock); | ||
189 | entry->refcount--; | ||
190 | if (entry->refcount == 0) { | ||
191 | cache->unused++; | ||
192 | /* | ||
193 | * If there's any processes waiting for a block to become | ||
194 | * available, wake one up. | ||
195 | */ | ||
196 | if (cache->num_waiters) { | ||
197 | spin_unlock(&cache->lock); | ||
198 | wake_up(&cache->wait_queue); | ||
199 | return; | ||
200 | } | ||
201 | } | ||
202 | spin_unlock(&cache->lock); | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * Delete cache reclaiming all kmalloced buffers. | ||
207 | */ | ||
208 | void squashfs_cache_delete(struct squashfs_cache *cache) | ||
209 | { | ||
210 | int i, j; | ||
211 | |||
212 | if (cache == NULL) | ||
213 | return; | ||
214 | |||
215 | for (i = 0; i < cache->entries; i++) { | ||
216 | if (cache->entry[i].data) { | ||
217 | for (j = 0; j < cache->pages; j++) | ||
218 | kfree(cache->entry[i].data[j]); | ||
219 | kfree(cache->entry[i].data); | ||
220 | } | ||
221 | } | ||
222 | |||
223 | kfree(cache->entry); | ||
224 | kfree(cache); | ||
225 | } | ||
226 | |||
227 | |||
228 | /* | ||
229 | * Initialise cache allocating the specified number of entries, each of | ||
230 | * size block_size. To avoid vmalloc fragmentation issues each entry | ||
231 | * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. | ||
232 | */ | ||
233 | struct squashfs_cache *squashfs_cache_init(char *name, int entries, | ||
234 | int block_size) | ||
235 | { | ||
236 | int i, j; | ||
237 | struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); | ||
238 | |||
239 | if (cache == NULL) { | ||
240 | ERROR("Failed to allocate %s cache\n", name); | ||
241 | return NULL; | ||
242 | } | ||
243 | |||
244 | cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); | ||
245 | if (cache->entry == NULL) { | ||
246 | ERROR("Failed to allocate %s cache\n", name); | ||
247 | goto cleanup; | ||
248 | } | ||
249 | |||
250 | cache->next_blk = 0; | ||
251 | cache->unused = entries; | ||
252 | cache->entries = entries; | ||
253 | cache->block_size = block_size; | ||
254 | cache->pages = block_size >> PAGE_CACHE_SHIFT; | ||
255 | cache->name = name; | ||
256 | cache->num_waiters = 0; | ||
257 | spin_lock_init(&cache->lock); | ||
258 | init_waitqueue_head(&cache->wait_queue); | ||
259 | |||
260 | for (i = 0; i < entries; i++) { | ||
261 | struct squashfs_cache_entry *entry = &cache->entry[i]; | ||
262 | |||
263 | init_waitqueue_head(&cache->entry[i].wait_queue); | ||
264 | entry->cache = cache; | ||
265 | entry->block = SQUASHFS_INVALID_BLK; | ||
266 | entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL); | ||
267 | if (entry->data == NULL) { | ||
268 | ERROR("Failed to allocate %s cache entry\n", name); | ||
269 | goto cleanup; | ||
270 | } | ||
271 | |||
272 | for (j = 0; j < cache->pages; j++) { | ||
273 | entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); | ||
274 | if (entry->data[j] == NULL) { | ||
275 | ERROR("Failed to allocate %s buffer\n", name); | ||
276 | goto cleanup; | ||
277 | } | ||
278 | } | ||
279 | } | ||
280 | |||
281 | return cache; | ||
282 | |||
283 | cleanup: | ||
284 | squashfs_cache_delete(cache); | ||
285 | return NULL; | ||
286 | } | ||
287 | |||
288 | |||
289 | /* | ||
290 | * Copy upto length bytes from cache entry to buffer starting at offset bytes | ||
291 | * into the cache entry. If there's not length bytes then copy the number of | ||
292 | * bytes available. In all cases return the number of bytes copied. | ||
293 | */ | ||
294 | int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, | ||
295 | int offset, int length) | ||
296 | { | ||
297 | int remaining = length; | ||
298 | |||
299 | if (length == 0) | ||
300 | return 0; | ||
301 | else if (buffer == NULL) | ||
302 | return min(length, entry->length - offset); | ||
303 | |||
304 | while (offset < entry->length) { | ||
305 | void *buff = entry->data[offset / PAGE_CACHE_SIZE] | ||
306 | + (offset % PAGE_CACHE_SIZE); | ||
307 | int bytes = min_t(int, entry->length - offset, | ||
308 | PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); | ||
309 | |||
310 | if (bytes >= remaining) { | ||
311 | memcpy(buffer, buff, remaining); | ||
312 | remaining = 0; | ||
313 | break; | ||
314 | } | ||
315 | |||
316 | memcpy(buffer, buff, bytes); | ||
317 | buffer += bytes; | ||
318 | remaining -= bytes; | ||
319 | offset += bytes; | ||
320 | } | ||
321 | |||
322 | return length - remaining; | ||
323 | } | ||
324 | |||
325 | |||
326 | /* | ||
327 | * Read length bytes from metadata position <block, offset> (block is the | ||
328 | * start of the compressed block on disk, and offset is the offset into | ||
329 | * the block once decompressed). Data is packed into consecutive blocks, | ||
330 | * and length bytes may require reading more than one block. | ||
331 | */ | ||
332 | int squashfs_read_metadata(struct super_block *sb, void *buffer, | ||
333 | u64 *block, int *offset, int length) | ||
334 | { | ||
335 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
336 | int bytes, copied = length; | ||
337 | struct squashfs_cache_entry *entry; | ||
338 | |||
339 | TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); | ||
340 | |||
341 | while (length) { | ||
342 | entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); | ||
343 | if (entry->error) | ||
344 | return entry->error; | ||
345 | else if (*offset >= entry->length) | ||
346 | return -EIO; | ||
347 | |||
348 | bytes = squashfs_copy_data(buffer, entry, *offset, length); | ||
349 | if (buffer) | ||
350 | buffer += bytes; | ||
351 | length -= bytes; | ||
352 | *offset += bytes; | ||
353 | |||
354 | if (*offset == entry->length) { | ||
355 | *block = entry->next_index; | ||
356 | *offset = 0; | ||
357 | } | ||
358 | |||
359 | squashfs_cache_put(entry); | ||
360 | } | ||
361 | |||
362 | return copied; | ||
363 | } | ||
364 | |||
365 | |||
366 | /* | ||
367 | * Look-up in the fragmment cache the fragment located at <start_block> in the | ||
368 | * filesystem. If necessary read and decompress it from disk. | ||
369 | */ | ||
370 | struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb, | ||
371 | u64 start_block, int length) | ||
372 | { | ||
373 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
374 | |||
375 | return squashfs_cache_get(sb, msblk->fragment_cache, start_block, | ||
376 | length); | ||
377 | } | ||
378 | |||
379 | |||
380 | /* | ||
381 | * Read and decompress the datablock located at <start_block> in the | ||
382 | * filesystem. The cache is used here to avoid duplicating locking and | ||
383 | * read/decompress code. | ||
384 | */ | ||
385 | struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, | ||
386 | u64 start_block, int length) | ||
387 | { | ||
388 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
389 | |||
390 | return squashfs_cache_get(sb, msblk->read_page, start_block, length); | ||
391 | } | ||
392 | |||
393 | |||
394 | /* | ||
395 | * Read a filesystem table (uncompressed sequence of bytes) from disk | ||
396 | */ | ||
397 | int squashfs_read_table(struct super_block *sb, void *buffer, u64 block, | ||
398 | int length) | ||
399 | { | ||
400 | int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | ||
401 | int i, res; | ||
402 | void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL); | ||
403 | if (data == NULL) | ||
404 | return -ENOMEM; | ||
405 | |||
406 | for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) | ||
407 | data[i] = buffer; | ||
408 | res = squashfs_read_data(sb, data, block, length | | ||
409 | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length); | ||
410 | kfree(data); | ||
411 | return res; | ||
412 | } | ||
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c new file mode 100644 index 000000000000..566b0eaed868 --- /dev/null +++ b/fs/squashfs/dir.c | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * dir.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to read directories from disk. | ||
26 | * | ||
27 | * See namei.c for a description of directory organisation on disk. | ||
28 | */ | ||
29 | |||
30 | #include <linux/fs.h> | ||
31 | #include <linux/vfs.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/zlib.h> | ||
34 | |||
35 | #include "squashfs_fs.h" | ||
36 | #include "squashfs_fs_sb.h" | ||
37 | #include "squashfs_fs_i.h" | ||
38 | #include "squashfs.h" | ||
39 | |||
/*
 * Translation table from the on-disk directory entry type (dire->type,
 * used below as the index) to the VFS DT_* value reported to filldir.
 * Index 1 (DT_DIR) is also used for the invented "." and ".." entries.
 */
static const unsigned char squashfs_filetype_table[] = {
	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
};
43 | |||
44 | /* | ||
45 | * Lookup offset (f_pos) in the directory index, returning the | ||
46 | * metadata block containing it. | ||
47 | * | ||
48 | * If we get an error reading the index then return the part of the index | ||
49 | * (if any) we have managed to read - the index isn't essential, just | ||
50 | * quicker. | ||
51 | */ | ||
52 | static int get_dir_index_using_offset(struct super_block *sb, | ||
53 | u64 *next_block, int *next_offset, u64 index_start, int index_offset, | ||
54 | int i_count, u64 f_pos) | ||
55 | { | ||
56 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
57 | int err, i, index, length = 0; | ||
58 | struct squashfs_dir_index dir_index; | ||
59 | |||
60 | TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %lld\n", | ||
61 | i_count, f_pos); | ||
62 | |||
63 | /* | ||
64 | * Translate from external f_pos to the internal f_pos. This | ||
65 | * is offset by 3 because we invent "." and ".." entries which are | ||
66 | * not actually stored in the directory. | ||
67 | */ | ||
68 | if (f_pos < 3) | ||
69 | return f_pos; | ||
70 | f_pos -= 3; | ||
71 | |||
72 | for (i = 0; i < i_count; i++) { | ||
73 | err = squashfs_read_metadata(sb, &dir_index, &index_start, | ||
74 | &index_offset, sizeof(dir_index)); | ||
75 | if (err < 0) | ||
76 | break; | ||
77 | |||
78 | index = le32_to_cpu(dir_index.index); | ||
79 | if (index > f_pos) | ||
80 | /* | ||
81 | * Found the index we're looking for. | ||
82 | */ | ||
83 | break; | ||
84 | |||
85 | err = squashfs_read_metadata(sb, NULL, &index_start, | ||
86 | &index_offset, le32_to_cpu(dir_index.size) + 1); | ||
87 | if (err < 0) | ||
88 | break; | ||
89 | |||
90 | length = index; | ||
91 | *next_block = le32_to_cpu(dir_index.start_block) + | ||
92 | msblk->directory_table; | ||
93 | } | ||
94 | |||
95 | *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE; | ||
96 | |||
97 | /* | ||
98 | * Translate back from internal f_pos to external f_pos. | ||
99 | */ | ||
100 | return length + 3; | ||
101 | } | ||
102 | |||
103 | |||
104 | static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) | ||
105 | { | ||
106 | struct inode *inode = file->f_dentry->d_inode; | ||
107 | struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; | ||
108 | u64 block = squashfs_i(inode)->start + msblk->directory_table; | ||
109 | int offset = squashfs_i(inode)->offset, length = 0, dir_count, size, | ||
110 | type, err; | ||
111 | unsigned int inode_number; | ||
112 | struct squashfs_dir_header dirh; | ||
113 | struct squashfs_dir_entry *dire; | ||
114 | |||
115 | TRACE("Entered squashfs_readdir [%llx:%x]\n", block, offset); | ||
116 | |||
117 | dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL); | ||
118 | if (dire == NULL) { | ||
119 | ERROR("Failed to allocate squashfs_dir_entry\n"); | ||
120 | goto finish; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Return "." and ".." entries as the first two filenames in the | ||
125 | * directory. To maximise compression these two entries are not | ||
126 | * stored in the directory, and so we invent them here. | ||
127 | * | ||
128 | * It also means that the external f_pos is offset by 3 from the | ||
129 | * on-disk directory f_pos. | ||
130 | */ | ||
131 | while (file->f_pos < 3) { | ||
132 | char *name; | ||
133 | int i_ino; | ||
134 | |||
135 | if (file->f_pos == 0) { | ||
136 | name = "."; | ||
137 | size = 1; | ||
138 | i_ino = inode->i_ino; | ||
139 | } else { | ||
140 | name = ".."; | ||
141 | size = 2; | ||
142 | i_ino = squashfs_i(inode)->parent; | ||
143 | } | ||
144 | |||
145 | TRACE("Calling filldir(%p, %s, %d, %lld, %d, %d)\n", | ||
146 | dirent, name, size, file->f_pos, i_ino, | ||
147 | squashfs_filetype_table[1]); | ||
148 | |||
149 | if (filldir(dirent, name, size, file->f_pos, i_ino, | ||
150 | squashfs_filetype_table[1]) < 0) { | ||
151 | TRACE("Filldir returned less than 0\n"); | ||
152 | goto finish; | ||
153 | } | ||
154 | |||
155 | file->f_pos += size; | ||
156 | } | ||
157 | |||
158 | length = get_dir_index_using_offset(inode->i_sb, &block, &offset, | ||
159 | squashfs_i(inode)->dir_idx_start, | ||
160 | squashfs_i(inode)->dir_idx_offset, | ||
161 | squashfs_i(inode)->dir_idx_cnt, | ||
162 | file->f_pos); | ||
163 | |||
164 | while (length < i_size_read(inode)) { | ||
165 | /* | ||
166 | * Read directory header | ||
167 | */ | ||
168 | err = squashfs_read_metadata(inode->i_sb, &dirh, &block, | ||
169 | &offset, sizeof(dirh)); | ||
170 | if (err < 0) | ||
171 | goto failed_read; | ||
172 | |||
173 | length += sizeof(dirh); | ||
174 | |||
175 | dir_count = le32_to_cpu(dirh.count) + 1; | ||
176 | while (dir_count--) { | ||
177 | /* | ||
178 | * Read directory entry. | ||
179 | */ | ||
180 | err = squashfs_read_metadata(inode->i_sb, dire, &block, | ||
181 | &offset, sizeof(*dire)); | ||
182 | if (err < 0) | ||
183 | goto failed_read; | ||
184 | |||
185 | size = le16_to_cpu(dire->size) + 1; | ||
186 | |||
187 | err = squashfs_read_metadata(inode->i_sb, dire->name, | ||
188 | &block, &offset, size); | ||
189 | if (err < 0) | ||
190 | goto failed_read; | ||
191 | |||
192 | length += sizeof(*dire) + size; | ||
193 | |||
194 | if (file->f_pos >= length) | ||
195 | continue; | ||
196 | |||
197 | dire->name[size] = '\0'; | ||
198 | inode_number = le32_to_cpu(dirh.inode_number) + | ||
199 | ((short) le16_to_cpu(dire->inode_number)); | ||
200 | type = le16_to_cpu(dire->type); | ||
201 | |||
202 | TRACE("Calling filldir(%p, %s, %d, %lld, %x:%x, %d, %d)" | ||
203 | "\n", dirent, dire->name, size, | ||
204 | file->f_pos, | ||
205 | le32_to_cpu(dirh.start_block), | ||
206 | le16_to_cpu(dire->offset), | ||
207 | inode_number, | ||
208 | squashfs_filetype_table[type]); | ||
209 | |||
210 | if (filldir(dirent, dire->name, size, file->f_pos, | ||
211 | inode_number, | ||
212 | squashfs_filetype_table[type]) < 0) { | ||
213 | TRACE("Filldir returned less than 0\n"); | ||
214 | goto finish; | ||
215 | } | ||
216 | |||
217 | file->f_pos = length; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | finish: | ||
222 | kfree(dire); | ||
223 | return 0; | ||
224 | |||
225 | failed_read: | ||
226 | ERROR("Unable to read directory block [%llx:%x]\n", block, offset); | ||
227 | kfree(dire); | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | |||
/*
 * File operations for squashfs directories: listing is done by
 * squashfs_readdir(); read(2) on a directory goes through the VFS's
 * generic_read_dir().
 */
const struct file_operations squashfs_dir_ops = {
	.read = generic_read_dir,
	.readdir = squashfs_readdir
};
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c new file mode 100644 index 000000000000..69e971d5ddc1 --- /dev/null +++ b/fs/squashfs/export.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * export.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to make Squashfs filesystems exportable (NFS etc.) | ||
26 | * | ||
27 | * The export code uses an inode lookup table to map inode numbers passed in | ||
28 | * filehandles to an inode location on disk. This table is stored compressed | ||
29 | * into metadata blocks. A second index table is used to locate these. This | ||
30 | * second index table for speed of access (and because it is small) is read at | ||
31 | * mount time and cached in memory. | ||
32 | * | ||
33 | * The inode lookup table is used only by the export code, inode disk | ||
34 | * locations are directly encoded in directories, enabling direct access | ||
35 | * without an intermediate lookup for all operations except the export ops. | ||
36 | */ | ||
37 | |||
38 | #include <linux/fs.h> | ||
39 | #include <linux/vfs.h> | ||
40 | #include <linux/dcache.h> | ||
41 | #include <linux/exportfs.h> | ||
42 | #include <linux/zlib.h> | ||
43 | |||
44 | #include "squashfs_fs.h" | ||
45 | #include "squashfs_fs_sb.h" | ||
46 | #include "squashfs_fs_i.h" | ||
47 | #include "squashfs.h" | ||
48 | |||
49 | /* | ||
50 | * Look-up inode number (ino) in table, returning the inode location. | ||
51 | */ | ||
52 | static long long squashfs_inode_lookup(struct super_block *sb, int ino_num) | ||
53 | { | ||
54 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
55 | int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1); | ||
56 | int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1); | ||
57 | u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]); | ||
58 | __le64 ino; | ||
59 | int err; | ||
60 | |||
61 | TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num); | ||
62 | |||
63 | err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino)); | ||
64 | if (err < 0) | ||
65 | return err; | ||
66 | |||
67 | TRACE("squashfs_inode_lookup, inode = 0x%llx\n", | ||
68 | (u64) le64_to_cpu(ino)); | ||
69 | |||
70 | return le64_to_cpu(ino); | ||
71 | } | ||
72 | |||
73 | |||
74 | static struct dentry *squashfs_export_iget(struct super_block *sb, | ||
75 | unsigned int ino_num) | ||
76 | { | ||
77 | long long ino; | ||
78 | struct dentry *dentry = ERR_PTR(-ENOENT); | ||
79 | |||
80 | TRACE("Entered squashfs_export_iget\n"); | ||
81 | |||
82 | ino = squashfs_inode_lookup(sb, ino_num); | ||
83 | if (ino >= 0) | ||
84 | dentry = d_obtain_alias(squashfs_iget(sb, ino, ino_num)); | ||
85 | |||
86 | return dentry; | ||
87 | } | ||
88 | |||
89 | |||
90 | static struct dentry *squashfs_fh_to_dentry(struct super_block *sb, | ||
91 | struct fid *fid, int fh_len, int fh_type) | ||
92 | { | ||
93 | if ((fh_type != FILEID_INO32_GEN && fh_type != FILEID_INO32_GEN_PARENT) | ||
94 | || fh_len < 2) | ||
95 | return NULL; | ||
96 | |||
97 | return squashfs_export_iget(sb, fid->i32.ino); | ||
98 | } | ||
99 | |||
100 | |||
101 | static struct dentry *squashfs_fh_to_parent(struct super_block *sb, | ||
102 | struct fid *fid, int fh_len, int fh_type) | ||
103 | { | ||
104 | if (fh_type != FILEID_INO32_GEN_PARENT || fh_len < 4) | ||
105 | return NULL; | ||
106 | |||
107 | return squashfs_export_iget(sb, fid->i32.parent_ino); | ||
108 | } | ||
109 | |||
110 | |||
111 | static struct dentry *squashfs_get_parent(struct dentry *child) | ||
112 | { | ||
113 | struct inode *inode = child->d_inode; | ||
114 | unsigned int parent_ino = squashfs_i(inode)->parent; | ||
115 | |||
116 | return squashfs_export_iget(inode->i_sb, parent_ino); | ||
117 | } | ||
118 | |||
119 | |||
120 | /* | ||
121 | * Read uncompressed inode lookup table indexes off disk into memory | ||
122 | */ | ||
123 | __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, | ||
124 | u64 lookup_table_start, unsigned int inodes) | ||
125 | { | ||
126 | unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); | ||
127 | __le64 *inode_lookup_table; | ||
128 | int err; | ||
129 | |||
130 | TRACE("In read_inode_lookup_table, length %d\n", length); | ||
131 | |||
132 | /* Allocate inode lookup table indexes */ | ||
133 | inode_lookup_table = kmalloc(length, GFP_KERNEL); | ||
134 | if (inode_lookup_table == NULL) { | ||
135 | ERROR("Failed to allocate inode lookup table\n"); | ||
136 | return ERR_PTR(-ENOMEM); | ||
137 | } | ||
138 | |||
139 | err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start, | ||
140 | length); | ||
141 | if (err < 0) { | ||
142 | ERROR("unable to read inode lookup table\n"); | ||
143 | kfree(inode_lookup_table); | ||
144 | return ERR_PTR(err); | ||
145 | } | ||
146 | |||
147 | return inode_lookup_table; | ||
148 | } | ||
149 | |||
150 | |||
/*
 * Export operations making squashfs usable over NFS: filehandles are
 * decoded via the inode lookup table (see squashfs_export_iget above).
 */
const struct export_operations squashfs_export_ops = {
	.fh_to_dentry = squashfs_fh_to_dentry,
	.fh_to_parent = squashfs_fh_to_parent,
	.get_parent = squashfs_get_parent
};
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c new file mode 100644 index 000000000000..717767d831df --- /dev/null +++ b/fs/squashfs/file.c | |||
@@ -0,0 +1,502 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * file.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file contains code for handling regular files. A regular file | ||
26 | * consists of a sequence of contiguous compressed blocks, and/or a | ||
27 | * compressed fragment block (tail-end packed block). The compressed size | ||
28 | * of each datablock is stored in a block list contained within the | ||
29 | * file inode (itself stored in one or more compressed metadata blocks). | ||
30 | * | ||
31 | * To speed up access to datablocks when reading 'large' files (256 Mbytes or | ||
32 | * larger), the code implements an index cache that caches the mapping from | ||
33 | * block index to datablock location on disk. | ||
34 | * | ||
35 | * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while | ||
36 | * retaining a simple and space-efficient block list on disk. The cache | ||
37 | * is split into slots, caching up to eight 224 GiB files (128 KiB blocks). | ||
38 | * Larger files use multiple slots, with 1.75 TiB files using all 8 slots. | ||
39 | * The index cache is designed to be memory efficient, and by default uses | ||
40 | * 16 KiB. | ||
41 | */ | ||
42 | |||
43 | #include <linux/fs.h> | ||
44 | #include <linux/vfs.h> | ||
45 | #include <linux/kernel.h> | ||
46 | #include <linux/slab.h> | ||
47 | #include <linux/string.h> | ||
48 | #include <linux/pagemap.h> | ||
49 | #include <linux/mutex.h> | ||
50 | #include <linux/zlib.h> | ||
51 | |||
52 | #include "squashfs_fs.h" | ||
53 | #include "squashfs_fs_sb.h" | ||
54 | #include "squashfs_fs_i.h" | ||
55 | #include "squashfs.h" | ||
56 | |||
57 | /* | ||
58 | * Locate cache slot in range [offset, index] for specified inode. If | ||
59 | * there's more than one return the slot closest to index. | ||
60 | */ | ||
61 | static struct meta_index *locate_meta_index(struct inode *inode, int offset, | ||
62 | int index) | ||
63 | { | ||
64 | struct meta_index *meta = NULL; | ||
65 | struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; | ||
66 | int i; | ||
67 | |||
68 | mutex_lock(&msblk->meta_index_mutex); | ||
69 | |||
70 | TRACE("locate_meta_index: index %d, offset %d\n", index, offset); | ||
71 | |||
72 | if (msblk->meta_index == NULL) | ||
73 | goto not_allocated; | ||
74 | |||
75 | for (i = 0; i < SQUASHFS_META_SLOTS; i++) { | ||
76 | if (msblk->meta_index[i].inode_number == inode->i_ino && | ||
77 | msblk->meta_index[i].offset >= offset && | ||
78 | msblk->meta_index[i].offset <= index && | ||
79 | msblk->meta_index[i].locked == 0) { | ||
80 | TRACE("locate_meta_index: entry %d, offset %d\n", i, | ||
81 | msblk->meta_index[i].offset); | ||
82 | meta = &msblk->meta_index[i]; | ||
83 | offset = meta->offset; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | if (meta) | ||
88 | meta->locked = 1; | ||
89 | |||
90 | not_allocated: | ||
91 | mutex_unlock(&msblk->meta_index_mutex); | ||
92 | |||
93 | return meta; | ||
94 | } | ||
95 | |||
96 | |||
97 | /* | ||
98 | * Find and initialise an empty cache slot for index offset. | ||
99 | */ | ||
100 | static struct meta_index *empty_meta_index(struct inode *inode, int offset, | ||
101 | int skip) | ||
102 | { | ||
103 | struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; | ||
104 | struct meta_index *meta = NULL; | ||
105 | int i; | ||
106 | |||
107 | mutex_lock(&msblk->meta_index_mutex); | ||
108 | |||
109 | TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip); | ||
110 | |||
111 | if (msblk->meta_index == NULL) { | ||
112 | /* | ||
113 | * First time cache index has been used, allocate and | ||
114 | * initialise. The cache index could be allocated at | ||
115 | * mount time but doing it here means it is allocated only | ||
116 | * if a 'large' file is read. | ||
117 | */ | ||
118 | msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS, | ||
119 | sizeof(*(msblk->meta_index)), GFP_KERNEL); | ||
120 | if (msblk->meta_index == NULL) { | ||
121 | ERROR("Failed to allocate meta_index\n"); | ||
122 | goto failed; | ||
123 | } | ||
124 | for (i = 0; i < SQUASHFS_META_SLOTS; i++) { | ||
125 | msblk->meta_index[i].inode_number = 0; | ||
126 | msblk->meta_index[i].locked = 0; | ||
127 | } | ||
128 | msblk->next_meta_index = 0; | ||
129 | } | ||
130 | |||
131 | for (i = SQUASHFS_META_SLOTS; i && | ||
132 | msblk->meta_index[msblk->next_meta_index].locked; i--) | ||
133 | msblk->next_meta_index = (msblk->next_meta_index + 1) % | ||
134 | SQUASHFS_META_SLOTS; | ||
135 | |||
136 | if (i == 0) { | ||
137 | TRACE("empty_meta_index: failed!\n"); | ||
138 | goto failed; | ||
139 | } | ||
140 | |||
141 | TRACE("empty_meta_index: returned meta entry %d, %p\n", | ||
142 | msblk->next_meta_index, | ||
143 | &msblk->meta_index[msblk->next_meta_index]); | ||
144 | |||
145 | meta = &msblk->meta_index[msblk->next_meta_index]; | ||
146 | msblk->next_meta_index = (msblk->next_meta_index + 1) % | ||
147 | SQUASHFS_META_SLOTS; | ||
148 | |||
149 | meta->inode_number = inode->i_ino; | ||
150 | meta->offset = offset; | ||
151 | meta->skip = skip; | ||
152 | meta->entries = 0; | ||
153 | meta->locked = 1; | ||
154 | |||
155 | failed: | ||
156 | mutex_unlock(&msblk->meta_index_mutex); | ||
157 | return meta; | ||
158 | } | ||
159 | |||
160 | |||
161 | static void release_meta_index(struct inode *inode, struct meta_index *meta) | ||
162 | { | ||
163 | struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; | ||
164 | mutex_lock(&msblk->meta_index_mutex); | ||
165 | meta->locked = 0; | ||
166 | mutex_unlock(&msblk->meta_index_mutex); | ||
167 | } | ||
168 | |||
169 | |||
170 | /* | ||
171 | * Read the next n blocks from the block list, starting from | ||
172 | * metadata block <start_block, offset>. | ||
173 | */ | ||
174 | static long long read_indexes(struct super_block *sb, int n, | ||
175 | u64 *start_block, int *offset) | ||
176 | { | ||
177 | int err, i; | ||
178 | long long block = 0; | ||
179 | __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); | ||
180 | |||
181 | if (blist == NULL) { | ||
182 | ERROR("read_indexes: Failed to allocate block_list\n"); | ||
183 | return -ENOMEM; | ||
184 | } | ||
185 | |||
186 | while (n) { | ||
187 | int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); | ||
188 | |||
189 | err = squashfs_read_metadata(sb, blist, start_block, | ||
190 | offset, blocks << 2); | ||
191 | if (err < 0) { | ||
192 | ERROR("read_indexes: reading block [%llx:%x]\n", | ||
193 | *start_block, *offset); | ||
194 | goto failure; | ||
195 | } | ||
196 | |||
197 | for (i = 0; i < blocks; i++) { | ||
198 | int size = le32_to_cpu(blist[i]); | ||
199 | block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); | ||
200 | } | ||
201 | n -= blocks; | ||
202 | } | ||
203 | |||
204 | kfree(blist); | ||
205 | return block; | ||
206 | |||
207 | failure: | ||
208 | kfree(blist); | ||
209 | return err; | ||
210 | } | ||
211 | |||
212 | |||
213 | /* | ||
214 | * Each cache index slot has SQUASHFS_META_ENTRIES, each of which | ||
215 | * can cache one index -> datablock/blocklist-block mapping. We wish | ||
216 | * to distribute these over the length of the file, entry[0] maps index x, | ||
217 | * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on. | ||
218 | * The larger the file, the greater the skip factor. The skip factor is | ||
219 | * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure | ||
220 | * the number of metadata blocks that need to be read fits into the cache. | ||
221 | * If the skip factor is limited in this way then the file will use multiple | ||
222 | * slots. | ||
223 | */ | ||
224 | static inline int calculate_skip(int blocks) | ||
225 | { | ||
226 | int skip = blocks / ((SQUASHFS_META_ENTRIES + 1) | ||
227 | * SQUASHFS_META_INDEXES); | ||
228 | return min(SQUASHFS_CACHED_BLKS - 1, skip + 1); | ||
229 | } | ||
230 | |||
231 | |||
232 | /* | ||
233 | * Search and grow the index cache for the specified inode, returning the | ||
234 | * on-disk locations of the datablock and block list metadata block | ||
235 | * <index_block, index_offset> for index (scaled to nearest cache index). | ||
236 | */ | ||
237 | static int fill_meta_index(struct inode *inode, int index, | ||
238 | u64 *index_block, int *index_offset, u64 *data_block) | ||
239 | { | ||
240 | struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; | ||
241 | int skip = calculate_skip(i_size_read(inode) >> msblk->block_log); | ||
242 | int offset = 0; | ||
243 | struct meta_index *meta; | ||
244 | struct meta_entry *meta_entry; | ||
245 | u64 cur_index_block = squashfs_i(inode)->block_list_start; | ||
246 | int cur_offset = squashfs_i(inode)->offset; | ||
247 | u64 cur_data_block = squashfs_i(inode)->start; | ||
248 | int err, i; | ||
249 | |||
250 | /* | ||
251 | * Scale index to cache index (cache slot entry) | ||
252 | */ | ||
253 | index /= SQUASHFS_META_INDEXES * skip; | ||
254 | |||
255 | while (offset < index) { | ||
256 | meta = locate_meta_index(inode, offset + 1, index); | ||
257 | |||
258 | if (meta == NULL) { | ||
259 | meta = empty_meta_index(inode, offset + 1, skip); | ||
260 | if (meta == NULL) | ||
261 | goto all_done; | ||
262 | } else { | ||
263 | offset = index < meta->offset + meta->entries ? index : | ||
264 | meta->offset + meta->entries - 1; | ||
265 | meta_entry = &meta->meta_entry[offset - meta->offset]; | ||
266 | cur_index_block = meta_entry->index_block + | ||
267 | msblk->inode_table; | ||
268 | cur_offset = meta_entry->offset; | ||
269 | cur_data_block = meta_entry->data_block; | ||
270 | TRACE("get_meta_index: offset %d, meta->offset %d, " | ||
271 | "meta->entries %d\n", offset, meta->offset, | ||
272 | meta->entries); | ||
273 | TRACE("get_meta_index: index_block 0x%llx, offset 0x%x" | ||
274 | " data_block 0x%llx\n", cur_index_block, | ||
275 | cur_offset, cur_data_block); | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * If necessary grow cache slot by reading block list. Cache | ||
280 | * slot is extended up to index or to the end of the slot, in | ||
281 | * which case further slots will be used. | ||
282 | */ | ||
283 | for (i = meta->offset + meta->entries; i <= index && | ||
284 | i < meta->offset + SQUASHFS_META_ENTRIES; i++) { | ||
285 | int blocks = skip * SQUASHFS_META_INDEXES; | ||
286 | long long res = read_indexes(inode->i_sb, blocks, | ||
287 | &cur_index_block, &cur_offset); | ||
288 | |||
289 | if (res < 0) { | ||
290 | if (meta->entries == 0) | ||
291 | /* | ||
292 | * Don't leave an empty slot on read | ||
293 | * error allocated to this inode... | ||
294 | */ | ||
295 | meta->inode_number = 0; | ||
296 | err = res; | ||
297 | goto failed; | ||
298 | } | ||
299 | |||
300 | cur_data_block += res; | ||
301 | meta_entry = &meta->meta_entry[i - meta->offset]; | ||
302 | meta_entry->index_block = cur_index_block - | ||
303 | msblk->inode_table; | ||
304 | meta_entry->offset = cur_offset; | ||
305 | meta_entry->data_block = cur_data_block; | ||
306 | meta->entries++; | ||
307 | offset++; | ||
308 | } | ||
309 | |||
310 | TRACE("get_meta_index: meta->offset %d, meta->entries %d\n", | ||
311 | meta->offset, meta->entries); | ||
312 | |||
313 | release_meta_index(inode, meta); | ||
314 | } | ||
315 | |||
316 | all_done: | ||
317 | *index_block = cur_index_block; | ||
318 | *index_offset = cur_offset; | ||
319 | *data_block = cur_data_block; | ||
320 | |||
321 | /* | ||
322 | * Scale cache index (cache slot entry) to index | ||
323 | */ | ||
324 | return offset * SQUASHFS_META_INDEXES * skip; | ||
325 | |||
326 | failed: | ||
327 | release_meta_index(inode, meta); | ||
328 | return err; | ||
329 | } | ||
330 | |||
331 | |||
332 | /* | ||
333 | * Get the on-disk location and compressed size of the datablock | ||
334 | * specified by index. Fill_meta_index() does most of the work. | ||
335 | */ | ||
336 | static int read_blocklist(struct inode *inode, int index, u64 *block) | ||
337 | { | ||
338 | u64 start; | ||
339 | long long blks; | ||
340 | int offset; | ||
341 | __le32 size; | ||
342 | int res = fill_meta_index(inode, index, &start, &offset, block); | ||
343 | |||
344 | TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset" | ||
345 | " 0x%x, block 0x%llx\n", res, index, start, offset, | ||
346 | *block); | ||
347 | |||
348 | if (res < 0) | ||
349 | return res; | ||
350 | |||
351 | /* | ||
352 | * res contains the index of the mapping returned by fill_meta_index(), | ||
353 | * this will likely be less than the desired index (because the | ||
354 | * meta_index cache works at a higher granularity). Read any | ||
355 | * extra block indexes needed. | ||
356 | */ | ||
357 | if (res < index) { | ||
358 | blks = read_indexes(inode->i_sb, index - res, &start, &offset); | ||
359 | if (blks < 0) | ||
360 | return (int) blks; | ||
361 | *block += blks; | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * Read length of block specified by index. | ||
366 | */ | ||
367 | res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset, | ||
368 | sizeof(size)); | ||
369 | if (res < 0) | ||
370 | return res; | ||
371 | return le32_to_cpu(size); | ||
372 | } | ||
373 | |||
374 | |||
/*
 * Fill a page of a regular file.  The enclosing datablock (or tail-end
 * fragment) is decompressed once and copied into every page cache page
 * it covers, not just the page this call was made for.
 */
static int squashfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int bytes, i, offset = 0, sparse = 0;
	struct squashfs_cache_entry *buffer = NULL;
	void *pageaddr;

	/* Pages per datablock, datablock index of this page, and the page
	 * range covered by that datablock */
	int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1;
	int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT);
	int start_index = page->index & ~mask;
	int end_index = start_index | mask;
	int file_end = i_size_read(inode) >> msblk->block_log;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
				page->index, squashfs_i(inode)->start);

	/* Page lies beyond end of file: hand back a zero-filled page */
	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT))
		goto out;

	if (index < file_end || squashfs_i(inode)->fragment_block ==
					SQUASHFS_INVALID_BLK) {
		/*
		 * Reading a datablock from disk.  Need to read block list
		 * to get location and block size.
		 */
		u64 block = 0;
		int bsize = read_blocklist(inode, index, &block);
		if (bsize < 0)
			goto error_out;

		if (bsize == 0) { /* hole */
			/* Sparse block: nothing on disk, pages are zeroed */
			bytes = index == file_end ?
				(i_size_read(inode) & (msblk->block_size - 1)) :
				msblk->block_size;
			sparse = 1;
		} else {
			/*
			 * Read and decompress datablock.
			 */
			buffer = squashfs_get_datablock(inode->i_sb,
								block, bsize);
			if (buffer->error) {
				ERROR("Unable to read page, block %llx, size %x"
					"\n", block, bsize);
				squashfs_cache_put(buffer);
				goto error_out;
			}
			bytes = buffer->length;
		}
	} else {
		/*
		 * Datablock is stored inside a fragment (tail-end packed
		 * block).
		 */
		buffer = squashfs_get_fragment(inode->i_sb,
				squashfs_i(inode)->fragment_block,
				squashfs_i(inode)->fragment_size);

		if (buffer->error) {
			ERROR("Unable to read page, block %llx, size %x\n",
				squashfs_i(inode)->fragment_block,
				squashfs_i(inode)->fragment_size);
			squashfs_cache_put(buffer);
			goto error_out;
		}
		/* Tail end always belongs to the last (partial) block */
		bytes = i_size_read(inode) & (msblk->block_size - 1);
		offset = squashfs_i(inode)->fragment_offset;
	}

	/*
	 * Loop copying datablock into pages.  As the datablock likely covers
	 * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly
	 * grab the pages from the page cache, except for the page that we've
	 * been called to fill.
	 */
	for (i = start_index; i <= end_index && bytes > 0; i++,
			bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) {
		struct page *push_page;
		/* For sparse blocks copy nothing; pages are fully zeroed */
		int avail = sparse ? 0 : min_t(int, bytes, PAGE_CACHE_SIZE);

		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);

		/* The page we were asked for is already locked; grab others
		 * without blocking and skip them on contention */
		push_page = (i == page->index) ? page :
			grab_cache_page_nowait(page->mapping, i);

		if (!push_page)
			continue;

		if (PageUptodate(push_page))
			goto skip_page;

		/* Copy decompressed data and zero-fill the remainder */
		pageaddr = kmap_atomic(push_page, KM_USER0);
		squashfs_copy_data(pageaddr, buffer, offset, avail);
		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
		kunmap_atomic(pageaddr, KM_USER0);
		flush_dcache_page(push_page);
		SetPageUptodate(push_page);
skip_page:
		unlock_page(push_page);
		if (i != page->index)
			page_cache_release(push_page);
	}

	/* No cache entry was taken for sparse blocks */
	if (!sparse)
		squashfs_cache_put(buffer);

	return 0;

error_out:
	SetPageError(page);
out:
	/* Zero-fill the target page; mark uptodate only if no error was set */
	pageaddr = kmap_atomic(page, KM_USER0);
	memset(pageaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(pageaddr, KM_USER0);
	flush_dcache_page(page);
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);

	return 0;
}
498 | |||
499 | |||
/*
 * Squashfs is read-only, so readpage is the only address space
 * operation needed for regular file data.
 */
const struct address_space_operations squashfs_aops = {
	.readpage = squashfs_readpage
};
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c new file mode 100644 index 000000000000..b5a2c15bbbc7 --- /dev/null +++ b/fs/squashfs/fragment.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * fragment.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to handle compressed fragments (tail-end packed | ||
26 | * datablocks). | ||
27 | * | ||
28 | * Regular files contain a fragment index which is mapped to a fragment | ||
29 | * location on disk and compressed size using a fragment lookup table. | ||
30 | * Like everything in Squashfs this fragment lookup table is itself stored | ||
31 | * compressed into metadata blocks. A second index table is used to locate | ||
32 | * these. This second index table for speed of access (and because it | ||
33 | * is small) is read at mount time and cached in memory. | ||
34 | */ | ||
35 | |||
36 | #include <linux/fs.h> | ||
37 | #include <linux/vfs.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/zlib.h> | ||
40 | |||
41 | #include "squashfs_fs.h" | ||
42 | #include "squashfs_fs_sb.h" | ||
43 | #include "squashfs_fs_i.h" | ||
44 | #include "squashfs.h" | ||
45 | |||
46 | /* | ||
47 | * Look-up fragment using the fragment index table. Return the on disk | ||
48 | * location of the fragment and its compressed size | ||
49 | */ | ||
50 | int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, | ||
51 | u64 *fragment_block) | ||
52 | { | ||
53 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
54 | int block = SQUASHFS_FRAGMENT_INDEX(fragment); | ||
55 | int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); | ||
56 | u64 start_block = le64_to_cpu(msblk->fragment_index[block]); | ||
57 | struct squashfs_fragment_entry fragment_entry; | ||
58 | int size; | ||
59 | |||
60 | size = squashfs_read_metadata(sb, &fragment_entry, &start_block, | ||
61 | &offset, sizeof(fragment_entry)); | ||
62 | if (size < 0) | ||
63 | return size; | ||
64 | |||
65 | *fragment_block = le64_to_cpu(fragment_entry.start_block); | ||
66 | size = le32_to_cpu(fragment_entry.size); | ||
67 | |||
68 | return size; | ||
69 | } | ||
70 | |||
71 | |||
72 | /* | ||
73 | * Read the uncompressed fragment lookup table indexes off disk into memory | ||
74 | */ | ||
75 | __le64 *squashfs_read_fragment_index_table(struct super_block *sb, | ||
76 | u64 fragment_table_start, unsigned int fragments) | ||
77 | { | ||
78 | unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); | ||
79 | __le64 *fragment_index; | ||
80 | int err; | ||
81 | |||
82 | /* Allocate fragment lookup table indexes */ | ||
83 | fragment_index = kmalloc(length, GFP_KERNEL); | ||
84 | if (fragment_index == NULL) { | ||
85 | ERROR("Failed to allocate fragment index table\n"); | ||
86 | return ERR_PTR(-ENOMEM); | ||
87 | } | ||
88 | |||
89 | err = squashfs_read_table(sb, fragment_index, fragment_table_start, | ||
90 | length); | ||
91 | if (err < 0) { | ||
92 | ERROR("unable to read fragment index table\n"); | ||
93 | kfree(fragment_index); | ||
94 | return ERR_PTR(err); | ||
95 | } | ||
96 | |||
97 | return fragment_index; | ||
98 | } | ||
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c new file mode 100644 index 000000000000..3795b837ba28 --- /dev/null +++ b/fs/squashfs/id.c | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * id.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to handle uids and gids. | ||
26 | * | ||
27 | * For space efficiency regular files store uid and gid indexes, which are | ||
28 | * converted to 32-bit uids/gids using an id look up table. This table is | ||
29 | * stored compressed into metadata blocks. A second index table is used to | ||
30 | * locate these. This second index table for speed of access (and because it | ||
31 | * is small) is read at mount time and cached in memory. | ||
32 | */ | ||
33 | |||
34 | #include <linux/fs.h> | ||
35 | #include <linux/vfs.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/zlib.h> | ||
38 | |||
39 | #include "squashfs_fs.h" | ||
40 | #include "squashfs_fs_sb.h" | ||
41 | #include "squashfs_fs_i.h" | ||
42 | #include "squashfs.h" | ||
43 | |||
44 | /* | ||
45 | * Map uid/gid index into real 32-bit uid/gid using the id look up table | ||
46 | */ | ||
47 | int squashfs_get_id(struct super_block *sb, unsigned int index, | ||
48 | unsigned int *id) | ||
49 | { | ||
50 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
51 | int block = SQUASHFS_ID_BLOCK(index); | ||
52 | int offset = SQUASHFS_ID_BLOCK_OFFSET(index); | ||
53 | u64 start_block = le64_to_cpu(msblk->id_table[block]); | ||
54 | __le32 disk_id; | ||
55 | int err; | ||
56 | |||
57 | err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset, | ||
58 | sizeof(disk_id)); | ||
59 | if (err < 0) | ||
60 | return err; | ||
61 | |||
62 | *id = le32_to_cpu(disk_id); | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | |||
67 | /* | ||
68 | * Read uncompressed id lookup table indexes from disk into memory | ||
69 | */ | ||
70 | __le64 *squashfs_read_id_index_table(struct super_block *sb, | ||
71 | u64 id_table_start, unsigned short no_ids) | ||
72 | { | ||
73 | unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); | ||
74 | __le64 *id_table; | ||
75 | int err; | ||
76 | |||
77 | TRACE("In read_id_index_table, length %d\n", length); | ||
78 | |||
79 | /* Allocate id lookup table indexes */ | ||
80 | id_table = kmalloc(length, GFP_KERNEL); | ||
81 | if (id_table == NULL) { | ||
82 | ERROR("Failed to allocate id index table\n"); | ||
83 | return ERR_PTR(-ENOMEM); | ||
84 | } | ||
85 | |||
86 | err = squashfs_read_table(sb, id_table, id_table_start, length); | ||
87 | if (err < 0) { | ||
88 | ERROR("unable to read id index table\n"); | ||
89 | kfree(id_table); | ||
90 | return ERR_PTR(err); | ||
91 | } | ||
92 | |||
93 | return id_table; | ||
94 | } | ||
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c new file mode 100644 index 000000000000..7a63398bb855 --- /dev/null +++ b/fs/squashfs/inode.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * inode.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to create and read inodes from disk. | ||
26 | * | ||
27 | * Inodes in Squashfs are identified by a 48-bit inode which encodes the | ||
28 | * location of the compressed metadata block containing the inode, and the byte | ||
29 | * offset into that block where the inode is placed (<block, offset>). | ||
30 | * | ||
31 | * To maximise compression there are different inodes for each file type | ||
32 | * (regular file, directory, device, etc.), the inode contents and length | ||
33 | * varying with the type. | ||
34 | * | ||
35 | * To further maximise compression, two types of regular file inode and | ||
36 | * directory inode are defined: inodes optimised for frequently occurring | ||
37 | * regular files and directories, and extended types where extra | ||
38 | * information has to be stored. | ||
39 | */ | ||
40 | |||
41 | #include <linux/fs.h> | ||
42 | #include <linux/vfs.h> | ||
43 | #include <linux/zlib.h> | ||
44 | |||
45 | #include "squashfs_fs.h" | ||
46 | #include "squashfs_fs_sb.h" | ||
47 | #include "squashfs_fs_i.h" | ||
48 | #include "squashfs.h" | ||
49 | |||
50 | /* | ||
51 | * Initialise VFS inode with the base inode information common to all | ||
52 | * Squashfs inode types. Sqsh_ino contains the unswapped base inode | ||
53 | * off disk. | ||
54 | */ | ||
55 | static int squashfs_new_inode(struct super_block *sb, struct inode *inode, | ||
56 | struct squashfs_base_inode *sqsh_ino) | ||
57 | { | ||
58 | int err; | ||
59 | |||
60 | err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &inode->i_uid); | ||
61 | if (err) | ||
62 | return err; | ||
63 | |||
64 | err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &inode->i_gid); | ||
65 | if (err) | ||
66 | return err; | ||
67 | |||
68 | inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); | ||
69 | inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime); | ||
70 | inode->i_atime.tv_sec = inode->i_mtime.tv_sec; | ||
71 | inode->i_ctime.tv_sec = inode->i_mtime.tv_sec; | ||
72 | inode->i_mode = le16_to_cpu(sqsh_ino->mode); | ||
73 | inode->i_size = 0; | ||
74 | |||
75 | return err; | ||
76 | } | ||
77 | |||
78 | |||
79 | struct inode *squashfs_iget(struct super_block *sb, long long ino, | ||
80 | unsigned int ino_number) | ||
81 | { | ||
82 | struct inode *inode = iget_locked(sb, ino_number); | ||
83 | int err; | ||
84 | |||
85 | TRACE("Entered squashfs_iget\n"); | ||
86 | |||
87 | if (!inode) | ||
88 | return ERR_PTR(-ENOMEM); | ||
89 | if (!(inode->i_state & I_NEW)) | ||
90 | return inode; | ||
91 | |||
92 | err = squashfs_read_inode(inode, ino); | ||
93 | if (err) { | ||
94 | iget_failed(inode); | ||
95 | return ERR_PTR(err); | ||
96 | } | ||
97 | |||
98 | unlock_new_inode(inode); | ||
99 | return inode; | ||
100 | } | ||
101 | |||
102 | |||
103 | /* | ||
104 | * Initialise VFS inode by reading inode from inode table (compressed | ||
105 | * metadata). The format and amount of data read depends on type. | ||
106 | */ | ||
107 | int squashfs_read_inode(struct inode *inode, long long ino) | ||
108 | { | ||
109 | struct super_block *sb = inode->i_sb; | ||
110 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
111 | u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; | ||
112 | int err, type, offset = SQUASHFS_INODE_OFFSET(ino); | ||
113 | union squashfs_inode squashfs_ino; | ||
114 | struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; | ||
115 | |||
116 | TRACE("Entered squashfs_read_inode\n"); | ||
117 | |||
118 | /* | ||
119 | * Read inode base common to all inode types. | ||
120 | */ | ||
121 | err = squashfs_read_metadata(sb, sqshb_ino, &block, | ||
122 | &offset, sizeof(*sqshb_ino)); | ||
123 | if (err < 0) | ||
124 | goto failed_read; | ||
125 | |||
126 | err = squashfs_new_inode(sb, inode, sqshb_ino); | ||
127 | if (err) | ||
128 | goto failed_read; | ||
129 | |||
130 | block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; | ||
131 | offset = SQUASHFS_INODE_OFFSET(ino); | ||
132 | |||
133 | type = le16_to_cpu(sqshb_ino->inode_type); | ||
134 | switch (type) { | ||
135 | case SQUASHFS_REG_TYPE: { | ||
136 | unsigned int frag_offset, frag_size, frag; | ||
137 | u64 frag_blk; | ||
138 | struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg; | ||
139 | |||
140 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
141 | sizeof(*sqsh_ino)); | ||
142 | if (err < 0) | ||
143 | goto failed_read; | ||
144 | |||
145 | frag = le32_to_cpu(sqsh_ino->fragment); | ||
146 | if (frag != SQUASHFS_INVALID_FRAG) { | ||
147 | frag_offset = le32_to_cpu(sqsh_ino->offset); | ||
148 | frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); | ||
149 | if (frag_size < 0) { | ||
150 | err = frag_size; | ||
151 | goto failed_read; | ||
152 | } | ||
153 | } else { | ||
154 | frag_blk = SQUASHFS_INVALID_BLK; | ||
155 | frag_size = 0; | ||
156 | frag_offset = 0; | ||
157 | } | ||
158 | |||
159 | inode->i_nlink = 1; | ||
160 | inode->i_size = le32_to_cpu(sqsh_ino->file_size); | ||
161 | inode->i_fop = &generic_ro_fops; | ||
162 | inode->i_mode |= S_IFREG; | ||
163 | inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; | ||
164 | squashfs_i(inode)->fragment_block = frag_blk; | ||
165 | squashfs_i(inode)->fragment_size = frag_size; | ||
166 | squashfs_i(inode)->fragment_offset = frag_offset; | ||
167 | squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); | ||
168 | squashfs_i(inode)->block_list_start = block; | ||
169 | squashfs_i(inode)->offset = offset; | ||
170 | inode->i_data.a_ops = &squashfs_aops; | ||
171 | |||
172 | TRACE("File inode %x:%x, start_block %llx, block_list_start " | ||
173 | "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), | ||
174 | offset, squashfs_i(inode)->start, block, offset); | ||
175 | break; | ||
176 | } | ||
177 | case SQUASHFS_LREG_TYPE: { | ||
178 | unsigned int frag_offset, frag_size, frag; | ||
179 | u64 frag_blk; | ||
180 | struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg; | ||
181 | |||
182 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
183 | sizeof(*sqsh_ino)); | ||
184 | if (err < 0) | ||
185 | goto failed_read; | ||
186 | |||
187 | frag = le32_to_cpu(sqsh_ino->fragment); | ||
188 | if (frag != SQUASHFS_INVALID_FRAG) { | ||
189 | frag_offset = le32_to_cpu(sqsh_ino->offset); | ||
190 | frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); | ||
191 | if (frag_size < 0) { | ||
192 | err = frag_size; | ||
193 | goto failed_read; | ||
194 | } | ||
195 | } else { | ||
196 | frag_blk = SQUASHFS_INVALID_BLK; | ||
197 | frag_size = 0; | ||
198 | frag_offset = 0; | ||
199 | } | ||
200 | |||
201 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
202 | inode->i_size = le64_to_cpu(sqsh_ino->file_size); | ||
203 | inode->i_fop = &generic_ro_fops; | ||
204 | inode->i_mode |= S_IFREG; | ||
205 | inode->i_blocks = ((inode->i_size - | ||
206 | le64_to_cpu(sqsh_ino->sparse) - 1) >> 9) + 1; | ||
207 | |||
208 | squashfs_i(inode)->fragment_block = frag_blk; | ||
209 | squashfs_i(inode)->fragment_size = frag_size; | ||
210 | squashfs_i(inode)->fragment_offset = frag_offset; | ||
211 | squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); | ||
212 | squashfs_i(inode)->block_list_start = block; | ||
213 | squashfs_i(inode)->offset = offset; | ||
214 | inode->i_data.a_ops = &squashfs_aops; | ||
215 | |||
216 | TRACE("File inode %x:%x, start_block %llx, block_list_start " | ||
217 | "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), | ||
218 | offset, squashfs_i(inode)->start, block, offset); | ||
219 | break; | ||
220 | } | ||
221 | case SQUASHFS_DIR_TYPE: { | ||
222 | struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir; | ||
223 | |||
224 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
225 | sizeof(*sqsh_ino)); | ||
226 | if (err < 0) | ||
227 | goto failed_read; | ||
228 | |||
229 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
230 | inode->i_size = le16_to_cpu(sqsh_ino->file_size); | ||
231 | inode->i_op = &squashfs_dir_inode_ops; | ||
232 | inode->i_fop = &squashfs_dir_ops; | ||
233 | inode->i_mode |= S_IFDIR; | ||
234 | squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); | ||
235 | squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); | ||
236 | squashfs_i(inode)->dir_idx_cnt = 0; | ||
237 | squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); | ||
238 | |||
239 | TRACE("Directory inode %x:%x, start_block %llx, offset %x\n", | ||
240 | SQUASHFS_INODE_BLK(ino), offset, | ||
241 | squashfs_i(inode)->start, | ||
242 | le16_to_cpu(sqsh_ino->offset)); | ||
243 | break; | ||
244 | } | ||
245 | case SQUASHFS_LDIR_TYPE: { | ||
246 | struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir; | ||
247 | |||
248 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
249 | sizeof(*sqsh_ino)); | ||
250 | if (err < 0) | ||
251 | goto failed_read; | ||
252 | |||
253 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
254 | inode->i_size = le32_to_cpu(sqsh_ino->file_size); | ||
255 | inode->i_op = &squashfs_dir_inode_ops; | ||
256 | inode->i_fop = &squashfs_dir_ops; | ||
257 | inode->i_mode |= S_IFDIR; | ||
258 | squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); | ||
259 | squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); | ||
260 | squashfs_i(inode)->dir_idx_start = block; | ||
261 | squashfs_i(inode)->dir_idx_offset = offset; | ||
262 | squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count); | ||
263 | squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); | ||
264 | |||
265 | TRACE("Long directory inode %x:%x, start_block %llx, offset " | ||
266 | "%x\n", SQUASHFS_INODE_BLK(ino), offset, | ||
267 | squashfs_i(inode)->start, | ||
268 | le16_to_cpu(sqsh_ino->offset)); | ||
269 | break; | ||
270 | } | ||
271 | case SQUASHFS_SYMLINK_TYPE: | ||
272 | case SQUASHFS_LSYMLINK_TYPE: { | ||
273 | struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink; | ||
274 | |||
275 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
276 | sizeof(*sqsh_ino)); | ||
277 | if (err < 0) | ||
278 | goto failed_read; | ||
279 | |||
280 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
281 | inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); | ||
282 | inode->i_op = &page_symlink_inode_operations; | ||
283 | inode->i_data.a_ops = &squashfs_symlink_aops; | ||
284 | inode->i_mode |= S_IFLNK; | ||
285 | squashfs_i(inode)->start = block; | ||
286 | squashfs_i(inode)->offset = offset; | ||
287 | |||
288 | TRACE("Symbolic link inode %x:%x, start_block %llx, offset " | ||
289 | "%x\n", SQUASHFS_INODE_BLK(ino), offset, | ||
290 | block, offset); | ||
291 | break; | ||
292 | } | ||
293 | case SQUASHFS_BLKDEV_TYPE: | ||
294 | case SQUASHFS_CHRDEV_TYPE: | ||
295 | case SQUASHFS_LBLKDEV_TYPE: | ||
296 | case SQUASHFS_LCHRDEV_TYPE: { | ||
297 | struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; | ||
298 | unsigned int rdev; | ||
299 | |||
300 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
301 | sizeof(*sqsh_ino)); | ||
302 | if (err < 0) | ||
303 | goto failed_read; | ||
304 | |||
305 | if (type == SQUASHFS_CHRDEV_TYPE) | ||
306 | inode->i_mode |= S_IFCHR; | ||
307 | else | ||
308 | inode->i_mode |= S_IFBLK; | ||
309 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
310 | rdev = le32_to_cpu(sqsh_ino->rdev); | ||
311 | init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); | ||
312 | |||
313 | TRACE("Device inode %x:%x, rdev %x\n", | ||
314 | SQUASHFS_INODE_BLK(ino), offset, rdev); | ||
315 | break; | ||
316 | } | ||
317 | case SQUASHFS_FIFO_TYPE: | ||
318 | case SQUASHFS_SOCKET_TYPE: | ||
319 | case SQUASHFS_LFIFO_TYPE: | ||
320 | case SQUASHFS_LSOCKET_TYPE: { | ||
321 | struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; | ||
322 | |||
323 | err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, | ||
324 | sizeof(*sqsh_ino)); | ||
325 | if (err < 0) | ||
326 | goto failed_read; | ||
327 | |||
328 | if (type == SQUASHFS_FIFO_TYPE) | ||
329 | inode->i_mode |= S_IFIFO; | ||
330 | else | ||
331 | inode->i_mode |= S_IFSOCK; | ||
332 | inode->i_nlink = le32_to_cpu(sqsh_ino->nlink); | ||
333 | init_special_inode(inode, inode->i_mode, 0); | ||
334 | break; | ||
335 | } | ||
336 | default: | ||
337 | ERROR("Unknown inode type %d in squashfs_iget!\n", type); | ||
338 | return -EINVAL; | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | |||
343 | failed_read: | ||
344 | ERROR("Unable to read inode 0x%llx\n", ino); | ||
345 | return err; | ||
346 | } | ||
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c new file mode 100644 index 000000000000..9e398653b22b --- /dev/null +++ b/fs/squashfs/namei.c | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * namei.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to do filename lookup in directories. | ||
26 | * | ||
27 | * Like inodes, directories are packed into compressed metadata blocks, stored | ||
28 | * in a directory table. Directories are accessed using the start address of | ||
29 | * the metablock containing the directory and the offset into the | ||
30 | * decompressed block (<block, offset>). | ||
31 | * | ||
32 | * Directories are organised in a slightly complex way, and are not simply | ||
33 | * a list of file names. The organisation takes advantage of the | ||
34 | * fact that (in most cases) the inodes of the files will be in the same | ||
35 | * compressed metadata block, and therefore, can share the start block. | ||
36 | * Directories are therefore organised in a two level list, a directory | ||
37 | * header containing the shared start block value, and a sequence of directory | ||
38 | * entries, each of which share the shared start block. A new directory header | ||
39 | * is written once/if the inode start block changes. The directory | ||
40 | * header/directory entry list is repeated as many times as necessary. | ||
41 | * | ||
42 | * Directories are sorted, and can contain a directory index to speed up | ||
43 | * file lookup. Directory indexes store one entry per metablock, each entry | ||
44 | * storing the index/filename mapping to the first directory header | ||
45 | * in each metadata block. Directories are sorted in alphabetical order, | ||
46 | * and at lookup the index is scanned linearly looking for the first filename | ||
47 | * alphabetically larger than the filename being looked up. At this point the | ||
48 | * location of the metadata block the filename is in has been found. | ||
49 | * The general idea of the index is to ensure only one metadata block needs to be | ||
50 | * decompressed to do a lookup irrespective of the length of the directory. | ||
51 | * This scheme has the advantage that it doesn't require extra memory overhead | ||
52 | * and doesn't require much extra storage on disk. | ||
53 | */ | ||
54 | |||
55 | #include <linux/fs.h> | ||
56 | #include <linux/vfs.h> | ||
57 | #include <linux/slab.h> | ||
58 | #include <linux/string.h> | ||
59 | #include <linux/dcache.h> | ||
60 | #include <linux/zlib.h> | ||
61 | |||
62 | #include "squashfs_fs.h" | ||
63 | #include "squashfs_fs_sb.h" | ||
64 | #include "squashfs_fs_i.h" | ||
65 | #include "squashfs.h" | ||
66 | |||
67 | /* | ||
68 | * Lookup name in the directory index, returning the location of the metadata | ||
69 | * block containing it, and the directory index this represents. | ||
70 | * | ||
71 | * If we get an error reading the index then return the part of the index | ||
72 | * (if any) we have managed to read - the index isn't essential, just | ||
73 | * quicker. | ||
74 | */ | ||
75 | static int get_dir_index_using_name(struct super_block *sb, | ||
76 | u64 *next_block, int *next_offset, u64 index_start, | ||
77 | int index_offset, int i_count, const char *name, | ||
78 | int len) | ||
79 | { | ||
80 | struct squashfs_sb_info *msblk = sb->s_fs_info; | ||
81 | int i, size, length = 0, err; | ||
82 | struct squashfs_dir_index *index; | ||
83 | char *str; | ||
84 | |||
85 | TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count); | ||
86 | |||
87 | index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL); | ||
88 | if (index == NULL) { | ||
89 | ERROR("Failed to allocate squashfs_dir_index\n"); | ||
90 | goto out; | ||
91 | } | ||
92 | |||
93 | str = &index->name[SQUASHFS_NAME_LEN + 1]; | ||
94 | strncpy(str, name, len); | ||
95 | str[len] = '\0'; | ||
96 | |||
97 | for (i = 0; i < i_count; i++) { | ||
98 | err = squashfs_read_metadata(sb, index, &index_start, | ||
99 | &index_offset, sizeof(*index)); | ||
100 | if (err < 0) | ||
101 | break; | ||
102 | |||
103 | |||
104 | size = le32_to_cpu(index->size) + 1; | ||
105 | |||
106 | err = squashfs_read_metadata(sb, index->name, &index_start, | ||
107 | &index_offset, size); | ||
108 | if (err < 0) | ||
109 | break; | ||
110 | |||
111 | index->name[size] = '\0'; | ||
112 | |||
113 | if (strcmp(index->name, str) > 0) | ||
114 | break; | ||
115 | |||
116 | length = le32_to_cpu(index->index); | ||
117 | *next_block = le32_to_cpu(index->start_block) + | ||
118 | msblk->directory_table; | ||
119 | } | ||
120 | |||
121 | *next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE; | ||
122 | kfree(index); | ||
123 | |||
124 | out: | ||
125 | /* | ||
126 | * Return index (f_pos) of the looked up metadata block. Translate | ||
127 | * from internal f_pos to external f_pos which is offset by 3 because | ||
128 | * we invent "." and ".." entries which are not actually stored in the | ||
129 | * directory. | ||
130 | */ | ||
131 | return length + 3; | ||
132 | } | ||
133 | |||
134 | |||
135 | static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, | ||
136 | struct nameidata *nd) | ||
137 | { | ||
138 | const unsigned char *name = dentry->d_name.name; | ||
139 | int len = dentry->d_name.len; | ||
140 | struct inode *inode = NULL; | ||
141 | struct squashfs_sb_info *msblk = dir->i_sb->s_fs_info; | ||
142 | struct squashfs_dir_header dirh; | ||
143 | struct squashfs_dir_entry *dire; | ||
144 | u64 block = squashfs_i(dir)->start + msblk->directory_table; | ||
145 | int offset = squashfs_i(dir)->offset; | ||
146 | int err, length = 0, dir_count, size; | ||
147 | |||
148 | TRACE("Entered squashfs_lookup [%llx:%x]\n", block, offset); | ||
149 | |||
150 | dire = kmalloc(sizeof(*dire) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL); | ||
151 | if (dire == NULL) { | ||
152 | ERROR("Failed to allocate squashfs_dir_entry\n"); | ||
153 | return ERR_PTR(-ENOMEM); | ||
154 | } | ||
155 | |||
156 | if (len > SQUASHFS_NAME_LEN) { | ||
157 | err = -ENAMETOOLONG; | ||
158 | goto failed; | ||
159 | } | ||
160 | |||
161 | length = get_dir_index_using_name(dir->i_sb, &block, &offset, | ||
162 | squashfs_i(dir)->dir_idx_start, | ||
163 | squashfs_i(dir)->dir_idx_offset, | ||
164 | squashfs_i(dir)->dir_idx_cnt, name, len); | ||
165 | |||
166 | while (length < i_size_read(dir)) { | ||
167 | /* | ||
168 | * Read directory header. | ||
169 | */ | ||
170 | err = squashfs_read_metadata(dir->i_sb, &dirh, &block, | ||
171 | &offset, sizeof(dirh)); | ||
172 | if (err < 0) | ||
173 | goto read_failure; | ||
174 | |||
175 | length += sizeof(dirh); | ||
176 | |||
177 | dir_count = le32_to_cpu(dirh.count) + 1; | ||
178 | while (dir_count--) { | ||
179 | /* | ||
180 | * Read directory entry. | ||
181 | */ | ||
182 | err = squashfs_read_metadata(dir->i_sb, dire, &block, | ||
183 | &offset, sizeof(*dire)); | ||
184 | if (err < 0) | ||
185 | goto read_failure; | ||
186 | |||
187 | size = le16_to_cpu(dire->size) + 1; | ||
188 | |||
189 | err = squashfs_read_metadata(dir->i_sb, dire->name, | ||
190 | &block, &offset, size); | ||
191 | if (err < 0) | ||
192 | goto read_failure; | ||
193 | |||
194 | length += sizeof(*dire) + size; | ||
195 | |||
196 | if (name[0] < dire->name[0]) | ||
197 | goto exit_lookup; | ||
198 | |||
199 | if (len == size && !strncmp(name, dire->name, len)) { | ||
200 | unsigned int blk, off, ino_num; | ||
201 | long long ino; | ||
202 | blk = le32_to_cpu(dirh.start_block); | ||
203 | off = le16_to_cpu(dire->offset); | ||
204 | ino_num = le32_to_cpu(dirh.inode_number) + | ||
205 | (short) le16_to_cpu(dire->inode_number); | ||
206 | ino = SQUASHFS_MKINODE(blk, off); | ||
207 | |||
208 | TRACE("calling squashfs_iget for directory " | ||
209 | "entry %s, inode %x:%x, %d\n", name, | ||
210 | blk, off, ino_num); | ||
211 | |||
212 | inode = squashfs_iget(dir->i_sb, ino, ino_num); | ||
213 | if (IS_ERR(inode)) { | ||
214 | err = PTR_ERR(inode); | ||
215 | goto failed; | ||
216 | } | ||
217 | |||
218 | goto exit_lookup; | ||
219 | } | ||
220 | } | ||
221 | } | ||
222 | |||
223 | exit_lookup: | ||
224 | kfree(dire); | ||
225 | if (inode) | ||
226 | return d_splice_alias(inode, dentry); | ||
227 | d_add(dentry, inode); | ||
228 | return ERR_PTR(0); | ||
229 | |||
230 | read_failure: | ||
231 | ERROR("Unable to read directory block [%llx:%x]\n", | ||
232 | squashfs_i(dir)->start + msblk->directory_table, | ||
233 | squashfs_i(dir)->offset); | ||
234 | failed: | ||
235 | kfree(dire); | ||
236 | return ERR_PTR(err); | ||
237 | } | ||
238 | |||
239 | |||
/*
 * Directory inode operations.  Squashfs is read-only, so lookup is the only
 * operation implemented here.
 */
const struct inode_operations squashfs_dir_inode_ops = {
	.lookup = squashfs_lookup
};
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h new file mode 100644 index 000000000000..6b2515d027d5 --- /dev/null +++ b/fs/squashfs/squashfs.h | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * squashfs.h | ||
22 | */ | ||
23 | |||
/* Message macros; all output is prefixed so it can be attributed to squashfs */
#define TRACE(s, args...)	pr_debug("SQUASHFS: "s, ## args)

#define ERROR(s, args...)	pr_err("SQUASHFS error: "s, ## args)

#define WARNING(s, args...)	pr_warning("SQUASHFS: "s, ## args)
29 | |||
30 | static inline struct squashfs_inode_info *squashfs_i(struct inode *inode) | ||
31 | { | ||
32 | return list_entry(inode, struct squashfs_inode_info, vfs_inode); | ||
33 | } | ||
34 | |||
35 | /* block.c */ | ||
36 | extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *, | ||
37 | int); | ||
38 | |||
39 | /* cache.c */ | ||
40 | extern struct squashfs_cache *squashfs_cache_init(char *, int, int); | ||
41 | extern void squashfs_cache_delete(struct squashfs_cache *); | ||
42 | extern struct squashfs_cache_entry *squashfs_cache_get(struct super_block *, | ||
43 | struct squashfs_cache *, u64, int); | ||
44 | extern void squashfs_cache_put(struct squashfs_cache_entry *); | ||
45 | extern int squashfs_copy_data(void *, struct squashfs_cache_entry *, int, int); | ||
46 | extern int squashfs_read_metadata(struct super_block *, void *, u64 *, | ||
47 | int *, int); | ||
48 | extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *, | ||
49 | u64, int); | ||
50 | extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, | ||
51 | u64, int); | ||
52 | extern int squashfs_read_table(struct super_block *, void *, u64, int); | ||
53 | |||
54 | /* export.c */ | ||
55 | extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, | ||
56 | unsigned int); | ||
57 | |||
58 | /* fragment.c */ | ||
59 | extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); | ||
60 | extern __le64 *squashfs_read_fragment_index_table(struct super_block *, | ||
61 | u64, unsigned int); | ||
62 | |||
63 | /* id.c */ | ||
64 | extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); | ||
65 | extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, | ||
66 | unsigned short); | ||
67 | |||
68 | /* inode.c */ | ||
69 | extern struct inode *squashfs_iget(struct super_block *, long long, | ||
70 | unsigned int); | ||
71 | extern int squashfs_read_inode(struct inode *, long long); | ||
72 | |||
73 | /* | ||
74 | * Inodes and files operations | ||
75 | */ | ||
76 | |||
77 | /* dir.c */ | ||
78 | extern const struct file_operations squashfs_dir_ops; | ||
79 | |||
80 | /* export.c */ | ||
81 | extern const struct export_operations squashfs_export_ops; | ||
82 | |||
83 | /* file.c */ | ||
84 | extern const struct address_space_operations squashfs_aops; | ||
85 | |||
86 | /* namei.c */ | ||
87 | extern const struct inode_operations squashfs_dir_inode_ops; | ||
88 | |||
89 | /* symlink.c */ | ||
90 | extern const struct address_space_operations squashfs_symlink_aops; | ||
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h new file mode 100644 index 000000000000..6840da1bf21e --- /dev/null +++ b/fs/squashfs/squashfs_fs.h | |||
@@ -0,0 +1,381 @@ | |||
1 | #ifndef SQUASHFS_FS | ||
2 | #define SQUASHFS_FS | ||
3 | /* | ||
4 | * Squashfs | ||
5 | * | ||
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version 2, | ||
12 | * or (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | * | ||
23 | * squashfs_fs.h | ||
24 | */ | ||
25 | |||
26 | #define SQUASHFS_CACHED_FRAGMENTS CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE | ||
27 | #define SQUASHFS_MAJOR 4 | ||
28 | #define SQUASHFS_MINOR 0 | ||
29 | #define SQUASHFS_MAGIC 0x73717368 | ||
30 | #define SQUASHFS_START 0 | ||
31 | |||
32 | /* size of metadata (inode and directory) blocks */ | ||
33 | #define SQUASHFS_METADATA_SIZE 8192 | ||
34 | #define SQUASHFS_METADATA_LOG 13 | ||
35 | |||
36 | /* default size of data blocks */ | ||
37 | #define SQUASHFS_FILE_SIZE 131072 | ||
38 | #define SQUASHFS_FILE_LOG 17 | ||
39 | |||
40 | #define SQUASHFS_FILE_MAX_SIZE 1048576 | ||
41 | #define SQUASHFS_FILE_MAX_LOG 20 | ||
42 | |||
43 | /* Max number of uids and gids */ | ||
44 | #define SQUASHFS_IDS 65536 | ||
45 | |||
46 | /* Max length of filename (not 255) */ | ||
47 | #define SQUASHFS_NAME_LEN 256 | ||
48 | |||
49 | #define SQUASHFS_INVALID_FRAG (0xffffffffU) | ||
50 | #define SQUASHFS_INVALID_BLK (-1LL) | ||
51 | |||
52 | /* Filesystem flags */ | ||
53 | #define SQUASHFS_NOI 0 | ||
54 | #define SQUASHFS_NOD 1 | ||
55 | #define SQUASHFS_NOF 3 | ||
56 | #define SQUASHFS_NO_FRAG 4 | ||
57 | #define SQUASHFS_ALWAYS_FRAG 5 | ||
58 | #define SQUASHFS_DUPLICATE 6 | ||
59 | #define SQUASHFS_EXPORT 7 | ||
60 | |||
61 | #define SQUASHFS_BIT(flag, bit) ((flag >> bit) & 1) | ||
62 | |||
63 | #define SQUASHFS_UNCOMPRESSED_INODES(flags) SQUASHFS_BIT(flags, \ | ||
64 | SQUASHFS_NOI) | ||
65 | |||
66 | #define SQUASHFS_UNCOMPRESSED_DATA(flags) SQUASHFS_BIT(flags, \ | ||
67 | SQUASHFS_NOD) | ||
68 | |||
69 | #define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags) SQUASHFS_BIT(flags, \ | ||
70 | SQUASHFS_NOF) | ||
71 | |||
72 | #define SQUASHFS_NO_FRAGMENTS(flags) SQUASHFS_BIT(flags, \ | ||
73 | SQUASHFS_NO_FRAG) | ||
74 | |||
75 | #define SQUASHFS_ALWAYS_FRAGMENTS(flags) SQUASHFS_BIT(flags, \ | ||
76 | SQUASHFS_ALWAYS_FRAG) | ||
77 | |||
78 | #define SQUASHFS_DUPLICATES(flags) SQUASHFS_BIT(flags, \ | ||
79 | SQUASHFS_DUPLICATE) | ||
80 | |||
81 | #define SQUASHFS_EXPORTABLE(flags) SQUASHFS_BIT(flags, \ | ||
82 | SQUASHFS_EXPORT) | ||
83 | |||
84 | /* Max number of types and file types */ | ||
85 | #define SQUASHFS_DIR_TYPE 1 | ||
86 | #define SQUASHFS_REG_TYPE 2 | ||
87 | #define SQUASHFS_SYMLINK_TYPE 3 | ||
88 | #define SQUASHFS_BLKDEV_TYPE 4 | ||
89 | #define SQUASHFS_CHRDEV_TYPE 5 | ||
90 | #define SQUASHFS_FIFO_TYPE 6 | ||
91 | #define SQUASHFS_SOCKET_TYPE 7 | ||
92 | #define SQUASHFS_LDIR_TYPE 8 | ||
93 | #define SQUASHFS_LREG_TYPE 9 | ||
94 | #define SQUASHFS_LSYMLINK_TYPE 10 | ||
95 | #define SQUASHFS_LBLKDEV_TYPE 11 | ||
96 | #define SQUASHFS_LCHRDEV_TYPE 12 | ||
97 | #define SQUASHFS_LFIFO_TYPE 13 | ||
98 | #define SQUASHFS_LSOCKET_TYPE 14 | ||
99 | |||
100 | /* Flag whether block is compressed or uncompressed, bit is set if block is | ||
101 | * uncompressed */ | ||
102 | #define SQUASHFS_COMPRESSED_BIT (1 << 15) | ||
103 | |||
104 | #define SQUASHFS_COMPRESSED_SIZE(B) (((B) & ~SQUASHFS_COMPRESSED_BIT) ? \ | ||
105 | (B) & ~SQUASHFS_COMPRESSED_BIT : SQUASHFS_COMPRESSED_BIT) | ||
106 | |||
107 | #define SQUASHFS_COMPRESSED(B) (!((B) & SQUASHFS_COMPRESSED_BIT)) | ||
108 | |||
109 | #define SQUASHFS_COMPRESSED_BIT_BLOCK (1 << 24) | ||
110 | |||
111 | #define SQUASHFS_COMPRESSED_SIZE_BLOCK(B) ((B) & \ | ||
112 | ~SQUASHFS_COMPRESSED_BIT_BLOCK) | ||
113 | |||
114 | #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) | ||
115 | |||
116 | /* | ||
117 | * Inode number ops. Inodes consist of a compressed block number, and an | ||
118 | * uncompressed offset within that block | ||
119 | */ | ||
120 | #define SQUASHFS_INODE_BLK(A) ((unsigned int) ((A) >> 16)) | ||
121 | |||
122 | #define SQUASHFS_INODE_OFFSET(A) ((unsigned int) ((A) & 0xffff)) | ||
123 | |||
124 | #define SQUASHFS_MKINODE(A, B) ((long long)(((long long) (A)\ | ||
125 | << 16) + (B))) | ||
126 | |||
127 | /* Translate between VFS mode and squashfs mode */ | ||
128 | #define SQUASHFS_MODE(A) ((A) & 0xfff) | ||
129 | |||
130 | /* fragment and fragment table defines */ | ||
131 | #define SQUASHFS_FRAGMENT_BYTES(A) \ | ||
132 | ((A) * sizeof(struct squashfs_fragment_entry)) | ||
133 | |||
134 | #define SQUASHFS_FRAGMENT_INDEX(A) (SQUASHFS_FRAGMENT_BYTES(A) / \ | ||
135 | SQUASHFS_METADATA_SIZE) | ||
136 | |||
137 | #define SQUASHFS_FRAGMENT_INDEX_OFFSET(A) (SQUASHFS_FRAGMENT_BYTES(A) % \ | ||
138 | SQUASHFS_METADATA_SIZE) | ||
139 | |||
140 | #define SQUASHFS_FRAGMENT_INDEXES(A) ((SQUASHFS_FRAGMENT_BYTES(A) + \ | ||
141 | SQUASHFS_METADATA_SIZE - 1) / \ | ||
142 | SQUASHFS_METADATA_SIZE) | ||
143 | |||
144 | #define SQUASHFS_FRAGMENT_INDEX_BYTES(A) (SQUASHFS_FRAGMENT_INDEXES(A) *\ | ||
145 | sizeof(u64)) | ||
146 | |||
147 | /* inode lookup table defines */ | ||
148 | #define SQUASHFS_LOOKUP_BYTES(A) ((A) * sizeof(u64)) | ||
149 | |||
150 | #define SQUASHFS_LOOKUP_BLOCK(A) (SQUASHFS_LOOKUP_BYTES(A) / \ | ||
151 | SQUASHFS_METADATA_SIZE) | ||
152 | |||
153 | #define SQUASHFS_LOOKUP_BLOCK_OFFSET(A) (SQUASHFS_LOOKUP_BYTES(A) % \ | ||
154 | SQUASHFS_METADATA_SIZE) | ||
155 | |||
156 | #define SQUASHFS_LOOKUP_BLOCKS(A) ((SQUASHFS_LOOKUP_BYTES(A) + \ | ||
157 | SQUASHFS_METADATA_SIZE - 1) / \ | ||
158 | SQUASHFS_METADATA_SIZE) | ||
159 | |||
160 | #define SQUASHFS_LOOKUP_BLOCK_BYTES(A) (SQUASHFS_LOOKUP_BLOCKS(A) *\ | ||
161 | sizeof(u64)) | ||
162 | |||
163 | /* uid/gid lookup table defines */ | ||
164 | #define SQUASHFS_ID_BYTES(A) ((A) * sizeof(unsigned int)) | ||
165 | |||
166 | #define SQUASHFS_ID_BLOCK(A) (SQUASHFS_ID_BYTES(A) / \ | ||
167 | SQUASHFS_METADATA_SIZE) | ||
168 | |||
169 | #define SQUASHFS_ID_BLOCK_OFFSET(A) (SQUASHFS_ID_BYTES(A) % \ | ||
170 | SQUASHFS_METADATA_SIZE) | ||
171 | |||
172 | #define SQUASHFS_ID_BLOCKS(A) ((SQUASHFS_ID_BYTES(A) + \ | ||
173 | SQUASHFS_METADATA_SIZE - 1) / \ | ||
174 | SQUASHFS_METADATA_SIZE) | ||
175 | |||
176 | #define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\ | ||
177 | sizeof(u64)) | ||
178 | |||
179 | /* cached data constants for filesystem */ | ||
180 | #define SQUASHFS_CACHED_BLKS 8 | ||
181 | |||
182 | #define SQUASHFS_MAX_FILE_SIZE_LOG 64 | ||
183 | |||
184 | #define SQUASHFS_MAX_FILE_SIZE (1LL << \ | ||
185 | (SQUASHFS_MAX_FILE_SIZE_LOG - 2)) | ||
186 | |||
187 | #define SQUASHFS_MARKER_BYTE 0xff | ||
188 | |||
189 | /* meta index cache */ | ||
190 | #define SQUASHFS_META_INDEXES (SQUASHFS_METADATA_SIZE / sizeof(unsigned int)) | ||
191 | #define SQUASHFS_META_ENTRIES 127 | ||
192 | #define SQUASHFS_META_SLOTS 8 | ||
193 | |||
/* One entry in a meta index: maps a position to an on-disk block location */
struct meta_entry {
	u64			data_block;
	unsigned int		index_block;
	unsigned short		offset;
	unsigned short		pad;	/* explicit padding for alignment */
};

/*
 * In-memory index over a regular file's block list, keyed by inode number
 * and offset.  NOTE(review): presumably used to avoid rescanning the block
 * list on seeks - confirm against the file read code (file.c).
 */
struct meta_index {
	unsigned int		inode_number;
	unsigned int		offset;
	unsigned short		entries;
	unsigned short		skip;
	unsigned short		locked;
	unsigned short		pad;	/* explicit padding for alignment */
	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
};
210 | |||
211 | |||
212 | /* | ||
213 | * definitions for structures on disk | ||
214 | */ | ||
215 | #define ZLIB_COMPRESSION 1 | ||
216 | |||
217 | struct squashfs_super_block { | ||
218 | __le32 s_magic; | ||
219 | __le32 inodes; | ||
220 | __le32 mkfs_time; | ||
221 | __le32 block_size; | ||
222 | __le32 fragments; | ||
223 | __le16 compression; | ||
224 | __le16 block_log; | ||
225 | __le16 flags; | ||
226 | __le16 no_ids; | ||
227 | __le16 s_major; | ||
228 | __le16 s_minor; | ||
229 | __le64 root_inode; | ||
230 | __le64 bytes_used; | ||
231 | __le64 id_table_start; | ||
232 | __le64 xattr_table_start; | ||
233 | __le64 inode_table_start; | ||
234 | __le64 directory_table_start; | ||
235 | __le64 fragment_table_start; | ||
236 | __le64 lookup_table_start; | ||
237 | }; | ||
238 | |||
239 | struct squashfs_dir_index { | ||
240 | __le32 index; | ||
241 | __le32 start_block; | ||
242 | __le32 size; | ||
243 | unsigned char name[0]; | ||
244 | }; | ||
245 | |||
246 | struct squashfs_base_inode { | ||
247 | __le16 inode_type; | ||
248 | __le16 mode; | ||
249 | __le16 uid; | ||
250 | __le16 guid; | ||
251 | __le32 mtime; | ||
252 | __le32 inode_number; | ||
253 | }; | ||
254 | |||
255 | struct squashfs_ipc_inode { | ||
256 | __le16 inode_type; | ||
257 | __le16 mode; | ||
258 | __le16 uid; | ||
259 | __le16 guid; | ||
260 | __le32 mtime; | ||
261 | __le32 inode_number; | ||
262 | __le32 nlink; | ||
263 | }; | ||
264 | |||
265 | struct squashfs_dev_inode { | ||
266 | __le16 inode_type; | ||
267 | __le16 mode; | ||
268 | __le16 uid; | ||
269 | __le16 guid; | ||
270 | __le32 mtime; | ||
271 | __le32 inode_number; | ||
272 | __le32 nlink; | ||
273 | __le32 rdev; | ||
274 | }; | ||
275 | |||
276 | struct squashfs_symlink_inode { | ||
277 | __le16 inode_type; | ||
278 | __le16 mode; | ||
279 | __le16 uid; | ||
280 | __le16 guid; | ||
281 | __le32 mtime; | ||
282 | __le32 inode_number; | ||
283 | __le32 nlink; | ||
284 | __le32 symlink_size; | ||
285 | char symlink[0]; | ||
286 | }; | ||
287 | |||
288 | struct squashfs_reg_inode { | ||
289 | __le16 inode_type; | ||
290 | __le16 mode; | ||
291 | __le16 uid; | ||
292 | __le16 guid; | ||
293 | __le32 mtime; | ||
294 | __le32 inode_number; | ||
295 | __le32 start_block; | ||
296 | __le32 fragment; | ||
297 | __le32 offset; | ||
298 | __le32 file_size; | ||
299 | __le16 block_list[0]; | ||
300 | }; | ||
301 | |||
302 | struct squashfs_lreg_inode { | ||
303 | __le16 inode_type; | ||
304 | __le16 mode; | ||
305 | __le16 uid; | ||
306 | __le16 guid; | ||
307 | __le32 mtime; | ||
308 | __le32 inode_number; | ||
309 | __le64 start_block; | ||
310 | __le64 file_size; | ||
311 | __le64 sparse; | ||
312 | __le32 nlink; | ||
313 | __le32 fragment; | ||
314 | __le32 offset; | ||
315 | __le32 xattr; | ||
316 | __le16 block_list[0]; | ||
317 | }; | ||
318 | |||
319 | struct squashfs_dir_inode { | ||
320 | __le16 inode_type; | ||
321 | __le16 mode; | ||
322 | __le16 uid; | ||
323 | __le16 guid; | ||
324 | __le32 mtime; | ||
325 | __le32 inode_number; | ||
326 | __le32 start_block; | ||
327 | __le32 nlink; | ||
328 | __le16 file_size; | ||
329 | __le16 offset; | ||
330 | __le32 parent_inode; | ||
331 | }; | ||
332 | |||
333 | struct squashfs_ldir_inode { | ||
334 | __le16 inode_type; | ||
335 | __le16 mode; | ||
336 | __le16 uid; | ||
337 | __le16 guid; | ||
338 | __le32 mtime; | ||
339 | __le32 inode_number; | ||
340 | __le32 nlink; | ||
341 | __le32 file_size; | ||
342 | __le32 start_block; | ||
343 | __le32 parent_inode; | ||
344 | __le16 i_count; | ||
345 | __le16 offset; | ||
346 | __le32 xattr; | ||
347 | struct squashfs_dir_index index[0]; | ||
348 | }; | ||
349 | |||
350 | union squashfs_inode { | ||
351 | struct squashfs_base_inode base; | ||
352 | struct squashfs_dev_inode dev; | ||
353 | struct squashfs_symlink_inode symlink; | ||
354 | struct squashfs_reg_inode reg; | ||
355 | struct squashfs_lreg_inode lreg; | ||
356 | struct squashfs_dir_inode dir; | ||
357 | struct squashfs_ldir_inode ldir; | ||
358 | struct squashfs_ipc_inode ipc; | ||
359 | }; | ||
360 | |||
/*
 * On-disk directory entry.  'name' follows the fixed fields and is
 * 'size + 1' bytes long ('size' is stored off by one, see namei.c/dir.c).
 * All multi-byte fields are little-endian.
 */
struct squashfs_dir_entry {
	__le16			offset;
	__le16			inode_number;
	__le16			type;
	__le16			size;
	char			name[0];
};

/*
 * On-disk directory header, preceding a run of 'count + 1' entries which
 * share the same inode start block ('count' is also stored off by one).
 */
struct squashfs_dir_header {
	__le32			count;
	__le32			start_block;
	__le32			inode_number;
};

/* On-disk fragment table entry: location and (compressed) size of a fragment */
struct squashfs_fragment_entry {
	__le64			start_block;
	__le32			size;
	unsigned int		unused;
};
380 | |||
381 | #endif | ||
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h new file mode 100644 index 000000000000..fbfca30c0c68 --- /dev/null +++ b/fs/squashfs/squashfs_fs_i.h | |||
@@ -0,0 +1,45 @@ | |||
1 | #ifndef SQUASHFS_FS_I | ||
2 | #define SQUASHFS_FS_I | ||
3 | /* | ||
4 | * Squashfs | ||
5 | * | ||
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version 2, | ||
12 | * or (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | * | ||
23 | * squashfs_fs_i.h | ||
24 | */ | ||
25 | |||
/*
 * Squashfs in-memory inode: the VFS inode plus the on-disk location of the
 * inode's metadata.  The anonymous union overlays type-specific state -
 * the second struct is used for directories (see namei.c); the first is
 * presumably for regular files - confirm against file.c.
 */
struct squashfs_inode_info {
	u64		start;		/* metadata block containing the inode */
	int		offset;		/* offset into the uncompressed block */
	union {
		struct {
			u64	fragment_block;
			int	fragment_size;
			int	fragment_offset;
			u64	block_list_start;
		};
		struct {
			u64	dir_idx_start;	/* directory index location */
			int	dir_idx_offset;
			int	dir_idx_cnt;	/* number of index entries */
			int	parent;
		};
	};
	struct inode	vfs_inode;	/* must allow container_of() lookup */
};
45 | #endif | ||
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h new file mode 100644 index 000000000000..c8c65614dd1c --- /dev/null +++ b/fs/squashfs/squashfs_fs_sb.h | |||
@@ -0,0 +1,76 @@ | |||
1 | #ifndef SQUASHFS_FS_SB | ||
2 | #define SQUASHFS_FS_SB | ||
3 | /* | ||
4 | * Squashfs | ||
5 | * | ||
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; either version 2, | ||
12 | * or (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
22 | * | ||
23 | * squashfs_fs_sb.h | ||
24 | */ | ||
25 | |||
26 | #include "squashfs_fs.h" | ||
27 | |||
/*
 * A cache of decompressed blocks, shared by all readers of the filesystem.
 */
struct squashfs_cache {
	char			*name;		/* name used in error messages */
	int			entries;	/* size of the 'entry' array */
	int			next_blk;
	int			num_waiters;
	int			unused;
	int			block_size;
	int			pages;
	spinlock_t		lock;
	wait_queue_head_t	wait_queue;
	struct squashfs_cache_entry *entry;
};

/*
 * A single cached (decompressed) block.  NOTE(review): 'pending' and the
 * wait queue appear to let concurrent readers wait for the first reader's
 * decompress to complete - confirm against cache.c.
 */
struct squashfs_cache_entry {
	u64			block;
	int			length;
	int			refcount;
	u64			next_index;
	int			pending;
	int			error;
	int			num_waiters;
	wait_queue_head_t	wait_queue;
	struct squashfs_cache	*cache;		/* owning cache */
	void			**data;
};

/*
 * Squashfs per-superblock (mount) state.
 */
struct squashfs_sb_info {
	int			devblksize;	/* device block size */
	int			devblksize_log2;
	struct squashfs_cache	*block_cache;
	struct squashfs_cache	*fragment_cache;
	struct squashfs_cache	*read_page;
	int			next_meta_index;
	__le64			*id_table;
	__le64			*fragment_index;
	unsigned int		*fragment_index_2;
	struct mutex		read_data_mutex;
	struct mutex		meta_index_mutex;
	struct meta_index	*meta_index;
	z_stream		stream;		/* zlib decompression state */
	__le64			*inode_lookup_table;
	u64			inode_table;	/* start of inode table on disk */
	u64			directory_table; /* start of directory table on disk */
	unsigned int		block_size;
	unsigned short		block_log;	/* log2 of block_size */
	long long		bytes_used;
	unsigned int		inodes;
};
76 | #endif | ||
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c new file mode 100644 index 000000000000..a0466d7467b2 --- /dev/null +++ b/fs/squashfs/super.c | |||
@@ -0,0 +1,440 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * super.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to read the superblock, read and initialise | ||
26 | * in-memory structures at mount time, and all the VFS glue code to register | ||
27 | * the filesystem. | ||
28 | */ | ||
29 | |||
30 | #include <linux/fs.h> | ||
31 | #include <linux/vfs.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/mutex.h> | ||
34 | #include <linux/pagemap.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/zlib.h> | ||
38 | |||
39 | #include "squashfs_fs.h" | ||
40 | #include "squashfs_fs_sb.h" | ||
41 | #include "squashfs_fs_i.h" | ||
42 | #include "squashfs.h" | ||
43 | |||
/* Defined at the bottom of this file; declared early for use above. */
static struct file_system_type squashfs_fs_type;
static struct super_operations squashfs_super_ops;
46 | |||
47 | static int supported_squashfs_filesystem(short major, short minor, short comp) | ||
48 | { | ||
49 | if (major < SQUASHFS_MAJOR) { | ||
50 | ERROR("Major/Minor mismatch, older Squashfs %d.%d " | ||
51 | "filesystems are unsupported\n", major, minor); | ||
52 | return -EINVAL; | ||
53 | } else if (major > SQUASHFS_MAJOR || minor > SQUASHFS_MINOR) { | ||
54 | ERROR("Major/Minor mismatch, trying to mount newer " | ||
55 | "%d.%d filesystem\n", major, minor); | ||
56 | ERROR("Please update your kernel\n"); | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | |||
60 | if (comp != ZLIB_COMPRESSION) | ||
61 | return -EINVAL; | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | |||
/*
 * Read and validate the on-disk superblock, build the in-memory
 * squashfs_sb_info (caches, index tables, zlib workspace) and
 * instantiate the root inode/dentry.
 *
 * Returns 0 on success or a negative errno.  On failure everything
 * allocated here is released and sb->s_fs_info is reset to NULL.
 * Two unwind labels are used: "failure" for errors before the
 * superblock has been read (only the workspace/sb_info may exist),
 * and "failed_mount" for everything after (kfree/squashfs_cache_delete
 * tolerate NULL, so partially-built state is safe to pass).
 */
static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct squashfs_sb_info *msblk;
	struct squashfs_super_block *sblk = NULL;
	char b[BDEVNAME_SIZE];
	struct inode *root;
	long long root_inode;
	unsigned short flags;
	unsigned int fragments;
	u64 lookup_table_start;
	int err;

	TRACE("Entered squashfs_fill_superblock\n");

	sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
	if (sb->s_fs_info == NULL) {
		ERROR("Failed to allocate squashfs_sb_info\n");
		return -ENOMEM;
	}
	msblk = sb->s_fs_info;

	/* zlib workspace used for all decompression on this mount */
	msblk->stream.workspace = kmalloc(zlib_inflate_workspacesize(),
		GFP_KERNEL);
	if (msblk->stream.workspace == NULL) {
		ERROR("Failed to allocate zlib workspace\n");
		goto failure;
	}

	sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
	if (sblk == NULL) {
		ERROR("Failed to allocate squashfs_super_block\n");
		goto failure;
	}

	msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
	msblk->devblksize_log2 = ffz(~msblk->devblksize);

	mutex_init(&msblk->read_data_mutex);
	mutex_init(&msblk->meta_index_mutex);

	/*
	 * msblk->bytes_used is checked in squashfs_read_table to ensure reads
	 * are not beyond filesystem end.  But as we're using
	 * squashfs_read_table here to read the superblock (including the value
	 * of bytes_used) we need to set it to an initial sensible dummy value
	 */
	msblk->bytes_used = sizeof(*sblk);
	err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk));

	if (err < 0) {
		ERROR("unable to read squashfs_super_block\n");
		goto failed_mount;
	}

	/* Check it is a SQUASHFS superblock */
	sb->s_magic = le32_to_cpu(sblk->s_magic);
	if (sb->s_magic != SQUASHFS_MAGIC) {
		if (!silent)
			ERROR("Can't find a SQUASHFS superblock on %s\n",
						bdevname(sb->s_bdev, b));
		err = -EINVAL;
		goto failed_mount;
	}

	/* Check the MAJOR & MINOR versions and compression type */
	err = supported_squashfs_filesystem(le16_to_cpu(sblk->s_major),
			le16_to_cpu(sblk->s_minor),
			le16_to_cpu(sblk->compression));
	if (err < 0)
		goto failed_mount;

	/* default error for the sanity checks below */
	err = -EINVAL;

	/*
	 * Check if there's xattrs in the filesystem.  These are not
	 * supported in this version, so warn that they will be ignored.
	 */
	if (le64_to_cpu(sblk->xattr_table_start) != SQUASHFS_INVALID_BLK)
		ERROR("Xattrs in filesystem, these will be ignored\n");

	/* Check the filesystem does not extend beyond the end of the
	   block device */
	msblk->bytes_used = le64_to_cpu(sblk->bytes_used);
	if (msblk->bytes_used < 0 || msblk->bytes_used >
			i_size_read(sb->s_bdev->bd_inode))
		goto failed_mount;

	/* Check block size for sanity */
	msblk->block_size = le32_to_cpu(sblk->block_size);
	if (msblk->block_size > SQUASHFS_FILE_MAX_SIZE)
		goto failed_mount;

	msblk->block_log = le16_to_cpu(sblk->block_log);
	if (msblk->block_log > SQUASHFS_FILE_MAX_LOG)
		goto failed_mount;

	/* Check the root inode for sanity */
	root_inode = le64_to_cpu(sblk->root_inode);
	if (SQUASHFS_INODE_OFFSET(root_inode) > SQUASHFS_METADATA_SIZE)
		goto failed_mount;

	msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
	msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
	msblk->inodes = le32_to_cpu(sblk->inodes);
	flags = le16_to_cpu(sblk->flags);

	TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
	TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(flags)
				? "un" : "");
	TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(flags)
				? "un" : "");
	TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
	TRACE("Block size %d\n", msblk->block_size);
	TRACE("Number of inodes %d\n", msblk->inodes);
	TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
	TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
	TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
	TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
	TRACE("sblk->fragment_table_start %llx\n",
		(u64) le64_to_cpu(sblk->fragment_table_start));
	TRACE("sblk->id_table_start %llx\n",
		(u64) le64_to_cpu(sblk->id_table_start));

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_flags |= MS_RDONLY;
	sb->s_op = &squashfs_super_ops;

	err = -ENOMEM;

	msblk->block_cache = squashfs_cache_init("metadata",
			SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
	if (msblk->block_cache == NULL)
		goto failed_mount;

	/* Allocate read_page block */
	msblk->read_page = squashfs_cache_init("data", 1, msblk->block_size);
	if (msblk->read_page == NULL) {
		ERROR("Failed to allocate read_page block\n");
		goto failed_mount;
	}

	/* Allocate and read id index table */
	msblk->id_table = squashfs_read_id_index_table(sb,
		le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
	if (IS_ERR(msblk->id_table)) {
		err = PTR_ERR(msblk->id_table);
		msblk->id_table = NULL;
		goto failed_mount;
	}

	/* fragment cache/index only exist if the fs contains fragments */
	fragments = le32_to_cpu(sblk->fragments);
	if (fragments == 0)
		goto allocate_lookup_table;

	msblk->fragment_cache = squashfs_cache_init("fragment",
		SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
	if (msblk->fragment_cache == NULL) {
		err = -ENOMEM;
		goto failed_mount;
	}

	/* Allocate and read fragment index table */
	msblk->fragment_index = squashfs_read_fragment_index_table(sb,
		le64_to_cpu(sblk->fragment_table_start), fragments);
	if (IS_ERR(msblk->fragment_index)) {
		err = PTR_ERR(msblk->fragment_index);
		msblk->fragment_index = NULL;
		goto failed_mount;
	}

allocate_lookup_table:
	/* lookup table is optional; without it the fs is not exportable */
	lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
	if (lookup_table_start == SQUASHFS_INVALID_BLK)
		goto allocate_root;

	/* Allocate and read inode lookup table */
	msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
		lookup_table_start, msblk->inodes);
	if (IS_ERR(msblk->inode_lookup_table)) {
		err = PTR_ERR(msblk->inode_lookup_table);
		msblk->inode_lookup_table = NULL;
		goto failed_mount;
	}

	sb->s_export_op = &squashfs_export_ops;

allocate_root:
	root = new_inode(sb);
	if (!root) {
		err = -ENOMEM;
		goto failed_mount;
	}

	err = squashfs_read_inode(root, root_inode);
	if (err) {
		iget_failed(root);
		goto failed_mount;
	}
	insert_inode_hash(root);

	sb->s_root = d_alloc_root(root);
	if (sb->s_root == NULL) {
		ERROR("Root inode create failed\n");
		err = -ENOMEM;
		iput(root);
		goto failed_mount;
	}

	TRACE("Leaving squashfs_fill_super\n");
	kfree(sblk);
	return 0;

failed_mount:
	/* cache_delete/kfree handle NULL, so partial state is fine here */
	squashfs_cache_delete(msblk->block_cache);
	squashfs_cache_delete(msblk->fragment_cache);
	squashfs_cache_delete(msblk->read_page);
	kfree(msblk->inode_lookup_table);
	kfree(msblk->fragment_index);
	kfree(msblk->id_table);
	kfree(msblk->stream.workspace);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
	kfree(sblk);
	return err;

failure:
	/* early failure: sblk was never successfully allocated */
	kfree(msblk->stream.workspace);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
	return -ENOMEM;
}
298 | |||
299 | |||
300 | static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf) | ||
301 | { | ||
302 | struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info; | ||
303 | |||
304 | TRACE("Entered squashfs_statfs\n"); | ||
305 | |||
306 | buf->f_type = SQUASHFS_MAGIC; | ||
307 | buf->f_bsize = msblk->block_size; | ||
308 | buf->f_blocks = ((msblk->bytes_used - 1) >> msblk->block_log) + 1; | ||
309 | buf->f_bfree = buf->f_bavail = 0; | ||
310 | buf->f_files = msblk->inodes; | ||
311 | buf->f_ffree = 0; | ||
312 | buf->f_namelen = SQUASHFS_NAME_LEN; | ||
313 | |||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | |||
/*
 * Handle remount: squashfs is read-only, so force MS_RDONLY back on and
 * accept the request.  Mount options in 'data' are ignored.
 */
static int squashfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}
323 | |||
324 | |||
325 | static void squashfs_put_super(struct super_block *sb) | ||
326 | { | ||
327 | if (sb->s_fs_info) { | ||
328 | struct squashfs_sb_info *sbi = sb->s_fs_info; | ||
329 | squashfs_cache_delete(sbi->block_cache); | ||
330 | squashfs_cache_delete(sbi->fragment_cache); | ||
331 | squashfs_cache_delete(sbi->read_page); | ||
332 | kfree(sbi->id_table); | ||
333 | kfree(sbi->fragment_index); | ||
334 | kfree(sbi->meta_index); | ||
335 | kfree(sbi->stream.workspace); | ||
336 | kfree(sb->s_fs_info); | ||
337 | sb->s_fs_info = NULL; | ||
338 | } | ||
339 | } | ||
340 | |||
341 | |||
/*
 * Mount entry point: delegate to get_sb_bdev(), which opens the backing
 * block device and calls squashfs_fill_super() to do the real work.
 */
static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
				const char *dev_name, void *data,
				struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
				mnt);
}
349 | |||
350 | |||
351 | static struct kmem_cache *squashfs_inode_cachep; | ||
352 | |||
353 | |||
/*
 * Slab constructor: initialise the embedded VFS inode once per object,
 * when the slab page is created (not on every allocation).
 */
static void init_once(void *foo)
{
	struct squashfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
360 | |||
361 | |||
362 | static int __init init_inodecache(void) | ||
363 | { | ||
364 | squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache", | ||
365 | sizeof(struct squashfs_inode_info), 0, | ||
366 | SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once); | ||
367 | |||
368 | return squashfs_inode_cachep ? 0 : -ENOMEM; | ||
369 | } | ||
370 | |||
371 | |||
/* Tear down the inode slab cache (module exit, or failed registration). */
static void destroy_inodecache(void)
{
	kmem_cache_destroy(squashfs_inode_cachep);
}
376 | |||
377 | |||
378 | static int __init init_squashfs_fs(void) | ||
379 | { | ||
380 | int err = init_inodecache(); | ||
381 | |||
382 | if (err) | ||
383 | return err; | ||
384 | |||
385 | err = register_filesystem(&squashfs_fs_type); | ||
386 | if (err) { | ||
387 | destroy_inodecache(); | ||
388 | return err; | ||
389 | } | ||
390 | |||
391 | printk(KERN_INFO "squashfs: version 4.0 (2009/01/03) " | ||
392 | "Phillip Lougher\n"); | ||
393 | |||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | |||
/* Module exit: unregister the filesystem, then free the inode cache. */
static void __exit exit_squashfs_fs(void)
{
	unregister_filesystem(&squashfs_fs_type);
	destroy_inodecache();
}
403 | |||
404 | |||
405 | static struct inode *squashfs_alloc_inode(struct super_block *sb) | ||
406 | { | ||
407 | struct squashfs_inode_info *ei = | ||
408 | kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL); | ||
409 | |||
410 | return ei ? &ei->vfs_inode : NULL; | ||
411 | } | ||
412 | |||
413 | |||
/* Return an inode allocated by squashfs_alloc_inode() to the slab cache. */
static void squashfs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(squashfs_inode_cachep, squashfs_i(inode));
}
418 | |||
419 | |||
/* Filesystem registration: block-device backed, mounted as "squashfs". */
static struct file_system_type squashfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "squashfs",
	.get_sb = squashfs_get_sb,
	.kill_sb = kill_block_super,	/* generic teardown; calls put_super */
	.fs_flags = FS_REQUIRES_DEV
};
427 | |||
/* Superblock operations -- read-only fs, so no write/sync hooks needed. */
static struct super_operations squashfs_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.destroy_inode = squashfs_destroy_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
	.remount_fs = squashfs_remount
};
435 | |||
436 | module_init(init_squashfs_fs); | ||
437 | module_exit(exit_squashfs_fs); | ||
438 | MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); | ||
439 | MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); | ||
440 | MODULE_LICENSE("GPL"); | ||
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c new file mode 100644 index 000000000000..83d87880aac8 --- /dev/null +++ b/fs/squashfs/symlink.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * Squashfs - a compressed read only filesystem for Linux | ||
3 | * | ||
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | ||
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version 2, | ||
10 | * or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | * | ||
21 | * symlink.c | ||
22 | */ | ||
23 | |||
24 | /* | ||
25 | * This file implements code to handle symbolic links. | ||
26 | * | ||
27 | * The data contents of symbolic links are stored inside the symbolic | ||
28 | * link inode within the inode table. This allows the normally small symbolic | ||
29 | * link to be compressed as part of the inode table, achieving much greater | ||
30 | * compression than if the symbolic link was compressed individually. | ||
31 | */ | ||
32 | |||
33 | #include <linux/fs.h> | ||
34 | #include <linux/vfs.h> | ||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/slab.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <linux/pagemap.h> | ||
39 | #include <linux/zlib.h> | ||
40 | |||
41 | #include "squashfs_fs.h" | ||
42 | #include "squashfs_fs_sb.h" | ||
43 | #include "squashfs_fs_i.h" | ||
44 | #include "squashfs.h" | ||
45 | |||
/*
 * Fill @page with (part of) a symbolic link's target string.  The link
 * data lives inside the inode table metadata at the inode's
 * (start, offset) position.  Always returns 0; read failure is
 * reported by setting PG_error on the page instead.
 */
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	/* byte offset of this page within the symlink target string */
	int index = page->index << PAGE_CACHE_SHIFT;
	u64 block = squashfs_i(inode)->start;
	int offset = squashfs_i(inode)->offset;
	/* bytes of link data that land in this page */
	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
	int bytes, copied;
	void *pageaddr;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
			"%llx, offset %x\n", page->index, block, offset);

	/*
	 * Skip index bytes into symlink metadata.
	 */
	if (index) {
		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
				index);
		if (bytes < 0) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			goto error_out;
		}
	}

	/*
	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
	 * is not used here because it can sleep and we want to use
	 * kmap_atomic to map the page.  Instead call the underlying
	 * squashfs_cache_get routine.  As length bytes may overlap metadata
	 * blocks, we may need to call squashfs_cache_get multiple times.
	 */
	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
		if (entry->error) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			squashfs_cache_put(entry);
			goto error_out;
		}

		/* cannot sleep between kmap_atomic and kunmap_atomic */
		pageaddr = kmap_atomic(page, KM_USER0);
		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
								length - bytes);
		if (copied == length - bytes)
			/* done: zero the remainder of the page */
			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
		else
			/* more data in the next metadata block */
			block = entry->next_index;
		kunmap_atomic(pageaddr, KM_USER0);
		squashfs_cache_put(entry);
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error_out:
	SetPageError(page);
	unlock_page(page);
	return 0;
}
114 | |||
115 | |||
/* Symlink pages are read-only; only a readpage operation is needed. */
const struct address_space_operations squashfs_symlink_aops = {
	.readpage = squashfs_symlink_readpage
};
diff --git a/include/asm-frv/mmu.h b/include/asm-frv/mmu.h index 22c03714fb14..86ca0e86e7d2 100644 --- a/include/asm-frv/mmu.h +++ b/include/asm-frv/mmu.h | |||
@@ -22,7 +22,6 @@ typedef struct { | |||
22 | unsigned long dtlb_ptd_mapping; /* [DAMR5] PTD mapping for dtlb cached PGE */ | 22 | unsigned long dtlb_ptd_mapping; /* [DAMR5] PTD mapping for dtlb cached PGE */ |
23 | 23 | ||
24 | #else | 24 | #else |
25 | struct vm_list_struct *vmlist; | ||
26 | unsigned long end_brk; | 25 | unsigned long end_brk; |
27 | 26 | ||
28 | #endif | 27 | #endif |
diff --git a/include/asm-m32r/mmu.h b/include/asm-m32r/mmu.h index d9bd724479cf..150cb92bb666 100644 --- a/include/asm-m32r/mmu.h +++ b/include/asm-m32r/mmu.h | |||
@@ -4,7 +4,6 @@ | |||
4 | #if !defined(CONFIG_MMU) | 4 | #if !defined(CONFIG_MMU) |
5 | 5 | ||
6 | typedef struct { | 6 | typedef struct { |
7 | struct vm_list_struct *vmlist; | ||
8 | unsigned long end_brk; | 7 | unsigned long end_brk; |
9 | } mm_context_t; | 8 | } mm_context_t; |
10 | 9 | ||
diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 1ee9488ca2e4..79ca2da81c87 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h | |||
@@ -31,6 +31,10 @@ struct backlight_device; | |||
31 | struct fb_info; | 31 | struct fb_info; |
32 | 32 | ||
33 | struct backlight_ops { | 33 | struct backlight_ops { |
34 | unsigned int options; | ||
35 | |||
36 | #define BL_CORE_SUSPENDRESUME (1 << 0) | ||
37 | |||
34 | /* Notify the backlight driver some property has changed */ | 38 | /* Notify the backlight driver some property has changed */ |
35 | int (*update_status)(struct backlight_device *); | 39 | int (*update_status)(struct backlight_device *); |
36 | /* Return the current backlight brightness (accounting for power, | 40 | /* Return the current backlight brightness (accounting for power, |
@@ -51,7 +55,19 @@ struct backlight_properties { | |||
51 | modes; 4: full off), see FB_BLANK_XXX */ | 55 | modes; 4: full off), see FB_BLANK_XXX */ |
52 | int power; | 56 | int power; |
53 | /* FB Blanking active? (values as for power) */ | 57 | /* FB Blanking active? (values as for power) */ |
58 | /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ | ||
54 | int fb_blank; | 59 | int fb_blank; |
60 | /* Flags used to signal drivers of state changes */ | ||
61 | /* Upper 4 bits are reserved for driver internal use */ | ||
62 | unsigned int state; | ||
63 | |||
64 | #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ | ||
65 | #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ | ||
66 | #define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */ | ||
67 | #define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */ | ||
68 | #define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */ | ||
69 | #define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */ | ||
70 | |||
55 | }; | 71 | }; |
56 | 72 | ||
57 | struct backlight_device { | 73 | struct backlight_device { |
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h index 81b4207deb95..96eea90f01a8 100644 --- a/include/linux/leds-pca9532.h +++ b/include/linux/leds-pca9532.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #define __LINUX_PCA9532_H | 15 | #define __LINUX_PCA9532_H |
16 | 16 | ||
17 | #include <linux/leds.h> | 17 | #include <linux/leds.h> |
18 | #include <linux/workqueue.h> | ||
18 | 19 | ||
19 | enum pca9532_state { | 20 | enum pca9532_state { |
20 | PCA9532_OFF = 0x0, | 21 | PCA9532_OFF = 0x0, |
@@ -31,6 +32,7 @@ struct pca9532_led { | |||
31 | struct i2c_client *client; | 32 | struct i2c_client *client; |
32 | char *name; | 33 | char *name; |
33 | struct led_classdev ldev; | 34 | struct led_classdev ldev; |
35 | struct work_struct work; | ||
34 | enum pca9532_type type; | 36 | enum pca9532_type type; |
35 | enum pca9532_state state; | 37 | enum pca9532_state state; |
36 | }; | 38 | }; |
diff --git a/include/linux/leds.h b/include/linux/leds.h index d3a73f5a48c3..24489da701e3 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -32,7 +32,10 @@ struct led_classdev { | |||
32 | int brightness; | 32 | int brightness; |
33 | int flags; | 33 | int flags; |
34 | 34 | ||
35 | /* Lower 16 bits reflect status */ | ||
35 | #define LED_SUSPENDED (1 << 0) | 36 | #define LED_SUSPENDED (1 << 0) |
37 | /* Upper 16 bits reflect control information */ | ||
38 | #define LED_CORE_SUSPENDRESUME (1 << 16) | ||
36 | 39 | ||
37 | /* Set LED brightness level */ | 40 | /* Set LED brightness level */ |
38 | /* Must not sleep, use a workqueue if needed */ | 41 | /* Must not sleep, use a workqueue if needed */ |
@@ -62,7 +65,7 @@ struct led_classdev { | |||
62 | 65 | ||
63 | extern int led_classdev_register(struct device *parent, | 66 | extern int led_classdev_register(struct device *parent, |
64 | struct led_classdev *led_cdev); | 67 | struct led_classdev *led_cdev); |
65 | extern void led_classdev_unregister(struct led_classdev *lcd); | 68 | extern void led_classdev_unregister(struct led_classdev *led_cdev); |
66 | extern void led_classdev_suspend(struct led_classdev *led_cdev); | 69 | extern void led_classdev_suspend(struct led_classdev *led_cdev); |
67 | extern void led_classdev_resume(struct led_classdev *led_cdev); | 70 | extern void led_classdev_resume(struct led_classdev *led_cdev); |
68 | 71 | ||
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h index e794dfb87504..97ffdc1d3442 100644 --- a/include/linux/mISDNhw.h +++ b/include/linux/mISDNhw.h | |||
@@ -57,20 +57,21 @@ | |||
57 | #define FLG_L2DATA 14 /* channel use L2 DATA primitives */ | 57 | #define FLG_L2DATA 14 /* channel use L2 DATA primitives */ |
58 | #define FLG_ORIGIN 15 /* channel is on origin site */ | 58 | #define FLG_ORIGIN 15 /* channel is on origin site */ |
59 | /* channel specific stuff */ | 59 | /* channel specific stuff */ |
60 | #define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */ | ||
60 | /* arcofi specific */ | 61 | /* arcofi specific */ |
61 | #define FLG_ARCOFI_TIMER 16 | 62 | #define FLG_ARCOFI_TIMER 17 |
62 | #define FLG_ARCOFI_ERROR 17 | 63 | #define FLG_ARCOFI_ERROR 18 |
63 | /* isar specific */ | 64 | /* isar specific */ |
64 | #define FLG_INITIALIZED 16 | 65 | #define FLG_INITIALIZED 17 |
65 | #define FLG_DLEETX 17 | 66 | #define FLG_DLEETX 18 |
66 | #define FLG_LASTDLE 18 | 67 | #define FLG_LASTDLE 19 |
67 | #define FLG_FIRST 19 | 68 | #define FLG_FIRST 20 |
68 | #define FLG_LASTDATA 20 | 69 | #define FLG_LASTDATA 21 |
69 | #define FLG_NMD_DATA 21 | 70 | #define FLG_NMD_DATA 22 |
70 | #define FLG_FTI_RUN 22 | 71 | #define FLG_FTI_RUN 23 |
71 | #define FLG_LL_OK 23 | 72 | #define FLG_LL_OK 24 |
72 | #define FLG_LL_CONN 24 | 73 | #define FLG_LL_CONN 25 |
73 | #define FLG_DTMFSEND 25 | 74 | #define FLG_DTMFSEND 26 |
74 | 75 | ||
75 | /* workq events */ | 76 | /* workq events */ |
76 | #define FLG_RECVQUEUE 30 | 77 | #define FLG_RECVQUEUE 30 |
@@ -183,6 +184,7 @@ extern void queue_ch_frame(struct mISDNchannel *, u_int, | |||
183 | extern int dchannel_senddata(struct dchannel *, struct sk_buff *); | 184 | extern int dchannel_senddata(struct dchannel *, struct sk_buff *); |
184 | extern int bchannel_senddata(struct bchannel *, struct sk_buff *); | 185 | extern int bchannel_senddata(struct bchannel *, struct sk_buff *); |
185 | extern void recv_Dchannel(struct dchannel *); | 186 | extern void recv_Dchannel(struct dchannel *); |
187 | extern void recv_Echannel(struct dchannel *, struct dchannel *); | ||
186 | extern void recv_Bchannel(struct bchannel *); | 188 | extern void recv_Bchannel(struct bchannel *); |
187 | extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); | 189 | extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *); |
188 | extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); | 190 | extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *); |
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 8f2d60da04e7..557477ac3d5b 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h | |||
@@ -36,8 +36,8 @@ | |||
36 | * - should be incremented on every checkin | 36 | * - should be incremented on every checkin |
37 | */ | 37 | */ |
38 | #define MISDN_MAJOR_VERSION 1 | 38 | #define MISDN_MAJOR_VERSION 1 |
39 | #define MISDN_MINOR_VERSION 0 | 39 | #define MISDN_MINOR_VERSION 1 |
40 | #define MISDN_RELEASE 19 | 40 | #define MISDN_RELEASE 20 |
41 | 41 | ||
42 | /* primitives for information exchange | 42 | /* primitives for information exchange |
43 | * generell format | 43 | * generell format |
@@ -80,6 +80,7 @@ | |||
80 | #define PH_DEACTIVATE_IND 0x0202 | 80 | #define PH_DEACTIVATE_IND 0x0202 |
81 | #define PH_DEACTIVATE_CNF 0x4202 | 81 | #define PH_DEACTIVATE_CNF 0x4202 |
82 | #define PH_DATA_IND 0x2002 | 82 | #define PH_DATA_IND 0x2002 |
83 | #define PH_DATA_E_IND 0x3002 | ||
83 | #define MPH_ACTIVATE_IND 0x0502 | 84 | #define MPH_ACTIVATE_IND 0x0502 |
84 | #define MPH_DEACTIVATE_IND 0x0602 | 85 | #define MPH_DEACTIVATE_IND 0x0602 |
85 | #define MPH_INFORMATION_IND 0x0702 | 86 | #define MPH_INFORMATION_IND 0x0702 |
@@ -199,6 +200,18 @@ | |||
199 | #define ISDN_P_NT_S0 0x02 | 200 | #define ISDN_P_NT_S0 0x02 |
200 | #define ISDN_P_TE_E1 0x03 | 201 | #define ISDN_P_TE_E1 0x03 |
201 | #define ISDN_P_NT_E1 0x04 | 202 | #define ISDN_P_NT_E1 0x04 |
203 | #define ISDN_P_TE_UP0 0x05 | ||
204 | #define ISDN_P_NT_UP0 0x06 | ||
205 | |||
206 | #define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \ | ||
207 | (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE)) | ||
208 | #define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \ | ||
209 | (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT)) | ||
210 | #define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0)) | ||
211 | #define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1)) | ||
212 | #define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0)) | ||
213 | |||
214 | |||
202 | #define ISDN_P_LAPD_TE 0x10 | 215 | #define ISDN_P_LAPD_TE 0x10 |
203 | #define ISDN_P_LAPD_NT 0x11 | 216 | #define ISDN_P_LAPD_NT 0x11 |
204 | 217 | ||
@@ -255,16 +268,6 @@ struct sockaddr_mISDN { | |||
255 | unsigned char tei; | 268 | unsigned char tei; |
256 | }; | 269 | }; |
257 | 270 | ||
258 | /* timer device ioctl */ | ||
259 | #define IMADDTIMER _IOR('I', 64, int) | ||
260 | #define IMDELTIMER _IOR('I', 65, int) | ||
261 | /* socket ioctls */ | ||
262 | #define IMGETVERSION _IOR('I', 66, int) | ||
263 | #define IMGETCOUNT _IOR('I', 67, int) | ||
264 | #define IMGETDEVINFO _IOR('I', 68, int) | ||
265 | #define IMCTRLREQ _IOR('I', 69, int) | ||
266 | #define IMCLEAR_L2 _IOR('I', 70, int) | ||
267 | |||
268 | struct mISDNversion { | 271 | struct mISDNversion { |
269 | unsigned char major; | 272 | unsigned char major; |
270 | unsigned char minor; | 273 | unsigned char minor; |
@@ -281,6 +284,40 @@ struct mISDN_devinfo { | |||
281 | char name[MISDN_MAX_IDLEN]; | 284 | char name[MISDN_MAX_IDLEN]; |
282 | }; | 285 | }; |
283 | 286 | ||
287 | struct mISDN_devrename { | ||
288 | u_int id; | ||
289 | char name[MISDN_MAX_IDLEN]; /* new name */ | ||
290 | }; | ||
291 | |||
292 | /* MPH_INFORMATION_REQ payload */ | ||
293 | struct ph_info_ch { | ||
294 | __u32 protocol; | ||
295 | __u64 Flags; | ||
296 | }; | ||
297 | |||
298 | struct ph_info_dch { | ||
299 | struct ph_info_ch ch; | ||
300 | __u16 state; | ||
301 | __u16 num_bch; | ||
302 | }; | ||
303 | |||
304 | struct ph_info { | ||
305 | struct ph_info_dch dch; | ||
306 | struct ph_info_ch bch[]; | ||
307 | }; | ||
308 | |||
309 | /* timer device ioctl */ | ||
310 | #define IMADDTIMER _IOR('I', 64, int) | ||
311 | #define IMDELTIMER _IOR('I', 65, int) | ||
312 | |||
313 | /* socket ioctls */ | ||
314 | #define IMGETVERSION _IOR('I', 66, int) | ||
315 | #define IMGETCOUNT _IOR('I', 67, int) | ||
316 | #define IMGETDEVINFO _IOR('I', 68, int) | ||
317 | #define IMCTRLREQ _IOR('I', 69, int) | ||
318 | #define IMCLEAR_L2 _IOR('I', 70, int) | ||
319 | #define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename) | ||
320 | |||
284 | static inline int | 321 | static inline int |
285 | test_channelmap(u_int nr, u_char *map) | 322 | test_channelmap(u_int nr, u_char *map) |
286 | { | 323 | { |
@@ -312,6 +349,8 @@ clear_channelmap(u_int nr, u_char *map) | |||
312 | #define MISDN_CTRL_SETPEER 0x0040 | 349 | #define MISDN_CTRL_SETPEER 0x0040 |
313 | #define MISDN_CTRL_UNSETPEER 0x0080 | 350 | #define MISDN_CTRL_UNSETPEER 0x0080 |
314 | #define MISDN_CTRL_RX_OFF 0x0100 | 351 | #define MISDN_CTRL_RX_OFF 0x0100 |
352 | #define MISDN_CTRL_FILL_EMPTY 0x0200 | ||
353 | #define MISDN_CTRL_GETPEER 0x0400 | ||
315 | #define MISDN_CTRL_HW_FEATURES_OP 0x2000 | 354 | #define MISDN_CTRL_HW_FEATURES_OP 0x2000 |
316 | #define MISDN_CTRL_HW_FEATURES 0x2001 | 355 | #define MISDN_CTRL_HW_FEATURES 0x2001 |
317 | #define MISDN_CTRL_HFC_OP 0x4000 | 356 | #define MISDN_CTRL_HFC_OP 0x4000 |
@@ -362,6 +401,7 @@ struct mISDN_ctrl_req { | |||
362 | #define DEBUG_L2_TEI 0x00100000 | 401 | #define DEBUG_L2_TEI 0x00100000 |
363 | #define DEBUG_L2_TEIFSM 0x00200000 | 402 | #define DEBUG_L2_TEIFSM 0x00200000 |
364 | #define DEBUG_TIMER 0x01000000 | 403 | #define DEBUG_TIMER 0x01000000 |
404 | #define DEBUG_CLOCK 0x02000000 | ||
365 | 405 | ||
366 | #define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) | 406 | #define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) |
367 | #define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) | 407 | #define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) |
@@ -375,6 +415,7 @@ struct mISDN_ctrl_req { | |||
375 | struct mISDNchannel; | 415 | struct mISDNchannel; |
376 | struct mISDNdevice; | 416 | struct mISDNdevice; |
377 | struct mISDNstack; | 417 | struct mISDNstack; |
418 | struct mISDNclock; | ||
378 | 419 | ||
379 | struct channel_req { | 420 | struct channel_req { |
380 | u_int protocol; | 421 | u_int protocol; |
@@ -423,7 +464,6 @@ struct mISDN_sock { | |||
423 | struct mISDNdevice { | 464 | struct mISDNdevice { |
424 | struct mISDNchannel D; | 465 | struct mISDNchannel D; |
425 | u_int id; | 466 | u_int id; |
426 | char name[MISDN_MAX_IDLEN]; | ||
427 | u_int Dprotocols; | 467 | u_int Dprotocols; |
428 | u_int Bprotocols; | 468 | u_int Bprotocols; |
429 | u_int nrbchan; | 469 | u_int nrbchan; |
@@ -452,6 +492,16 @@ struct mISDNstack { | |||
452 | #endif | 492 | #endif |
453 | }; | 493 | }; |
454 | 494 | ||
495 | typedef int (clockctl_func_t)(void *, int); | ||
496 | |||
497 | struct mISDNclock { | ||
498 | struct list_head list; | ||
499 | char name[64]; | ||
500 | int pri; | ||
501 | clockctl_func_t *ctl; | ||
502 | void *priv; | ||
503 | }; | ||
504 | |||
455 | /* global alloc/queue functions */ | 505 | /* global alloc/queue functions */ |
456 | 506 | ||
457 | static inline struct sk_buff * | 507 | static inline struct sk_buff * |
@@ -498,12 +548,23 @@ _queue_data(struct mISDNchannel *ch, u_int prim, | |||
498 | 548 | ||
499 | /* global register/unregister functions */ | 549 | /* global register/unregister functions */ |
500 | 550 | ||
501 | extern int mISDN_register_device(struct mISDNdevice *, char *name); | 551 | extern int mISDN_register_device(struct mISDNdevice *, |
552 | struct device *parent, char *name); | ||
502 | extern void mISDN_unregister_device(struct mISDNdevice *); | 553 | extern void mISDN_unregister_device(struct mISDNdevice *); |
503 | extern int mISDN_register_Bprotocol(struct Bprotocol *); | 554 | extern int mISDN_register_Bprotocol(struct Bprotocol *); |
504 | extern void mISDN_unregister_Bprotocol(struct Bprotocol *); | 555 | extern void mISDN_unregister_Bprotocol(struct Bprotocol *); |
556 | extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, | ||
557 | void *); | ||
558 | extern void mISDN_unregister_clock(struct mISDNclock *); | ||
559 | |||
560 | static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) | ||
561 | { | ||
562 | return dev_get_drvdata(dev); | ||
563 | } | ||
505 | 564 | ||
506 | extern void set_channel_address(struct mISDNchannel *, u_int, u_int); | 565 | extern void set_channel_address(struct mISDNchannel *, u_int, u_int); |
566 | extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); | ||
567 | extern unsigned short mISDN_clock_get(void); | ||
507 | 568 | ||
508 | #endif /* __KERNEL__ */ | 569 | #endif /* __KERNEL__ */ |
509 | #endif /* mISDNIF_H */ | 570 | #endif /* mISDNIF_H */ |
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index 96acbfc8aa12..be3264e286e0 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h | |||
@@ -13,6 +13,10 @@ | |||
13 | #ifndef __LINUX_MFD_WM8350_PMIC_H | 13 | #ifndef __LINUX_MFD_WM8350_PMIC_H |
14 | #define __LINUX_MFD_WM8350_PMIC_H | 14 | #define __LINUX_MFD_WM8350_PMIC_H |
15 | 15 | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/leds.h> | ||
18 | #include <linux/regulator/machine.h> | ||
19 | |||
16 | /* | 20 | /* |
17 | * Register values. | 21 | * Register values. |
18 | */ | 22 | */ |
@@ -700,6 +704,33 @@ struct wm8350; | |||
700 | struct platform_device; | 704 | struct platform_device; |
701 | struct regulator_init_data; | 705 | struct regulator_init_data; |
702 | 706 | ||
707 | /* | ||
708 | * WM8350 LED platform data | ||
709 | */ | ||
710 | struct wm8350_led_platform_data { | ||
711 | const char *name; | ||
712 | const char *default_trigger; | ||
713 | int max_uA; | ||
714 | }; | ||
715 | |||
716 | struct wm8350_led { | ||
717 | struct platform_device *pdev; | ||
718 | struct mutex mutex; | ||
719 | struct work_struct work; | ||
720 | spinlock_t value_lock; | ||
721 | enum led_brightness value; | ||
722 | struct led_classdev cdev; | ||
723 | int max_uA_index; | ||
724 | int enabled; | ||
725 | |||
726 | struct regulator *isink; | ||
727 | struct regulator_consumer_supply isink_consumer; | ||
728 | struct regulator_init_data isink_init; | ||
729 | struct regulator *dcdc; | ||
730 | struct regulator_consumer_supply dcdc_consumer; | ||
731 | struct regulator_init_data dcdc_init; | ||
732 | }; | ||
733 | |||
703 | struct wm8350_pmic { | 734 | struct wm8350_pmic { |
704 | /* Number of regulators of each type on this device */ | 735 | /* Number of regulators of each type on this device */ |
705 | int max_dcdc; | 736 | int max_dcdc; |
@@ -717,10 +748,15 @@ struct wm8350_pmic { | |||
717 | 748 | ||
718 | /* regulator devices */ | 749 | /* regulator devices */ |
719 | struct platform_device *pdev[NUM_WM8350_REGULATORS]; | 750 | struct platform_device *pdev[NUM_WM8350_REGULATORS]; |
751 | |||
752 | /* LED devices */ | ||
753 | struct wm8350_led led[2]; | ||
720 | }; | 754 | }; |
721 | 755 | ||
722 | int wm8350_register_regulator(struct wm8350 *wm8350, int reg, | 756 | int wm8350_register_regulator(struct wm8350 *wm8350, int reg, |
723 | struct regulator_init_data *initdata); | 757 | struct regulator_init_data *initdata); |
758 | int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | ||
759 | struct wm8350_led_platform_data *pdata); | ||
724 | 760 | ||
725 | /* | 761 | /* |
726 | * Additional DCDC control not supported via regulator API | 762 | * Additional DCDC control not supported via regulator API |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 4a3d28c86443..b91a73fd1bcc 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -56,19 +56,9 @@ extern unsigned long mmap_min_addr; | |||
56 | 56 | ||
57 | extern struct kmem_cache *vm_area_cachep; | 57 | extern struct kmem_cache *vm_area_cachep; |
58 | 58 | ||
59 | /* | ||
60 | * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is | ||
61 | * disabled, then there's a single shared list of VMAs maintained by the | ||
62 | * system, and mm's subscribe to these individually | ||
63 | */ | ||
64 | struct vm_list_struct { | ||
65 | struct vm_list_struct *next; | ||
66 | struct vm_area_struct *vma; | ||
67 | }; | ||
68 | |||
69 | #ifndef CONFIG_MMU | 59 | #ifndef CONFIG_MMU |
70 | extern struct rb_root nommu_vma_tree; | 60 | extern struct rb_root nommu_region_tree; |
71 | extern struct rw_semaphore nommu_vma_sem; | 61 | extern struct rw_semaphore nommu_region_sem; |
72 | 62 | ||
73 | extern unsigned int kobjsize(const void *objp); | 63 | extern unsigned int kobjsize(const void *objp); |
74 | #endif | 64 | #endif |
@@ -1061,6 +1051,7 @@ extern void memmap_init_zone(unsigned long, int, unsigned long, | |||
1061 | unsigned long, enum memmap_context); | 1051 | unsigned long, enum memmap_context); |
1062 | extern void setup_per_zone_pages_min(void); | 1052 | extern void setup_per_zone_pages_min(void); |
1063 | extern void mem_init(void); | 1053 | extern void mem_init(void); |
1054 | extern void __init mmap_init(void); | ||
1064 | extern void show_mem(void); | 1055 | extern void show_mem(void); |
1065 | extern void si_meminfo(struct sysinfo * val); | 1056 | extern void si_meminfo(struct sysinfo * val); |
1066 | extern void si_meminfo_node(struct sysinfo *val, int nid); | 1057 | extern void si_meminfo_node(struct sysinfo *val, int nid); |
@@ -1072,6 +1063,9 @@ extern void setup_per_cpu_pageset(void); | |||
1072 | static inline void setup_per_cpu_pageset(void) {} | 1063 | static inline void setup_per_cpu_pageset(void) {} |
1073 | #endif | 1064 | #endif |
1074 | 1065 | ||
1066 | /* nommu.c */ | ||
1067 | extern atomic_t mmap_pages_allocated; | ||
1068 | |||
1075 | /* prio_tree.c */ | 1069 | /* prio_tree.c */ |
1076 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); | 1070 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); |
1077 | void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); | 1071 | void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9cfc9b627fdd..92915e81443f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -97,6 +97,23 @@ struct page { | |||
97 | }; | 97 | }; |
98 | 98 | ||
99 | /* | 99 | /* |
100 | * A region containing a mapping of a non-memory backed file under NOMMU | ||
101 | * conditions. These are held in a global tree and are pinned by the VMAs that | ||
102 | * map parts of them. | ||
103 | */ | ||
104 | struct vm_region { | ||
105 | struct rb_node vm_rb; /* link in global region tree */ | ||
106 | unsigned long vm_flags; /* VMA vm_flags */ | ||
107 | unsigned long vm_start; /* start address of region */ | ||
108 | unsigned long vm_end; /* region initialised to here */ | ||
109 | unsigned long vm_top; /* region allocated to here */ | ||
110 | unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ | ||
111 | struct file *vm_file; /* the backing file or NULL */ | ||
112 | |||
113 | atomic_t vm_usage; /* region usage count */ | ||
114 | }; | ||
115 | |||
116 | /* | ||
100 | * This struct defines a memory VMM memory area. There is one of these | 117 | * This struct defines a memory VMM memory area. There is one of these |
101 | * per VM-area/task. A VM area is any part of the process virtual memory | 118 | * per VM-area/task. A VM area is any part of the process virtual memory |
102 | * space that has a special rule for the page-fault handlers (ie a shared | 119 | * space that has a special rule for the page-fault handlers (ie a shared |
@@ -152,7 +169,7 @@ struct vm_area_struct { | |||
152 | unsigned long vm_truncate_count;/* truncate_count or restart_addr */ | 169 | unsigned long vm_truncate_count;/* truncate_count or restart_addr */ |
153 | 170 | ||
154 | #ifndef CONFIG_MMU | 171 | #ifndef CONFIG_MMU |
155 | atomic_t vm_usage; /* refcount (VMAs shared if !MMU) */ | 172 | struct vm_region *vm_region; /* NOMMU mapping region */ |
156 | #endif | 173 | #endif |
157 | #ifdef CONFIG_NUMA | 174 | #ifdef CONFIG_NUMA |
158 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ | 175 | struct mempolicy *vm_policy; /* NUMA policy for the VMA */ |
diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h new file mode 100644 index 000000000000..7572d4e1fe76 --- /dev/null +++ b/include/linux/spi/tdo24m.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __TDO24M_H__ | ||
2 | #define __TDO24M_H__ | ||
3 | |||
4 | enum tdo24m_model { | ||
5 | TDO24M, | ||
6 | TDO35S, | ||
7 | }; | ||
8 | |||
9 | struct tdo24m_platform_data { | ||
10 | enum tdo24m_model model; | ||
11 | }; | ||
12 | |||
13 | #endif /* __TDO24M_H__ */ | ||
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index a7c748fa977a..0f0f0cf3ba9a 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/string.h> | 9 | #include <linux/string.h> |
10 | 10 | ||
11 | #include "do_mounts.h" | 11 | #include "do_mounts.h" |
12 | #include "../fs/squashfs/squashfs_fs.h" | ||
12 | 13 | ||
13 | int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */ | 14 | int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */ |
14 | 15 | ||
@@ -41,6 +42,7 @@ static int __init crd_load(int in_fd, int out_fd); | |||
41 | * ext2 | 42 | * ext2 |
42 | * romfs | 43 | * romfs |
43 | * cramfs | 44 | * cramfs |
45 | * squashfs | ||
44 | * gzip | 46 | * gzip |
45 | */ | 47 | */ |
46 | static int __init | 48 | static int __init |
@@ -51,6 +53,7 @@ identify_ramdisk_image(int fd, int start_block) | |||
51 | struct ext2_super_block *ext2sb; | 53 | struct ext2_super_block *ext2sb; |
52 | struct romfs_super_block *romfsb; | 54 | struct romfs_super_block *romfsb; |
53 | struct cramfs_super *cramfsb; | 55 | struct cramfs_super *cramfsb; |
56 | struct squashfs_super_block *squashfsb; | ||
54 | int nblocks = -1; | 57 | int nblocks = -1; |
55 | unsigned char *buf; | 58 | unsigned char *buf; |
56 | 59 | ||
@@ -62,6 +65,7 @@ identify_ramdisk_image(int fd, int start_block) | |||
62 | ext2sb = (struct ext2_super_block *) buf; | 65 | ext2sb = (struct ext2_super_block *) buf; |
63 | romfsb = (struct romfs_super_block *) buf; | 66 | romfsb = (struct romfs_super_block *) buf; |
64 | cramfsb = (struct cramfs_super *) buf; | 67 | cramfsb = (struct cramfs_super *) buf; |
68 | squashfsb = (struct squashfs_super_block *) buf; | ||
65 | memset(buf, 0xe5, size); | 69 | memset(buf, 0xe5, size); |
66 | 70 | ||
67 | /* | 71 | /* |
@@ -99,6 +103,16 @@ identify_ramdisk_image(int fd, int start_block) | |||
99 | goto done; | 103 | goto done; |
100 | } | 104 | } |
101 | 105 | ||
106 | /* squashfs is at block zero too */ | ||
107 | if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) { | ||
108 | printk(KERN_NOTICE | ||
109 | "RAMDISK: squashfs filesystem found at block %d\n", | ||
110 | start_block); | ||
111 | nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1) | ||
112 | >> BLOCK_SIZE_BITS; | ||
113 | goto done; | ||
114 | } | ||
115 | |||
102 | /* | 116 | /* |
103 | * Read block 1 to test for minix and ext2 superblock | 117 | * Read block 1 to test for minix and ext2 superblock |
104 | */ | 118 | */ |
diff --git a/init/initramfs.c b/init/initramfs.c index 4f5ba75aaa7c..d9c941c0c3ca 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -317,6 +317,7 @@ static int __init do_name(void) | |||
317 | if (wfd >= 0) { | 317 | if (wfd >= 0) { |
318 | sys_fchown(wfd, uid, gid); | 318 | sys_fchown(wfd, uid, gid); |
319 | sys_fchmod(wfd, mode); | 319 | sys_fchmod(wfd, mode); |
320 | sys_ftruncate(wfd, body_len); | ||
320 | vcollected = kstrdup(collected, GFP_KERNEL); | 321 | vcollected = kstrdup(collected, GFP_KERNEL); |
321 | state = CopyFile; | 322 | state = CopyFile; |
322 | } | 323 | } |
@@ -990,6 +990,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr) | |||
990 | */ | 990 | */ |
991 | vma = find_vma(mm, addr); | 991 | vma = find_vma(mm, addr); |
992 | 992 | ||
993 | #ifdef CONFIG_MMU | ||
993 | while (vma) { | 994 | while (vma) { |
994 | next = vma->vm_next; | 995 | next = vma->vm_next; |
995 | 996 | ||
@@ -1034,6 +1035,17 @@ asmlinkage long sys_shmdt(char __user *shmaddr) | |||
1034 | vma = next; | 1035 | vma = next; |
1035 | } | 1036 | } |
1036 | 1037 | ||
1038 | #else /* CONFIG_MMU */ | ||
1039 | /* under NOMMU conditions, the exact address to be destroyed must be | ||
1040 | * given */ | ||
1041 | retval = -EINVAL; | ||
1042 | if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { | ||
1043 | do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); | ||
1044 | retval = 0; | ||
1045 | } | ||
1046 | |||
1047 | #endif | ||
1048 | |||
1037 | up_write(&mm->mmap_sem); | 1049 | up_write(&mm->mmap_sem); |
1038 | return retval; | 1050 | return retval; |
1039 | } | 1051 | } |
diff --git a/kernel/cred.c b/kernel/cred.c index 043f78c133c4..3a039189d707 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -372,7 +372,8 @@ int commit_creds(struct cred *new) | |||
372 | old->fsuid != new->fsuid || | 372 | old->fsuid != new->fsuid || |
373 | old->fsgid != new->fsgid || | 373 | old->fsgid != new->fsgid || |
374 | !cap_issubset(new->cap_permitted, old->cap_permitted)) { | 374 | !cap_issubset(new->cap_permitted, old->cap_permitted)) { |
375 | set_dumpable(task->mm, suid_dumpable); | 375 | if (task->mm) |
376 | set_dumpable(task->mm, suid_dumpable); | ||
376 | task->pdeath_signal = 0; | 377 | task->pdeath_signal = 0; |
377 | smp_wmb(); | 378 | smp_wmb(); |
378 | } | 379 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 4018308048cf..1d68f1255dd8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1481,12 +1481,10 @@ void __init proc_caches_init(void) | |||
1481 | fs_cachep = kmem_cache_create("fs_cache", | 1481 | fs_cachep = kmem_cache_create("fs_cache", |
1482 | sizeof(struct fs_struct), 0, | 1482 | sizeof(struct fs_struct), 0, |
1483 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 1483 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1484 | vm_area_cachep = kmem_cache_create("vm_area_struct", | ||
1485 | sizeof(struct vm_area_struct), 0, | ||
1486 | SLAB_PANIC, NULL); | ||
1487 | mm_cachep = kmem_cache_create("mm_struct", | 1484 | mm_cachep = kmem_cache_create("mm_struct", |
1488 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, | 1485 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, |
1489 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 1486 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1487 | mmap_init(); | ||
1490 | } | 1488 | } |
1491 | 1489 | ||
1492 | /* | 1490 | /* |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 92f6e5bc3c24..89d74436318c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -82,6 +82,9 @@ extern int percpu_pagelist_fraction; | |||
82 | extern int compat_log; | 82 | extern int compat_log; |
83 | extern int latencytop_enabled; | 83 | extern int latencytop_enabled; |
84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; | 84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; |
85 | #ifndef CONFIG_MMU | ||
86 | extern int sysctl_nr_trim_pages; | ||
87 | #endif | ||
85 | #ifdef CONFIG_RCU_TORTURE_TEST | 88 | #ifdef CONFIG_RCU_TORTURE_TEST |
86 | extern int rcutorture_runnable; | 89 | extern int rcutorture_runnable; |
87 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 90 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
@@ -1102,6 +1105,17 @@ static struct ctl_table vm_table[] = { | |||
1102 | .mode = 0644, | 1105 | .mode = 0644, |
1103 | .proc_handler = &proc_dointvec | 1106 | .proc_handler = &proc_dointvec |
1104 | }, | 1107 | }, |
1108 | #else | ||
1109 | { | ||
1110 | .ctl_name = CTL_UNNUMBERED, | ||
1111 | .procname = "nr_trim_pages", | ||
1112 | .data = &sysctl_nr_trim_pages, | ||
1113 | .maxlen = sizeof(sysctl_nr_trim_pages), | ||
1114 | .mode = 0644, | ||
1115 | .proc_handler = &proc_dointvec_minmax, | ||
1116 | .strategy = &sysctl_intvec, | ||
1117 | .extra1 = &zero, | ||
1118 | }, | ||
1105 | #endif | 1119 | #endif |
1106 | { | 1120 | { |
1107 | .ctl_name = VM_LAPTOP_MODE, | 1121 | .ctl_name = VM_LAPTOP_MODE, |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2e75478e9c69..d0a32aab03ff 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -512,6 +512,13 @@ config DEBUG_VIRTUAL | |||
512 | 512 | ||
513 | If unsure, say N. | 513 | If unsure, say N. |
514 | 514 | ||
515 | config DEBUG_NOMMU_REGIONS | ||
516 | bool "Debug the global anon/private NOMMU mapping region tree" | ||
517 | depends on DEBUG_KERNEL && !MMU | ||
518 | help | ||
519 | This option causes the global tree of anonymous and private mapping | ||
520 | regions to be regularly checked for invalid topology. | ||
521 | |||
515 | config DEBUG_WRITECOUNT | 522 | config DEBUG_WRITECOUNT |
516 | bool "Debug filesystem writers count" | 523 | bool "Debug filesystem writers count" |
517 | depends on DEBUG_KERNEL | 524 | depends on DEBUG_KERNEL |
@@ -2472,3 +2472,13 @@ void mm_drop_all_locks(struct mm_struct *mm) | |||
2472 | 2472 | ||
2473 | mutex_unlock(&mm_all_locks_mutex); | 2473 | mutex_unlock(&mm_all_locks_mutex); |
2474 | } | 2474 | } |
2475 | |||
2476 | /* | ||
2477 | * initialise the VMA slab | ||
2478 | */ | ||
2479 | void __init mmap_init(void) | ||
2480 | { | ||
2481 | vm_area_cachep = kmem_cache_create("vm_area_struct", | ||
2482 | sizeof(struct vm_area_struct), 0, | ||
2483 | SLAB_PANIC, NULL); | ||
2484 | } | ||
diff --git a/mm/nommu.c b/mm/nommu.c index 1c28ea3a4e9c..60ed8375c986 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -6,11 +6,11 @@ | |||
6 | * | 6 | * |
7 | * See Documentation/nommu-mmap.txt | 7 | * See Documentation/nommu-mmap.txt |
8 | * | 8 | * |
9 | * Copyright (c) 2004-2005 David Howells <dhowells@redhat.com> | 9 | * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com> |
10 | * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> | 10 | * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> |
11 | * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> | 11 | * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> |
12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> | 12 | * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> |
13 | * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org> | 13 | * Copyright (c) 2007-2008 Paul Mundt <lethal@linux-sh.org> |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
@@ -33,6 +33,28 @@ | |||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <asm/tlb.h> | 34 | #include <asm/tlb.h> |
35 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
36 | #include "internal.h" | ||
37 | |||
38 | static inline __attribute__((format(printf, 1, 2))) | ||
39 | void no_printk(const char *fmt, ...) | ||
40 | { | ||
41 | } | ||
42 | |||
43 | #if 0 | ||
44 | #define kenter(FMT, ...) \ | ||
45 | printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) | ||
46 | #define kleave(FMT, ...) \ | ||
47 | printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) | ||
48 | #define kdebug(FMT, ...) \ | ||
49 | printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) | ||
50 | #else | ||
51 | #define kenter(FMT, ...) \ | ||
52 | no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) | ||
53 | #define kleave(FMT, ...) \ | ||
54 | no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) | ||
55 | #define kdebug(FMT, ...) \ | ||
56 | no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) | ||
57 | #endif | ||
36 | 58 | ||
37 | #include "internal.h" | 59 | #include "internal.h" |
38 | 60 | ||
@@ -40,19 +62,22 @@ void *high_memory; | |||
40 | struct page *mem_map; | 62 | struct page *mem_map; |
41 | unsigned long max_mapnr; | 63 | unsigned long max_mapnr; |
42 | unsigned long num_physpages; | 64 | unsigned long num_physpages; |
43 | unsigned long askedalloc, realalloc; | ||
44 | atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); | 65 | atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0); |
45 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ | 66 | int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ |
46 | int sysctl_overcommit_ratio = 50; /* default is 50% */ | 67 | int sysctl_overcommit_ratio = 50; /* default is 50% */ |
47 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; | 68 | int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; |
69 | int sysctl_nr_trim_pages = 1; /* page trimming behaviour */ | ||
48 | int heap_stack_gap = 0; | 70 | int heap_stack_gap = 0; |
49 | 71 | ||
72 | atomic_t mmap_pages_allocated; | ||
73 | |||
50 | EXPORT_SYMBOL(mem_map); | 74 | EXPORT_SYMBOL(mem_map); |
51 | EXPORT_SYMBOL(num_physpages); | 75 | EXPORT_SYMBOL(num_physpages); |
52 | 76 | ||
53 | /* list of shareable VMAs */ | 77 | /* list of mapped, potentially shareable regions */ |
54 | struct rb_root nommu_vma_tree = RB_ROOT; | 78 | static struct kmem_cache *vm_region_jar; |
55 | DECLARE_RWSEM(nommu_vma_sem); | 79 | struct rb_root nommu_region_tree = RB_ROOT; |
80 | DECLARE_RWSEM(nommu_region_sem); | ||
56 | 81 | ||
57 | struct vm_operations_struct generic_file_vm_ops = { | 82 | struct vm_operations_struct generic_file_vm_ops = { |
58 | }; | 83 | }; |
@@ -124,6 +149,20 @@ unsigned int kobjsize(const void *objp) | |||
124 | return ksize(objp); | 149 | return ksize(objp); |
125 | 150 | ||
126 | /* | 151 | /* |
152 | * If it's not a compound page, see if we have a matching VMA | ||
153 | * region. This test is intentionally done in reverse order, | ||
154 | * so if there's no VMA, we still fall through and hand back | ||
155 | * PAGE_SIZE for 0-order pages. | ||
156 | */ | ||
157 | if (!PageCompound(page)) { | ||
158 | struct vm_area_struct *vma; | ||
159 | |||
160 | vma = find_vma(current->mm, (unsigned long)objp); | ||
161 | if (vma) | ||
162 | return vma->vm_end - vma->vm_start; | ||
163 | } | ||
164 | |||
165 | /* | ||
127 | * The ksize() function is only guaranteed to work for pointers | 166 | * The ksize() function is only guaranteed to work for pointers |
128 | * returned by kmalloc(). So handle arbitrary pointers here. | 167 | * returned by kmalloc(). So handle arbitrary pointers here. |
129 | */ | 168 | */ |
@@ -401,129 +440,178 @@ asmlinkage unsigned long sys_brk(unsigned long brk) | |||
401 | return mm->brk = brk; | 440 | return mm->brk = brk; |
402 | } | 441 | } |
403 | 442 | ||
404 | #ifdef DEBUG | 443 | /* |
405 | static void show_process_blocks(void) | 444 | * initialise the VMA and region record slabs |
445 | */ | ||
446 | void __init mmap_init(void) | ||
406 | { | 447 | { |
407 | struct vm_list_struct *vml; | 448 | vm_region_jar = kmem_cache_create("vm_region_jar", |
408 | 449 | sizeof(struct vm_region), 0, | |
409 | printk("Process blocks %d:", current->pid); | 450 | SLAB_PANIC, NULL); |
410 | 451 | vm_area_cachep = kmem_cache_create("vm_area_struct", | |
411 | for (vml = ¤t->mm->context.vmlist; vml; vml = vml->next) { | 452 | sizeof(struct vm_area_struct), 0, |
412 | printk(" %p: %p", vml, vml->vma); | 453 | SLAB_PANIC, NULL); |
413 | if (vml->vma) | ||
414 | printk(" (%d @%lx #%d)", | ||
415 | kobjsize((void *) vml->vma->vm_start), | ||
416 | vml->vma->vm_start, | ||
417 | atomic_read(&vml->vma->vm_usage)); | ||
418 | printk(vml->next ? " ->" : ".\n"); | ||
419 | } | ||
420 | } | 454 | } |
421 | #endif /* DEBUG */ | ||
422 | 455 | ||
423 | /* | 456 | /* |
424 | * add a VMA into a process's mm_struct in the appropriate place in the list | 457 | * validate the region tree |
425 | * - should be called with mm->mmap_sem held writelocked | 458 | * - the caller must hold the region lock |
426 | */ | 459 | */ |
427 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml) | 460 | #ifdef CONFIG_DEBUG_NOMMU_REGIONS |
461 | static noinline void validate_nommu_regions(void) | ||
428 | { | 462 | { |
429 | struct vm_list_struct **ppv; | 463 | struct vm_region *region, *last; |
430 | 464 | struct rb_node *p, *lastp; | |
431 | for (ppv = ¤t->mm->context.vmlist; *ppv; ppv = &(*ppv)->next) | 465 | |
432 | if ((*ppv)->vma->vm_start > vml->vma->vm_start) | 466 | lastp = rb_first(&nommu_region_tree); |
433 | break; | 467 | if (!lastp) |
434 | 468 | return; | |
435 | vml->next = *ppv; | 469 | |
436 | *ppv = vml; | 470 | last = rb_entry(lastp, struct vm_region, vm_rb); |
471 | if (unlikely(last->vm_end <= last->vm_start)) | ||
472 | BUG(); | ||
473 | if (unlikely(last->vm_top < last->vm_end)) | ||
474 | BUG(); | ||
475 | |||
476 | while ((p = rb_next(lastp))) { | ||
477 | region = rb_entry(p, struct vm_region, vm_rb); | ||
478 | last = rb_entry(lastp, struct vm_region, vm_rb); | ||
479 | |||
480 | if (unlikely(region->vm_end <= region->vm_start)) | ||
481 | BUG(); | ||
482 | if (unlikely(region->vm_top < region->vm_end)) | ||
483 | BUG(); | ||
484 | if (unlikely(region->vm_start < last->vm_top)) | ||
485 | BUG(); | ||
486 | |||
487 | lastp = p; | ||
488 | } | ||
437 | } | 489 | } |
490 | #else | ||
491 | #define validate_nommu_regions() do {} while(0) | ||
492 | #endif | ||
438 | 493 | ||
439 | /* | 494 | /* |
440 | * look up the first VMA in which addr resides, NULL if none | 495 | * add a region into the global tree |
441 | * - should be called with mm->mmap_sem at least held readlocked | ||
442 | */ | 496 | */ |
443 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | 497 | static void add_nommu_region(struct vm_region *region) |
444 | { | 498 | { |
445 | struct vm_list_struct *loop, *vml; | 499 | struct vm_region *pregion; |
500 | struct rb_node **p, *parent; | ||
446 | 501 | ||
447 | /* search the vm_start ordered list */ | 502 | validate_nommu_regions(); |
448 | vml = NULL; | 503 | |
449 | for (loop = mm->context.vmlist; loop; loop = loop->next) { | 504 | BUG_ON(region->vm_start & ~PAGE_MASK); |
450 | if (loop->vma->vm_start > addr) | 505 | |
451 | break; | 506 | parent = NULL; |
452 | vml = loop; | 507 | p = &nommu_region_tree.rb_node; |
508 | while (*p) { | ||
509 | parent = *p; | ||
510 | pregion = rb_entry(parent, struct vm_region, vm_rb); | ||
511 | if (region->vm_start < pregion->vm_start) | ||
512 | p = &(*p)->rb_left; | ||
513 | else if (region->vm_start > pregion->vm_start) | ||
514 | p = &(*p)->rb_right; | ||
515 | else if (pregion == region) | ||
516 | return; | ||
517 | else | ||
518 | BUG(); | ||
453 | } | 519 | } |
454 | 520 | ||
455 | if (vml && vml->vma->vm_end > addr) | 521 | rb_link_node(®ion->vm_rb, parent, p); |
456 | return vml->vma; | 522 | rb_insert_color(®ion->vm_rb, &nommu_region_tree); |
457 | 523 | ||
458 | return NULL; | 524 | validate_nommu_regions(); |
459 | } | 525 | } |
460 | EXPORT_SYMBOL(find_vma); | ||
461 | 526 | ||
462 | /* | 527 | /* |
463 | * find a VMA | 528 | * delete a region from the global tree |
464 | * - we don't extend stack VMAs under NOMMU conditions | ||
465 | */ | 529 | */ |
466 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) | 530 | static void delete_nommu_region(struct vm_region *region) |
467 | { | 531 | { |
468 | return find_vma(mm, addr); | 532 | BUG_ON(!nommu_region_tree.rb_node); |
469 | } | ||
470 | 533 | ||
471 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | 534 | validate_nommu_regions(); |
472 | { | 535 | rb_erase(®ion->vm_rb, &nommu_region_tree); |
473 | return -ENOMEM; | 536 | validate_nommu_regions(); |
474 | } | 537 | } |
475 | 538 | ||
476 | /* | 539 | /* |
477 | * look up the first VMA exactly that exactly matches addr | 540 | * free a contiguous series of pages |
478 | * - should be called with mm->mmap_sem at least held readlocked | ||
479 | */ | 541 | */ |
480 | static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm, | 542 | static void free_page_series(unsigned long from, unsigned long to) |
481 | unsigned long addr) | ||
482 | { | 543 | { |
483 | struct vm_list_struct *vml; | 544 | for (; from < to; from += PAGE_SIZE) { |
484 | 545 | struct page *page = virt_to_page(from); | |
485 | /* search the vm_start ordered list */ | 546 | |
486 | for (vml = mm->context.vmlist; vml; vml = vml->next) { | 547 | kdebug("- free %lx", from); |
487 | if (vml->vma->vm_start == addr) | 548 | atomic_dec(&mmap_pages_allocated); |
488 | return vml->vma; | 549 | if (page_count(page) != 1) |
489 | if (vml->vma->vm_start > addr) | 550 | kdebug("free page %p [%d]", page, page_count(page)); |
490 | break; | 551 | put_page(page); |
491 | } | 552 | } |
492 | |||
493 | return NULL; | ||
494 | } | 553 | } |
495 | 554 | ||
496 | /* | 555 | /* |
497 | * find a VMA in the global tree | 556 | * release a reference to a region |
557 | * - the caller must hold the region semaphore, which this releases | ||
558 | * - the region may not have been added to the tree yet, in which case vm_top | ||
559 | * will equal vm_start | ||
498 | */ | 560 | */ |
499 | static inline struct vm_area_struct *find_nommu_vma(unsigned long start) | 561 | static void __put_nommu_region(struct vm_region *region) |
562 | __releases(nommu_region_sem) | ||
500 | { | 563 | { |
501 | struct vm_area_struct *vma; | 564 | kenter("%p{%d}", region, atomic_read(®ion->vm_usage)); |
502 | struct rb_node *n = nommu_vma_tree.rb_node; | ||
503 | 565 | ||
504 | while (n) { | 566 | BUG_ON(!nommu_region_tree.rb_node); |
505 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
506 | 567 | ||
507 | if (start < vma->vm_start) | 568 | if (atomic_dec_and_test(®ion->vm_usage)) { |
508 | n = n->rb_left; | 569 | if (region->vm_top > region->vm_start) |
509 | else if (start > vma->vm_start) | 570 | delete_nommu_region(region); |
510 | n = n->rb_right; | 571 | up_write(&nommu_region_sem); |
511 | else | 572 | |
512 | return vma; | 573 | if (region->vm_file) |
574 | fput(region->vm_file); | ||
575 | |||
576 | /* IO memory and memory shared directly out of the pagecache | ||
577 | * from ramfs/tmpfs mustn't be released here */ | ||
578 | if (region->vm_flags & VM_MAPPED_COPY) { | ||
579 | kdebug("free series"); | ||
580 | free_page_series(region->vm_start, region->vm_top); | ||
581 | } | ||
582 | kmem_cache_free(vm_region_jar, region); | ||
583 | } else { | ||
584 | up_write(&nommu_region_sem); | ||
513 | } | 585 | } |
586 | } | ||
514 | 587 | ||
515 | return NULL; | 588 | /* |
589 | * release a reference to a region | ||
590 | */ | ||
591 | static void put_nommu_region(struct vm_region *region) | ||
592 | { | ||
593 | down_write(&nommu_region_sem); | ||
594 | __put_nommu_region(region); | ||
516 | } | 595 | } |
517 | 596 | ||
518 | /* | 597 | /* |
519 | * add a VMA in the global tree | 598 | * add a VMA into a process's mm_struct in the appropriate place in the list |
599 | * and tree and add to the address space's page tree also if not an anonymous | ||
600 | * page | ||
601 | * - should be called with mm->mmap_sem held writelocked | ||
520 | */ | 602 | */ |
521 | static void add_nommu_vma(struct vm_area_struct *vma) | 603 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) |
522 | { | 604 | { |
523 | struct vm_area_struct *pvma; | 605 | struct vm_area_struct *pvma, **pp; |
524 | struct address_space *mapping; | 606 | struct address_space *mapping; |
525 | struct rb_node **p = &nommu_vma_tree.rb_node; | 607 | struct rb_node **p, *parent; |
526 | struct rb_node *parent = NULL; | 608 | |
609 | kenter(",%p", vma); | ||
610 | |||
611 | BUG_ON(!vma->vm_region); | ||
612 | |||
613 | mm->map_count++; | ||
614 | vma->vm_mm = mm; | ||
527 | 615 | ||
528 | /* add the VMA to the mapping */ | 616 | /* add the VMA to the mapping */ |
529 | if (vma->vm_file) { | 617 | if (vma->vm_file) { |
@@ -534,42 +622,62 @@ static void add_nommu_vma(struct vm_area_struct *vma) | |||
534 | flush_dcache_mmap_unlock(mapping); | 622 | flush_dcache_mmap_unlock(mapping); |
535 | } | 623 | } |
536 | 624 | ||
537 | /* add the VMA to the master list */ | 625 | /* add the VMA to the tree */ |
626 | parent = NULL; | ||
627 | p = &mm->mm_rb.rb_node; | ||
538 | while (*p) { | 628 | while (*p) { |
539 | parent = *p; | 629 | parent = *p; |
540 | pvma = rb_entry(parent, struct vm_area_struct, vm_rb); | 630 | pvma = rb_entry(parent, struct vm_area_struct, vm_rb); |
541 | 631 | ||
542 | if (vma->vm_start < pvma->vm_start) { | 632 | /* sort by: start addr, end addr, VMA struct addr in that order |
633 | * (the latter is necessary as we may get identical VMAs) */ | ||
634 | if (vma->vm_start < pvma->vm_start) | ||
543 | p = &(*p)->rb_left; | 635 | p = &(*p)->rb_left; |
544 | } | 636 | else if (vma->vm_start > pvma->vm_start) |
545 | else if (vma->vm_start > pvma->vm_start) { | ||
546 | p = &(*p)->rb_right; | 637 | p = &(*p)->rb_right; |
547 | } | 638 | else if (vma->vm_end < pvma->vm_end) |
548 | else { | 639 | p = &(*p)->rb_left; |
549 | /* mappings are at the same address - this can only | 640 | else if (vma->vm_end > pvma->vm_end) |
550 | * happen for shared-mem chardevs and shared file | 641 | p = &(*p)->rb_right; |
551 | * mappings backed by ramfs/tmpfs */ | 642 | else if (vma < pvma) |
552 | BUG_ON(!(pvma->vm_flags & VM_SHARED)); | 643 | p = &(*p)->rb_left; |
553 | 644 | else if (vma > pvma) | |
554 | if (vma < pvma) | 645 | p = &(*p)->rb_right; |
555 | p = &(*p)->rb_left; | 646 | else |
556 | else if (vma > pvma) | 647 | BUG(); |
557 | p = &(*p)->rb_right; | ||
558 | else | ||
559 | BUG(); | ||
560 | } | ||
561 | } | 648 | } |
562 | 649 | ||
563 | rb_link_node(&vma->vm_rb, parent, p); | 650 | rb_link_node(&vma->vm_rb, parent, p); |
564 | rb_insert_color(&vma->vm_rb, &nommu_vma_tree); | 651 | rb_insert_color(&vma->vm_rb, &mm->mm_rb); |
652 | |||
653 | /* add VMA to the VMA list also */ | ||
654 | for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) { | ||
655 | if (pvma->vm_start > vma->vm_start) | ||
656 | break; | ||
657 | if (pvma->vm_start < vma->vm_start) | ||
658 | continue; | ||
659 | if (pvma->vm_end < vma->vm_end) | ||
660 | break; | ||
661 | } | ||
662 | |||
663 | vma->vm_next = *pp; | ||
664 | *pp = vma; | ||
565 | } | 665 | } |
566 | 666 | ||
567 | /* | 667 | /* |
568 | * delete a VMA from the global list | 668 | * delete a VMA from its owning mm_struct and address space |
569 | */ | 669 | */ |
570 | static void delete_nommu_vma(struct vm_area_struct *vma) | 670 | static void delete_vma_from_mm(struct vm_area_struct *vma) |
571 | { | 671 | { |
672 | struct vm_area_struct **pp; | ||
572 | struct address_space *mapping; | 673 | struct address_space *mapping; |
674 | struct mm_struct *mm = vma->vm_mm; | ||
675 | |||
676 | kenter("%p", vma); | ||
677 | |||
678 | mm->map_count--; | ||
679 | if (mm->mmap_cache == vma) | ||
680 | mm->mmap_cache = NULL; | ||
573 | 681 | ||
574 | /* remove the VMA from the mapping */ | 682 | /* remove the VMA from the mapping */ |
575 | if (vma->vm_file) { | 683 | if (vma->vm_file) { |
@@ -580,8 +688,115 @@ static void delete_nommu_vma(struct vm_area_struct *vma) | |||
580 | flush_dcache_mmap_unlock(mapping); | 688 | flush_dcache_mmap_unlock(mapping); |
581 | } | 689 | } |
582 | 690 | ||
583 | /* remove from the master list */ | 691 | /* remove from the MM's tree and list */ |
584 | rb_erase(&vma->vm_rb, &nommu_vma_tree); | 692 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
693 | for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) { | ||
694 | if (*pp == vma) { | ||
695 | *pp = vma->vm_next; | ||
696 | break; | ||
697 | } | ||
698 | } | ||
699 | |||
700 | vma->vm_mm = NULL; | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | * destroy a VMA record | ||
705 | */ | ||
706 | static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) | ||
707 | { | ||
708 | kenter("%p", vma); | ||
709 | if (vma->vm_ops && vma->vm_ops->close) | ||
710 | vma->vm_ops->close(vma); | ||
711 | if (vma->vm_file) { | ||
712 | fput(vma->vm_file); | ||
713 | if (vma->vm_flags & VM_EXECUTABLE) | ||
714 | removed_exe_file_vma(mm); | ||
715 | } | ||
716 | put_nommu_region(vma->vm_region); | ||
717 | kmem_cache_free(vm_area_cachep, vma); | ||
718 | } | ||
719 | |||
720 | /* | ||
721 | * look up the first VMA in which addr resides, NULL if none | ||
722 | * - should be called with mm->mmap_sem at least held readlocked | ||
723 | */ | ||
724 | struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | ||
725 | { | ||
726 | struct vm_area_struct *vma; | ||
727 | struct rb_node *n = mm->mm_rb.rb_node; | ||
728 | |||
729 | /* check the cache first */ | ||
730 | vma = mm->mmap_cache; | ||
731 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) | ||
732 | return vma; | ||
733 | |||
734 | /* trawl the tree (there may be multiple mappings in which addr | ||
735 | * resides) */ | ||
736 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
737 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
738 | if (vma->vm_start > addr) | ||
739 | return NULL; | ||
740 | if (vma->vm_end > addr) { | ||
741 | mm->mmap_cache = vma; | ||
742 | return vma; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | return NULL; | ||
747 | } | ||
748 | EXPORT_SYMBOL(find_vma); | ||
749 | |||
750 | /* | ||
751 | * find a VMA | ||
752 | * - we don't extend stack VMAs under NOMMU conditions | ||
753 | */ | ||
754 | struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) | ||
755 | { | ||
756 | return find_vma(mm, addr); | ||
757 | } | ||
758 | |||
759 | /* | ||
760 | * expand a stack to a given address | ||
761 | * - not supported under NOMMU conditions | ||
762 | */ | ||
763 | int expand_stack(struct vm_area_struct *vma, unsigned long address) | ||
764 | { | ||
765 | return -ENOMEM; | ||
766 | } | ||
767 | |||
768 | /* | ||
769 | * look up the first VMA exactly that exactly matches addr | ||
770 | * - should be called with mm->mmap_sem at least held readlocked | ||
771 | */ | ||
772 | static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, | ||
773 | unsigned long addr, | ||
774 | unsigned long len) | ||
775 | { | ||
776 | struct vm_area_struct *vma; | ||
777 | struct rb_node *n = mm->mm_rb.rb_node; | ||
778 | unsigned long end = addr + len; | ||
779 | |||
780 | /* check the cache first */ | ||
781 | vma = mm->mmap_cache; | ||
782 | if (vma && vma->vm_start == addr && vma->vm_end == end) | ||
783 | return vma; | ||
784 | |||
785 | /* trawl the tree (there may be multiple mappings in which addr | ||
786 | * resides) */ | ||
787 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
788 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
789 | if (vma->vm_start < addr) | ||
790 | continue; | ||
791 | if (vma->vm_start > addr) | ||
792 | return NULL; | ||
793 | if (vma->vm_end == end) { | ||
794 | mm->mmap_cache = vma; | ||
795 | return vma; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | return NULL; | ||
585 | } | 800 | } |
586 | 801 | ||
587 | /* | 802 | /* |
@@ -596,7 +811,7 @@ static int validate_mmap_request(struct file *file, | |||
596 | unsigned long pgoff, | 811 | unsigned long pgoff, |
597 | unsigned long *_capabilities) | 812 | unsigned long *_capabilities) |
598 | { | 813 | { |
599 | unsigned long capabilities; | 814 | unsigned long capabilities, rlen; |
600 | unsigned long reqprot = prot; | 815 | unsigned long reqprot = prot; |
601 | int ret; | 816 | int ret; |
602 | 817 | ||
@@ -616,12 +831,12 @@ static int validate_mmap_request(struct file *file, | |||
616 | return -EINVAL; | 831 | return -EINVAL; |
617 | 832 | ||
618 | /* Careful about overflows.. */ | 833 | /* Careful about overflows.. */ |
619 | len = PAGE_ALIGN(len); | 834 | rlen = PAGE_ALIGN(len); |
620 | if (!len || len > TASK_SIZE) | 835 | if (!rlen || rlen > TASK_SIZE) |
621 | return -ENOMEM; | 836 | return -ENOMEM; |
622 | 837 | ||
623 | /* offset overflow? */ | 838 | /* offset overflow? */ |
624 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) | 839 | if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff) |
625 | return -EOVERFLOW; | 840 | return -EOVERFLOW; |
626 | 841 | ||
627 | if (file) { | 842 | if (file) { |
@@ -795,13 +1010,18 @@ static unsigned long determine_vm_flags(struct file *file, | |||
795 | } | 1010 | } |
796 | 1011 | ||
797 | /* | 1012 | /* |
798 | * set up a shared mapping on a file | 1013 | * set up a shared mapping on a file (the driver or filesystem provides and |
1014 | * pins the storage) | ||
799 | */ | 1015 | */ |
800 | static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len) | 1016 | static int do_mmap_shared_file(struct vm_area_struct *vma) |
801 | { | 1017 | { |
802 | int ret; | 1018 | int ret; |
803 | 1019 | ||
804 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 1020 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); |
1021 | if (ret == 0) { | ||
1022 | vma->vm_region->vm_top = vma->vm_region->vm_end; | ||
1023 | return ret; | ||
1024 | } | ||
805 | if (ret != -ENOSYS) | 1025 | if (ret != -ENOSYS) |
806 | return ret; | 1026 | return ret; |
807 | 1027 | ||
@@ -815,10 +1035,14 @@ static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len) | |||
815 | /* | 1035 | /* |
816 | * set up a private mapping or an anonymous shared mapping | 1036 | * set up a private mapping or an anonymous shared mapping |
817 | */ | 1037 | */ |
818 | static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) | 1038 | static int do_mmap_private(struct vm_area_struct *vma, |
1039 | struct vm_region *region, | ||
1040 | unsigned long len) | ||
819 | { | 1041 | { |
1042 | struct page *pages; | ||
1043 | unsigned long total, point, n, rlen; | ||
820 | void *base; | 1044 | void *base; |
821 | int ret; | 1045 | int ret, order; |
822 | 1046 | ||
823 | /* invoke the file's mapping function so that it can keep track of | 1047 | /* invoke the file's mapping function so that it can keep track of |
824 | * shared mappings on devices or memory | 1048 | * shared mappings on devices or memory |
@@ -826,34 +1050,63 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) | |||
826 | */ | 1050 | */ |
827 | if (vma->vm_file) { | 1051 | if (vma->vm_file) { |
828 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); | 1052 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); |
829 | if (ret != -ENOSYS) { | 1053 | if (ret == 0) { |
830 | /* shouldn't return success if we're not sharing */ | 1054 | /* shouldn't return success if we're not sharing */ |
831 | BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE)); | 1055 | BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); |
832 | return ret; /* success or a real error */ | 1056 | vma->vm_region->vm_top = vma->vm_region->vm_end; |
1057 | return ret; | ||
833 | } | 1058 | } |
1059 | if (ret != -ENOSYS) | ||
1060 | return ret; | ||
834 | 1061 | ||
835 | /* getting an ENOSYS error indicates that direct mmap isn't | 1062 | /* getting an ENOSYS error indicates that direct mmap isn't |
836 | * possible (as opposed to tried but failed) so we'll try to | 1063 | * possible (as opposed to tried but failed) so we'll try to |
837 | * make a private copy of the data and map that instead */ | 1064 | * make a private copy of the data and map that instead */ |
838 | } | 1065 | } |
839 | 1066 | ||
1067 | rlen = PAGE_ALIGN(len); | ||
1068 | |||
840 | /* allocate some memory to hold the mapping | 1069 | /* allocate some memory to hold the mapping |
841 | * - note that this may not return a page-aligned address if the object | 1070 | * - note that this may not return a page-aligned address if the object |
842 | * we're allocating is smaller than a page | 1071 | * we're allocating is smaller than a page |
843 | */ | 1072 | */ |
844 | base = kmalloc(len, GFP_KERNEL|__GFP_COMP); | 1073 | order = get_order(rlen); |
845 | if (!base) | 1074 | kdebug("alloc order %d for %lx", order, len); |
1075 | |||
1076 | pages = alloc_pages(GFP_KERNEL, order); | ||
1077 | if (!pages) | ||
846 | goto enomem; | 1078 | goto enomem; |
847 | 1079 | ||
848 | vma->vm_start = (unsigned long) base; | 1080 | total = 1 << order; |
849 | vma->vm_end = vma->vm_start + len; | 1081 | atomic_add(total, &mmap_pages_allocated); |
850 | vma->vm_flags |= VM_MAPPED_COPY; | 1082 | |
1083 | point = rlen >> PAGE_SHIFT; | ||
1084 | |||
1085 | /* we allocated a power-of-2 sized page set, so we may want to trim off | ||
1086 | * the excess */ | ||
1087 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { | ||
1088 | while (total > point) { | ||
1089 | order = ilog2(total - point); | ||
1090 | n = 1 << order; | ||
1091 | kdebug("shave %lu/%lu @%lu", n, total - point, total); | ||
1092 | atomic_sub(n, &mmap_pages_allocated); | ||
1093 | total -= n; | ||
1094 | set_page_refcounted(pages + total); | ||
1095 | __free_pages(pages + total, order); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | for (point = 1; point < total; point++) | ||
1100 | set_page_refcounted(&pages[point]); | ||
851 | 1101 | ||
852 | #ifdef WARN_ON_SLACK | 1102 | base = page_address(pages); |
853 | if (len + WARN_ON_SLACK <= kobjsize(result)) | 1103 | region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; |
854 | printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n", | 1104 | region->vm_start = (unsigned long) base; |
855 | len, current->pid, kobjsize(result) - len); | 1105 | region->vm_end = region->vm_start + rlen; |
856 | #endif | 1106 | region->vm_top = region->vm_start + (total << PAGE_SHIFT); |
1107 | |||
1108 | vma->vm_start = region->vm_start; | ||
1109 | vma->vm_end = region->vm_start + len; | ||
857 | 1110 | ||
858 | if (vma->vm_file) { | 1111 | if (vma->vm_file) { |
859 | /* read the contents of a file into the copy */ | 1112 | /* read the contents of a file into the copy */ |
@@ -865,26 +1118,28 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len) | |||
865 | 1118 | ||
866 | old_fs = get_fs(); | 1119 | old_fs = get_fs(); |
867 | set_fs(KERNEL_DS); | 1120 | set_fs(KERNEL_DS); |
868 | ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos); | 1121 | ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); |
869 | set_fs(old_fs); | 1122 | set_fs(old_fs); |
870 | 1123 | ||
871 | if (ret < 0) | 1124 | if (ret < 0) |
872 | goto error_free; | 1125 | goto error_free; |
873 | 1126 | ||
874 | /* clear the last little bit */ | 1127 | /* clear the last little bit */ |
875 | if (ret < len) | 1128 | if (ret < rlen) |
876 | memset(base + ret, 0, len - ret); | 1129 | memset(base + ret, 0, rlen - ret); |
877 | 1130 | ||
878 | } else { | 1131 | } else { |
879 | /* if it's an anonymous mapping, then just clear it */ | 1132 | /* if it's an anonymous mapping, then just clear it */ |
880 | memset(base, 0, len); | 1133 | memset(base, 0, rlen); |
881 | } | 1134 | } |
882 | 1135 | ||
883 | return 0; | 1136 | return 0; |
884 | 1137 | ||
885 | error_free: | 1138 | error_free: |
886 | kfree(base); | 1139 | free_page_series(region->vm_start, region->vm_end); |
887 | vma->vm_start = 0; | 1140 | region->vm_start = vma->vm_start = 0; |
1141 | region->vm_end = vma->vm_end = 0; | ||
1142 | region->vm_top = 0; | ||
888 | return ret; | 1143 | return ret; |
889 | 1144 | ||
890 | enomem: | 1145 | enomem: |
@@ -904,13 +1159,14 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
904 | unsigned long flags, | 1159 | unsigned long flags, |
905 | unsigned long pgoff) | 1160 | unsigned long pgoff) |
906 | { | 1161 | { |
907 | struct vm_list_struct *vml = NULL; | 1162 | struct vm_area_struct *vma; |
908 | struct vm_area_struct *vma = NULL; | 1163 | struct vm_region *region; |
909 | struct rb_node *rb; | 1164 | struct rb_node *rb; |
910 | unsigned long capabilities, vm_flags; | 1165 | unsigned long capabilities, vm_flags, result; |
911 | void *result; | ||
912 | int ret; | 1166 | int ret; |
913 | 1167 | ||
1168 | kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); | ||
1169 | |||
914 | if (!(flags & MAP_FIXED)) | 1170 | if (!(flags & MAP_FIXED)) |
915 | addr = round_hint_to_min(addr); | 1171 | addr = round_hint_to_min(addr); |
916 | 1172 | ||
@@ -918,73 +1174,120 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
918 | * mapping */ | 1174 | * mapping */ |
919 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, | 1175 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, |
920 | &capabilities); | 1176 | &capabilities); |
921 | if (ret < 0) | 1177 | if (ret < 0) { |
1178 | kleave(" = %d [val]", ret); | ||
922 | return ret; | 1179 | return ret; |
1180 | } | ||
923 | 1181 | ||
924 | /* we've determined that we can make the mapping, now translate what we | 1182 | /* we've determined that we can make the mapping, now translate what we |
925 | * now know into VMA flags */ | 1183 | * now know into VMA flags */ |
926 | vm_flags = determine_vm_flags(file, prot, flags, capabilities); | 1184 | vm_flags = determine_vm_flags(file, prot, flags, capabilities); |
927 | 1185 | ||
928 | /* we're going to need to record the mapping if it works */ | 1186 | /* we're going to need to record the mapping */ |
929 | vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL); | 1187 | region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); |
930 | if (!vml) | 1188 | if (!region) |
931 | goto error_getting_vml; | 1189 | goto error_getting_region; |
1190 | |||
1191 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); | ||
1192 | if (!vma) | ||
1193 | goto error_getting_vma; | ||
1194 | |||
1195 | atomic_set(®ion->vm_usage, 1); | ||
1196 | region->vm_flags = vm_flags; | ||
1197 | region->vm_pgoff = pgoff; | ||
1198 | |||
1199 | INIT_LIST_HEAD(&vma->anon_vma_node); | ||
1200 | vma->vm_flags = vm_flags; | ||
1201 | vma->vm_pgoff = pgoff; | ||
932 | 1202 | ||
933 | down_write(&nommu_vma_sem); | 1203 | if (file) { |
1204 | region->vm_file = file; | ||
1205 | get_file(file); | ||
1206 | vma->vm_file = file; | ||
1207 | get_file(file); | ||
1208 | if (vm_flags & VM_EXECUTABLE) { | ||
1209 | added_exe_file_vma(current->mm); | ||
1210 | vma->vm_mm = current->mm; | ||
1211 | } | ||
1212 | } | ||
934 | 1213 | ||
935 | /* if we want to share, we need to check for VMAs created by other | 1214 | down_write(&nommu_region_sem); |
1215 | |||
1216 | /* if we want to share, we need to check for regions created by other | ||
936 | * mmap() calls that overlap with our proposed mapping | 1217 | * mmap() calls that overlap with our proposed mapping |
937 | * - we can only share with an exact match on most regular files | 1218 | * - we can only share with a superset match on most regular files |
938 | * - shared mappings on character devices and memory backed files are | 1219 | * - shared mappings on character devices and memory backed files are |
939 | * permitted to overlap inexactly as far as we are concerned for in | 1220 | * permitted to overlap inexactly as far as we are concerned for in |
940 | * these cases, sharing is handled in the driver or filesystem rather | 1221 | * these cases, sharing is handled in the driver or filesystem rather |
941 | * than here | 1222 | * than here |
942 | */ | 1223 | */ |
943 | if (vm_flags & VM_MAYSHARE) { | 1224 | if (vm_flags & VM_MAYSHARE) { |
944 | unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1225 | struct vm_region *pregion; |
945 | unsigned long vmpglen; | 1226 | unsigned long pglen, rpglen, pgend, rpgend, start; |
946 | 1227 | ||
947 | /* suppress VMA sharing for shared regions */ | 1228 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
948 | if (vm_flags & VM_SHARED && | 1229 | pgend = pgoff + pglen; |
949 | capabilities & BDI_CAP_MAP_DIRECT) | ||
950 | goto dont_share_VMAs; | ||
951 | 1230 | ||
952 | for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) { | 1231 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { |
953 | vma = rb_entry(rb, struct vm_area_struct, vm_rb); | 1232 | pregion = rb_entry(rb, struct vm_region, vm_rb); |
954 | 1233 | ||
955 | if (!(vma->vm_flags & VM_MAYSHARE)) | 1234 | if (!(pregion->vm_flags & VM_MAYSHARE)) |
956 | continue; | 1235 | continue; |
957 | 1236 | ||
958 | /* search for overlapping mappings on the same file */ | 1237 | /* search for overlapping mappings on the same file */ |
959 | if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode) | 1238 | if (pregion->vm_file->f_path.dentry->d_inode != |
1239 | file->f_path.dentry->d_inode) | ||
960 | continue; | 1240 | continue; |
961 | 1241 | ||
962 | if (vma->vm_pgoff >= pgoff + pglen) | 1242 | if (pregion->vm_pgoff >= pgend) |
963 | continue; | 1243 | continue; |
964 | 1244 | ||
965 | vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1; | 1245 | rpglen = pregion->vm_end - pregion->vm_start; |
966 | vmpglen >>= PAGE_SHIFT; | 1246 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
967 | if (pgoff >= vma->vm_pgoff + vmpglen) | 1247 | rpgend = pregion->vm_pgoff + rpglen; |
1248 | if (pgoff >= rpgend) | ||
968 | continue; | 1249 | continue; |
969 | 1250 | ||
970 | /* handle inexactly overlapping matches between mappings */ | 1251 | /* handle inexactly overlapping matches between |
971 | if (vma->vm_pgoff != pgoff || vmpglen != pglen) { | 1252 | * mappings */ |
1253 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && | ||
1254 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { | ||
1255 | /* new mapping is not a subset of the region */ | ||
972 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) | 1256 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) |
973 | goto sharing_violation; | 1257 | goto sharing_violation; |
974 | continue; | 1258 | continue; |
975 | } | 1259 | } |
976 | 1260 | ||
977 | /* we've found a VMA we can share */ | 1261 | /* we've found a region we can share */ |
978 | atomic_inc(&vma->vm_usage); | 1262 | atomic_inc(&pregion->vm_usage); |
979 | 1263 | vma->vm_region = pregion; | |
980 | vml->vma = vma; | 1264 | start = pregion->vm_start; |
981 | result = (void *) vma->vm_start; | 1265 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; |
982 | goto shared; | 1266 | vma->vm_start = start; |
1267 | vma->vm_end = start + len; | ||
1268 | |||
1269 | if (pregion->vm_flags & VM_MAPPED_COPY) { | ||
1270 | kdebug("share copy"); | ||
1271 | vma->vm_flags |= VM_MAPPED_COPY; | ||
1272 | } else { | ||
1273 | kdebug("share mmap"); | ||
1274 | ret = do_mmap_shared_file(vma); | ||
1275 | if (ret < 0) { | ||
1276 | vma->vm_region = NULL; | ||
1277 | vma->vm_start = 0; | ||
1278 | vma->vm_end = 0; | ||
1279 | atomic_dec(&pregion->vm_usage); | ||
1280 | pregion = NULL; | ||
1281 | goto error_just_free; | ||
1282 | } | ||
1283 | } | ||
1284 | fput(region->vm_file); | ||
1285 | kmem_cache_free(vm_region_jar, region); | ||
1286 | region = pregion; | ||
1287 | result = start; | ||
1288 | goto share; | ||
983 | } | 1289 | } |
984 | 1290 | ||
985 | dont_share_VMAs: | ||
986 | vma = NULL; | ||
987 | |||
988 | /* obtain the address at which to make a shared mapping | 1291 | /* obtain the address at which to make a shared mapping |
989 | * - this is the hook for quasi-memory character devices to | 1292 | * - this is the hook for quasi-memory character devices to |
990 | * tell us the location of a shared mapping | 1293 | * tell us the location of a shared mapping |
@@ -995,113 +1298,93 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
995 | if (IS_ERR((void *) addr)) { | 1298 | if (IS_ERR((void *) addr)) { |
996 | ret = addr; | 1299 | ret = addr; |
997 | if (ret != (unsigned long) -ENOSYS) | 1300 | if (ret != (unsigned long) -ENOSYS) |
998 | goto error; | 1301 | goto error_just_free; |
999 | 1302 | ||
1000 | /* the driver refused to tell us where to site | 1303 | /* the driver refused to tell us where to site |
1001 | * the mapping so we'll have to attempt to copy | 1304 | * the mapping so we'll have to attempt to copy |
1002 | * it */ | 1305 | * it */ |
1003 | ret = (unsigned long) -ENODEV; | 1306 | ret = (unsigned long) -ENODEV; |
1004 | if (!(capabilities & BDI_CAP_MAP_COPY)) | 1307 | if (!(capabilities & BDI_CAP_MAP_COPY)) |
1005 | goto error; | 1308 | goto error_just_free; |
1006 | 1309 | ||
1007 | capabilities &= ~BDI_CAP_MAP_DIRECT; | 1310 | capabilities &= ~BDI_CAP_MAP_DIRECT; |
1311 | } else { | ||
1312 | vma->vm_start = region->vm_start = addr; | ||
1313 | vma->vm_end = region->vm_end = addr + len; | ||
1008 | } | 1314 | } |
1009 | } | 1315 | } |
1010 | } | 1316 | } |
1011 | 1317 | ||
1012 | /* we're going to need a VMA struct as well */ | 1318 | vma->vm_region = region; |
1013 | vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL); | ||
1014 | if (!vma) | ||
1015 | goto error_getting_vma; | ||
1016 | |||
1017 | INIT_LIST_HEAD(&vma->anon_vma_node); | ||
1018 | atomic_set(&vma->vm_usage, 1); | ||
1019 | if (file) { | ||
1020 | get_file(file); | ||
1021 | if (vm_flags & VM_EXECUTABLE) { | ||
1022 | added_exe_file_vma(current->mm); | ||
1023 | vma->vm_mm = current->mm; | ||
1024 | } | ||
1025 | } | ||
1026 | vma->vm_file = file; | ||
1027 | vma->vm_flags = vm_flags; | ||
1028 | vma->vm_start = addr; | ||
1029 | vma->vm_end = addr + len; | ||
1030 | vma->vm_pgoff = pgoff; | ||
1031 | |||
1032 | vml->vma = vma; | ||
1033 | 1319 | ||
1034 | /* set up the mapping */ | 1320 | /* set up the mapping */ |
1035 | if (file && vma->vm_flags & VM_SHARED) | 1321 | if (file && vma->vm_flags & VM_SHARED) |
1036 | ret = do_mmap_shared_file(vma, len); | 1322 | ret = do_mmap_shared_file(vma); |
1037 | else | 1323 | else |
1038 | ret = do_mmap_private(vma, len); | 1324 | ret = do_mmap_private(vma, region, len); |
1039 | if (ret < 0) | 1325 | if (ret < 0) |
1040 | goto error; | 1326 | goto error_put_region; |
1041 | |||
1042 | /* okay... we have a mapping; now we have to register it */ | ||
1043 | result = (void *) vma->vm_start; | ||
1044 | 1327 | ||
1045 | if (vma->vm_flags & VM_MAPPED_COPY) { | 1328 | add_nommu_region(region); |
1046 | realalloc += kobjsize(result); | ||
1047 | askedalloc += len; | ||
1048 | } | ||
1049 | 1329 | ||
1050 | realalloc += kobjsize(vma); | 1330 | /* okay... we have a mapping; now we have to register it */ |
1051 | askedalloc += sizeof(*vma); | 1331 | result = vma->vm_start; |
1052 | 1332 | ||
1053 | current->mm->total_vm += len >> PAGE_SHIFT; | 1333 | current->mm->total_vm += len >> PAGE_SHIFT; |
1054 | 1334 | ||
1055 | add_nommu_vma(vma); | 1335 | share: |
1056 | 1336 | add_vma_to_mm(current->mm, vma); | |
1057 | shared: | ||
1058 | realalloc += kobjsize(vml); | ||
1059 | askedalloc += sizeof(*vml); | ||
1060 | |||
1061 | add_vma_to_mm(current->mm, vml); | ||
1062 | 1337 | ||
1063 | up_write(&nommu_vma_sem); | 1338 | up_write(&nommu_region_sem); |
1064 | 1339 | ||
1065 | if (prot & PROT_EXEC) | 1340 | if (prot & PROT_EXEC) |
1066 | flush_icache_range((unsigned long) result, | 1341 | flush_icache_range(result, result + len); |
1067 | (unsigned long) result + len); | ||
1068 | 1342 | ||
1069 | #ifdef DEBUG | 1343 | kleave(" = %lx", result); |
1070 | printk("do_mmap:\n"); | 1344 | return result; |
1071 | show_process_blocks(); | ||
1072 | #endif | ||
1073 | |||
1074 | return (unsigned long) result; | ||
1075 | 1345 | ||
1076 | error: | 1346 | error_put_region: |
1077 | up_write(&nommu_vma_sem); | 1347 | __put_nommu_region(region); |
1078 | kfree(vml); | ||
1079 | if (vma) { | 1348 | if (vma) { |
1080 | if (vma->vm_file) { | 1349 | if (vma->vm_file) { |
1081 | fput(vma->vm_file); | 1350 | fput(vma->vm_file); |
1082 | if (vma->vm_flags & VM_EXECUTABLE) | 1351 | if (vma->vm_flags & VM_EXECUTABLE) |
1083 | removed_exe_file_vma(vma->vm_mm); | 1352 | removed_exe_file_vma(vma->vm_mm); |
1084 | } | 1353 | } |
1085 | kfree(vma); | 1354 | kmem_cache_free(vm_area_cachep, vma); |
1086 | } | 1355 | } |
1356 | kleave(" = %d [pr]", ret); | ||
1087 | return ret; | 1357 | return ret; |
1088 | 1358 | ||
1089 | sharing_violation: | 1359 | error_just_free: |
1090 | up_write(&nommu_vma_sem); | 1360 | up_write(&nommu_region_sem); |
1091 | printk("Attempt to share mismatched mappings\n"); | 1361 | error: |
1092 | kfree(vml); | 1362 | fput(region->vm_file); |
1093 | return -EINVAL; | 1363 | kmem_cache_free(vm_region_jar, region); |
1364 | fput(vma->vm_file); | ||
1365 | if (vma->vm_flags & VM_EXECUTABLE) | ||
1366 | removed_exe_file_vma(vma->vm_mm); | ||
1367 | kmem_cache_free(vm_area_cachep, vma); | ||
1368 | kleave(" = %d", ret); | ||
1369 | return ret; | ||
1094 | 1370 | ||
1095 | error_getting_vma: | 1371 | sharing_violation: |
1096 | up_write(&nommu_vma_sem); | 1372 | up_write(&nommu_region_sem); |
1097 | kfree(vml); | 1373 | printk(KERN_WARNING "Attempt to share mismatched mappings\n"); |
1098 | printk("Allocation of vma for %lu byte allocation from process %d failed\n", | 1374 | ret = -EINVAL; |
1375 | goto error; | ||
1376 | |||
1377 | error_getting_vma: | ||
1378 | kmem_cache_free(vm_region_jar, region); | ||
1379 | printk(KERN_WARNING "Allocation of vma for %lu byte allocation" | ||
1380 | " from process %d failed\n", | ||
1099 | len, current->pid); | 1381 | len, current->pid); |
1100 | show_free_areas(); | 1382 | show_free_areas(); |
1101 | return -ENOMEM; | 1383 | return -ENOMEM; |
1102 | 1384 | ||
1103 | error_getting_vml: | 1385 | error_getting_region: |
1104 | printk("Allocation of vml for %lu byte allocation from process %d failed\n", | 1386 | printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" |
1387 | " from process %d failed\n", | ||
1105 | len, current->pid); | 1388 | len, current->pid); |
1106 | show_free_areas(); | 1389 | show_free_areas(); |
1107 | return -ENOMEM; | 1390 | return -ENOMEM; |
@@ -1109,85 +1392,183 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
1109 | EXPORT_SYMBOL(do_mmap_pgoff); | 1392 | EXPORT_SYMBOL(do_mmap_pgoff); |
1110 | 1393 | ||
1111 | /* | 1394 | /* |
1112 | * handle mapping disposal for uClinux | 1395 | * split a vma into two pieces at address 'addr', a new vma is allocated either |
1396 | * for the first part or the tail. | ||
1113 | */ | 1397 | */ |
1114 | static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma) | 1398 | int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, |
1399 | unsigned long addr, int new_below) | ||
1115 | { | 1400 | { |
1116 | if (vma) { | 1401 | struct vm_area_struct *new; |
1117 | down_write(&nommu_vma_sem); | 1402 | struct vm_region *region; |
1403 | unsigned long npages; | ||
1118 | 1404 | ||
1119 | if (atomic_dec_and_test(&vma->vm_usage)) { | 1405 | kenter(""); |
1120 | delete_nommu_vma(vma); | ||
1121 | 1406 | ||
1122 | if (vma->vm_ops && vma->vm_ops->close) | 1407 | /* we're only permitted to split anonymous regions that have a single |
1123 | vma->vm_ops->close(vma); | 1408 | * owner */ |
1409 | if (vma->vm_file || | ||
1410 | atomic_read(&vma->vm_region->vm_usage) != 1) | ||
1411 | return -ENOMEM; | ||
1124 | 1412 | ||
1125 | /* IO memory and memory shared directly out of the pagecache from | 1413 | if (mm->map_count >= sysctl_max_map_count) |
1126 | * ramfs/tmpfs mustn't be released here */ | 1414 | return -ENOMEM; |
1127 | if (vma->vm_flags & VM_MAPPED_COPY) { | ||
1128 | realalloc -= kobjsize((void *) vma->vm_start); | ||
1129 | askedalloc -= vma->vm_end - vma->vm_start; | ||
1130 | kfree((void *) vma->vm_start); | ||
1131 | } | ||
1132 | 1415 | ||
1133 | realalloc -= kobjsize(vma); | 1416 | region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); |
1134 | askedalloc -= sizeof(*vma); | 1417 | if (!region) |
1418 | return -ENOMEM; | ||
1135 | 1419 | ||
1136 | if (vma->vm_file) { | 1420 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); |
1137 | fput(vma->vm_file); | 1421 | if (!new) { |
1138 | if (vma->vm_flags & VM_EXECUTABLE) | 1422 | kmem_cache_free(vm_region_jar, region); |
1139 | removed_exe_file_vma(mm); | 1423 | return -ENOMEM; |
1140 | } | 1424 | } |
1141 | kfree(vma); | 1425 | |
1142 | } | 1426 | /* most fields are the same, copy all, and then fixup */ |
1427 | *new = *vma; | ||
1428 | *region = *vma->vm_region; | ||
1429 | new->vm_region = region; | ||
1430 | |||
1431 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; | ||
1143 | 1432 | ||
1144 | up_write(&nommu_vma_sem); | 1433 | if (new_below) { |
1434 | region->vm_top = region->vm_end = new->vm_end = addr; | ||
1435 | } else { | ||
1436 | region->vm_start = new->vm_start = addr; | ||
1437 | region->vm_pgoff = new->vm_pgoff += npages; | ||
1438 | } | ||
1439 | |||
1440 | if (new->vm_ops && new->vm_ops->open) | ||
1441 | new->vm_ops->open(new); | ||
1442 | |||
1443 | delete_vma_from_mm(vma); | ||
1444 | down_write(&nommu_region_sem); | ||
1445 | delete_nommu_region(vma->vm_region); | ||
1446 | if (new_below) { | ||
1447 | vma->vm_region->vm_start = vma->vm_start = addr; | ||
1448 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; | ||
1449 | } else { | ||
1450 | vma->vm_region->vm_end = vma->vm_end = addr; | ||
1451 | vma->vm_region->vm_top = addr; | ||
1145 | } | 1452 | } |
1453 | add_nommu_region(vma->vm_region); | ||
1454 | add_nommu_region(new->vm_region); | ||
1455 | up_write(&nommu_region_sem); | ||
1456 | add_vma_to_mm(mm, vma); | ||
1457 | add_vma_to_mm(mm, new); | ||
1458 | return 0; | ||
1146 | } | 1459 | } |
1147 | 1460 | ||
1148 | /* | 1461 | /* |
1149 | * release a mapping | 1462 | * shrink a VMA by removing the specified chunk from either the beginning or |
1150 | * - under NOMMU conditions the parameters must match exactly to the mapping to | 1463 | * the end |
1151 | * be removed | ||
1152 | */ | 1464 | */ |
1153 | int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) | 1465 | static int shrink_vma(struct mm_struct *mm, |
1466 | struct vm_area_struct *vma, | ||
1467 | unsigned long from, unsigned long to) | ||
1154 | { | 1468 | { |
1155 | struct vm_list_struct *vml, **parent; | 1469 | struct vm_region *region; |
1156 | unsigned long end = addr + len; | ||
1157 | 1470 | ||
1158 | #ifdef DEBUG | 1471 | kenter(""); |
1159 | printk("do_munmap:\n"); | ||
1160 | #endif | ||
1161 | 1472 | ||
1162 | for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) { | 1473 | /* adjust the VMA's pointers, which may reposition it in the MM's tree |
1163 | if ((*parent)->vma->vm_start > addr) | 1474 | * and list */ |
1164 | break; | 1475 | delete_vma_from_mm(vma); |
1165 | if ((*parent)->vma->vm_start == addr && | 1476 | if (from > vma->vm_start) |
1166 | ((len == 0) || ((*parent)->vma->vm_end == end))) | 1477 | vma->vm_end = from; |
1167 | goto found; | 1478 | else |
1479 | vma->vm_start = to; | ||
1480 | add_vma_to_mm(mm, vma); | ||
1481 | |||
1482 | /* cut the backing region down to size */ | ||
1483 | region = vma->vm_region; | ||
1484 | BUG_ON(atomic_read(®ion->vm_usage) != 1); | ||
1485 | |||
1486 | down_write(&nommu_region_sem); | ||
1487 | delete_nommu_region(region); | ||
1488 | if (from > region->vm_start) { | ||
1489 | to = region->vm_top; | ||
1490 | region->vm_top = region->vm_end = from; | ||
1491 | } else { | ||
1492 | region->vm_start = to; | ||
1168 | } | 1493 | } |
1494 | add_nommu_region(region); | ||
1495 | up_write(&nommu_region_sem); | ||
1169 | 1496 | ||
1170 | printk("munmap of non-mmaped memory by process %d (%s): %p\n", | 1497 | free_page_series(from, to); |
1171 | current->pid, current->comm, (void *) addr); | 1498 | return 0; |
1172 | return -EINVAL; | 1499 | } |
1173 | 1500 | ||
1174 | found: | 1501 | /* |
1175 | vml = *parent; | 1502 | * release a mapping |
1503 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single | ||
1504 | * VMA, though it need not cover the whole VMA | ||
1505 | */ | ||
1506 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) | ||
1507 | { | ||
1508 | struct vm_area_struct *vma; | ||
1509 | struct rb_node *rb; | ||
1510 | unsigned long end = start + len; | ||
1511 | int ret; | ||
1176 | 1512 | ||
1177 | put_vma(mm, vml->vma); | 1513 | kenter(",%lx,%zx", start, len); |
1178 | 1514 | ||
1179 | *parent = vml->next; | 1515 | if (len == 0) |
1180 | realalloc -= kobjsize(vml); | 1516 | return -EINVAL; |
1181 | askedalloc -= sizeof(*vml); | ||
1182 | kfree(vml); | ||
1183 | 1517 | ||
1184 | update_hiwater_vm(mm); | 1518 | /* find the first potentially overlapping VMA */ |
1185 | mm->total_vm -= len >> PAGE_SHIFT; | 1519 | vma = find_vma(mm, start); |
1520 | if (!vma) { | ||
1521 | printk(KERN_WARNING | ||
1522 | "munmap of memory not mmapped by process %d (%s):" | ||
1523 | " 0x%lx-0x%lx\n", | ||
1524 | current->pid, current->comm, start, start + len - 1); | ||
1525 | return -EINVAL; | ||
1526 | } | ||
1186 | 1527 | ||
1187 | #ifdef DEBUG | 1528 | /* we're allowed to split an anonymous VMA but not a file-backed one */ |
1188 | show_process_blocks(); | 1529 | if (vma->vm_file) { |
1189 | #endif | 1530 | do { |
1531 | if (start > vma->vm_start) { | ||
1532 | kleave(" = -EINVAL [miss]"); | ||
1533 | return -EINVAL; | ||
1534 | } | ||
1535 | if (end == vma->vm_end) | ||
1536 | goto erase_whole_vma; | ||
1537 | rb = rb_next(&vma->vm_rb); | ||
1538 | vma = rb_entry(rb, struct vm_area_struct, vm_rb); | ||
1539 | } while (rb); | ||
1540 | kleave(" = -EINVAL [split file]"); | ||
1541 | return -EINVAL; | ||
1542 | } else { | ||
1543 | /* the chunk must be a subset of the VMA found */ | ||
1544 | if (start == vma->vm_start && end == vma->vm_end) | ||
1545 | goto erase_whole_vma; | ||
1546 | if (start < vma->vm_start || end > vma->vm_end) { | ||
1547 | kleave(" = -EINVAL [superset]"); | ||
1548 | return -EINVAL; | ||
1549 | } | ||
1550 | if (start & ~PAGE_MASK) { | ||
1551 | kleave(" = -EINVAL [unaligned start]"); | ||
1552 | return -EINVAL; | ||
1553 | } | ||
1554 | if (end != vma->vm_end && end & ~PAGE_MASK) { | ||
1555 | kleave(" = -EINVAL [unaligned split]"); | ||
1556 | return -EINVAL; | ||
1557 | } | ||
1558 | if (start != vma->vm_start && end != vma->vm_end) { | ||
1559 | ret = split_vma(mm, vma, start, 1); | ||
1560 | if (ret < 0) { | ||
1561 | kleave(" = %d [split]", ret); | ||
1562 | return ret; | ||
1563 | } | ||
1564 | } | ||
1565 | return shrink_vma(mm, vma, start, end); | ||
1566 | } | ||
1190 | 1567 | ||
1568 | erase_whole_vma: | ||
1569 | delete_vma_from_mm(vma); | ||
1570 | delete_vma(mm, vma); | ||
1571 | kleave(" = 0"); | ||
1191 | return 0; | 1572 | return 0; |
1192 | } | 1573 | } |
1193 | EXPORT_SYMBOL(do_munmap); | 1574 | EXPORT_SYMBOL(do_munmap); |
@@ -1204,32 +1585,26 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) | |||
1204 | } | 1585 | } |
1205 | 1586 | ||
1206 | /* | 1587 | /* |
1207 | * Release all mappings | 1588 | * release all the mappings made in a process's VM space |
1208 | */ | 1589 | */ |
1209 | void exit_mmap(struct mm_struct * mm) | 1590 | void exit_mmap(struct mm_struct *mm) |
1210 | { | 1591 | { |
1211 | struct vm_list_struct *tmp; | 1592 | struct vm_area_struct *vma; |
1212 | |||
1213 | if (mm) { | ||
1214 | #ifdef DEBUG | ||
1215 | printk("Exit_mmap:\n"); | ||
1216 | #endif | ||
1217 | 1593 | ||
1218 | mm->total_vm = 0; | 1594 | if (!mm) |
1595 | return; | ||
1219 | 1596 | ||
1220 | while ((tmp = mm->context.vmlist)) { | 1597 | kenter(""); |
1221 | mm->context.vmlist = tmp->next; | ||
1222 | put_vma(mm, tmp->vma); | ||
1223 | 1598 | ||
1224 | realalloc -= kobjsize(tmp); | 1599 | mm->total_vm = 0; |
1225 | askedalloc -= sizeof(*tmp); | ||
1226 | kfree(tmp); | ||
1227 | } | ||
1228 | 1600 | ||
1229 | #ifdef DEBUG | 1601 | while ((vma = mm->mmap)) { |
1230 | show_process_blocks(); | 1602 | mm->mmap = vma->vm_next; |
1231 | #endif | 1603 | delete_vma_from_mm(vma); |
1604 | delete_vma(mm, vma); | ||
1232 | } | 1605 | } |
1606 | |||
1607 | kleave(""); | ||
1233 | } | 1608 | } |
1234 | 1609 | ||
1235 | unsigned long do_brk(unsigned long addr, unsigned long len) | 1610 | unsigned long do_brk(unsigned long addr, unsigned long len) |
@@ -1242,8 +1617,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len) | |||
1242 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) | 1617 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) |
1243 | * | 1618 | * |
1244 | * under NOMMU conditions, we only permit changing a mapping's size, and only | 1619 | * under NOMMU conditions, we only permit changing a mapping's size, and only |
1245 | * as long as it stays within the hole allocated by the kmalloc() call in | 1620 | * as long as it stays within the region allocated by do_mmap_private() and the |
1246 | * do_mmap_pgoff() and the block is not shareable | 1621 | * block is not shareable |
1247 | * | 1622 | * |
1248 | * MREMAP_FIXED is not supported under NOMMU conditions | 1623 | * MREMAP_FIXED is not supported under NOMMU conditions |
1249 | */ | 1624 | */ |
@@ -1254,13 +1629,16 @@ unsigned long do_mremap(unsigned long addr, | |||
1254 | struct vm_area_struct *vma; | 1629 | struct vm_area_struct *vma; |
1255 | 1630 | ||
1256 | /* insanity checks first */ | 1631 | /* insanity checks first */ |
1257 | if (new_len == 0) | 1632 | if (old_len == 0 || new_len == 0) |
1258 | return (unsigned long) -EINVAL; | 1633 | return (unsigned long) -EINVAL; |
1259 | 1634 | ||
1635 | if (addr & ~PAGE_MASK) | ||
1636 | return -EINVAL; | ||
1637 | |||
1260 | if (flags & MREMAP_FIXED && new_addr != addr) | 1638 | if (flags & MREMAP_FIXED && new_addr != addr) |
1261 | return (unsigned long) -EINVAL; | 1639 | return (unsigned long) -EINVAL; |
1262 | 1640 | ||
1263 | vma = find_vma_exact(current->mm, addr); | 1641 | vma = find_vma_exact(current->mm, addr, old_len); |
1264 | if (!vma) | 1642 | if (!vma) |
1265 | return (unsigned long) -EINVAL; | 1643 | return (unsigned long) -EINVAL; |
1266 | 1644 | ||
@@ -1270,22 +1648,19 @@ unsigned long do_mremap(unsigned long addr, | |||
1270 | if (vma->vm_flags & VM_MAYSHARE) | 1648 | if (vma->vm_flags & VM_MAYSHARE) |
1271 | return (unsigned long) -EPERM; | 1649 | return (unsigned long) -EPERM; |
1272 | 1650 | ||
1273 | if (new_len > kobjsize((void *) addr)) | 1651 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) |
1274 | return (unsigned long) -ENOMEM; | 1652 | return (unsigned long) -ENOMEM; |
1275 | 1653 | ||
1276 | /* all checks complete - do it */ | 1654 | /* all checks complete - do it */ |
1277 | vma->vm_end = vma->vm_start + new_len; | 1655 | vma->vm_end = vma->vm_start + new_len; |
1278 | |||
1279 | askedalloc -= old_len; | ||
1280 | askedalloc += new_len; | ||
1281 | |||
1282 | return vma->vm_start; | 1656 | return vma->vm_start; |
1283 | } | 1657 | } |
1284 | EXPORT_SYMBOL(do_mremap); | 1658 | EXPORT_SYMBOL(do_mremap); |
1285 | 1659 | ||
1286 | asmlinkage unsigned long sys_mremap(unsigned long addr, | 1660 | asmlinkage |
1287 | unsigned long old_len, unsigned long new_len, | 1661 | unsigned long sys_mremap(unsigned long addr, |
1288 | unsigned long flags, unsigned long new_addr) | 1662 | unsigned long old_len, unsigned long new_len, |
1663 | unsigned long flags, unsigned long new_addr) | ||
1289 | { | 1664 | { |
1290 | unsigned long ret; | 1665 | unsigned long ret; |
1291 | 1666 | ||