author     Guruprasad Aphale <guru@koruna.cs.unc.edu>   2010-05-12 21:44:51 -0400
committer  Guruprasad Aphale <guru@koruna.cs.unc.edu>   2010-05-12 21:44:51 -0400
commit     05a753e08faacdefa380f3b60838fc65e13a5812 (patch)
tree       da52fb25bb4a2fd21caa770323947874ba01310e
parent     da2f4f4af74a973979db895efbdb805bcd121bce (diff)
Renamed index.html to litmus2008.html and updated index.html for litmus2010
-rw-r--r--  download/2010.1/32bit-config              1956
-rw-r--r--  download/2010.1/64bit-config              1879
-rw-r--r--  download/2010.1/SHA256SUMS                   4
-rw-r--r--  download/2010.1/liblitmus-2010.1.tgz       bin (0 -> 583680 bytes)
-rw-r--r--  download/2010.1/litmus-rt-2010.1.patch   38980
-rw-r--r--  index.html                                1092
-rw-r--r--  litmus2008.html                            605
7 files changed, 43921 insertions, 595 deletions
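
The new download/2010.1/ directory ships a SHA256SUMS file alongside the 32-bit and 64-bit kernel configs, the litmus-rt-2010.1.patch, and the liblitmus-2010.1.tgz tarball. As a minimal sketch of how those checksums would typically be used before building (assuming SHA256SUMS follows the usual sha256sum output format of "<hex digest>  <filename>"; the paths below are illustrative, taken from the diffstat above):

import hashlib
from pathlib import Path

def load_sums(sums_path):
    """Parse lines of the assumed sha256sum format: '<hex digest>  <filename>'."""
    sums = {}
    for line in Path(sums_path).read_text().splitlines():
        if not line.strip():
            continue
        digest, name = line.split(None, 1)
        sums[name.strip().lstrip("*")] = digest.lower()
    return sums

def sha256_of(path):
    """Stream the file in 1 MiB chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    # Directory layout mirrors the diffstat above (download/2010.1/...).
    base = Path("download/2010.1")
    for name, expected in load_sums(base / "SHA256SUMS").items():
        status = "OK" if sha256_of(base / name) == expected else "MISMATCH"
        print(f"{name}: {status}")
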
diff --git a/download/2010.1/32bit-config b/download/2010.1/32bit-config
new file mode 100644
index 0000000..8374035
--- /dev/null
+++ b/download/2010.1/32bit-config
@@ -0,0 +1,1956 @@
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.32-litmus2010 | ||
4 | # Sun Jan 17 13:41:43 2010 | ||
5 | # | ||
6 | # CONFIG_64BIT is not set | ||
7 | CONFIG_X86_32=y | ||
8 | # CONFIG_X86_64 is not set | ||
9 | CONFIG_X86=y | ||
10 | CONFIG_OUTPUT_FORMAT="elf32-i386" | ||
11 | CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig" | ||
12 | CONFIG_GENERIC_TIME=y | ||
13 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
14 | CONFIG_CLOCKSOURCE_WATCHDOG=y | ||
15 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
16 | CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y | ||
17 | CONFIG_LOCKDEP_SUPPORT=y | ||
18 | CONFIG_STACKTRACE_SUPPORT=y | ||
19 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | ||
20 | CONFIG_MMU=y | ||
21 | CONFIG_ZONE_DMA=y | ||
22 | CONFIG_GENERIC_ISA_DMA=y | ||
23 | CONFIG_GENERIC_IOMAP=y | ||
24 | CONFIG_GENERIC_BUG=y | ||
25 | CONFIG_GENERIC_HWEIGHT=y | ||
26 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
27 | # CONFIG_RWSEM_GENERIC_SPINLOCK is not set | ||
28 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
29 | CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y | ||
30 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
31 | # CONFIG_GENERIC_TIME_VSYSCALL is not set | ||
32 | CONFIG_ARCH_HAS_CPU_RELAX=y | ||
33 | CONFIG_ARCH_HAS_DEFAULT_IDLE=y | ||
34 | CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y | ||
35 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | ||
36 | CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y | ||
37 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
38 | # CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set | ||
39 | CONFIG_ARCH_HIBERNATION_POSSIBLE=y | ||
40 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
41 | # CONFIG_ZONE_DMA32 is not set | ||
42 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
43 | # CONFIG_AUDIT_ARCH is not set | ||
44 | CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y | ||
45 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | ||
46 | CONFIG_GENERIC_HARDIRQS=y | ||
47 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
48 | CONFIG_GENERIC_IRQ_PROBE=y | ||
49 | CONFIG_GENERIC_PENDING_IRQ=y | ||
50 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
51 | CONFIG_X86_32_SMP=y | ||
52 | CONFIG_X86_HT=y | ||
53 | CONFIG_X86_TRAMPOLINE=y | ||
54 | CONFIG_X86_32_LAZY_GS=y | ||
55 | CONFIG_KTIME_SCALAR=y | ||
56 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
57 | CONFIG_CONSTRUCTORS=y | ||
58 | |||
59 | # | ||
60 | # General setup | ||
61 | # | ||
62 | CONFIG_EXPERIMENTAL=y | ||
63 | CONFIG_LOCK_KERNEL=y | ||
64 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
65 | CONFIG_LOCALVERSION="" | ||
66 | # CONFIG_LOCALVERSION_AUTO is not set | ||
67 | CONFIG_HAVE_KERNEL_GZIP=y | ||
68 | CONFIG_HAVE_KERNEL_BZIP2=y | ||
69 | CONFIG_HAVE_KERNEL_LZMA=y | ||
70 | CONFIG_KERNEL_GZIP=y | ||
71 | # CONFIG_KERNEL_BZIP2 is not set | ||
72 | # CONFIG_KERNEL_LZMA is not set | ||
73 | CONFIG_SWAP=y | ||
74 | CONFIG_SYSVIPC=y | ||
75 | CONFIG_SYSVIPC_SYSCTL=y | ||
76 | CONFIG_POSIX_MQUEUE=y | ||
77 | CONFIG_POSIX_MQUEUE_SYSCTL=y | ||
78 | CONFIG_BSD_PROCESS_ACCT=y | ||
79 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
80 | # CONFIG_TASKSTATS is not set | ||
81 | # CONFIG_AUDIT is not set | ||
82 | |||
83 | # | ||
84 | # RCU Subsystem | ||
85 | # | ||
86 | CONFIG_TREE_RCU=y | ||
87 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
88 | # CONFIG_RCU_TRACE is not set | ||
89 | CONFIG_RCU_FANOUT=32 | ||
90 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
91 | # CONFIG_TREE_RCU_TRACE is not set | ||
92 | # CONFIG_IKCONFIG is not set | ||
93 | CONFIG_LOG_BUF_SHIFT=17 | ||
94 | CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y | ||
95 | # CONFIG_GROUP_SCHED is not set | ||
96 | # CONFIG_CGROUPS is not set | ||
97 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
98 | # CONFIG_RELAY is not set | ||
99 | CONFIG_NAMESPACES=y | ||
100 | # CONFIG_UTS_NS is not set | ||
101 | # CONFIG_IPC_NS is not set | ||
102 | # CONFIG_USER_NS is not set | ||
103 | # CONFIG_PID_NS is not set | ||
104 | # CONFIG_NET_NS is not set | ||
105 | # CONFIG_BLK_DEV_INITRD is not set | ||
106 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
107 | CONFIG_SYSCTL=y | ||
108 | CONFIG_ANON_INODES=y | ||
109 | # CONFIG_EMBEDDED is not set | ||
110 | CONFIG_UID16=y | ||
111 | CONFIG_SYSCTL_SYSCALL=y | ||
112 | CONFIG_KALLSYMS=y | ||
113 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
114 | CONFIG_HOTPLUG=y | ||
115 | CONFIG_PRINTK=y | ||
116 | CONFIG_BUG=y | ||
117 | CONFIG_ELF_CORE=y | ||
118 | CONFIG_PCSPKR_PLATFORM=y | ||
119 | CONFIG_BASE_FULL=y | ||
120 | CONFIG_FUTEX=y | ||
121 | CONFIG_EPOLL=y | ||
122 | CONFIG_SIGNALFD=y | ||
123 | CONFIG_TIMERFD=y | ||
124 | CONFIG_EVENTFD=y | ||
125 | CONFIG_SHMEM=y | ||
126 | CONFIG_AIO=y | ||
127 | CONFIG_HAVE_PERF_EVENTS=y | ||
128 | |||
129 | # | ||
130 | # Kernel Performance Events And Counters | ||
131 | # | ||
132 | CONFIG_PERF_EVENTS=y | ||
133 | CONFIG_EVENT_PROFILE=y | ||
134 | CONFIG_PERF_COUNTERS=y | ||
135 | CONFIG_VM_EVENT_COUNTERS=y | ||
136 | CONFIG_PCI_QUIRKS=y | ||
137 | CONFIG_SLUB_DEBUG=y | ||
138 | # CONFIG_COMPAT_BRK is not set | ||
139 | # CONFIG_SLAB is not set | ||
140 | CONFIG_SLUB=y | ||
141 | # CONFIG_SLOB is not set | ||
142 | # CONFIG_PROFILING is not set | ||
143 | CONFIG_TRACEPOINTS=y | ||
144 | CONFIG_HAVE_OPROFILE=y | ||
145 | # CONFIG_KPROBES is not set | ||
146 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
147 | CONFIG_HAVE_IOREMAP_PROT=y | ||
148 | CONFIG_HAVE_KPROBES=y | ||
149 | CONFIG_HAVE_KRETPROBES=y | ||
150 | CONFIG_HAVE_ARCH_TRACEHOOK=y | ||
151 | CONFIG_HAVE_DMA_ATTRS=y | ||
152 | CONFIG_HAVE_DMA_API_DEBUG=y | ||
153 | |||
154 | # | ||
155 | # GCOV-based kernel profiling | ||
156 | # | ||
157 | # CONFIG_GCOV_KERNEL is not set | ||
158 | CONFIG_SLOW_WORK=y | ||
159 | # CONFIG_SLOW_WORK_DEBUG is not set | ||
160 | CONFIG_HAVE_GENERIC_DMA_COHERENT=y | ||
161 | CONFIG_SLABINFO=y | ||
162 | CONFIG_RT_MUTEXES=y | ||
163 | CONFIG_BASE_SMALL=0 | ||
164 | CONFIG_MODULES=y | ||
165 | CONFIG_MODULE_FORCE_LOAD=y | ||
166 | CONFIG_MODULE_UNLOAD=y | ||
167 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
168 | CONFIG_MODVERSIONS=y | ||
169 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
170 | CONFIG_STOP_MACHINE=y | ||
171 | CONFIG_BLOCK=y | ||
172 | # CONFIG_LBDAF is not set | ||
173 | # CONFIG_BLK_DEV_BSG is not set | ||
174 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
175 | |||
176 | # | ||
177 | # IO Schedulers | ||
178 | # | ||
179 | CONFIG_IOSCHED_NOOP=y | ||
180 | CONFIG_IOSCHED_AS=y | ||
181 | CONFIG_IOSCHED_DEADLINE=y | ||
182 | CONFIG_IOSCHED_CFQ=y | ||
183 | # CONFIG_DEFAULT_AS is not set | ||
184 | # CONFIG_DEFAULT_DEADLINE is not set | ||
185 | CONFIG_DEFAULT_CFQ=y | ||
186 | # CONFIG_DEFAULT_NOOP is not set | ||
187 | CONFIG_DEFAULT_IOSCHED="cfq" | ||
188 | # CONFIG_FREEZER is not set | ||
189 | |||
190 | # | ||
191 | # Processor type and features | ||
192 | # | ||
193 | CONFIG_TICK_ONESHOT=y | ||
194 | # CONFIG_NO_HZ is not set | ||
195 | CONFIG_HIGH_RES_TIMERS=y | ||
196 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
197 | CONFIG_SMP=y | ||
198 | # CONFIG_SPARSE_IRQ is not set | ||
199 | # CONFIG_X86_MPPARSE is not set | ||
200 | # CONFIG_X86_BIGSMP is not set | ||
201 | # CONFIG_X86_EXTENDED_PLATFORM is not set | ||
202 | CONFIG_SCHED_OMIT_FRAME_POINTER=y | ||
203 | # CONFIG_PARAVIRT_GUEST is not set | ||
204 | # CONFIG_MEMTEST is not set | ||
205 | # CONFIG_M386 is not set | ||
206 | # CONFIG_M486 is not set | ||
207 | # CONFIG_M586 is not set | ||
208 | # CONFIG_M586TSC is not set | ||
209 | # CONFIG_M586MMX is not set | ||
210 | # CONFIG_M686 is not set | ||
211 | # CONFIG_MPENTIUMII is not set | ||
212 | # CONFIG_MPENTIUMIII is not set | ||
213 | # CONFIG_MPENTIUMM is not set | ||
214 | # CONFIG_MPENTIUM4 is not set | ||
215 | # CONFIG_MK6 is not set | ||
216 | # CONFIG_MK7 is not set | ||
217 | # CONFIG_MK8 is not set | ||
218 | # CONFIG_MCRUSOE is not set | ||
219 | # CONFIG_MEFFICEON is not set | ||
220 | # CONFIG_MWINCHIPC6 is not set | ||
221 | # CONFIG_MWINCHIP3D is not set | ||
222 | # CONFIG_MGEODEGX1 is not set | ||
223 | # CONFIG_MGEODE_LX is not set | ||
224 | # CONFIG_MCYRIXIII is not set | ||
225 | # CONFIG_MVIAC3_2 is not set | ||
226 | # CONFIG_MVIAC7 is not set | ||
227 | # CONFIG_MPSC is not set | ||
228 | CONFIG_MCORE2=y | ||
229 | # CONFIG_MATOM is not set | ||
230 | # CONFIG_GENERIC_CPU is not set | ||
231 | # CONFIG_X86_GENERIC is not set | ||
232 | CONFIG_X86_CPU=y | ||
233 | CONFIG_X86_L1_CACHE_BYTES=64 | ||
234 | CONFIG_X86_INTERNODE_CACHE_BYTES=64 | ||
235 | CONFIG_X86_CMPXCHG=y | ||
236 | CONFIG_X86_L1_CACHE_SHIFT=6 | ||
237 | CONFIG_X86_XADD=y | ||
238 | CONFIG_X86_WP_WORKS_OK=y | ||
239 | CONFIG_X86_INVLPG=y | ||
240 | CONFIG_X86_BSWAP=y | ||
241 | CONFIG_X86_POPAD_OK=y | ||
242 | CONFIG_X86_INTEL_USERCOPY=y | ||
243 | CONFIG_X86_USE_PPRO_CHECKSUM=y | ||
244 | CONFIG_X86_TSC=y | ||
245 | CONFIG_X86_CMPXCHG64=y | ||
246 | CONFIG_X86_CMOV=y | ||
247 | CONFIG_X86_MINIMUM_CPU_FAMILY=5 | ||
248 | CONFIG_X86_DEBUGCTLMSR=y | ||
249 | CONFIG_CPU_SUP_INTEL=y | ||
250 | CONFIG_CPU_SUP_CYRIX_32=y | ||
251 | CONFIG_CPU_SUP_AMD=y | ||
252 | CONFIG_CPU_SUP_CENTAUR=y | ||
253 | CONFIG_CPU_SUP_TRANSMETA_32=y | ||
254 | CONFIG_CPU_SUP_UMC_32=y | ||
255 | # CONFIG_X86_DS is not set | ||
256 | CONFIG_HPET_TIMER=y | ||
257 | CONFIG_HPET_EMULATE_RTC=y | ||
258 | CONFIG_DMI=y | ||
259 | # CONFIG_IOMMU_HELPER is not set | ||
260 | # CONFIG_IOMMU_API is not set | ||
261 | CONFIG_NR_CPUS=8 | ||
262 | # CONFIG_SCHED_SMT is not set | ||
263 | CONFIG_SCHED_MC=y | ||
264 | # CONFIG_PREEMPT_NONE is not set | ||
265 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
266 | CONFIG_PREEMPT=y | ||
267 | CONFIG_X86_LOCAL_APIC=y | ||
268 | CONFIG_X86_IO_APIC=y | ||
269 | # CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set | ||
270 | # CONFIG_X86_MCE is not set | ||
271 | CONFIG_VM86=y | ||
272 | # CONFIG_TOSHIBA is not set | ||
273 | # CONFIG_I8K is not set | ||
274 | # CONFIG_X86_REBOOTFIXUPS is not set | ||
275 | # CONFIG_MICROCODE is not set | ||
276 | CONFIG_X86_MSR=y | ||
277 | CONFIG_X86_CPUID=y | ||
278 | # CONFIG_X86_CPU_DEBUG is not set | ||
279 | # CONFIG_NOHIGHMEM is not set | ||
280 | CONFIG_HIGHMEM4G=y | ||
281 | # CONFIG_HIGHMEM64G is not set | ||
282 | CONFIG_PAGE_OFFSET=0xC0000000 | ||
283 | CONFIG_HIGHMEM=y | ||
284 | # CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set | ||
285 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
286 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
287 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | ||
288 | CONFIG_SELECT_MEMORY_MODEL=y | ||
289 | CONFIG_FLATMEM_MANUAL=y | ||
290 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
291 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
292 | CONFIG_FLATMEM=y | ||
293 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
294 | CONFIG_SPARSEMEM_STATIC=y | ||
295 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
296 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
297 | # CONFIG_PHYS_ADDR_T_64BIT is not set | ||
298 | CONFIG_ZONE_DMA_FLAG=1 | ||
299 | CONFIG_BOUNCE=y | ||
300 | CONFIG_VIRT_TO_BUS=y | ||
301 | CONFIG_HAVE_MLOCK=y | ||
302 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
303 | # CONFIG_KSM is not set | ||
304 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
305 | # CONFIG_HIGHPTE is not set | ||
306 | # CONFIG_X86_CHECK_BIOS_CORRUPTION is not set | ||
307 | CONFIG_X86_RESERVE_LOW_64K=y | ||
308 | # CONFIG_MATH_EMULATION is not set | ||
309 | CONFIG_MTRR=y | ||
310 | CONFIG_MTRR_SANITIZER=y | ||
311 | CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 | ||
312 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 | ||
313 | CONFIG_X86_PAT=y | ||
314 | CONFIG_ARCH_USES_PG_UNCACHED=y | ||
315 | # CONFIG_EFI is not set | ||
316 | CONFIG_SECCOMP=y | ||
317 | # CONFIG_CC_STACKPROTECTOR is not set | ||
318 | # CONFIG_HZ_100 is not set | ||
319 | # CONFIG_HZ_250 is not set | ||
320 | # CONFIG_HZ_300 is not set | ||
321 | CONFIG_HZ_1000=y | ||
322 | CONFIG_HZ=1000 | ||
323 | CONFIG_SCHED_HRTICK=y | ||
324 | # CONFIG_KEXEC is not set | ||
325 | # CONFIG_CRASH_DUMP is not set | ||
326 | CONFIG_PHYSICAL_START=0x1000000 | ||
327 | # CONFIG_RELOCATABLE is not set | ||
328 | CONFIG_PHYSICAL_ALIGN=0x1000000 | ||
329 | # CONFIG_HOTPLUG_CPU is not set | ||
330 | # CONFIG_COMPAT_VDSO is not set | ||
331 | # CONFIG_CMDLINE_BOOL is not set | ||
332 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
333 | |||
334 | # | ||
335 | # Power management and ACPI options | ||
336 | # | ||
337 | CONFIG_PM=y | ||
338 | # CONFIG_PM_DEBUG is not set | ||
339 | # CONFIG_SUSPEND is not set | ||
340 | # CONFIG_HIBERNATION is not set | ||
341 | # CONFIG_PM_RUNTIME is not set | ||
342 | CONFIG_ACPI=y | ||
343 | # CONFIG_ACPI_PROCFS is not set | ||
344 | # CONFIG_ACPI_PROCFS_POWER is not set | ||
345 | CONFIG_ACPI_SYSFS_POWER=y | ||
346 | # CONFIG_ACPI_PROC_EVENT is not set | ||
347 | CONFIG_ACPI_AC=y | ||
348 | # CONFIG_ACPI_BATTERY is not set | ||
349 | CONFIG_ACPI_BUTTON=y | ||
350 | CONFIG_ACPI_FAN=y | ||
351 | CONFIG_ACPI_DOCK=y | ||
352 | CONFIG_ACPI_PROCESSOR=y | ||
353 | # CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set | ||
354 | CONFIG_ACPI_THERMAL=y | ||
355 | # CONFIG_ACPI_CUSTOM_DSDT is not set | ||
356 | CONFIG_ACPI_BLACKLIST_YEAR=0 | ||
357 | # CONFIG_ACPI_DEBUG is not set | ||
358 | # CONFIG_ACPI_PCI_SLOT is not set | ||
359 | CONFIG_X86_PM_TIMER=y | ||
360 | # CONFIG_ACPI_CONTAINER is not set | ||
361 | # CONFIG_ACPI_SBS is not set | ||
362 | # CONFIG_SFI is not set | ||
363 | |||
364 | # | ||
365 | # CPU Frequency scaling | ||
366 | # | ||
367 | CONFIG_CPU_FREQ=y | ||
368 | CONFIG_CPU_FREQ_TABLE=y | ||
369 | # CONFIG_CPU_FREQ_DEBUG is not set | ||
370 | # CONFIG_CPU_FREQ_STAT is not set | ||
371 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
372 | # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set | ||
373 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
374 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
375 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
376 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
377 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set | ||
378 | # CONFIG_CPU_FREQ_GOV_USERSPACE is not set | ||
379 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set | ||
380 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
381 | |||
382 | # | ||
383 | # CPUFreq processor drivers | ||
384 | # | ||
385 | CONFIG_X86_ACPI_CPUFREQ=y | ||
386 | # CONFIG_X86_POWERNOW_K6 is not set | ||
387 | # CONFIG_X86_POWERNOW_K7 is not set | ||
388 | # CONFIG_X86_POWERNOW_K8 is not set | ||
389 | # CONFIG_X86_GX_SUSPMOD is not set | ||
390 | # CONFIG_X86_SPEEDSTEP_CENTRINO is not set | ||
391 | # CONFIG_X86_SPEEDSTEP_ICH is not set | ||
392 | # CONFIG_X86_SPEEDSTEP_SMI is not set | ||
393 | # CONFIG_X86_P4_CLOCKMOD is not set | ||
394 | # CONFIG_X86_CPUFREQ_NFORCE2 is not set | ||
395 | # CONFIG_X86_LONGRUN is not set | ||
396 | # CONFIG_X86_LONGHAUL is not set | ||
397 | # CONFIG_X86_E_POWERSAVER is not set | ||
398 | |||
399 | # | ||
400 | # shared options | ||
401 | # | ||
402 | # CONFIG_X86_SPEEDSTEP_LIB is not set | ||
403 | CONFIG_CPU_IDLE=y | ||
404 | CONFIG_CPU_IDLE_GOV_LADDER=y | ||
405 | |||
406 | # | ||
407 | # Bus options (PCI etc.) | ||
408 | # | ||
409 | CONFIG_PCI=y | ||
410 | # CONFIG_PCI_GOBIOS is not set | ||
411 | # CONFIG_PCI_GOMMCONFIG is not set | ||
412 | # CONFIG_PCI_GODIRECT is not set | ||
413 | # CONFIG_PCI_GOOLPC is not set | ||
414 | CONFIG_PCI_GOANY=y | ||
415 | CONFIG_PCI_BIOS=y | ||
416 | CONFIG_PCI_DIRECT=y | ||
417 | CONFIG_PCI_MMCONFIG=y | ||
418 | CONFIG_PCI_DOMAINS=y | ||
419 | # CONFIG_PCIEPORTBUS is not set | ||
420 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
421 | # CONFIG_PCI_MSI is not set | ||
422 | # CONFIG_PCI_LEGACY is not set | ||
423 | # CONFIG_PCI_STUB is not set | ||
424 | CONFIG_HT_IRQ=y | ||
425 | # CONFIG_PCI_IOV is not set | ||
426 | CONFIG_ISA_DMA_API=y | ||
427 | CONFIG_ISA=y | ||
428 | CONFIG_EISA=y | ||
429 | # CONFIG_EISA_VLB_PRIMING is not set | ||
430 | CONFIG_EISA_PCI_EISA=y | ||
431 | CONFIG_EISA_VIRTUAL_ROOT=y | ||
432 | CONFIG_EISA_NAMES=y | ||
433 | # CONFIG_MCA is not set | ||
434 | # CONFIG_SCx200 is not set | ||
435 | # CONFIG_OLPC is not set | ||
436 | # CONFIG_PCCARD is not set | ||
437 | # CONFIG_HOTPLUG_PCI is not set | ||
438 | |||
439 | # | ||
440 | # Executable file formats / Emulations | ||
441 | # | ||
442 | CONFIG_BINFMT_ELF=y | ||
443 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
444 | CONFIG_HAVE_AOUT=y | ||
445 | # CONFIG_BINFMT_AOUT is not set | ||
446 | CONFIG_BINFMT_MISC=y | ||
447 | CONFIG_HAVE_ATOMIC_IOMAP=y | ||
448 | CONFIG_NET=y | ||
449 | |||
450 | # | ||
451 | # Networking options | ||
452 | # | ||
453 | CONFIG_PACKET=y | ||
454 | CONFIG_PACKET_MMAP=y | ||
455 | CONFIG_UNIX=y | ||
456 | CONFIG_XFRM=y | ||
457 | CONFIG_XFRM_USER=y | ||
458 | # CONFIG_XFRM_SUB_POLICY is not set | ||
459 | # CONFIG_XFRM_MIGRATE is not set | ||
460 | # CONFIG_XFRM_STATISTICS is not set | ||
461 | CONFIG_XFRM_IPCOMP=y | ||
462 | CONFIG_NET_KEY=y | ||
463 | # CONFIG_NET_KEY_MIGRATE is not set | ||
464 | CONFIG_INET=y | ||
465 | CONFIG_IP_MULTICAST=y | ||
466 | CONFIG_IP_ADVANCED_ROUTER=y | ||
467 | CONFIG_ASK_IP_FIB_HASH=y | ||
468 | # CONFIG_IP_FIB_TRIE is not set | ||
469 | CONFIG_IP_FIB_HASH=y | ||
470 | CONFIG_IP_MULTIPLE_TABLES=y | ||
471 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
472 | CONFIG_IP_ROUTE_VERBOSE=y | ||
473 | # CONFIG_IP_PNP is not set | ||
474 | CONFIG_NET_IPIP=y | ||
475 | CONFIG_NET_IPGRE=y | ||
476 | CONFIG_NET_IPGRE_BROADCAST=y | ||
477 | CONFIG_IP_MROUTE=y | ||
478 | CONFIG_IP_PIMSM_V1=y | ||
479 | CONFIG_IP_PIMSM_V2=y | ||
480 | # CONFIG_ARPD is not set | ||
481 | CONFIG_SYN_COOKIES=y | ||
482 | CONFIG_INET_AH=y | ||
483 | CONFIG_INET_ESP=y | ||
484 | CONFIG_INET_IPCOMP=y | ||
485 | CONFIG_INET_XFRM_TUNNEL=y | ||
486 | CONFIG_INET_TUNNEL=y | ||
487 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
488 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
489 | CONFIG_INET_XFRM_MODE_BEET=y | ||
490 | CONFIG_INET_LRO=y | ||
491 | CONFIG_INET_DIAG=y | ||
492 | CONFIG_INET_TCP_DIAG=y | ||
493 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
494 | CONFIG_TCP_CONG_CUBIC=y | ||
495 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
496 | # CONFIG_TCP_MD5SIG is not set | ||
497 | # CONFIG_IPV6 is not set | ||
498 | CONFIG_NETWORK_SECMARK=y | ||
499 | CONFIG_NETFILTER=y | ||
500 | # CONFIG_NETFILTER_DEBUG is not set | ||
501 | CONFIG_NETFILTER_ADVANCED=y | ||
502 | |||
503 | # | ||
504 | # Core Netfilter Configuration | ||
505 | # | ||
506 | CONFIG_NETFILTER_NETLINK=y | ||
507 | CONFIG_NETFILTER_NETLINK_QUEUE=y | ||
508 | CONFIG_NETFILTER_NETLINK_LOG=y | ||
509 | CONFIG_NF_CONNTRACK=y | ||
510 | CONFIG_NF_CT_ACCT=y | ||
511 | CONFIG_NF_CONNTRACK_MARK=y | ||
512 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
513 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
514 | # CONFIG_NF_CT_PROTO_DCCP is not set | ||
515 | # CONFIG_NF_CT_PROTO_SCTP is not set | ||
516 | # CONFIG_NF_CT_PROTO_UDPLITE is not set | ||
517 | CONFIG_NF_CONNTRACK_AMANDA=y | ||
518 | CONFIG_NF_CONNTRACK_FTP=y | ||
519 | # CONFIG_NF_CONNTRACK_H323 is not set | ||
520 | # CONFIG_NF_CONNTRACK_IRC is not set | ||
521 | CONFIG_NF_CONNTRACK_NETBIOS_NS=y | ||
522 | # CONFIG_NF_CONNTRACK_PPTP is not set | ||
523 | # CONFIG_NF_CONNTRACK_SANE is not set | ||
524 | # CONFIG_NF_CONNTRACK_SIP is not set | ||
525 | CONFIG_NF_CONNTRACK_TFTP=y | ||
526 | # CONFIG_NF_CT_NETLINK is not set | ||
527 | CONFIG_NETFILTER_XTABLES=y | ||
528 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y | ||
529 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=y | ||
530 | # CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set | ||
531 | CONFIG_NETFILTER_XT_TARGET_HL=y | ||
532 | CONFIG_NETFILTER_XT_TARGET_MARK=y | ||
533 | CONFIG_NETFILTER_XT_TARGET_NFLOG=y | ||
534 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y | ||
535 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=y | ||
536 | CONFIG_NETFILTER_XT_TARGET_RATEEST=y | ||
537 | CONFIG_NETFILTER_XT_TARGET_TRACE=y | ||
538 | # CONFIG_NETFILTER_XT_TARGET_SECMARK is not set | ||
539 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=y | ||
540 | # CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set | ||
541 | # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set | ||
542 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y | ||
543 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y | ||
544 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=y | ||
545 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y | ||
546 | # CONFIG_NETFILTER_XT_MATCH_DCCP is not set | ||
547 | # CONFIG_NETFILTER_XT_MATCH_DSCP is not set | ||
548 | # CONFIG_NETFILTER_XT_MATCH_ESP is not set | ||
549 | # CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set | ||
550 | # CONFIG_NETFILTER_XT_MATCH_HELPER is not set | ||
551 | CONFIG_NETFILTER_XT_MATCH_HL=y | ||
552 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=y | ||
553 | CONFIG_NETFILTER_XT_MATCH_LENGTH=y | ||
554 | CONFIG_NETFILTER_XT_MATCH_LIMIT=y | ||
555 | CONFIG_NETFILTER_XT_MATCH_MAC=y | ||
556 | CONFIG_NETFILTER_XT_MATCH_MARK=y | ||
557 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y | ||
558 | CONFIG_NETFILTER_XT_MATCH_OWNER=y | ||
559 | # CONFIG_NETFILTER_XT_MATCH_POLICY is not set | ||
560 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y | ||
561 | # CONFIG_NETFILTER_XT_MATCH_QUOTA is not set | ||
562 | # CONFIG_NETFILTER_XT_MATCH_RATEEST is not set | ||
563 | # CONFIG_NETFILTER_XT_MATCH_REALM is not set | ||
564 | # CONFIG_NETFILTER_XT_MATCH_RECENT is not set | ||
565 | # CONFIG_NETFILTER_XT_MATCH_SCTP is not set | ||
566 | # CONFIG_NETFILTER_XT_MATCH_STATE is not set | ||
567 | # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set | ||
568 | CONFIG_NETFILTER_XT_MATCH_STRING=y | ||
569 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=y | ||
570 | CONFIG_NETFILTER_XT_MATCH_TIME=y | ||
571 | CONFIG_NETFILTER_XT_MATCH_U32=y | ||
572 | # CONFIG_NETFILTER_XT_MATCH_OSF is not set | ||
573 | # CONFIG_IP_VS is not set | ||
574 | |||
575 | # | ||
576 | # IP: Netfilter Configuration | ||
577 | # | ||
578 | CONFIG_NF_DEFRAG_IPV4=y | ||
579 | CONFIG_NF_CONNTRACK_IPV4=y | ||
580 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | ||
581 | # CONFIG_IP_NF_QUEUE is not set | ||
582 | CONFIG_IP_NF_IPTABLES=y | ||
583 | CONFIG_IP_NF_MATCH_ADDRTYPE=y | ||
584 | CONFIG_IP_NF_MATCH_AH=y | ||
585 | CONFIG_IP_NF_MATCH_ECN=y | ||
586 | CONFIG_IP_NF_MATCH_TTL=y | ||
587 | CONFIG_IP_NF_FILTER=y | ||
588 | CONFIG_IP_NF_TARGET_REJECT=y | ||
589 | CONFIG_IP_NF_TARGET_LOG=y | ||
590 | CONFIG_IP_NF_TARGET_ULOG=y | ||
591 | # CONFIG_NF_NAT is not set | ||
592 | # CONFIG_IP_NF_MANGLE is not set | ||
593 | CONFIG_IP_NF_TARGET_TTL=y | ||
594 | CONFIG_IP_NF_RAW=y | ||
595 | CONFIG_IP_NF_ARPTABLES=y | ||
596 | CONFIG_IP_NF_ARPFILTER=y | ||
597 | CONFIG_IP_NF_ARP_MANGLE=y | ||
598 | # CONFIG_IP_DCCP is not set | ||
599 | # CONFIG_IP_SCTP is not set | ||
600 | # CONFIG_RDS is not set | ||
601 | # CONFIG_TIPC is not set | ||
602 | # CONFIG_ATM is not set | ||
603 | # CONFIG_BRIDGE is not set | ||
604 | # CONFIG_NET_DSA is not set | ||
605 | # CONFIG_VLAN_8021Q is not set | ||
606 | # CONFIG_DECNET is not set | ||
607 | # CONFIG_LLC2 is not set | ||
608 | # CONFIG_IPX is not set | ||
609 | # CONFIG_ATALK is not set | ||
610 | # CONFIG_X25 is not set | ||
611 | # CONFIG_LAPB is not set | ||
612 | # CONFIG_ECONET is not set | ||
613 | # CONFIG_WAN_ROUTER is not set | ||
614 | # CONFIG_PHONET is not set | ||
615 | # CONFIG_IEEE802154 is not set | ||
616 | # CONFIG_NET_SCHED is not set | ||
617 | # CONFIG_DCB is not set | ||
618 | |||
619 | # | ||
620 | # Network testing | ||
621 | # | ||
622 | # CONFIG_NET_PKTGEN is not set | ||
623 | # CONFIG_NET_DROP_MONITOR is not set | ||
624 | # CONFIG_HAMRADIO is not set | ||
625 | # CONFIG_CAN is not set | ||
626 | # CONFIG_IRDA is not set | ||
627 | # CONFIG_BT is not set | ||
628 | # CONFIG_AF_RXRPC is not set | ||
629 | CONFIG_FIB_RULES=y | ||
630 | # CONFIG_WIRELESS is not set | ||
631 | # CONFIG_WIMAX is not set | ||
632 | # CONFIG_RFKILL is not set | ||
633 | # CONFIG_NET_9P is not set | ||
634 | |||
635 | # | ||
636 | # Device Drivers | ||
637 | # | ||
638 | |||
639 | # | ||
640 | # Generic Driver Options | ||
641 | # | ||
642 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
643 | # CONFIG_DEVTMPFS is not set | ||
644 | CONFIG_STANDALONE=y | ||
645 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
646 | CONFIG_FW_LOADER=y | ||
647 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
648 | CONFIG_EXTRA_FIRMWARE="" | ||
649 | # CONFIG_SYS_HYPERVISOR is not set | ||
650 | # CONFIG_CONNECTOR is not set | ||
651 | # CONFIG_MTD is not set | ||
652 | # CONFIG_PARPORT is not set | ||
653 | CONFIG_PNP=y | ||
654 | # CONFIG_PNP_DEBUG_MESSAGES is not set | ||
655 | |||
656 | # | ||
657 | # Protocols | ||
658 | # | ||
659 | CONFIG_ISAPNP=y | ||
660 | # CONFIG_PNPBIOS is not set | ||
661 | CONFIG_PNPACPI=y | ||
662 | CONFIG_BLK_DEV=y | ||
663 | CONFIG_BLK_DEV_FD=y | ||
664 | # CONFIG_BLK_DEV_XD is not set | ||
665 | # CONFIG_BLK_CPQ_DA is not set | ||
666 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
667 | # CONFIG_BLK_DEV_DAC960 is not set | ||
668 | # CONFIG_BLK_DEV_UMEM is not set | ||
669 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
670 | CONFIG_BLK_DEV_LOOP=y | ||
671 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
672 | # CONFIG_BLK_DEV_NBD is not set | ||
673 | # CONFIG_BLK_DEV_SX8 is not set | ||
674 | # CONFIG_BLK_DEV_UB is not set | ||
675 | # CONFIG_BLK_DEV_RAM is not set | ||
676 | CONFIG_CDROM_PKTCDVD=y | ||
677 | CONFIG_CDROM_PKTCDVD_BUFFERS=8 | ||
678 | # CONFIG_CDROM_PKTCDVD_WCACHE is not set | ||
679 | # CONFIG_ATA_OVER_ETH is not set | ||
680 | # CONFIG_BLK_DEV_HD is not set | ||
681 | # CONFIG_MISC_DEVICES is not set | ||
682 | CONFIG_HAVE_IDE=y | ||
683 | CONFIG_IDE=y | ||
684 | |||
685 | # | ||
686 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | ||
687 | # | ||
688 | CONFIG_IDE_XFER_MODE=y | ||
689 | CONFIG_IDE_ATAPI=y | ||
690 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
691 | CONFIG_IDE_GD=y | ||
692 | CONFIG_IDE_GD_ATA=y | ||
693 | # CONFIG_IDE_GD_ATAPI is not set | ||
694 | CONFIG_BLK_DEV_IDECD=y | ||
695 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y | ||
696 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
697 | CONFIG_BLK_DEV_IDEACPI=y | ||
698 | # CONFIG_IDE_TASK_IOCTL is not set | ||
699 | CONFIG_IDE_PROC_FS=y | ||
700 | |||
701 | # | ||
702 | # IDE chipset support/bugfixes | ||
703 | # | ||
704 | CONFIG_IDE_GENERIC=y | ||
705 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
706 | # CONFIG_BLK_DEV_CMD640 is not set | ||
707 | CONFIG_BLK_DEV_IDEPNP=y | ||
708 | CONFIG_BLK_DEV_IDEDMA_SFF=y | ||
709 | |||
710 | # | ||
711 | # PCI IDE chipsets support | ||
712 | # | ||
713 | CONFIG_BLK_DEV_IDEPCI=y | ||
714 | # CONFIG_IDEPCI_PCIBUS_ORDER is not set | ||
715 | # CONFIG_BLK_DEV_GENERIC is not set | ||
716 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
717 | # CONFIG_BLK_DEV_RZ1000 is not set | ||
718 | CONFIG_BLK_DEV_IDEDMA_PCI=y | ||
719 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
720 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
721 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
722 | # CONFIG_BLK_DEV_ATIIXP is not set | ||
723 | # CONFIG_BLK_DEV_CMD64X is not set | ||
724 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
725 | # CONFIG_BLK_DEV_CS5520 is not set | ||
726 | # CONFIG_BLK_DEV_CS5530 is not set | ||
727 | # CONFIG_BLK_DEV_CS5535 is not set | ||
728 | # CONFIG_BLK_DEV_CS5536 is not set | ||
729 | # CONFIG_BLK_DEV_HPT366 is not set | ||
730 | # CONFIG_BLK_DEV_JMICRON is not set | ||
731 | # CONFIG_BLK_DEV_SC1200 is not set | ||
732 | CONFIG_BLK_DEV_PIIX=y | ||
733 | # CONFIG_BLK_DEV_IT8172 is not set | ||
734 | # CONFIG_BLK_DEV_IT8213 is not set | ||
735 | # CONFIG_BLK_DEV_IT821X is not set | ||
736 | # CONFIG_BLK_DEV_NS87415 is not set | ||
737 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
738 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | ||
739 | # CONFIG_BLK_DEV_SVWKS is not set | ||
740 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
741 | # CONFIG_BLK_DEV_SIS5513 is not set | ||
742 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
743 | # CONFIG_BLK_DEV_TRM290 is not set | ||
744 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
745 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
746 | |||
747 | # | ||
748 | # Other IDE chipsets support | ||
749 | # | ||
750 | |||
751 | # | ||
752 | # Note: most of these also require special kernel boot parameters | ||
753 | # | ||
754 | # CONFIG_BLK_DEV_4DRIVES is not set | ||
755 | # CONFIG_BLK_DEV_ALI14XX is not set | ||
756 | # CONFIG_BLK_DEV_DTC2278 is not set | ||
757 | # CONFIG_BLK_DEV_HT6560B is not set | ||
758 | # CONFIG_BLK_DEV_QD65XX is not set | ||
759 | # CONFIG_BLK_DEV_UMC8672 is not set | ||
760 | CONFIG_BLK_DEV_IDEDMA=y | ||
761 | |||
762 | # | ||
763 | # SCSI device support | ||
764 | # | ||
765 | # CONFIG_RAID_ATTRS is not set | ||
766 | CONFIG_SCSI=y | ||
767 | CONFIG_SCSI_DMA=y | ||
768 | # CONFIG_SCSI_TGT is not set | ||
769 | # CONFIG_SCSI_NETLINK is not set | ||
770 | # CONFIG_SCSI_PROC_FS is not set | ||
771 | |||
772 | # | ||
773 | # SCSI support type (disk, tape, CD-ROM) | ||
774 | # | ||
775 | CONFIG_BLK_DEV_SD=y | ||
776 | # CONFIG_CHR_DEV_ST is not set | ||
777 | # CONFIG_CHR_DEV_OSST is not set | ||
778 | CONFIG_BLK_DEV_SR=y | ||
779 | # CONFIG_BLK_DEV_SR_VENDOR is not set | ||
780 | CONFIG_CHR_DEV_SG=y | ||
781 | # CONFIG_CHR_DEV_SCH is not set | ||
782 | # CONFIG_SCSI_MULTI_LUN is not set | ||
783 | # CONFIG_SCSI_CONSTANTS is not set | ||
784 | # CONFIG_SCSI_LOGGING is not set | ||
785 | CONFIG_SCSI_SCAN_ASYNC=y | ||
786 | CONFIG_SCSI_WAIT_SCAN=m | ||
787 | |||
788 | # | ||
789 | # SCSI Transports | ||
790 | # | ||
791 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
792 | # CONFIG_SCSI_FC_ATTRS is not set | ||
793 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
794 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
795 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
796 | # CONFIG_SCSI_LOWLEVEL is not set | ||
797 | # CONFIG_SCSI_DH is not set | ||
798 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
799 | CONFIG_ATA=y | ||
800 | # CONFIG_ATA_NONSTANDARD is not set | ||
801 | CONFIG_ATA_VERBOSE_ERROR=y | ||
802 | CONFIG_ATA_ACPI=y | ||
803 | CONFIG_SATA_PMP=y | ||
804 | CONFIG_SATA_AHCI=y | ||
805 | # CONFIG_SATA_SIL24 is not set | ||
806 | CONFIG_ATA_SFF=y | ||
807 | # CONFIG_SATA_SVW is not set | ||
808 | CONFIG_ATA_PIIX=y | ||
809 | # CONFIG_SATA_MV is not set | ||
810 | # CONFIG_SATA_NV is not set | ||
811 | # CONFIG_PDC_ADMA is not set | ||
812 | # CONFIG_SATA_QSTOR is not set | ||
813 | # CONFIG_SATA_PROMISE is not set | ||
814 | # CONFIG_SATA_SX4 is not set | ||
815 | # CONFIG_SATA_SIL is not set | ||
816 | # CONFIG_SATA_SIS is not set | ||
817 | # CONFIG_SATA_ULI is not set | ||
818 | # CONFIG_SATA_VIA is not set | ||
819 | # CONFIG_SATA_VITESSE is not set | ||
820 | # CONFIG_SATA_INIC162X is not set | ||
821 | # CONFIG_PATA_ACPI is not set | ||
822 | # CONFIG_PATA_ALI is not set | ||
823 | # CONFIG_PATA_AMD is not set | ||
824 | # CONFIG_PATA_ARTOP is not set | ||
825 | # CONFIG_PATA_ATP867X is not set | ||
826 | # CONFIG_PATA_ATIIXP is not set | ||
827 | # CONFIG_PATA_CMD640_PCI is not set | ||
828 | # CONFIG_PATA_CMD64X is not set | ||
829 | # CONFIG_PATA_CS5520 is not set | ||
830 | # CONFIG_PATA_CS5530 is not set | ||
831 | # CONFIG_PATA_CS5535 is not set | ||
832 | # CONFIG_PATA_CS5536 is not set | ||
833 | # CONFIG_PATA_CYPRESS is not set | ||
834 | # CONFIG_PATA_EFAR is not set | ||
835 | # CONFIG_ATA_GENERIC is not set | ||
836 | # CONFIG_PATA_HPT366 is not set | ||
837 | # CONFIG_PATA_HPT37X is not set | ||
838 | # CONFIG_PATA_HPT3X2N is not set | ||
839 | # CONFIG_PATA_HPT3X3 is not set | ||
840 | # CONFIG_PATA_ISAPNP is not set | ||
841 | # CONFIG_PATA_IT821X is not set | ||
842 | # CONFIG_PATA_IT8213 is not set | ||
843 | # CONFIG_PATA_JMICRON is not set | ||
844 | # CONFIG_PATA_LEGACY is not set | ||
845 | # CONFIG_PATA_TRIFLEX is not set | ||
846 | # CONFIG_PATA_MARVELL is not set | ||
847 | # CONFIG_PATA_MPIIX is not set | ||
848 | # CONFIG_PATA_OLDPIIX is not set | ||
849 | # CONFIG_PATA_NETCELL is not set | ||
850 | # CONFIG_PATA_NINJA32 is not set | ||
851 | # CONFIG_PATA_NS87410 is not set | ||
852 | # CONFIG_PATA_NS87415 is not set | ||
853 | # CONFIG_PATA_OPTI is not set | ||
854 | # CONFIG_PATA_OPTIDMA is not set | ||
855 | # CONFIG_PATA_PDC_OLD is not set | ||
856 | # CONFIG_PATA_QDI is not set | ||
857 | # CONFIG_PATA_RADISYS is not set | ||
858 | # CONFIG_PATA_RDC is not set | ||
859 | # CONFIG_PATA_RZ1000 is not set | ||
860 | # CONFIG_PATA_SC1200 is not set | ||
861 | # CONFIG_PATA_SERVERWORKS is not set | ||
862 | # CONFIG_PATA_PDC2027X is not set | ||
863 | # CONFIG_PATA_SIL680 is not set | ||
864 | # CONFIG_PATA_SIS is not set | ||
865 | # CONFIG_PATA_VIA is not set | ||
866 | # CONFIG_PATA_WINBOND is not set | ||
867 | # CONFIG_PATA_WINBOND_VLB is not set | ||
868 | # CONFIG_PATA_SCH is not set | ||
869 | CONFIG_MD=y | ||
870 | # CONFIG_BLK_DEV_MD is not set | ||
871 | CONFIG_BLK_DEV_DM=y | ||
872 | # CONFIG_DM_DEBUG is not set | ||
873 | # CONFIG_DM_CRYPT is not set | ||
874 | # CONFIG_DM_SNAPSHOT is not set | ||
875 | # CONFIG_DM_MIRROR is not set | ||
876 | # CONFIG_DM_ZERO is not set | ||
877 | # CONFIG_DM_MULTIPATH is not set | ||
878 | # CONFIG_DM_DELAY is not set | ||
879 | # CONFIG_DM_UEVENT is not set | ||
880 | # CONFIG_FUSION is not set | ||
881 | |||
882 | # | ||
883 | # IEEE 1394 (FireWire) support | ||
884 | # | ||
885 | |||
886 | # | ||
887 | # You can enable one or both FireWire driver stacks. | ||
888 | # | ||
889 | |||
890 | # | ||
891 | # See the help texts for more information. | ||
892 | # | ||
893 | # CONFIG_FIREWIRE is not set | ||
894 | # CONFIG_IEEE1394 is not set | ||
895 | # CONFIG_I2O is not set | ||
896 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
897 | CONFIG_NETDEVICES=y | ||
898 | CONFIG_DUMMY=y | ||
899 | # CONFIG_BONDING is not set | ||
900 | # CONFIG_MACVLAN is not set | ||
901 | # CONFIG_EQUALIZER is not set | ||
902 | CONFIG_TUN=y | ||
903 | # CONFIG_VETH is not set | ||
904 | # CONFIG_NET_SB1000 is not set | ||
905 | # CONFIG_ARCNET is not set | ||
906 | CONFIG_PHYLIB=y | ||
907 | |||
908 | # | ||
909 | # MII PHY device drivers | ||
910 | # | ||
911 | # CONFIG_MARVELL_PHY is not set | ||
912 | # CONFIG_DAVICOM_PHY is not set | ||
913 | # CONFIG_QSEMI_PHY is not set | ||
914 | # CONFIG_LXT_PHY is not set | ||
915 | # CONFIG_CICADA_PHY is not set | ||
916 | # CONFIG_VITESSE_PHY is not set | ||
917 | # CONFIG_SMSC_PHY is not set | ||
918 | # CONFIG_BROADCOM_PHY is not set | ||
919 | # CONFIG_ICPLUS_PHY is not set | ||
920 | # CONFIG_REALTEK_PHY is not set | ||
921 | # CONFIG_NATIONAL_PHY is not set | ||
922 | # CONFIG_STE10XP is not set | ||
923 | # CONFIG_LSI_ET1011C_PHY is not set | ||
924 | # CONFIG_FIXED_PHY is not set | ||
925 | # CONFIG_MDIO_BITBANG is not set | ||
926 | CONFIG_NET_ETHERNET=y | ||
927 | CONFIG_MII=y | ||
928 | # CONFIG_HAPPYMEAL is not set | ||
929 | # CONFIG_SUNGEM is not set | ||
930 | # CONFIG_CASSINI is not set | ||
931 | # CONFIG_NET_VENDOR_3COM is not set | ||
932 | # CONFIG_LANCE is not set | ||
933 | # CONFIG_NET_VENDOR_SMC is not set | ||
934 | # CONFIG_ETHOC is not set | ||
935 | # CONFIG_NET_VENDOR_RACAL is not set | ||
936 | # CONFIG_DNET is not set | ||
937 | # CONFIG_NET_TULIP is not set | ||
938 | # CONFIG_AT1700 is not set | ||
939 | # CONFIG_DEPCA is not set | ||
940 | # CONFIG_HP100 is not set | ||
941 | # CONFIG_NET_ISA is not set | ||
942 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
943 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
944 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
945 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
946 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
947 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
948 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
949 | CONFIG_NET_PCI=y | ||
950 | CONFIG_PCNET32=y | ||
951 | CONFIG_AMD8111_ETH=y | ||
952 | # CONFIG_ADAPTEC_STARFIRE is not set | ||
953 | # CONFIG_AC3200 is not set | ||
954 | # CONFIG_APRICOT is not set | ||
955 | # CONFIG_B44 is not set | ||
956 | # CONFIG_FORCEDETH is not set | ||
957 | # CONFIG_CS89x0 is not set | ||
958 | CONFIG_E100=y | ||
959 | # CONFIG_LNE390 is not set | ||
960 | # CONFIG_FEALNX is not set | ||
961 | # CONFIG_NATSEMI is not set | ||
962 | # CONFIG_NE2K_PCI is not set | ||
963 | # CONFIG_NE3210 is not set | ||
964 | # CONFIG_ES3210 is not set | ||
965 | CONFIG_8139CP=y | ||
966 | CONFIG_8139TOO=y | ||
967 | CONFIG_8139TOO_PIO=y | ||
968 | # CONFIG_8139TOO_TUNE_TWISTER is not set | ||
969 | CONFIG_8139TOO_8129=y | ||
970 | # CONFIG_8139_OLD_RX_RESET is not set | ||
971 | # CONFIG_R6040 is not set | ||
972 | # CONFIG_SIS900 is not set | ||
973 | # CONFIG_EPIC100 is not set | ||
974 | # CONFIG_SMSC9420 is not set | ||
975 | # CONFIG_SUNDANCE is not set | ||
976 | # CONFIG_TLAN is not set | ||
977 | # CONFIG_KS8842 is not set | ||
978 | # CONFIG_KS8851_MLL is not set | ||
979 | # CONFIG_VIA_RHINE is not set | ||
980 | # CONFIG_SC92031 is not set | ||
981 | # CONFIG_ATL2 is not set | ||
982 | CONFIG_NETDEV_1000=y | ||
983 | # CONFIG_ACENIC is not set | ||
984 | # CONFIG_DL2K is not set | ||
985 | CONFIG_E1000=y | ||
986 | # CONFIG_E1000E is not set | ||
987 | # CONFIG_IP1000 is not set | ||
988 | # CONFIG_IGB is not set | ||
989 | # CONFIG_IGBVF is not set | ||
990 | # CONFIG_NS83820 is not set | ||
991 | # CONFIG_HAMACHI is not set | ||
992 | # CONFIG_YELLOWFIN is not set | ||
993 | # CONFIG_R8169 is not set | ||
994 | # CONFIG_SIS190 is not set | ||
995 | # CONFIG_SKGE is not set | ||
996 | # CONFIG_SKY2 is not set | ||
997 | # CONFIG_VIA_VELOCITY is not set | ||
998 | # CONFIG_TIGON3 is not set | ||
999 | # CONFIG_BNX2 is not set | ||
1000 | # CONFIG_CNIC is not set | ||
1001 | # CONFIG_QLA3XXX is not set | ||
1002 | # CONFIG_ATL1 is not set | ||
1003 | # CONFIG_ATL1E is not set | ||
1004 | # CONFIG_ATL1C is not set | ||
1005 | # CONFIG_JME is not set | ||
1006 | # CONFIG_NETDEV_10000 is not set | ||
1007 | # CONFIG_TR is not set | ||
1008 | CONFIG_WLAN=y | ||
1009 | # CONFIG_WLAN_PRE80211 is not set | ||
1010 | # CONFIG_WLAN_80211 is not set | ||
1011 | |||
1012 | # | ||
1013 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
1014 | # | ||
1015 | |||
1016 | # | ||
1017 | # USB Network Adapters | ||
1018 | # | ||
1019 | # CONFIG_USB_CATC is not set | ||
1020 | # CONFIG_USB_KAWETH is not set | ||
1021 | # CONFIG_USB_PEGASUS is not set | ||
1022 | # CONFIG_USB_RTL8150 is not set | ||
1023 | # CONFIG_USB_USBNET is not set | ||
1024 | # CONFIG_WAN is not set | ||
1025 | # CONFIG_FDDI is not set | ||
1026 | # CONFIG_HIPPI is not set | ||
1027 | # CONFIG_PPP is not set | ||
1028 | # CONFIG_SLIP is not set | ||
1029 | # CONFIG_NET_FC is not set | ||
1030 | CONFIG_NETCONSOLE=m | ||
1031 | CONFIG_NETCONSOLE_DYNAMIC=y | ||
1032 | CONFIG_NETPOLL=y | ||
1033 | # CONFIG_NETPOLL_TRAP is not set | ||
1034 | CONFIG_NET_POLL_CONTROLLER=y | ||
1035 | # CONFIG_VMXNET3 is not set | ||
1036 | # CONFIG_ISDN is not set | ||
1037 | # CONFIG_PHONE is not set | ||
1038 | |||
1039 | # | ||
1040 | # Input device support | ||
1041 | # | ||
1042 | CONFIG_INPUT=y | ||
1043 | CONFIG_INPUT_FF_MEMLESS=y | ||
1044 | # CONFIG_INPUT_POLLDEV is not set | ||
1045 | |||
1046 | # | ||
1047 | # Userland interfaces | ||
1048 | # | ||
1049 | CONFIG_INPUT_MOUSEDEV=y | ||
1050 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
1051 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
1052 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
1053 | # CONFIG_INPUT_JOYDEV is not set | ||
1054 | CONFIG_INPUT_EVDEV=y | ||
1055 | # CONFIG_INPUT_EVBUG is not set | ||
1056 | |||
1057 | # | ||
1058 | # Input Device Drivers | ||
1059 | # | ||
1060 | CONFIG_INPUT_KEYBOARD=y | ||
1061 | # CONFIG_KEYBOARD_ADP5588 is not set | ||
1062 | CONFIG_KEYBOARD_ATKBD=y | ||
1063 | # CONFIG_QT2160 is not set | ||
1064 | # CONFIG_KEYBOARD_LKKBD is not set | ||
1065 | # CONFIG_KEYBOARD_MAX7359 is not set | ||
1066 | # CONFIG_KEYBOARD_NEWTON is not set | ||
1067 | # CONFIG_KEYBOARD_OPENCORES is not set | ||
1068 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
1069 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
1070 | # CONFIG_KEYBOARD_XTKBD is not set | ||
1071 | CONFIG_INPUT_MOUSE=y | ||
1072 | CONFIG_MOUSE_PS2=y | ||
1073 | CONFIG_MOUSE_PS2_ALPS=y | ||
1074 | CONFIG_MOUSE_PS2_LOGIPS2PP=y | ||
1075 | CONFIG_MOUSE_PS2_SYNAPTICS=y | ||
1076 | CONFIG_MOUSE_PS2_LIFEBOOK=y | ||
1077 | CONFIG_MOUSE_PS2_TRACKPOINT=y | ||
1078 | # CONFIG_MOUSE_PS2_ELANTECH is not set | ||
1079 | # CONFIG_MOUSE_PS2_SENTELIC is not set | ||
1080 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | ||
1081 | # CONFIG_MOUSE_SERIAL is not set | ||
1082 | # CONFIG_MOUSE_APPLETOUCH is not set | ||
1083 | # CONFIG_MOUSE_BCM5974 is not set | ||
1084 | # CONFIG_MOUSE_INPORT is not set | ||
1085 | # CONFIG_MOUSE_LOGIBM is not set | ||
1086 | # CONFIG_MOUSE_PC110PAD is not set | ||
1087 | # CONFIG_MOUSE_VSXXXAA is not set | ||
1088 | # CONFIG_MOUSE_SYNAPTICS_I2C is not set | ||
1089 | # CONFIG_INPUT_JOYSTICK is not set | ||
1090 | # CONFIG_INPUT_TABLET is not set | ||
1091 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
1092 | # CONFIG_INPUT_MISC is not set | ||
1093 | |||
1094 | # | ||
1095 | # Hardware I/O ports | ||
1096 | # | ||
1097 | CONFIG_SERIO=y | ||
1098 | CONFIG_SERIO_I8042=y | ||
1099 | CONFIG_SERIO_SERPORT=y | ||
1100 | # CONFIG_SERIO_CT82C710 is not set | ||
1101 | # CONFIG_SERIO_PCIPS2 is not set | ||
1102 | CONFIG_SERIO_LIBPS2=y | ||
1103 | CONFIG_SERIO_RAW=y | ||
1104 | # CONFIG_GAMEPORT is not set | ||
1105 | |||
1106 | # | ||
1107 | # Character devices | ||
1108 | # | ||
1109 | CONFIG_VT=y | ||
1110 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
1111 | CONFIG_VT_CONSOLE=y | ||
1112 | CONFIG_HW_CONSOLE=y | ||
1113 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
1114 | # CONFIG_DEVKMEM is not set | ||
1115 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
1116 | # CONFIG_NOZOMI is not set | ||
1117 | |||
1118 | # | ||
1119 | # Serial drivers | ||
1120 | # | ||
1121 | CONFIG_SERIAL_8250=y | ||
1122 | CONFIG_SERIAL_8250_CONSOLE=y | ||
1123 | CONFIG_FIX_EARLYCON_MEM=y | ||
1124 | CONFIG_SERIAL_8250_PCI=y | ||
1125 | CONFIG_SERIAL_8250_PNP=y | ||
1126 | CONFIG_SERIAL_8250_NR_UARTS=32 | ||
1127 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
1128 | CONFIG_SERIAL_8250_EXTENDED=y | ||
1129 | # CONFIG_SERIAL_8250_MANY_PORTS is not set | ||
1130 | # CONFIG_SERIAL_8250_SHARE_IRQ is not set | ||
1131 | # CONFIG_SERIAL_8250_DETECT_IRQ is not set | ||
1132 | # CONFIG_SERIAL_8250_RSA is not set | ||
1133 | |||
1134 | # | ||
1135 | # Non-8250 serial port support | ||
1136 | # | ||
1137 | CONFIG_SERIAL_CORE=y | ||
1138 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
1139 | # CONFIG_SERIAL_JSM is not set | ||
1140 | CONFIG_UNIX98_PTYS=y | ||
1141 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
1142 | # CONFIG_LEGACY_PTYS is not set | ||
1143 | # CONFIG_IPMI_HANDLER is not set | ||
1144 | CONFIG_HW_RANDOM=y | ||
1145 | # CONFIG_HW_RANDOM_TIMERIOMEM is not set | ||
1146 | CONFIG_HW_RANDOM_INTEL=y | ||
1147 | CONFIG_HW_RANDOM_AMD=y | ||
1148 | CONFIG_HW_RANDOM_GEODE=y | ||
1149 | # CONFIG_HW_RANDOM_VIA is not set | ||
1150 | # CONFIG_NVRAM is not set | ||
1151 | CONFIG_RTC=y | ||
1152 | # CONFIG_DTLK is not set | ||
1153 | # CONFIG_R3964 is not set | ||
1154 | # CONFIG_APPLICOM is not set | ||
1155 | # CONFIG_SONYPI is not set | ||
1156 | # CONFIG_MWAVE is not set | ||
1157 | # CONFIG_PC8736x_GPIO is not set | ||
1158 | # CONFIG_NSC_GPIO is not set | ||
1159 | # CONFIG_CS5535_GPIO is not set | ||
1160 | # CONFIG_RAW_DRIVER is not set | ||
1161 | CONFIG_HPET=y | ||
1162 | CONFIG_HPET_MMAP=y | ||
1163 | CONFIG_HANGCHECK_TIMER=y | ||
1164 | # CONFIG_TCG_TPM is not set | ||
1165 | # CONFIG_TELCLOCK is not set | ||
1166 | CONFIG_DEVPORT=y | ||
1167 | CONFIG_I2C=y | ||
1168 | CONFIG_I2C_BOARDINFO=y | ||
1169 | CONFIG_I2C_COMPAT=y | ||
1170 | CONFIG_I2C_CHARDEV=y | ||
1171 | CONFIG_I2C_HELPER_AUTO=y | ||
1172 | |||
1173 | # | ||
1174 | # I2C Hardware Bus support | ||
1175 | # | ||
1176 | |||
1177 | # | ||
1178 | # PC SMBus host controller drivers | ||
1179 | # | ||
1180 | # CONFIG_I2C_ALI1535 is not set | ||
1181 | # CONFIG_I2C_ALI1563 is not set | ||
1182 | # CONFIG_I2C_ALI15X3 is not set | ||
1183 | # CONFIG_I2C_AMD756 is not set | ||
1184 | # CONFIG_I2C_AMD8111 is not set | ||
1185 | CONFIG_I2C_I801=y | ||
1186 | CONFIG_I2C_ISCH=y | ||
1187 | # CONFIG_I2C_PIIX4 is not set | ||
1188 | # CONFIG_I2C_NFORCE2 is not set | ||
1189 | # CONFIG_I2C_SIS5595 is not set | ||
1190 | # CONFIG_I2C_SIS630 is not set | ||
1191 | # CONFIG_I2C_SIS96X is not set | ||
1192 | # CONFIG_I2C_VIA is not set | ||
1193 | # CONFIG_I2C_VIAPRO is not set | ||
1194 | |||
1195 | # | ||
1196 | # ACPI drivers | ||
1197 | # | ||
1198 | # CONFIG_I2C_SCMI is not set | ||
1199 | |||
1200 | # | ||
1201 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
1202 | # | ||
1203 | # CONFIG_I2C_OCORES is not set | ||
1204 | # CONFIG_I2C_SIMTEC is not set | ||
1205 | |||
1206 | # | ||
1207 | # External I2C/SMBus adapter drivers | ||
1208 | # | ||
1209 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1210 | # CONFIG_I2C_TAOS_EVM is not set | ||
1211 | # CONFIG_I2C_TINY_USB is not set | ||
1212 | |||
1213 | # | ||
1214 | # Graphics adapter I2C/DDC channel drivers | ||
1215 | # | ||
1216 | # CONFIG_I2C_VOODOO3 is not set | ||
1217 | |||
1218 | # | ||
1219 | # Other I2C/SMBus bus drivers | ||
1220 | # | ||
1221 | # CONFIG_I2C_PCA_ISA is not set | ||
1222 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
1223 | # CONFIG_I2C_STUB is not set | ||
1224 | # CONFIG_SCx200_ACB is not set | ||
1225 | |||
1226 | # | ||
1227 | # Miscellaneous I2C Chip support | ||
1228 | # | ||
1229 | # CONFIG_DS1682 is not set | ||
1230 | # CONFIG_SENSORS_TSL2550 is not set | ||
1231 | # CONFIG_I2C_DEBUG_CORE is not set | ||
1232 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
1233 | # CONFIG_I2C_DEBUG_BUS is not set | ||
1234 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
1235 | # CONFIG_SPI is not set | ||
1236 | |||
1237 | # | ||
1238 | # PPS support | ||
1239 | # | ||
1240 | # CONFIG_PPS is not set | ||
1241 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
1242 | # CONFIG_GPIOLIB is not set | ||
1243 | # CONFIG_W1 is not set | ||
1244 | CONFIG_POWER_SUPPLY=y | ||
1245 | # CONFIG_POWER_SUPPLY_DEBUG is not set | ||
1246 | # CONFIG_PDA_POWER is not set | ||
1247 | # CONFIG_BATTERY_DS2760 is not set | ||
1248 | # CONFIG_BATTERY_DS2782 is not set | ||
1249 | # CONFIG_BATTERY_BQ27x00 is not set | ||
1250 | # CONFIG_BATTERY_MAX17040 is not set | ||
1251 | # CONFIG_HWMON is not set | ||
1252 | CONFIG_THERMAL=y | ||
1253 | # CONFIG_WATCHDOG is not set | ||
1254 | CONFIG_SSB_POSSIBLE=y | ||
1255 | |||
1256 | # | ||
1257 | # Sonics Silicon Backplane | ||
1258 | # | ||
1259 | # CONFIG_SSB is not set | ||
1260 | |||
1261 | # | ||
1262 | # Multifunction device drivers | ||
1263 | # | ||
1264 | # CONFIG_MFD_CORE is not set | ||
1265 | # CONFIG_MFD_SM501 is not set | ||
1266 | # CONFIG_HTC_PASIC3 is not set | ||
1267 | # CONFIG_TWL4030_CORE is not set | ||
1268 | # CONFIG_MFD_TMIO is not set | ||
1269 | # CONFIG_PMIC_DA903X is not set | ||
1270 | # CONFIG_MFD_WM8400 is not set | ||
1271 | # CONFIG_MFD_WM831X is not set | ||
1272 | # CONFIG_MFD_WM8350_I2C is not set | ||
1273 | # CONFIG_MFD_PCF50633 is not set | ||
1274 | # CONFIG_AB3100_CORE is not set | ||
1275 | # CONFIG_REGULATOR is not set | ||
1276 | # CONFIG_MEDIA_SUPPORT is not set | ||
1277 | |||
1278 | # | ||
1279 | # Graphics support | ||
1280 | # | ||
1281 | # CONFIG_AGP is not set | ||
1282 | CONFIG_VGA_ARB=y | ||
1283 | # CONFIG_DRM is not set | ||
1284 | CONFIG_VGASTATE=y | ||
1285 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
1286 | CONFIG_FB=y | ||
1287 | # CONFIG_FIRMWARE_EDID is not set | ||
1288 | # CONFIG_FB_DDC is not set | ||
1289 | CONFIG_FB_BOOT_VESA_SUPPORT=y | ||
1290 | CONFIG_FB_CFB_FILLRECT=y | ||
1291 | CONFIG_FB_CFB_COPYAREA=y | ||
1292 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
1293 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
1294 | # CONFIG_FB_SYS_FILLRECT is not set | ||
1295 | # CONFIG_FB_SYS_COPYAREA is not set | ||
1296 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
1297 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
1298 | # CONFIG_FB_SYS_FOPS is not set | ||
1299 | # CONFIG_FB_SVGALIB is not set | ||
1300 | # CONFIG_FB_MACMODES is not set | ||
1301 | # CONFIG_FB_BACKLIGHT is not set | ||
1302 | # CONFIG_FB_MODE_HELPERS is not set | ||
1303 | # CONFIG_FB_TILEBLITTING is not set | ||
1304 | |||
1305 | # | ||
1306 | # Frame buffer hardware drivers | ||
1307 | # | ||
1308 | # CONFIG_FB_CIRRUS is not set | ||
1309 | # CONFIG_FB_PM2 is not set | ||
1310 | # CONFIG_FB_CYBER2000 is not set | ||
1311 | # CONFIG_FB_ARC is not set | ||
1312 | # CONFIG_FB_ASILIANT is not set | ||
1313 | # CONFIG_FB_IMSTT is not set | ||
1314 | CONFIG_FB_VGA16=y | ||
1315 | CONFIG_FB_VESA=y | ||
1316 | # CONFIG_FB_N411 is not set | ||
1317 | # CONFIG_FB_HGA is not set | ||
1318 | # CONFIG_FB_S1D13XXX is not set | ||
1319 | # CONFIG_FB_NVIDIA is not set | ||
1320 | # CONFIG_FB_RIVA is not set | ||
1321 | # CONFIG_FB_LE80578 is not set | ||
1322 | # CONFIG_FB_MATROX is not set | ||
1323 | # CONFIG_FB_RADEON is not set | ||
1324 | # CONFIG_FB_ATY128 is not set | ||
1325 | # CONFIG_FB_ATY is not set | ||
1326 | # CONFIG_FB_S3 is not set | ||
1327 | # CONFIG_FB_SAVAGE is not set | ||
1328 | # CONFIG_FB_SIS is not set | ||
1329 | # CONFIG_FB_VIA is not set | ||
1330 | # CONFIG_FB_NEOMAGIC is not set | ||
1331 | # CONFIG_FB_KYRO is not set | ||
1332 | # CONFIG_FB_3DFX is not set | ||
1333 | # CONFIG_FB_VOODOO1 is not set | ||
1334 | # CONFIG_FB_VT8623 is not set | ||
1335 | # CONFIG_FB_TRIDENT is not set | ||
1336 | # CONFIG_FB_ARK is not set | ||
1337 | # CONFIG_FB_PM3 is not set | ||
1338 | # CONFIG_FB_CARMINE is not set | ||
1339 | # CONFIG_FB_GEODE is not set | ||
1340 | # CONFIG_FB_VIRTUAL is not set | ||
1341 | # CONFIG_FB_METRONOME is not set | ||
1342 | # CONFIG_FB_MB862XX is not set | ||
1343 | # CONFIG_FB_BROADSHEET is not set | ||
1344 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
1345 | |||
1346 | # | ||
1347 | # Display device support | ||
1348 | # | ||
1349 | # CONFIG_DISPLAY_SUPPORT is not set | ||
1350 | |||
1351 | # | ||
1352 | # Console display driver support | ||
1353 | # | ||
1354 | CONFIG_VGA_CONSOLE=y | ||
1355 | CONFIG_VGACON_SOFT_SCROLLBACK=y | ||
1356 | CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256 | ||
1357 | # CONFIG_MDA_CONSOLE is not set | ||
1358 | CONFIG_DUMMY_CONSOLE=y | ||
1359 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1360 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
1361 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1362 | # CONFIG_FONTS is not set | ||
1363 | CONFIG_FONT_8x8=y | ||
1364 | CONFIG_FONT_8x16=y | ||
1365 | CONFIG_LOGO=y | ||
1366 | CONFIG_LOGO_LINUX_MONO=y | ||
1367 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
1368 | # CONFIG_LOGO_LINUX_CLUT224 is not set | ||
1369 | # CONFIG_SOUND is not set | ||
1370 | CONFIG_HID_SUPPORT=y | ||
1371 | CONFIG_HID=y | ||
1372 | CONFIG_HIDRAW=y | ||
1373 | |||
1374 | # | ||
1375 | # USB Input Devices | ||
1376 | # | ||
1377 | CONFIG_USB_HID=y | ||
1378 | CONFIG_HID_PID=y | ||
1379 | CONFIG_USB_HIDDEV=y | ||
1380 | |||
1381 | # | ||
1382 | # Special HID drivers | ||
1383 | # | ||
1384 | CONFIG_HID_A4TECH=y | ||
1385 | CONFIG_HID_APPLE=y | ||
1386 | CONFIG_HID_BELKIN=y | ||
1387 | CONFIG_HID_CHERRY=y | ||
1388 | CONFIG_HID_CHICONY=y | ||
1389 | CONFIG_HID_CYPRESS=y | ||
1390 | CONFIG_HID_DRAGONRISE=y | ||
1391 | # CONFIG_DRAGONRISE_FF is not set | ||
1392 | CONFIG_HID_EZKEY=y | ||
1393 | CONFIG_HID_KYE=y | ||
1394 | CONFIG_HID_GYRATION=y | ||
1395 | CONFIG_HID_TWINHAN=y | ||
1396 | CONFIG_HID_KENSINGTON=y | ||
1397 | CONFIG_HID_LOGITECH=y | ||
1398 | # CONFIG_LOGITECH_FF is not set | ||
1399 | CONFIG_LOGIRUMBLEPAD2_FF=y | ||
1400 | CONFIG_HID_MICROSOFT=y | ||
1401 | CONFIG_HID_MONTEREY=y | ||
1402 | CONFIG_HID_NTRIG=y | ||
1403 | CONFIG_HID_PANTHERLORD=y | ||
1404 | # CONFIG_PANTHERLORD_FF is not set | ||
1405 | CONFIG_HID_PETALYNX=y | ||
1406 | CONFIG_HID_SAMSUNG=y | ||
1407 | CONFIG_HID_SONY=y | ||
1408 | CONFIG_HID_SUNPLUS=y | ||
1409 | CONFIG_HID_GREENASIA=y | ||
1410 | # CONFIG_GREENASIA_FF is not set | ||
1411 | CONFIG_HID_SMARTJOYPLUS=y | ||
1412 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
1413 | CONFIG_HID_TOPSEED=y | ||
1414 | CONFIG_HID_THRUSTMASTER=y | ||
1415 | # CONFIG_THRUSTMASTER_FF is not set | ||
1416 | CONFIG_HID_ZEROPLUS=y | ||
1417 | # CONFIG_ZEROPLUS_FF is not set | ||
1418 | CONFIG_USB_SUPPORT=y | ||
1419 | CONFIG_USB_ARCH_HAS_HCD=y | ||
1420 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
1421 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
1422 | CONFIG_USB=y | ||
1423 | # CONFIG_USB_DEBUG is not set | ||
1424 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | ||
1425 | |||
1426 | # | ||
1427 | # Miscellaneous USB options | ||
1428 | # | ||
1429 | # CONFIG_USB_DEVICEFS is not set | ||
1430 | # CONFIG_USB_DEVICE_CLASS is not set | ||
1431 | # CONFIG_USB_DYNAMIC_MINORS is not set | ||
1432 | # CONFIG_USB_SUSPEND is not set | ||
1433 | # CONFIG_USB_OTG is not set | ||
1434 | CONFIG_USB_MON=y | ||
1435 | # CONFIG_USB_WUSB is not set | ||
1436 | # CONFIG_USB_WUSB_CBAF is not set | ||
1437 | |||
1438 | # | ||
1439 | # USB Host Controller Drivers | ||
1440 | # | ||
1441 | # CONFIG_USB_C67X00_HCD is not set | ||
1442 | # CONFIG_USB_XHCI_HCD is not set | ||
1443 | CONFIG_USB_EHCI_HCD=y | ||
1444 | CONFIG_USB_EHCI_ROOT_HUB_TT=y | ||
1445 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||
1446 | # CONFIG_USB_OXU210HP_HCD is not set | ||
1447 | # CONFIG_USB_ISP116X_HCD is not set | ||
1448 | # CONFIG_USB_ISP1760_HCD is not set | ||
1449 | # CONFIG_USB_ISP1362_HCD is not set | ||
1450 | CONFIG_USB_OHCI_HCD=y | ||
1451 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | ||
1452 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | ||
1453 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
1454 | CONFIG_USB_UHCI_HCD=y | ||
1455 | # CONFIG_USB_SL811_HCD is not set | ||
1456 | # CONFIG_USB_R8A66597_HCD is not set | ||
1457 | # CONFIG_USB_WHCI_HCD is not set | ||
1458 | # CONFIG_USB_HWA_HCD is not set | ||
1459 | |||
1460 | # | ||
1461 | # USB Device Class drivers | ||
1462 | # | ||
1463 | # CONFIG_USB_ACM is not set | ||
1464 | # CONFIG_USB_PRINTER is not set | ||
1465 | # CONFIG_USB_WDM is not set | ||
1466 | # CONFIG_USB_TMC is not set | ||
1467 | |||
1468 | # | ||
1469 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may | ||
1470 | # | ||
1471 | |||
1472 | # | ||
1473 | # also be needed; see USB_STORAGE Help for more info | ||
1474 | # | ||
1475 | CONFIG_USB_STORAGE=y | ||
1476 | # CONFIG_USB_STORAGE_DEBUG is not set | ||
1477 | CONFIG_USB_STORAGE_DATAFAB=y | ||
1478 | CONFIG_USB_STORAGE_FREECOM=y | ||
1479 | CONFIG_USB_STORAGE_ISD200=y | ||
1480 | CONFIG_USB_STORAGE_USBAT=y | ||
1481 | CONFIG_USB_STORAGE_SDDR09=y | ||
1482 | CONFIG_USB_STORAGE_SDDR55=y | ||
1483 | CONFIG_USB_STORAGE_JUMPSHOT=y | ||
1484 | CONFIG_USB_STORAGE_ALAUDA=y | ||
1485 | CONFIG_USB_STORAGE_ONETOUCH=y | ||
1486 | CONFIG_USB_STORAGE_KARMA=y | ||
1487 | CONFIG_USB_STORAGE_CYPRESS_ATACB=y | ||
1488 | # CONFIG_USB_LIBUSUAL is not set | ||
1489 | |||
1490 | # | ||
1491 | # USB Imaging devices | ||
1492 | # | ||
1493 | # CONFIG_USB_MDC800 is not set | ||
1494 | # CONFIG_USB_MICROTEK is not set | ||
1495 | |||
1496 | # | ||
1497 | # USB port drivers | ||
1498 | # | ||
1499 | # CONFIG_USB_SERIAL is not set | ||
1500 | |||
1501 | # | ||
1502 | # USB Miscellaneous drivers | ||
1503 | # | ||
1504 | # CONFIG_USB_EMI62 is not set | ||
1505 | # CONFIG_USB_EMI26 is not set | ||
1506 | # CONFIG_USB_ADUTUX is not set | ||
1507 | # CONFIG_USB_SEVSEG is not set | ||
1508 | # CONFIG_USB_RIO500 is not set | ||
1509 | # CONFIG_USB_LEGOTOWER is not set | ||
1510 | # CONFIG_USB_LCD is not set | ||
1511 | # CONFIG_USB_BERRY_CHARGE is not set | ||
1512 | # CONFIG_USB_LED is not set | ||
1513 | # CONFIG_USB_CYPRESS_CY7C63 is not set | ||
1514 | # CONFIG_USB_CYTHERM is not set | ||
1515 | # CONFIG_USB_IDMOUSE is not set | ||
1516 | # CONFIG_USB_FTDI_ELAN is not set | ||
1517 | # CONFIG_USB_APPLEDISPLAY is not set | ||
1518 | # CONFIG_USB_SISUSBVGA is not set | ||
1519 | # CONFIG_USB_LD is not set | ||
1520 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1521 | # CONFIG_USB_IOWARRIOR is not set | ||
1522 | # CONFIG_USB_TEST is not set | ||
1523 | # CONFIG_USB_ISIGHTFW is not set | ||
1524 | # CONFIG_USB_VST is not set | ||
1525 | # CONFIG_USB_GADGET is not set | ||
1526 | |||
1527 | # | ||
1528 | # OTG and related infrastructure | ||
1529 | # | ||
1530 | # CONFIG_NOP_USB_XCEIV is not set | ||
1531 | # CONFIG_UWB is not set | ||
1532 | # CONFIG_MMC is not set | ||
1533 | # CONFIG_MEMSTICK is not set | ||
1534 | # CONFIG_NEW_LEDS is not set | ||
1535 | # CONFIG_ACCESSIBILITY is not set | ||
1536 | # CONFIG_INFINIBAND is not set | ||
1537 | # CONFIG_EDAC is not set | ||
1538 | # CONFIG_RTC_CLASS is not set | ||
1539 | # CONFIG_DMADEVICES is not set | ||
1540 | # CONFIG_AUXDISPLAY is not set | ||
1541 | # CONFIG_UIO is not set | ||
1542 | |||
1543 | # | ||
1544 | # TI VLYNQ | ||
1545 | # | ||
1546 | # CONFIG_STAGING is not set | ||
1547 | # CONFIG_X86_PLATFORM_DEVICES is not set | ||
1548 | |||
1549 | # | ||
1550 | # Firmware Drivers | ||
1551 | # | ||
1552 | CONFIG_EDD=y | ||
1553 | # CONFIG_EDD_OFF is not set | ||
1554 | CONFIG_FIRMWARE_MEMMAP=y | ||
1555 | # CONFIG_DELL_RBU is not set | ||
1556 | # CONFIG_DCDBAS is not set | ||
1557 | # CONFIG_DMIID is not set | ||
1558 | # CONFIG_ISCSI_IBFT_FIND is not set | ||
1559 | |||
1560 | # | ||
1561 | # File systems | ||
1562 | # | ||
1563 | CONFIG_EXT2_FS=y | ||
1564 | CONFIG_EXT2_FS_XATTR=y | ||
1565 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
1566 | CONFIG_EXT2_FS_SECURITY=y | ||
1567 | # CONFIG_EXT2_FS_XIP is not set | ||
1568 | CONFIG_EXT3_FS=y | ||
1569 | CONFIG_EXT3_DEFAULTS_TO_ORDERED=y | ||
1570 | CONFIG_EXT3_FS_XATTR=y | ||
1571 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
1572 | CONFIG_EXT3_FS_SECURITY=y | ||
1573 | # CONFIG_EXT4_FS is not set | ||
1574 | CONFIG_JBD=y | ||
1575 | # CONFIG_JBD_DEBUG is not set | ||
1576 | CONFIG_FS_MBCACHE=y | ||
1577 | # CONFIG_REISERFS_FS is not set | ||
1578 | # CONFIG_JFS_FS is not set | ||
1579 | CONFIG_FS_POSIX_ACL=y | ||
1580 | # CONFIG_XFS_FS is not set | ||
1581 | # CONFIG_OCFS2_FS is not set | ||
1582 | # CONFIG_BTRFS_FS is not set | ||
1583 | # CONFIG_NILFS2_FS is not set | ||
1584 | CONFIG_FILE_LOCKING=y | ||
1585 | CONFIG_FSNOTIFY=y | ||
1586 | CONFIG_DNOTIFY=y | ||
1587 | # CONFIG_INOTIFY is not set | ||
1588 | CONFIG_INOTIFY_USER=y | ||
1589 | # CONFIG_QUOTA is not set | ||
1590 | # CONFIG_AUTOFS_FS is not set | ||
1591 | # CONFIG_AUTOFS4_FS is not set | ||
1592 | # CONFIG_FUSE_FS is not set | ||
1593 | |||
1594 | # | ||
1595 | # Caches | ||
1596 | # | ||
1597 | CONFIG_FSCACHE=y | ||
1598 | CONFIG_FSCACHE_STATS=y | ||
1599 | # CONFIG_FSCACHE_HISTOGRAM is not set | ||
1600 | # CONFIG_FSCACHE_DEBUG is not set | ||
1601 | # CONFIG_FSCACHE_OBJECT_LIST is not set | ||
1602 | CONFIG_CACHEFILES=y | ||
1603 | # CONFIG_CACHEFILES_DEBUG is not set | ||
1604 | # CONFIG_CACHEFILES_HISTOGRAM is not set | ||
1605 | |||
1606 | # | ||
1607 | # CD-ROM/DVD Filesystems | ||
1608 | # | ||
1609 | CONFIG_ISO9660_FS=y | ||
1610 | CONFIG_JOLIET=y | ||
1611 | CONFIG_ZISOFS=y | ||
1612 | CONFIG_UDF_FS=y | ||
1613 | CONFIG_UDF_NLS=y | ||
1614 | |||
1615 | # | ||
1616 | # DOS/FAT/NT Filesystems | ||
1617 | # | ||
1618 | CONFIG_FAT_FS=y | ||
1619 | CONFIG_MSDOS_FS=y | ||
1620 | CONFIG_VFAT_FS=y | ||
1621 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1622 | CONFIG_FAT_DEFAULT_IOCHARSET="utf8" | ||
1623 | CONFIG_NTFS_FS=y | ||
1624 | # CONFIG_NTFS_DEBUG is not set | ||
1625 | CONFIG_NTFS_RW=y | ||
1626 | |||
1627 | # | ||
1628 | # Pseudo filesystems | ||
1629 | # | ||
1630 | CONFIG_PROC_FS=y | ||
1631 | CONFIG_PROC_KCORE=y | ||
1632 | CONFIG_PROC_SYSCTL=y | ||
1633 | CONFIG_PROC_PAGE_MONITOR=y | ||
1634 | CONFIG_SYSFS=y | ||
1635 | CONFIG_TMPFS=y | ||
1636 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1637 | # CONFIG_HUGETLBFS is not set | ||
1638 | # CONFIG_HUGETLB_PAGE is not set | ||
1639 | CONFIG_CONFIGFS_FS=y | ||
1640 | # CONFIG_MISC_FILESYSTEMS is not set | ||
1641 | # CONFIG_NETWORK_FILESYSTEMS is not set | ||
1642 | |||
1643 | # | ||
1644 | # Partition Types | ||
1645 | # | ||
1646 | CONFIG_PARTITION_ADVANCED=y | ||
1647 | # CONFIG_ACORN_PARTITION is not set | ||
1648 | # CONFIG_OSF_PARTITION is not set | ||
1649 | # CONFIG_AMIGA_PARTITION is not set | ||
1650 | # CONFIG_ATARI_PARTITION is not set | ||
1651 | # CONFIG_MAC_PARTITION is not set | ||
1652 | CONFIG_MSDOS_PARTITION=y | ||
1653 | # CONFIG_BSD_DISKLABEL is not set | ||
1654 | # CONFIG_MINIX_SUBPARTITION is not set | ||
1655 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
1656 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
1657 | CONFIG_LDM_PARTITION=y | ||
1658 | CONFIG_LDM_DEBUG=y | ||
1659 | # CONFIG_SGI_PARTITION is not set | ||
1660 | # CONFIG_ULTRIX_PARTITION is not set | ||
1661 | # CONFIG_SUN_PARTITION is not set | ||
1662 | # CONFIG_KARMA_PARTITION is not set | ||
1663 | # CONFIG_EFI_PARTITION is not set | ||
1664 | # CONFIG_SYSV68_PARTITION is not set | ||
1665 | CONFIG_NLS=y | ||
1666 | CONFIG_NLS_DEFAULT="utf8" | ||
1667 | CONFIG_NLS_CODEPAGE_437=y | ||
1668 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1669 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1670 | CONFIG_NLS_CODEPAGE_850=y | ||
1671 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1672 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1673 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1674 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1675 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1676 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1677 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1678 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1679 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1680 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1681 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1682 | CONFIG_NLS_CODEPAGE_936=y | ||
1683 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1684 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1685 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1686 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1687 | # CONFIG_NLS_ISO8859_8 is not set | ||
1688 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1689 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1690 | CONFIG_NLS_ASCII=y | ||
1691 | CONFIG_NLS_ISO8859_1=y | ||
1692 | # CONFIG_NLS_ISO8859_2 is not set | ||
1693 | # CONFIG_NLS_ISO8859_3 is not set | ||
1694 | # CONFIG_NLS_ISO8859_4 is not set | ||
1695 | # CONFIG_NLS_ISO8859_5 is not set | ||
1696 | # CONFIG_NLS_ISO8859_6 is not set | ||
1697 | # CONFIG_NLS_ISO8859_7 is not set | ||
1698 | # CONFIG_NLS_ISO8859_9 is not set | ||
1699 | # CONFIG_NLS_ISO8859_13 is not set | ||
1700 | # CONFIG_NLS_ISO8859_14 is not set | ||
1701 | CONFIG_NLS_ISO8859_15=y | ||
1702 | # CONFIG_NLS_KOI8_R is not set | ||
1703 | # CONFIG_NLS_KOI8_U is not set | ||
1704 | CONFIG_NLS_UTF8=y | ||
1705 | # CONFIG_DLM is not set | ||
1706 | |||
1707 | # | ||
1708 | # Kernel hacking | ||
1709 | # | ||
1710 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
1711 | CONFIG_PRINTK_TIME=y | ||
1712 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
1713 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
1714 | CONFIG_FRAME_WARN=0 | ||
1715 | CONFIG_MAGIC_SYSRQ=y | ||
1716 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1717 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1718 | CONFIG_DEBUG_FS=y | ||
1719 | # CONFIG_HEADERS_CHECK is not set | ||
1720 | # CONFIG_DEBUG_KERNEL is not set | ||
1721 | # CONFIG_SLUB_DEBUG_ON is not set | ||
1722 | # CONFIG_SLUB_STATS is not set | ||
1723 | CONFIG_STACKTRACE=y | ||
1724 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1725 | CONFIG_DEBUG_MEMORY_INIT=y | ||
1726 | CONFIG_ARCH_WANT_FRAME_POINTERS=y | ||
1727 | CONFIG_FRAME_POINTER=y | ||
1728 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1729 | # CONFIG_LATENCYTOP is not set | ||
1730 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1731 | CONFIG_USER_STACKTRACE_SUPPORT=y | ||
1732 | CONFIG_NOP_TRACER=y | ||
1733 | CONFIG_HAVE_FTRACE_NMI_ENTER=y | ||
1734 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
1735 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | ||
1736 | CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y | ||
1737 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | ||
1738 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
1739 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | ||
1740 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y | ||
1741 | CONFIG_RING_BUFFER=y | ||
1742 | CONFIG_FTRACE_NMI_ENTER=y | ||
1743 | CONFIG_EVENT_TRACING=y | ||
1744 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1745 | CONFIG_TRACING=y | ||
1746 | CONFIG_GENERIC_TRACER=y | ||
1747 | CONFIG_TRACING_SUPPORT=y | ||
1748 | CONFIG_FTRACE=y | ||
1749 | CONFIG_FUNCTION_TRACER=y | ||
1750 | # CONFIG_FUNCTION_GRAPH_TRACER is not set | ||
1751 | # CONFIG_IRQSOFF_TRACER is not set | ||
1752 | # CONFIG_PREEMPT_TRACER is not set | ||
1753 | # CONFIG_SYSPROF_TRACER is not set | ||
1754 | # CONFIG_SCHED_TRACER is not set | ||
1755 | # CONFIG_FTRACE_SYSCALLS is not set | ||
1756 | # CONFIG_BOOT_TRACER is not set | ||
1757 | CONFIG_BRANCH_PROFILE_NONE=y | ||
1758 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
1759 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
1760 | # CONFIG_POWER_TRACER is not set | ||
1761 | # CONFIG_STACK_TRACER is not set | ||
1762 | # CONFIG_KMEMTRACE is not set | ||
1763 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1764 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1765 | CONFIG_DYNAMIC_FTRACE=y | ||
1766 | # CONFIG_FUNCTION_PROFILER is not set | ||
1767 | CONFIG_FTRACE_MCOUNT_RECORD=y | ||
1768 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1769 | # CONFIG_MMIOTRACE is not set | ||
1770 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
1771 | # CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set | ||
1772 | # CONFIG_DYNAMIC_DEBUG is not set | ||
1773 | # CONFIG_DMA_API_DEBUG is not set | ||
1774 | # CONFIG_SAMPLES is not set | ||
1775 | CONFIG_HAVE_ARCH_KGDB=y | ||
1776 | CONFIG_HAVE_ARCH_KMEMCHECK=y | ||
1777 | # CONFIG_STRICT_DEVMEM is not set | ||
1778 | CONFIG_X86_VERBOSE_BOOTUP=y | ||
1779 | CONFIG_EARLY_PRINTK=y | ||
1780 | # CONFIG_EARLY_PRINTK_DBGP is not set | ||
1781 | # CONFIG_4KSTACKS is not set | ||
1782 | CONFIG_DOUBLEFAULT=y | ||
1783 | # CONFIG_IOMMU_STRESS is not set | ||
1784 | CONFIG_HAVE_MMIOTRACE_SUPPORT=y | ||
1785 | CONFIG_IO_DELAY_TYPE_0X80=0 | ||
1786 | CONFIG_IO_DELAY_TYPE_0XED=1 | ||
1787 | CONFIG_IO_DELAY_TYPE_UDELAY=2 | ||
1788 | CONFIG_IO_DELAY_TYPE_NONE=3 | ||
1789 | CONFIG_IO_DELAY_0X80=y | ||
1790 | # CONFIG_IO_DELAY_0XED is not set | ||
1791 | # CONFIG_IO_DELAY_UDELAY is not set | ||
1792 | # CONFIG_IO_DELAY_NONE is not set | ||
1793 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 | ||
1794 | # CONFIG_OPTIMIZE_INLINING is not set | ||
1795 | |||
1796 | # | ||
1797 | # Security options | ||
1798 | # | ||
1799 | # CONFIG_KEYS is not set | ||
1800 | # CONFIG_SECURITY is not set | ||
1801 | # CONFIG_SECURITYFS is not set | ||
1802 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1803 | # CONFIG_IMA is not set | ||
1804 | CONFIG_CRYPTO=y | ||
1805 | |||
1806 | # | ||
1807 | # Crypto core or helper | ||
1808 | # | ||
1809 | CONFIG_CRYPTO_FIPS=y | ||
1810 | CONFIG_CRYPTO_ALGAPI=y | ||
1811 | CONFIG_CRYPTO_ALGAPI2=y | ||
1812 | CONFIG_CRYPTO_AEAD=y | ||
1813 | CONFIG_CRYPTO_AEAD2=y | ||
1814 | CONFIG_CRYPTO_BLKCIPHER=y | ||
1815 | CONFIG_CRYPTO_BLKCIPHER2=y | ||
1816 | CONFIG_CRYPTO_HASH=y | ||
1817 | CONFIG_CRYPTO_HASH2=y | ||
1818 | CONFIG_CRYPTO_RNG=y | ||
1819 | CONFIG_CRYPTO_RNG2=y | ||
1820 | CONFIG_CRYPTO_PCOMP=y | ||
1821 | CONFIG_CRYPTO_MANAGER=y | ||
1822 | CONFIG_CRYPTO_MANAGER2=y | ||
1823 | CONFIG_CRYPTO_GF128MUL=y | ||
1824 | CONFIG_CRYPTO_NULL=y | ||
1825 | CONFIG_CRYPTO_WORKQUEUE=y | ||
1826 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1827 | CONFIG_CRYPTO_AUTHENC=y | ||
1828 | # CONFIG_CRYPTO_TEST is not set | ||
1829 | |||
1830 | # | ||
1831 | # Authenticated Encryption with Associated Data | ||
1832 | # | ||
1833 | CONFIG_CRYPTO_CCM=y | ||
1834 | CONFIG_CRYPTO_GCM=y | ||
1835 | CONFIG_CRYPTO_SEQIV=y | ||
1836 | |||
1837 | # | ||
1838 | # Block modes | ||
1839 | # | ||
1840 | CONFIG_CRYPTO_CBC=y | ||
1841 | CONFIG_CRYPTO_CTR=y | ||
1842 | CONFIG_CRYPTO_CTS=y | ||
1843 | CONFIG_CRYPTO_ECB=y | ||
1844 | # CONFIG_CRYPTO_LRW is not set | ||
1845 | CONFIG_CRYPTO_PCBC=y | ||
1846 | # CONFIG_CRYPTO_XTS is not set | ||
1847 | |||
1848 | # | ||
1849 | # Hash modes | ||
1850 | # | ||
1851 | CONFIG_CRYPTO_HMAC=y | ||
1852 | CONFIG_CRYPTO_XCBC=y | ||
1853 | # CONFIG_CRYPTO_VMAC is not set | ||
1854 | |||
1855 | # | ||
1856 | # Digest | ||
1857 | # | ||
1858 | CONFIG_CRYPTO_CRC32C=y | ||
1859 | CONFIG_CRYPTO_CRC32C_INTEL=y | ||
1860 | CONFIG_CRYPTO_GHASH=y | ||
1861 | # CONFIG_CRYPTO_MD4 is not set | ||
1862 | CONFIG_CRYPTO_MD5=y | ||
1863 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1864 | CONFIG_CRYPTO_RMD128=y | ||
1865 | CONFIG_CRYPTO_RMD160=y | ||
1866 | CONFIG_CRYPTO_RMD256=y | ||
1867 | CONFIG_CRYPTO_RMD320=y | ||
1868 | CONFIG_CRYPTO_SHA1=y | ||
1869 | CONFIG_CRYPTO_SHA256=y | ||
1870 | CONFIG_CRYPTO_SHA512=y | ||
1871 | CONFIG_CRYPTO_TGR192=y | ||
1872 | CONFIG_CRYPTO_WP512=y | ||
1873 | |||
1874 | # | ||
1875 | # Ciphers | ||
1876 | # | ||
1877 | CONFIG_CRYPTO_AES=y | ||
1878 | # CONFIG_CRYPTO_AES_586 is not set | ||
1879 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1880 | CONFIG_CRYPTO_ARC4=y | ||
1881 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1882 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1883 | CONFIG_CRYPTO_CAST5=y | ||
1884 | CONFIG_CRYPTO_CAST6=y | ||
1885 | CONFIG_CRYPTO_DES=y | ||
1886 | CONFIG_CRYPTO_FCRYPT=y | ||
1887 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1888 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1889 | # CONFIG_CRYPTO_SALSA20_586 is not set | ||
1890 | # CONFIG_CRYPTO_SEED is not set | ||
1891 | CONFIG_CRYPTO_SERPENT=y | ||
1892 | CONFIG_CRYPTO_TEA=y | ||
1893 | CONFIG_CRYPTO_TWOFISH=y | ||
1894 | CONFIG_CRYPTO_TWOFISH_COMMON=y | ||
1895 | # CONFIG_CRYPTO_TWOFISH_586 is not set | ||
1896 | |||
1897 | # | ||
1898 | # Compression | ||
1899 | # | ||
1900 | CONFIG_CRYPTO_DEFLATE=y | ||
1901 | CONFIG_CRYPTO_ZLIB=y | ||
1902 | CONFIG_CRYPTO_LZO=y | ||
1903 | |||
1904 | # | ||
1905 | # Random Number Generation | ||
1906 | # | ||
1907 | CONFIG_CRYPTO_ANSI_CPRNG=y | ||
1908 | # CONFIG_CRYPTO_HW is not set | ||
1909 | CONFIG_HAVE_KVM=y | ||
1910 | # CONFIG_VIRTUALIZATION is not set | ||
1911 | CONFIG_BINARY_PRINTF=y | ||
1912 | |||
1913 | # | ||
1914 | # Library routines | ||
1915 | # | ||
1916 | CONFIG_BITREVERSE=y | ||
1917 | CONFIG_GENERIC_FIND_FIRST_BIT=y | ||
1918 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
1919 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1920 | CONFIG_CRC_CCITT=y | ||
1921 | CONFIG_CRC16=y | ||
1922 | # CONFIG_CRC_T10DIF is not set | ||
1923 | CONFIG_CRC_ITU_T=y | ||
1924 | CONFIG_CRC32=y | ||
1925 | # CONFIG_CRC7 is not set | ||
1926 | CONFIG_LIBCRC32C=y | ||
1927 | CONFIG_ZLIB_INFLATE=y | ||
1928 | CONFIG_ZLIB_DEFLATE=y | ||
1929 | CONFIG_LZO_COMPRESS=y | ||
1930 | CONFIG_LZO_DECOMPRESS=y | ||
1931 | CONFIG_TEXTSEARCH=y | ||
1932 | CONFIG_TEXTSEARCH_KMP=y | ||
1933 | CONFIG_TEXTSEARCH_BM=y | ||
1934 | CONFIG_TEXTSEARCH_FSM=y | ||
1935 | CONFIG_HAS_IOMEM=y | ||
1936 | CONFIG_HAS_IOPORT=y | ||
1937 | CONFIG_HAS_DMA=y | ||
1938 | CONFIG_NLATTR=y | ||
1939 | |||
1940 | # | ||
1941 | # LITMUS^RT | ||
1942 | # | ||
1943 | |||
1944 | # | ||
1945 | # Real-Time Synchronization | ||
1946 | # | ||
1947 | CONFIG_SRP=y | ||
1948 | CONFIG_FMLP=y | ||
1949 | |||
1950 | # | ||
1951 | # Tracing | ||
1952 | # | ||
1953 | CONFIG_FEATHER_TRACE=y | ||
1954 | CONFIG_SCHED_TASK_TRACE=y | ||
1955 | CONFIG_SCHED_OVERHEAD_TRACE=y | ||
1956 | CONFIG_SCHED_DEBUG_TRACE=y | ||
diff --git a/download/2010.1/64bit-config b/download/2010.1/64bit-config new file mode 100644 index 0000000..6f7b78f --- /dev/null +++ b/download/2010.1/64bit-config | |||
@@ -0,0 +1,1879 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.32-litmus2010 | ||
4 | # Sun Jan 17 17:50:16 2010 | ||
5 | # | ||
6 | CONFIG_64BIT=y | ||
7 | # CONFIG_X86_32 is not set | ||
8 | CONFIG_X86_64=y | ||
9 | CONFIG_X86=y | ||
10 | CONFIG_OUTPUT_FORMAT="elf64-x86-64" | ||
11 | CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" | ||
12 | CONFIG_GENERIC_TIME=y | ||
13 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
14 | CONFIG_CLOCKSOURCE_WATCHDOG=y | ||
15 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
16 | CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y | ||
17 | CONFIG_LOCKDEP_SUPPORT=y | ||
18 | CONFIG_STACKTRACE_SUPPORT=y | ||
19 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | ||
20 | CONFIG_MMU=y | ||
21 | CONFIG_ZONE_DMA=y | ||
22 | CONFIG_GENERIC_ISA_DMA=y | ||
23 | CONFIG_GENERIC_IOMAP=y | ||
24 | CONFIG_GENERIC_BUG=y | ||
25 | CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y | ||
26 | CONFIG_GENERIC_HWEIGHT=y | ||
27 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
28 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
29 | # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set | ||
30 | CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y | ||
31 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
32 | CONFIG_GENERIC_TIME_VSYSCALL=y | ||
33 | CONFIG_ARCH_HAS_CPU_RELAX=y | ||
34 | CONFIG_ARCH_HAS_DEFAULT_IDLE=y | ||
35 | CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y | ||
36 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | ||
37 | CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y | ||
38 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
39 | CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y | ||
40 | CONFIG_ARCH_HIBERNATION_POSSIBLE=y | ||
41 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
42 | CONFIG_ZONE_DMA32=y | ||
43 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
44 | CONFIG_AUDIT_ARCH=y | ||
45 | CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y | ||
46 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | ||
47 | CONFIG_GENERIC_HARDIRQS=y | ||
48 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
49 | CONFIG_GENERIC_IRQ_PROBE=y | ||
50 | CONFIG_GENERIC_PENDING_IRQ=y | ||
51 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
52 | CONFIG_X86_64_SMP=y | ||
53 | CONFIG_X86_HT=y | ||
54 | CONFIG_X86_TRAMPOLINE=y | ||
55 | # CONFIG_KTIME_SCALAR is not set | ||
56 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
57 | CONFIG_CONSTRUCTORS=y | ||
58 | |||
59 | # | ||
60 | # General setup | ||
61 | # | ||
62 | CONFIG_EXPERIMENTAL=y | ||
63 | CONFIG_LOCK_KERNEL=y | ||
64 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
65 | CONFIG_LOCALVERSION="" | ||
66 | # CONFIG_LOCALVERSION_AUTO is not set | ||
67 | CONFIG_HAVE_KERNEL_GZIP=y | ||
68 | CONFIG_HAVE_KERNEL_BZIP2=y | ||
69 | CONFIG_HAVE_KERNEL_LZMA=y | ||
70 | CONFIG_KERNEL_GZIP=y | ||
71 | # CONFIG_KERNEL_BZIP2 is not set | ||
72 | # CONFIG_KERNEL_LZMA is not set | ||
73 | CONFIG_SWAP=y | ||
74 | CONFIG_SYSVIPC=y | ||
75 | CONFIG_SYSVIPC_SYSCTL=y | ||
76 | CONFIG_POSIX_MQUEUE=y | ||
77 | CONFIG_POSIX_MQUEUE_SYSCTL=y | ||
78 | CONFIG_BSD_PROCESS_ACCT=y | ||
79 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
80 | # CONFIG_TASKSTATS is not set | ||
81 | # CONFIG_AUDIT is not set | ||
82 | |||
83 | # | ||
84 | # RCU Subsystem | ||
85 | # | ||
86 | CONFIG_TREE_RCU=y | ||
87 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
88 | # CONFIG_RCU_TRACE is not set | ||
89 | CONFIG_RCU_FANOUT=32 | ||
90 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
91 | # CONFIG_TREE_RCU_TRACE is not set | ||
92 | # CONFIG_IKCONFIG is not set | ||
93 | CONFIG_LOG_BUF_SHIFT=17 | ||
94 | CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y | ||
95 | # CONFIG_GROUP_SCHED is not set | ||
96 | # CONFIG_CGROUPS is not set | ||
97 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
98 | # CONFIG_RELAY is not set | ||
99 | CONFIG_NAMESPACES=y | ||
100 | # CONFIG_UTS_NS is not set | ||
101 | # CONFIG_IPC_NS is not set | ||
102 | # CONFIG_USER_NS is not set | ||
103 | # CONFIG_PID_NS is not set | ||
104 | # CONFIG_NET_NS is not set | ||
105 | # CONFIG_BLK_DEV_INITRD is not set | ||
106 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
107 | CONFIG_SYSCTL=y | ||
108 | CONFIG_ANON_INODES=y | ||
109 | # CONFIG_EMBEDDED is not set | ||
110 | CONFIG_SYSCTL_SYSCALL=y | ||
111 | CONFIG_KALLSYMS=y | ||
112 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
113 | CONFIG_HOTPLUG=y | ||
114 | CONFIG_PRINTK=y | ||
115 | CONFIG_BUG=y | ||
116 | CONFIG_ELF_CORE=y | ||
117 | CONFIG_PCSPKR_PLATFORM=y | ||
118 | CONFIG_BASE_FULL=y | ||
119 | CONFIG_FUTEX=y | ||
120 | CONFIG_EPOLL=y | ||
121 | CONFIG_SIGNALFD=y | ||
122 | CONFIG_TIMERFD=y | ||
123 | CONFIG_EVENTFD=y | ||
124 | CONFIG_SHMEM=y | ||
125 | CONFIG_AIO=y | ||
126 | CONFIG_HAVE_PERF_EVENTS=y | ||
127 | |||
128 | # | ||
129 | # Kernel Performance Events And Counters | ||
130 | # | ||
131 | CONFIG_PERF_EVENTS=y | ||
132 | CONFIG_EVENT_PROFILE=y | ||
133 | CONFIG_PERF_COUNTERS=y | ||
134 | CONFIG_VM_EVENT_COUNTERS=y | ||
135 | CONFIG_PCI_QUIRKS=y | ||
136 | CONFIG_SLUB_DEBUG=y | ||
137 | # CONFIG_COMPAT_BRK is not set | ||
138 | # CONFIG_SLAB is not set | ||
139 | CONFIG_SLUB=y | ||
140 | # CONFIG_SLOB is not set | ||
141 | # CONFIG_PROFILING is not set | ||
142 | CONFIG_TRACEPOINTS=y | ||
143 | CONFIG_HAVE_OPROFILE=y | ||
144 | # CONFIG_KPROBES is not set | ||
145 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
146 | CONFIG_HAVE_IOREMAP_PROT=y | ||
147 | CONFIG_HAVE_KPROBES=y | ||
148 | CONFIG_HAVE_KRETPROBES=y | ||
149 | CONFIG_HAVE_ARCH_TRACEHOOK=y | ||
150 | CONFIG_HAVE_DMA_ATTRS=y | ||
151 | CONFIG_HAVE_DMA_API_DEBUG=y | ||
152 | |||
153 | # | ||
154 | # GCOV-based kernel profiling | ||
155 | # | ||
156 | # CONFIG_GCOV_KERNEL is not set | ||
157 | CONFIG_SLOW_WORK=y | ||
158 | # CONFIG_SLOW_WORK_DEBUG is not set | ||
159 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
160 | CONFIG_SLABINFO=y | ||
161 | CONFIG_RT_MUTEXES=y | ||
162 | CONFIG_BASE_SMALL=0 | ||
163 | CONFIG_MODULES=y | ||
164 | CONFIG_MODULE_FORCE_LOAD=y | ||
165 | CONFIG_MODULE_UNLOAD=y | ||
166 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
167 | CONFIG_MODVERSIONS=y | ||
168 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
169 | CONFIG_STOP_MACHINE=y | ||
170 | CONFIG_BLOCK=y | ||
171 | # CONFIG_BLK_DEV_BSG is not set | ||
172 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
173 | |||
174 | # | ||
175 | # IO Schedulers | ||
176 | # | ||
177 | CONFIG_IOSCHED_NOOP=y | ||
178 | CONFIG_IOSCHED_AS=y | ||
179 | CONFIG_IOSCHED_DEADLINE=y | ||
180 | CONFIG_IOSCHED_CFQ=y | ||
181 | # CONFIG_DEFAULT_AS is not set | ||
182 | # CONFIG_DEFAULT_DEADLINE is not set | ||
183 | CONFIG_DEFAULT_CFQ=y | ||
184 | # CONFIG_DEFAULT_NOOP is not set | ||
185 | CONFIG_DEFAULT_IOSCHED="cfq" | ||
186 | # CONFIG_FREEZER is not set | ||
187 | |||
188 | # | ||
189 | # Processor type and features | ||
190 | # | ||
191 | CONFIG_TICK_ONESHOT=y | ||
192 | # CONFIG_NO_HZ is not set | ||
193 | CONFIG_HIGH_RES_TIMERS=y | ||
194 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
195 | CONFIG_SMP=y | ||
196 | # CONFIG_SPARSE_IRQ is not set | ||
197 | # CONFIG_X86_MPPARSE is not set | ||
198 | # CONFIG_X86_EXTENDED_PLATFORM is not set | ||
199 | CONFIG_SCHED_OMIT_FRAME_POINTER=y | ||
200 | # CONFIG_PARAVIRT_GUEST is not set | ||
201 | # CONFIG_MEMTEST is not set | ||
202 | # CONFIG_M386 is not set | ||
203 | # CONFIG_M486 is not set | ||
204 | # CONFIG_M586 is not set | ||
205 | # CONFIG_M586TSC is not set | ||
206 | # CONFIG_M586MMX is not set | ||
207 | # CONFIG_M686 is not set | ||
208 | # CONFIG_MPENTIUMII is not set | ||
209 | # CONFIG_MPENTIUMIII is not set | ||
210 | # CONFIG_MPENTIUMM is not set | ||
211 | # CONFIG_MPENTIUM4 is not set | ||
212 | # CONFIG_MK6 is not set | ||
213 | # CONFIG_MK7 is not set | ||
214 | # CONFIG_MK8 is not set | ||
215 | # CONFIG_MCRUSOE is not set | ||
216 | # CONFIG_MEFFICEON is not set | ||
217 | # CONFIG_MWINCHIPC6 is not set | ||
218 | # CONFIG_MWINCHIP3D is not set | ||
219 | # CONFIG_MGEODEGX1 is not set | ||
220 | # CONFIG_MGEODE_LX is not set | ||
221 | # CONFIG_MCYRIXIII is not set | ||
222 | # CONFIG_MVIAC3_2 is not set | ||
223 | # CONFIG_MVIAC7 is not set | ||
224 | # CONFIG_MPSC is not set | ||
225 | CONFIG_MCORE2=y | ||
226 | # CONFIG_MATOM is not set | ||
227 | # CONFIG_GENERIC_CPU is not set | ||
228 | CONFIG_X86_CPU=y | ||
229 | CONFIG_X86_L1_CACHE_BYTES=64 | ||
230 | CONFIG_X86_INTERNODE_CACHE_BYTES=64 | ||
231 | CONFIG_X86_CMPXCHG=y | ||
232 | CONFIG_X86_L1_CACHE_SHIFT=6 | ||
233 | CONFIG_X86_WP_WORKS_OK=y | ||
234 | CONFIG_X86_INTEL_USERCOPY=y | ||
235 | CONFIG_X86_USE_PPRO_CHECKSUM=y | ||
236 | CONFIG_X86_P6_NOP=y | ||
237 | CONFIG_X86_TSC=y | ||
238 | CONFIG_X86_CMPXCHG64=y | ||
239 | CONFIG_X86_CMOV=y | ||
240 | CONFIG_X86_MINIMUM_CPU_FAMILY=64 | ||
241 | CONFIG_X86_DEBUGCTLMSR=y | ||
242 | CONFIG_CPU_SUP_INTEL=y | ||
243 | CONFIG_CPU_SUP_AMD=y | ||
244 | CONFIG_CPU_SUP_CENTAUR=y | ||
245 | # CONFIG_X86_DS is not set | ||
246 | CONFIG_HPET_TIMER=y | ||
247 | CONFIG_HPET_EMULATE_RTC=y | ||
248 | CONFIG_DMI=y | ||
249 | CONFIG_GART_IOMMU=y | ||
250 | # CONFIG_CALGARY_IOMMU is not set | ||
251 | # CONFIG_AMD_IOMMU is not set | ||
252 | CONFIG_SWIOTLB=y | ||
253 | CONFIG_IOMMU_HELPER=y | ||
254 | # CONFIG_IOMMU_API is not set | ||
255 | CONFIG_NR_CPUS=8 | ||
256 | # CONFIG_SCHED_SMT is not set | ||
257 | CONFIG_SCHED_MC=y | ||
258 | # CONFIG_PREEMPT_NONE is not set | ||
259 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
260 | CONFIG_PREEMPT=y | ||
261 | CONFIG_X86_LOCAL_APIC=y | ||
262 | CONFIG_X86_IO_APIC=y | ||
263 | # CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set | ||
264 | # CONFIG_X86_MCE is not set | ||
265 | # CONFIG_I8K is not set | ||
266 | # CONFIG_MICROCODE is not set | ||
267 | CONFIG_X86_MSR=y | ||
268 | CONFIG_X86_CPUID=y | ||
269 | # CONFIG_X86_CPU_DEBUG is not set | ||
270 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | ||
271 | CONFIG_DIRECT_GBPAGES=y | ||
272 | # CONFIG_NUMA is not set | ||
273 | CONFIG_ARCH_PROC_KCORE_TEXT=y | ||
274 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | ||
275 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
276 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | ||
277 | CONFIG_SELECT_MEMORY_MODEL=y | ||
278 | # CONFIG_FLATMEM_MANUAL is not set | ||
279 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
280 | CONFIG_SPARSEMEM_MANUAL=y | ||
281 | CONFIG_SPARSEMEM=y | ||
282 | CONFIG_HAVE_MEMORY_PRESENT=y | ||
283 | CONFIG_SPARSEMEM_EXTREME=y | ||
284 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y | ||
285 | CONFIG_SPARSEMEM_VMEMMAP=y | ||
286 | # CONFIG_MEMORY_HOTPLUG is not set | ||
287 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
288 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
289 | CONFIG_PHYS_ADDR_T_64BIT=y | ||
290 | CONFIG_ZONE_DMA_FLAG=1 | ||
291 | CONFIG_BOUNCE=y | ||
292 | CONFIG_VIRT_TO_BUS=y | ||
293 | CONFIG_HAVE_MLOCK=y | ||
294 | CONFIG_HAVE_MLOCKED_PAGE_BIT=y | ||
295 | # CONFIG_KSM is not set | ||
296 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
297 | # CONFIG_X86_CHECK_BIOS_CORRUPTION is not set | ||
298 | CONFIG_X86_RESERVE_LOW_64K=y | ||
299 | CONFIG_MTRR=y | ||
300 | CONFIG_MTRR_SANITIZER=y | ||
301 | CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 | ||
302 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 | ||
303 | CONFIG_X86_PAT=y | ||
304 | CONFIG_ARCH_USES_PG_UNCACHED=y | ||
305 | # CONFIG_EFI is not set | ||
306 | CONFIG_SECCOMP=y | ||
307 | # CONFIG_CC_STACKPROTECTOR is not set | ||
308 | # CONFIG_HZ_100 is not set | ||
309 | # CONFIG_HZ_250 is not set | ||
310 | # CONFIG_HZ_300 is not set | ||
311 | CONFIG_HZ_1000=y | ||
312 | CONFIG_HZ=1000 | ||
313 | CONFIG_SCHED_HRTICK=y | ||
314 | # CONFIG_KEXEC is not set | ||
315 | # CONFIG_CRASH_DUMP is not set | ||
316 | CONFIG_PHYSICAL_START=0x1000000 | ||
317 | # CONFIG_RELOCATABLE is not set | ||
318 | CONFIG_PHYSICAL_ALIGN=0x1000000 | ||
319 | # CONFIG_HOTPLUG_CPU is not set | ||
320 | # CONFIG_CMDLINE_BOOL is not set | ||
321 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
322 | |||
323 | # | ||
324 | # Power management and ACPI options | ||
325 | # | ||
326 | CONFIG_PM=y | ||
327 | # CONFIG_PM_DEBUG is not set | ||
328 | # CONFIG_SUSPEND is not set | ||
329 | # CONFIG_HIBERNATION is not set | ||
330 | # CONFIG_PM_RUNTIME is not set | ||
331 | CONFIG_ACPI=y | ||
332 | # CONFIG_ACPI_PROCFS is not set | ||
333 | # CONFIG_ACPI_PROCFS_POWER is not set | ||
334 | CONFIG_ACPI_SYSFS_POWER=y | ||
335 | # CONFIG_ACPI_PROC_EVENT is not set | ||
336 | CONFIG_ACPI_AC=y | ||
337 | # CONFIG_ACPI_BATTERY is not set | ||
338 | CONFIG_ACPI_BUTTON=y | ||
339 | CONFIG_ACPI_FAN=y | ||
340 | CONFIG_ACPI_DOCK=y | ||
341 | CONFIG_ACPI_PROCESSOR=y | ||
342 | # CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set | ||
343 | CONFIG_ACPI_THERMAL=y | ||
344 | # CONFIG_ACPI_CUSTOM_DSDT is not set | ||
345 | CONFIG_ACPI_BLACKLIST_YEAR=0 | ||
346 | # CONFIG_ACPI_DEBUG is not set | ||
347 | # CONFIG_ACPI_PCI_SLOT is not set | ||
348 | CONFIG_X86_PM_TIMER=y | ||
349 | # CONFIG_ACPI_CONTAINER is not set | ||
350 | # CONFIG_ACPI_SBS is not set | ||
351 | # CONFIG_SFI is not set | ||
352 | |||
353 | # | ||
354 | # CPU Frequency scaling | ||
355 | # | ||
356 | CONFIG_CPU_FREQ=y | ||
357 | CONFIG_CPU_FREQ_TABLE=y | ||
358 | # CONFIG_CPU_FREQ_DEBUG is not set | ||
359 | # CONFIG_CPU_FREQ_STAT is not set | ||
360 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
361 | # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set | ||
362 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
363 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
364 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
365 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
366 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set | ||
367 | # CONFIG_CPU_FREQ_GOV_USERSPACE is not set | ||
368 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set | ||
369 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
370 | |||
371 | # | ||
372 | # CPUFreq processor drivers | ||
373 | # | ||
374 | CONFIG_X86_ACPI_CPUFREQ=y | ||
375 | # CONFIG_X86_POWERNOW_K8 is not set | ||
376 | # CONFIG_X86_SPEEDSTEP_CENTRINO is not set | ||
377 | # CONFIG_X86_P4_CLOCKMOD is not set | ||
378 | |||
379 | # | ||
380 | # shared options | ||
381 | # | ||
382 | # CONFIG_X86_SPEEDSTEP_LIB is not set | ||
383 | CONFIG_CPU_IDLE=y | ||
384 | CONFIG_CPU_IDLE_GOV_LADDER=y | ||
385 | |||
386 | # | ||
387 | # Memory power savings | ||
388 | # | ||
389 | # CONFIG_I7300_IDLE is not set | ||
390 | |||
391 | # | ||
392 | # Bus options (PCI etc.) | ||
393 | # | ||
394 | CONFIG_PCI=y | ||
395 | CONFIG_PCI_DIRECT=y | ||
396 | CONFIG_PCI_MMCONFIG=y | ||
397 | CONFIG_PCI_DOMAINS=y | ||
398 | # CONFIG_PCIEPORTBUS is not set | ||
399 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
400 | # CONFIG_PCI_MSI is not set | ||
401 | # CONFIG_PCI_LEGACY is not set | ||
402 | # CONFIG_PCI_STUB is not set | ||
403 | CONFIG_HT_IRQ=y | ||
404 | # CONFIG_PCI_IOV is not set | ||
405 | CONFIG_ISA_DMA_API=y | ||
406 | CONFIG_K8_NB=y | ||
407 | # CONFIG_PCCARD is not set | ||
408 | # CONFIG_HOTPLUG_PCI is not set | ||
409 | |||
410 | # | ||
411 | # Executable file formats / Emulations | ||
412 | # | ||
413 | CONFIG_BINFMT_ELF=y | ||
414 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
415 | # CONFIG_HAVE_AOUT is not set | ||
416 | CONFIG_BINFMT_MISC=y | ||
417 | # CONFIG_IA32_EMULATION is not set | ||
418 | # CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set | ||
419 | CONFIG_NET=y | ||
420 | |||
421 | # | ||
422 | # Networking options | ||
423 | # | ||
424 | CONFIG_PACKET=y | ||
425 | CONFIG_PACKET_MMAP=y | ||
426 | CONFIG_UNIX=y | ||
427 | CONFIG_XFRM=y | ||
428 | CONFIG_XFRM_USER=y | ||
429 | # CONFIG_XFRM_SUB_POLICY is not set | ||
430 | # CONFIG_XFRM_MIGRATE is not set | ||
431 | # CONFIG_XFRM_STATISTICS is not set | ||
432 | CONFIG_XFRM_IPCOMP=y | ||
433 | CONFIG_NET_KEY=y | ||
434 | # CONFIG_NET_KEY_MIGRATE is not set | ||
435 | CONFIG_INET=y | ||
436 | CONFIG_IP_MULTICAST=y | ||
437 | CONFIG_IP_ADVANCED_ROUTER=y | ||
438 | CONFIG_ASK_IP_FIB_HASH=y | ||
439 | # CONFIG_IP_FIB_TRIE is not set | ||
440 | CONFIG_IP_FIB_HASH=y | ||
441 | CONFIG_IP_MULTIPLE_TABLES=y | ||
442 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
443 | CONFIG_IP_ROUTE_VERBOSE=y | ||
444 | # CONFIG_IP_PNP is not set | ||
445 | CONFIG_NET_IPIP=y | ||
446 | CONFIG_NET_IPGRE=y | ||
447 | CONFIG_NET_IPGRE_BROADCAST=y | ||
448 | CONFIG_IP_MROUTE=y | ||
449 | CONFIG_IP_PIMSM_V1=y | ||
450 | CONFIG_IP_PIMSM_V2=y | ||
451 | # CONFIG_ARPD is not set | ||
452 | CONFIG_SYN_COOKIES=y | ||
453 | CONFIG_INET_AH=y | ||
454 | CONFIG_INET_ESP=y | ||
455 | CONFIG_INET_IPCOMP=y | ||
456 | CONFIG_INET_XFRM_TUNNEL=y | ||
457 | CONFIG_INET_TUNNEL=y | ||
458 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
459 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
460 | CONFIG_INET_XFRM_MODE_BEET=y | ||
461 | CONFIG_INET_LRO=y | ||
462 | CONFIG_INET_DIAG=y | ||
463 | CONFIG_INET_TCP_DIAG=y | ||
464 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
465 | CONFIG_TCP_CONG_CUBIC=y | ||
466 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
467 | # CONFIG_TCP_MD5SIG is not set | ||
468 | # CONFIG_IPV6 is not set | ||
469 | CONFIG_NETWORK_SECMARK=y | ||
470 | CONFIG_NETFILTER=y | ||
471 | # CONFIG_NETFILTER_DEBUG is not set | ||
472 | CONFIG_NETFILTER_ADVANCED=y | ||
473 | |||
474 | # | ||
475 | # Core Netfilter Configuration | ||
476 | # | ||
477 | CONFIG_NETFILTER_NETLINK=y | ||
478 | CONFIG_NETFILTER_NETLINK_QUEUE=y | ||
479 | CONFIG_NETFILTER_NETLINK_LOG=y | ||
480 | CONFIG_NF_CONNTRACK=y | ||
481 | CONFIG_NF_CT_ACCT=y | ||
482 | CONFIG_NF_CONNTRACK_MARK=y | ||
483 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
484 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
485 | # CONFIG_NF_CT_PROTO_DCCP is not set | ||
486 | # CONFIG_NF_CT_PROTO_SCTP is not set | ||
487 | # CONFIG_NF_CT_PROTO_UDPLITE is not set | ||
488 | CONFIG_NF_CONNTRACK_AMANDA=y | ||
489 | CONFIG_NF_CONNTRACK_FTP=y | ||
490 | # CONFIG_NF_CONNTRACK_H323 is not set | ||
491 | # CONFIG_NF_CONNTRACK_IRC is not set | ||
492 | CONFIG_NF_CONNTRACK_NETBIOS_NS=y | ||
493 | # CONFIG_NF_CONNTRACK_PPTP is not set | ||
494 | # CONFIG_NF_CONNTRACK_SANE is not set | ||
495 | # CONFIG_NF_CONNTRACK_SIP is not set | ||
496 | CONFIG_NF_CONNTRACK_TFTP=y | ||
497 | # CONFIG_NF_CT_NETLINK is not set | ||
498 | CONFIG_NETFILTER_XTABLES=y | ||
499 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y | ||
500 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=y | ||
501 | # CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set | ||
502 | CONFIG_NETFILTER_XT_TARGET_HL=y | ||
503 | CONFIG_NETFILTER_XT_TARGET_MARK=y | ||
504 | CONFIG_NETFILTER_XT_TARGET_NFLOG=y | ||
505 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y | ||
506 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=y | ||
507 | CONFIG_NETFILTER_XT_TARGET_RATEEST=y | ||
508 | CONFIG_NETFILTER_XT_TARGET_TRACE=y | ||
509 | # CONFIG_NETFILTER_XT_TARGET_SECMARK is not set | ||
510 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=y | ||
511 | # CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set | ||
512 | # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set | ||
513 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y | ||
514 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y | ||
515 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=y | ||
516 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y | ||
517 | # CONFIG_NETFILTER_XT_MATCH_DCCP is not set | ||
518 | # CONFIG_NETFILTER_XT_MATCH_DSCP is not set | ||
519 | # CONFIG_NETFILTER_XT_MATCH_ESP is not set | ||
520 | # CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set | ||
521 | # CONFIG_NETFILTER_XT_MATCH_HELPER is not set | ||
522 | CONFIG_NETFILTER_XT_MATCH_HL=y | ||
523 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=y | ||
524 | CONFIG_NETFILTER_XT_MATCH_LENGTH=y | ||
525 | CONFIG_NETFILTER_XT_MATCH_LIMIT=y | ||
526 | CONFIG_NETFILTER_XT_MATCH_MAC=y | ||
527 | CONFIG_NETFILTER_XT_MATCH_MARK=y | ||
528 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y | ||
529 | CONFIG_NETFILTER_XT_MATCH_OWNER=y | ||
530 | # CONFIG_NETFILTER_XT_MATCH_POLICY is not set | ||
531 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y | ||
532 | # CONFIG_NETFILTER_XT_MATCH_QUOTA is not set | ||
533 | # CONFIG_NETFILTER_XT_MATCH_RATEEST is not set | ||
534 | # CONFIG_NETFILTER_XT_MATCH_REALM is not set | ||
535 | # CONFIG_NETFILTER_XT_MATCH_RECENT is not set | ||
536 | # CONFIG_NETFILTER_XT_MATCH_SCTP is not set | ||
537 | # CONFIG_NETFILTER_XT_MATCH_STATE is not set | ||
538 | # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set | ||
539 | CONFIG_NETFILTER_XT_MATCH_STRING=y | ||
540 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=y | ||
541 | CONFIG_NETFILTER_XT_MATCH_TIME=y | ||
542 | CONFIG_NETFILTER_XT_MATCH_U32=y | ||
543 | # CONFIG_NETFILTER_XT_MATCH_OSF is not set | ||
544 | # CONFIG_IP_VS is not set | ||
545 | |||
546 | # | ||
547 | # IP: Netfilter Configuration | ||
548 | # | ||
549 | CONFIG_NF_DEFRAG_IPV4=y | ||
550 | CONFIG_NF_CONNTRACK_IPV4=y | ||
551 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | ||
552 | # CONFIG_IP_NF_QUEUE is not set | ||
553 | CONFIG_IP_NF_IPTABLES=y | ||
554 | CONFIG_IP_NF_MATCH_ADDRTYPE=y | ||
555 | CONFIG_IP_NF_MATCH_AH=y | ||
556 | CONFIG_IP_NF_MATCH_ECN=y | ||
557 | CONFIG_IP_NF_MATCH_TTL=y | ||
558 | CONFIG_IP_NF_FILTER=y | ||
559 | CONFIG_IP_NF_TARGET_REJECT=y | ||
560 | CONFIG_IP_NF_TARGET_LOG=y | ||
561 | CONFIG_IP_NF_TARGET_ULOG=y | ||
562 | # CONFIG_NF_NAT is not set | ||
563 | # CONFIG_IP_NF_MANGLE is not set | ||
564 | CONFIG_IP_NF_TARGET_TTL=y | ||
565 | CONFIG_IP_NF_RAW=y | ||
566 | CONFIG_IP_NF_ARPTABLES=y | ||
567 | CONFIG_IP_NF_ARPFILTER=y | ||
568 | CONFIG_IP_NF_ARP_MANGLE=y | ||
569 | # CONFIG_IP_DCCP is not set | ||
570 | # CONFIG_IP_SCTP is not set | ||
571 | # CONFIG_RDS is not set | ||
572 | # CONFIG_TIPC is not set | ||
573 | # CONFIG_ATM is not set | ||
574 | # CONFIG_BRIDGE is not set | ||
575 | # CONFIG_NET_DSA is not set | ||
576 | # CONFIG_VLAN_8021Q is not set | ||
577 | # CONFIG_DECNET is not set | ||
578 | # CONFIG_LLC2 is not set | ||
579 | # CONFIG_IPX is not set | ||
580 | # CONFIG_ATALK is not set | ||
581 | # CONFIG_X25 is not set | ||
582 | # CONFIG_LAPB is not set | ||
583 | # CONFIG_ECONET is not set | ||
584 | # CONFIG_WAN_ROUTER is not set | ||
585 | # CONFIG_PHONET is not set | ||
586 | # CONFIG_IEEE802154 is not set | ||
587 | # CONFIG_NET_SCHED is not set | ||
588 | # CONFIG_DCB is not set | ||
589 | |||
590 | # | ||
591 | # Network testing | ||
592 | # | ||
593 | # CONFIG_NET_PKTGEN is not set | ||
594 | # CONFIG_NET_DROP_MONITOR is not set | ||
595 | # CONFIG_HAMRADIO is not set | ||
596 | # CONFIG_CAN is not set | ||
597 | # CONFIG_IRDA is not set | ||
598 | # CONFIG_BT is not set | ||
599 | # CONFIG_AF_RXRPC is not set | ||
600 | CONFIG_FIB_RULES=y | ||
601 | # CONFIG_WIRELESS is not set | ||
602 | # CONFIG_WIMAX is not set | ||
603 | # CONFIG_RFKILL is not set | ||
604 | # CONFIG_NET_9P is not set | ||
605 | |||
606 | # | ||
607 | # Device Drivers | ||
608 | # | ||
609 | |||
610 | # | ||
611 | # Generic Driver Options | ||
612 | # | ||
613 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
614 | # CONFIG_DEVTMPFS is not set | ||
615 | CONFIG_STANDALONE=y | ||
616 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
617 | CONFIG_FW_LOADER=y | ||
618 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
619 | CONFIG_EXTRA_FIRMWARE="" | ||
620 | # CONFIG_SYS_HYPERVISOR is not set | ||
621 | # CONFIG_CONNECTOR is not set | ||
622 | # CONFIG_MTD is not set | ||
623 | # CONFIG_PARPORT is not set | ||
624 | CONFIG_PNP=y | ||
625 | # CONFIG_PNP_DEBUG_MESSAGES is not set | ||
626 | |||
627 | # | ||
628 | # Protocols | ||
629 | # | ||
630 | CONFIG_PNPACPI=y | ||
631 | CONFIG_BLK_DEV=y | ||
632 | CONFIG_BLK_DEV_FD=y | ||
633 | # CONFIG_BLK_CPQ_DA is not set | ||
634 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
635 | # CONFIG_BLK_DEV_DAC960 is not set | ||
636 | # CONFIG_BLK_DEV_UMEM is not set | ||
637 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
638 | CONFIG_BLK_DEV_LOOP=y | ||
639 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
640 | # CONFIG_BLK_DEV_NBD is not set | ||
641 | # CONFIG_BLK_DEV_SX8 is not set | ||
642 | # CONFIG_BLK_DEV_UB is not set | ||
643 | # CONFIG_BLK_DEV_RAM is not set | ||
644 | CONFIG_CDROM_PKTCDVD=y | ||
645 | CONFIG_CDROM_PKTCDVD_BUFFERS=8 | ||
646 | # CONFIG_CDROM_PKTCDVD_WCACHE is not set | ||
647 | # CONFIG_ATA_OVER_ETH is not set | ||
648 | # CONFIG_BLK_DEV_HD is not set | ||
649 | # CONFIG_MISC_DEVICES is not set | ||
650 | CONFIG_HAVE_IDE=y | ||
651 | CONFIG_IDE=y | ||
652 | |||
653 | # | ||
654 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | ||
655 | # | ||
656 | CONFIG_IDE_XFER_MODE=y | ||
657 | CONFIG_IDE_ATAPI=y | ||
658 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
659 | CONFIG_IDE_GD=y | ||
660 | CONFIG_IDE_GD_ATA=y | ||
661 | # CONFIG_IDE_GD_ATAPI is not set | ||
662 | CONFIG_BLK_DEV_IDECD=y | ||
663 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y | ||
664 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
665 | CONFIG_BLK_DEV_IDEACPI=y | ||
666 | # CONFIG_IDE_TASK_IOCTL is not set | ||
667 | CONFIG_IDE_PROC_FS=y | ||
668 | |||
669 | # | ||
670 | # IDE chipset support/bugfixes | ||
671 | # | ||
672 | CONFIG_IDE_GENERIC=y | ||
673 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
674 | # CONFIG_BLK_DEV_CMD640 is not set | ||
675 | CONFIG_BLK_DEV_IDEPNP=y | ||
676 | CONFIG_BLK_DEV_IDEDMA_SFF=y | ||
677 | |||
678 | # | ||
679 | # PCI IDE chipsets support | ||
680 | # | ||
681 | CONFIG_BLK_DEV_IDEPCI=y | ||
682 | # CONFIG_IDEPCI_PCIBUS_ORDER is not set | ||
683 | # CONFIG_BLK_DEV_GENERIC is not set | ||
684 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
685 | # CONFIG_BLK_DEV_RZ1000 is not set | ||
686 | CONFIG_BLK_DEV_IDEDMA_PCI=y | ||
687 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
688 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
689 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
690 | # CONFIG_BLK_DEV_ATIIXP is not set | ||
691 | # CONFIG_BLK_DEV_CMD64X is not set | ||
692 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
693 | # CONFIG_BLK_DEV_CS5520 is not set | ||
694 | # CONFIG_BLK_DEV_CS5530 is not set | ||
695 | # CONFIG_BLK_DEV_HPT366 is not set | ||
696 | # CONFIG_BLK_DEV_JMICRON is not set | ||
697 | # CONFIG_BLK_DEV_SC1200 is not set | ||
698 | CONFIG_BLK_DEV_PIIX=y | ||
699 | # CONFIG_BLK_DEV_IT8172 is not set | ||
700 | # CONFIG_BLK_DEV_IT8213 is not set | ||
701 | # CONFIG_BLK_DEV_IT821X is not set | ||
702 | # CONFIG_BLK_DEV_NS87415 is not set | ||
703 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
704 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | ||
705 | # CONFIG_BLK_DEV_SVWKS is not set | ||
706 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
707 | # CONFIG_BLK_DEV_SIS5513 is not set | ||
708 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
709 | # CONFIG_BLK_DEV_TRM290 is not set | ||
710 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
711 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
712 | CONFIG_BLK_DEV_IDEDMA=y | ||
713 | |||
714 | # | ||
715 | # SCSI device support | ||
716 | # | ||
717 | # CONFIG_RAID_ATTRS is not set | ||
718 | CONFIG_SCSI=y | ||
719 | CONFIG_SCSI_DMA=y | ||
720 | # CONFIG_SCSI_TGT is not set | ||
721 | # CONFIG_SCSI_NETLINK is not set | ||
722 | # CONFIG_SCSI_PROC_FS is not set | ||
723 | |||
724 | # | ||
725 | # SCSI support type (disk, tape, CD-ROM) | ||
726 | # | ||
727 | CONFIG_BLK_DEV_SD=y | ||
728 | # CONFIG_CHR_DEV_ST is not set | ||
729 | # CONFIG_CHR_DEV_OSST is not set | ||
730 | CONFIG_BLK_DEV_SR=y | ||
731 | # CONFIG_BLK_DEV_SR_VENDOR is not set | ||
732 | CONFIG_CHR_DEV_SG=y | ||
733 | # CONFIG_CHR_DEV_SCH is not set | ||
734 | # CONFIG_SCSI_MULTI_LUN is not set | ||
735 | # CONFIG_SCSI_CONSTANTS is not set | ||
736 | # CONFIG_SCSI_LOGGING is not set | ||
737 | CONFIG_SCSI_SCAN_ASYNC=y | ||
738 | CONFIG_SCSI_WAIT_SCAN=m | ||
739 | |||
740 | # | ||
741 | # SCSI Transports | ||
742 | # | ||
743 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
744 | # CONFIG_SCSI_FC_ATTRS is not set | ||
745 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
746 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
747 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
748 | # CONFIG_SCSI_LOWLEVEL is not set | ||
749 | # CONFIG_SCSI_DH is not set | ||
750 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
751 | CONFIG_ATA=y | ||
752 | # CONFIG_ATA_NONSTANDARD is not set | ||
753 | CONFIG_ATA_VERBOSE_ERROR=y | ||
754 | CONFIG_ATA_ACPI=y | ||
755 | CONFIG_SATA_PMP=y | ||
756 | CONFIG_SATA_AHCI=y | ||
757 | # CONFIG_SATA_SIL24 is not set | ||
758 | CONFIG_ATA_SFF=y | ||
759 | # CONFIG_SATA_SVW is not set | ||
760 | CONFIG_ATA_PIIX=y | ||
761 | # CONFIG_SATA_MV is not set | ||
762 | # CONFIG_SATA_NV is not set | ||
763 | # CONFIG_PDC_ADMA is not set | ||
764 | # CONFIG_SATA_QSTOR is not set | ||
765 | # CONFIG_SATA_PROMISE is not set | ||
766 | # CONFIG_SATA_SX4 is not set | ||
767 | # CONFIG_SATA_SIL is not set | ||
768 | # CONFIG_SATA_SIS is not set | ||
769 | # CONFIG_SATA_ULI is not set | ||
770 | # CONFIG_SATA_VIA is not set | ||
771 | # CONFIG_SATA_VITESSE is not set | ||
772 | # CONFIG_SATA_INIC162X is not set | ||
773 | # CONFIG_PATA_ACPI is not set | ||
774 | # CONFIG_PATA_ALI is not set | ||
775 | # CONFIG_PATA_AMD is not set | ||
776 | # CONFIG_PATA_ARTOP is not set | ||
777 | # CONFIG_PATA_ATP867X is not set | ||
778 | # CONFIG_PATA_ATIIXP is not set | ||
779 | # CONFIG_PATA_CMD640_PCI is not set | ||
780 | # CONFIG_PATA_CMD64X is not set | ||
781 | # CONFIG_PATA_CS5520 is not set | ||
782 | # CONFIG_PATA_CS5530 is not set | ||
783 | # CONFIG_PATA_CYPRESS is not set | ||
784 | # CONFIG_PATA_EFAR is not set | ||
785 | # CONFIG_ATA_GENERIC is not set | ||
786 | # CONFIG_PATA_HPT366 is not set | ||
787 | # CONFIG_PATA_HPT37X is not set | ||
788 | # CONFIG_PATA_HPT3X2N is not set | ||
789 | # CONFIG_PATA_HPT3X3 is not set | ||
790 | # CONFIG_PATA_IT821X is not set | ||
791 | # CONFIG_PATA_IT8213 is not set | ||
792 | # CONFIG_PATA_JMICRON is not set | ||
793 | # CONFIG_PATA_TRIFLEX is not set | ||
794 | # CONFIG_PATA_MARVELL is not set | ||
795 | # CONFIG_PATA_MPIIX is not set | ||
796 | # CONFIG_PATA_OLDPIIX is not set | ||
797 | # CONFIG_PATA_NETCELL is not set | ||
798 | # CONFIG_PATA_NINJA32 is not set | ||
799 | # CONFIG_PATA_NS87410 is not set | ||
800 | # CONFIG_PATA_NS87415 is not set | ||
801 | # CONFIG_PATA_OPTI is not set | ||
802 | # CONFIG_PATA_OPTIDMA is not set | ||
803 | # CONFIG_PATA_PDC_OLD is not set | ||
804 | # CONFIG_PATA_RADISYS is not set | ||
805 | # CONFIG_PATA_RDC is not set | ||
806 | # CONFIG_PATA_RZ1000 is not set | ||
807 | # CONFIG_PATA_SC1200 is not set | ||
808 | # CONFIG_PATA_SERVERWORKS is not set | ||
809 | # CONFIG_PATA_PDC2027X is not set | ||
810 | # CONFIG_PATA_SIL680 is not set | ||
811 | # CONFIG_PATA_SIS is not set | ||
812 | # CONFIG_PATA_VIA is not set | ||
813 | # CONFIG_PATA_WINBOND is not set | ||
814 | # CONFIG_PATA_SCH is not set | ||
815 | CONFIG_MD=y | ||
816 | # CONFIG_BLK_DEV_MD is not set | ||
817 | CONFIG_BLK_DEV_DM=y | ||
818 | # CONFIG_DM_DEBUG is not set | ||
819 | # CONFIG_DM_CRYPT is not set | ||
820 | # CONFIG_DM_SNAPSHOT is not set | ||
821 | # CONFIG_DM_MIRROR is not set | ||
822 | # CONFIG_DM_ZERO is not set | ||
823 | # CONFIG_DM_MULTIPATH is not set | ||
824 | # CONFIG_DM_DELAY is not set | ||
825 | # CONFIG_DM_UEVENT is not set | ||
826 | # CONFIG_FUSION is not set | ||
827 | |||
828 | # | ||
829 | # IEEE 1394 (FireWire) support | ||
830 | # | ||
831 | |||
832 | # | ||
833 | # You can enable one or both FireWire driver stacks. | ||
834 | # | ||
835 | |||
836 | # | ||
837 | # See the help texts for more information. | ||
838 | # | ||
839 | # CONFIG_FIREWIRE is not set | ||
840 | # CONFIG_IEEE1394 is not set | ||
841 | # CONFIG_I2O is not set | ||
842 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
843 | CONFIG_NETDEVICES=y | ||
844 | CONFIG_DUMMY=y | ||
845 | # CONFIG_BONDING is not set | ||
846 | # CONFIG_MACVLAN is not set | ||
847 | # CONFIG_EQUALIZER is not set | ||
848 | CONFIG_TUN=y | ||
849 | # CONFIG_VETH is not set | ||
850 | # CONFIG_NET_SB1000 is not set | ||
851 | # CONFIG_ARCNET is not set | ||
852 | CONFIG_PHYLIB=y | ||
853 | |||
854 | # | ||
855 | # MII PHY device drivers | ||
856 | # | ||
857 | # CONFIG_MARVELL_PHY is not set | ||
858 | # CONFIG_DAVICOM_PHY is not set | ||
859 | # CONFIG_QSEMI_PHY is not set | ||
860 | # CONFIG_LXT_PHY is not set | ||
861 | # CONFIG_CICADA_PHY is not set | ||
862 | # CONFIG_VITESSE_PHY is not set | ||
863 | # CONFIG_SMSC_PHY is not set | ||
864 | # CONFIG_BROADCOM_PHY is not set | ||
865 | # CONFIG_ICPLUS_PHY is not set | ||
866 | # CONFIG_REALTEK_PHY is not set | ||
867 | # CONFIG_NATIONAL_PHY is not set | ||
868 | # CONFIG_STE10XP is not set | ||
869 | # CONFIG_LSI_ET1011C_PHY is not set | ||
870 | # CONFIG_FIXED_PHY is not set | ||
871 | # CONFIG_MDIO_BITBANG is not set | ||
872 | CONFIG_NET_ETHERNET=y | ||
873 | CONFIG_MII=y | ||
874 | # CONFIG_HAPPYMEAL is not set | ||
875 | # CONFIG_SUNGEM is not set | ||
876 | # CONFIG_CASSINI is not set | ||
877 | # CONFIG_NET_VENDOR_3COM is not set | ||
878 | # CONFIG_ETHOC is not set | ||
879 | # CONFIG_DNET is not set | ||
880 | # CONFIG_NET_TULIP is not set | ||
881 | # CONFIG_HP100 is not set | ||
882 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
883 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
884 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
885 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
886 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
887 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
888 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
889 | CONFIG_NET_PCI=y | ||
890 | CONFIG_PCNET32=y | ||
891 | CONFIG_AMD8111_ETH=y | ||
892 | # CONFIG_ADAPTEC_STARFIRE is not set | ||
893 | # CONFIG_B44 is not set | ||
894 | # CONFIG_FORCEDETH is not set | ||
895 | CONFIG_E100=y | ||
896 | # CONFIG_FEALNX is not set | ||
897 | # CONFIG_NATSEMI is not set | ||
898 | # CONFIG_NE2K_PCI is not set | ||
899 | CONFIG_8139CP=y | ||
900 | CONFIG_8139TOO=y | ||
901 | CONFIG_8139TOO_PIO=y | ||
902 | # CONFIG_8139TOO_TUNE_TWISTER is not set | ||
903 | CONFIG_8139TOO_8129=y | ||
904 | # CONFIG_8139_OLD_RX_RESET is not set | ||
905 | # CONFIG_R6040 is not set | ||
906 | # CONFIG_SIS900 is not set | ||
907 | # CONFIG_EPIC100 is not set | ||
908 | # CONFIG_SMSC9420 is not set | ||
909 | # CONFIG_SUNDANCE is not set | ||
910 | # CONFIG_TLAN is not set | ||
911 | # CONFIG_KS8842 is not set | ||
912 | # CONFIG_KS8851_MLL is not set | ||
913 | # CONFIG_VIA_RHINE is not set | ||
914 | # CONFIG_SC92031 is not set | ||
915 | # CONFIG_ATL2 is not set | ||
916 | CONFIG_NETDEV_1000=y | ||
917 | # CONFIG_ACENIC is not set | ||
918 | # CONFIG_DL2K is not set | ||
919 | CONFIG_E1000=y | ||
920 | # CONFIG_E1000E is not set | ||
921 | # CONFIG_IP1000 is not set | ||
922 | # CONFIG_IGB is not set | ||
923 | # CONFIG_IGBVF is not set | ||
924 | # CONFIG_NS83820 is not set | ||
925 | # CONFIG_HAMACHI is not set | ||
926 | # CONFIG_YELLOWFIN is not set | ||
927 | # CONFIG_R8169 is not set | ||
928 | # CONFIG_SIS190 is not set | ||
929 | # CONFIG_SKGE is not set | ||
930 | # CONFIG_SKY2 is not set | ||
931 | # CONFIG_VIA_VELOCITY is not set | ||
932 | # CONFIG_TIGON3 is not set | ||
933 | # CONFIG_BNX2 is not set | ||
934 | # CONFIG_CNIC is not set | ||
935 | # CONFIG_QLA3XXX is not set | ||
936 | # CONFIG_ATL1 is not set | ||
937 | # CONFIG_ATL1E is not set | ||
938 | # CONFIG_ATL1C is not set | ||
939 | # CONFIG_JME is not set | ||
940 | # CONFIG_NETDEV_10000 is not set | ||
941 | # CONFIG_TR is not set | ||
942 | CONFIG_WLAN=y | ||
943 | # CONFIG_WLAN_PRE80211 is not set | ||
944 | # CONFIG_WLAN_80211 is not set | ||
945 | |||
946 | # | ||
947 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
948 | # | ||
949 | |||
950 | # | ||
951 | # USB Network Adapters | ||
952 | # | ||
953 | # CONFIG_USB_CATC is not set | ||
954 | # CONFIG_USB_KAWETH is not set | ||
955 | # CONFIG_USB_PEGASUS is not set | ||
956 | # CONFIG_USB_RTL8150 is not set | ||
957 | # CONFIG_USB_USBNET is not set | ||
958 | # CONFIG_WAN is not set | ||
959 | # CONFIG_FDDI is not set | ||
960 | # CONFIG_HIPPI is not set | ||
961 | # CONFIG_PPP is not set | ||
962 | # CONFIG_SLIP is not set | ||
963 | # CONFIG_NET_FC is not set | ||
964 | CONFIG_NETCONSOLE=m | ||
965 | CONFIG_NETCONSOLE_DYNAMIC=y | ||
966 | CONFIG_NETPOLL=y | ||
967 | # CONFIG_NETPOLL_TRAP is not set | ||
968 | CONFIG_NET_POLL_CONTROLLER=y | ||
969 | # CONFIG_VMXNET3 is not set | ||
970 | # CONFIG_ISDN is not set | ||
971 | # CONFIG_PHONE is not set | ||
972 | |||
973 | # | ||
974 | # Input device support | ||
975 | # | ||
976 | CONFIG_INPUT=y | ||
977 | CONFIG_INPUT_FF_MEMLESS=y | ||
978 | # CONFIG_INPUT_POLLDEV is not set | ||
979 | |||
980 | # | ||
981 | # Userland interfaces | ||
982 | # | ||
983 | CONFIG_INPUT_MOUSEDEV=y | ||
984 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
985 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
986 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
987 | # CONFIG_INPUT_JOYDEV is not set | ||
988 | CONFIG_INPUT_EVDEV=y | ||
989 | # CONFIG_INPUT_EVBUG is not set | ||
990 | |||
991 | # | ||
992 | # Input Device Drivers | ||
993 | # | ||
994 | CONFIG_INPUT_KEYBOARD=y | ||
995 | # CONFIG_KEYBOARD_ADP5588 is not set | ||
996 | CONFIG_KEYBOARD_ATKBD=y | ||
997 | # CONFIG_QT2160 is not set | ||
998 | # CONFIG_KEYBOARD_LKKBD is not set | ||
999 | # CONFIG_KEYBOARD_MAX7359 is not set | ||
1000 | # CONFIG_KEYBOARD_NEWTON is not set | ||
1001 | # CONFIG_KEYBOARD_OPENCORES is not set | ||
1002 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
1003 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
1004 | # CONFIG_KEYBOARD_XTKBD is not set | ||
1005 | CONFIG_INPUT_MOUSE=y | ||
1006 | CONFIG_MOUSE_PS2=y | ||
1007 | CONFIG_MOUSE_PS2_ALPS=y | ||
1008 | CONFIG_MOUSE_PS2_LOGIPS2PP=y | ||
1009 | CONFIG_MOUSE_PS2_SYNAPTICS=y | ||
1010 | CONFIG_MOUSE_PS2_LIFEBOOK=y | ||
1011 | CONFIG_MOUSE_PS2_TRACKPOINT=y | ||
1012 | # CONFIG_MOUSE_PS2_ELANTECH is not set | ||
1013 | # CONFIG_MOUSE_PS2_SENTELIC is not set | ||
1014 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | ||
1015 | # CONFIG_MOUSE_SERIAL is not set | ||
1016 | # CONFIG_MOUSE_APPLETOUCH is not set | ||
1017 | # CONFIG_MOUSE_BCM5974 is not set | ||
1018 | # CONFIG_MOUSE_VSXXXAA is not set | ||
1019 | # CONFIG_MOUSE_SYNAPTICS_I2C is not set | ||
1020 | # CONFIG_INPUT_JOYSTICK is not set | ||
1021 | # CONFIG_INPUT_TABLET is not set | ||
1022 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
1023 | # CONFIG_INPUT_MISC is not set | ||
1024 | |||
1025 | # | ||
1026 | # Hardware I/O ports | ||
1027 | # | ||
1028 | CONFIG_SERIO=y | ||
1029 | CONFIG_SERIO_I8042=y | ||
1030 | CONFIG_SERIO_SERPORT=y | ||
1031 | # CONFIG_SERIO_CT82C710 is not set | ||
1032 | # CONFIG_SERIO_PCIPS2 is not set | ||
1033 | CONFIG_SERIO_LIBPS2=y | ||
1034 | CONFIG_SERIO_RAW=y | ||
1035 | # CONFIG_GAMEPORT is not set | ||
1036 | |||
1037 | # | ||
1038 | # Character devices | ||
1039 | # | ||
1040 | CONFIG_VT=y | ||
1041 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
1042 | CONFIG_VT_CONSOLE=y | ||
1043 | CONFIG_HW_CONSOLE=y | ||
1044 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
1045 | # CONFIG_DEVKMEM is not set | ||
1046 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
1047 | # CONFIG_NOZOMI is not set | ||
1048 | |||
1049 | # | ||
1050 | # Serial drivers | ||
1051 | # | ||
1052 | CONFIG_SERIAL_8250=y | ||
1053 | CONFIG_SERIAL_8250_CONSOLE=y | ||
1054 | CONFIG_FIX_EARLYCON_MEM=y | ||
1055 | CONFIG_SERIAL_8250_PCI=y | ||
1056 | CONFIG_SERIAL_8250_PNP=y | ||
1057 | CONFIG_SERIAL_8250_NR_UARTS=32 | ||
1058 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
1059 | CONFIG_SERIAL_8250_EXTENDED=y | ||
1060 | # CONFIG_SERIAL_8250_MANY_PORTS is not set | ||
1061 | # CONFIG_SERIAL_8250_SHARE_IRQ is not set | ||
1062 | # CONFIG_SERIAL_8250_DETECT_IRQ is not set | ||
1063 | # CONFIG_SERIAL_8250_RSA is not set | ||
1064 | |||
1065 | # | ||
1066 | # Non-8250 serial port support | ||
1067 | # | ||
1068 | CONFIG_SERIAL_CORE=y | ||
1069 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
1070 | # CONFIG_SERIAL_JSM is not set | ||
1071 | CONFIG_UNIX98_PTYS=y | ||
1072 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
1073 | # CONFIG_LEGACY_PTYS is not set | ||
1074 | # CONFIG_IPMI_HANDLER is not set | ||
1075 | CONFIG_HW_RANDOM=y | ||
1076 | # CONFIG_HW_RANDOM_TIMERIOMEM is not set | ||
1077 | CONFIG_HW_RANDOM_INTEL=y | ||
1078 | CONFIG_HW_RANDOM_AMD=y | ||
1079 | # CONFIG_HW_RANDOM_VIA is not set | ||
1080 | # CONFIG_NVRAM is not set | ||
1081 | CONFIG_RTC=y | ||
1082 | # CONFIG_R3964 is not set | ||
1083 | # CONFIG_APPLICOM is not set | ||
1084 | # CONFIG_MWAVE is not set | ||
1085 | # CONFIG_PC8736x_GPIO is not set | ||
1086 | # CONFIG_RAW_DRIVER is not set | ||
1087 | CONFIG_HPET=y | ||
1088 | CONFIG_HPET_MMAP=y | ||
1089 | CONFIG_HANGCHECK_TIMER=y | ||
1090 | # CONFIG_TCG_TPM is not set | ||
1091 | # CONFIG_TELCLOCK is not set | ||
1092 | CONFIG_DEVPORT=y | ||
1093 | CONFIG_I2C=y | ||
1094 | CONFIG_I2C_BOARDINFO=y | ||
1095 | CONFIG_I2C_COMPAT=y | ||
1096 | CONFIG_I2C_CHARDEV=y | ||
1097 | CONFIG_I2C_HELPER_AUTO=y | ||
1098 | |||
1099 | # | ||
1100 | # I2C Hardware Bus support | ||
1101 | # | ||
1102 | |||
1103 | # | ||
1104 | # PC SMBus host controller drivers | ||
1105 | # | ||
1106 | # CONFIG_I2C_ALI1535 is not set | ||
1107 | # CONFIG_I2C_ALI1563 is not set | ||
1108 | # CONFIG_I2C_ALI15X3 is not set | ||
1109 | # CONFIG_I2C_AMD756 is not set | ||
1110 | # CONFIG_I2C_AMD8111 is not set | ||
1111 | CONFIG_I2C_I801=y | ||
1112 | CONFIG_I2C_ISCH=y | ||
1113 | # CONFIG_I2C_PIIX4 is not set | ||
1114 | # CONFIG_I2C_NFORCE2 is not set | ||
1115 | # CONFIG_I2C_SIS5595 is not set | ||
1116 | # CONFIG_I2C_SIS630 is not set | ||
1117 | # CONFIG_I2C_SIS96X is not set | ||
1118 | # CONFIG_I2C_VIA is not set | ||
1119 | # CONFIG_I2C_VIAPRO is not set | ||
1120 | |||
1121 | # | ||
1122 | # ACPI drivers | ||
1123 | # | ||
1124 | # CONFIG_I2C_SCMI is not set | ||
1125 | |||
1126 | # | ||
1127 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
1128 | # | ||
1129 | # CONFIG_I2C_OCORES is not set | ||
1130 | # CONFIG_I2C_SIMTEC is not set | ||
1131 | |||
1132 | # | ||
1133 | # External I2C/SMBus adapter drivers | ||
1134 | # | ||
1135 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1136 | # CONFIG_I2C_TAOS_EVM is not set | ||
1137 | # CONFIG_I2C_TINY_USB is not set | ||
1138 | |||
1139 | # | ||
1140 | # Graphics adapter I2C/DDC channel drivers | ||
1141 | # | ||
1142 | # CONFIG_I2C_VOODOO3 is not set | ||
1143 | |||
1144 | # | ||
1145 | # Other I2C/SMBus bus drivers | ||
1146 | # | ||
1147 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
1148 | # CONFIG_I2C_STUB is not set | ||
1149 | |||
1150 | # | ||
1151 | # Miscellaneous I2C Chip support | ||
1152 | # | ||
1153 | # CONFIG_DS1682 is not set | ||
1154 | # CONFIG_SENSORS_TSL2550 is not set | ||
1155 | # CONFIG_I2C_DEBUG_CORE is not set | ||
1156 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
1157 | # CONFIG_I2C_DEBUG_BUS is not set | ||
1158 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
1159 | # CONFIG_SPI is not set | ||
1160 | |||
1161 | # | ||
1162 | # PPS support | ||
1163 | # | ||
1164 | # CONFIG_PPS is not set | ||
1165 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
1166 | # CONFIG_GPIOLIB is not set | ||
1167 | # CONFIG_W1 is not set | ||
1168 | CONFIG_POWER_SUPPLY=y | ||
1169 | # CONFIG_POWER_SUPPLY_DEBUG is not set | ||
1170 | # CONFIG_PDA_POWER is not set | ||
1171 | # CONFIG_BATTERY_DS2760 is not set | ||
1172 | # CONFIG_BATTERY_DS2782 is not set | ||
1173 | # CONFIG_BATTERY_BQ27x00 is not set | ||
1174 | # CONFIG_BATTERY_MAX17040 is not set | ||
1175 | # CONFIG_HWMON is not set | ||
1176 | CONFIG_THERMAL=y | ||
1177 | # CONFIG_WATCHDOG is not set | ||
1178 | CONFIG_SSB_POSSIBLE=y | ||
1179 | |||
1180 | # | ||
1181 | # Sonics Silicon Backplane | ||
1182 | # | ||
1183 | # CONFIG_SSB is not set | ||
1184 | |||
1185 | # | ||
1186 | # Multifunction device drivers | ||
1187 | # | ||
1188 | # CONFIG_MFD_CORE is not set | ||
1189 | # CONFIG_MFD_SM501 is not set | ||
1190 | # CONFIG_HTC_PASIC3 is not set | ||
1191 | # CONFIG_TWL4030_CORE is not set | ||
1192 | # CONFIG_MFD_TMIO is not set | ||
1193 | # CONFIG_PMIC_DA903X is not set | ||
1194 | # CONFIG_MFD_WM8400 is not set | ||
1195 | # CONFIG_MFD_WM831X is not set | ||
1196 | # CONFIG_MFD_WM8350_I2C is not set | ||
1197 | # CONFIG_MFD_PCF50633 is not set | ||
1198 | # CONFIG_AB3100_CORE is not set | ||
1199 | # CONFIG_REGULATOR is not set | ||
1200 | # CONFIG_MEDIA_SUPPORT is not set | ||
1201 | |||
1202 | # | ||
1203 | # Graphics support | ||
1204 | # | ||
1205 | # CONFIG_AGP is not set | ||
1206 | CONFIG_VGA_ARB=y | ||
1207 | # CONFIG_DRM is not set | ||
1208 | CONFIG_VGASTATE=y | ||
1209 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
1210 | CONFIG_FB=y | ||
1211 | # CONFIG_FIRMWARE_EDID is not set | ||
1212 | # CONFIG_FB_DDC is not set | ||
1213 | CONFIG_FB_BOOT_VESA_SUPPORT=y | ||
1214 | CONFIG_FB_CFB_FILLRECT=y | ||
1215 | CONFIG_FB_CFB_COPYAREA=y | ||
1216 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
1217 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
1218 | # CONFIG_FB_SYS_FILLRECT is not set | ||
1219 | # CONFIG_FB_SYS_COPYAREA is not set | ||
1220 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
1221 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
1222 | # CONFIG_FB_SYS_FOPS is not set | ||
1223 | # CONFIG_FB_SVGALIB is not set | ||
1224 | # CONFIG_FB_MACMODES is not set | ||
1225 | # CONFIG_FB_BACKLIGHT is not set | ||
1226 | # CONFIG_FB_MODE_HELPERS is not set | ||
1227 | # CONFIG_FB_TILEBLITTING is not set | ||
1228 | |||
1229 | # | ||
1230 | # Frame buffer hardware drivers | ||
1231 | # | ||
1232 | # CONFIG_FB_CIRRUS is not set | ||
1233 | # CONFIG_FB_PM2 is not set | ||
1234 | # CONFIG_FB_CYBER2000 is not set | ||
1235 | # CONFIG_FB_ARC is not set | ||
1236 | # CONFIG_FB_ASILIANT is not set | ||
1237 | # CONFIG_FB_IMSTT is not set | ||
1238 | CONFIG_FB_VGA16=y | ||
1239 | CONFIG_FB_VESA=y | ||
1240 | # CONFIG_FB_N411 is not set | ||
1241 | # CONFIG_FB_HGA is not set | ||
1242 | # CONFIG_FB_S1D13XXX is not set | ||
1243 | # CONFIG_FB_NVIDIA is not set | ||
1244 | # CONFIG_FB_RIVA is not set | ||
1245 | # CONFIG_FB_LE80578 is not set | ||
1246 | # CONFIG_FB_MATROX is not set | ||
1247 | # CONFIG_FB_RADEON is not set | ||
1248 | # CONFIG_FB_ATY128 is not set | ||
1249 | # CONFIG_FB_ATY is not set | ||
1250 | # CONFIG_FB_S3 is not set | ||
1251 | # CONFIG_FB_SAVAGE is not set | ||
1252 | # CONFIG_FB_SIS is not set | ||
1253 | # CONFIG_FB_VIA is not set | ||
1254 | # CONFIG_FB_NEOMAGIC is not set | ||
1255 | # CONFIG_FB_KYRO is not set | ||
1256 | # CONFIG_FB_3DFX is not set | ||
1257 | # CONFIG_FB_VOODOO1 is not set | ||
1258 | # CONFIG_FB_VT8623 is not set | ||
1259 | # CONFIG_FB_TRIDENT is not set | ||
1260 | # CONFIG_FB_ARK is not set | ||
1261 | # CONFIG_FB_PM3 is not set | ||
1262 | # CONFIG_FB_CARMINE is not set | ||
1263 | # CONFIG_FB_GEODE is not set | ||
1264 | # CONFIG_FB_VIRTUAL is not set | ||
1265 | # CONFIG_FB_METRONOME is not set | ||
1266 | # CONFIG_FB_MB862XX is not set | ||
1267 | # CONFIG_FB_BROADSHEET is not set | ||
1268 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
1269 | |||
1270 | # | ||
1271 | # Display device support | ||
1272 | # | ||
1273 | # CONFIG_DISPLAY_SUPPORT is not set | ||
1274 | |||
1275 | # | ||
1276 | # Console display driver support | ||
1277 | # | ||
1278 | CONFIG_VGA_CONSOLE=y | ||
1279 | CONFIG_VGACON_SOFT_SCROLLBACK=y | ||
1280 | CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256 | ||
1281 | CONFIG_DUMMY_CONSOLE=y | ||
1282 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1283 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
1284 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1285 | # CONFIG_FONTS is not set | ||
1286 | CONFIG_FONT_8x8=y | ||
1287 | CONFIG_FONT_8x16=y | ||
1288 | CONFIG_LOGO=y | ||
1289 | CONFIG_LOGO_LINUX_MONO=y | ||
1290 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
1291 | # CONFIG_LOGO_LINUX_CLUT224 is not set | ||
1292 | # CONFIG_SOUND is not set | ||
1293 | CONFIG_HID_SUPPORT=y | ||
1294 | CONFIG_HID=y | ||
1295 | CONFIG_HIDRAW=y | ||
1296 | |||
1297 | # | ||
1298 | # USB Input Devices | ||
1299 | # | ||
1300 | CONFIG_USB_HID=y | ||
1301 | CONFIG_HID_PID=y | ||
1302 | CONFIG_USB_HIDDEV=y | ||
1303 | |||
1304 | # | ||
1305 | # Special HID drivers | ||
1306 | # | ||
1307 | CONFIG_HID_A4TECH=y | ||
1308 | CONFIG_HID_APPLE=y | ||
1309 | CONFIG_HID_BELKIN=y | ||
1310 | CONFIG_HID_CHERRY=y | ||
1311 | CONFIG_HID_CHICONY=y | ||
1312 | CONFIG_HID_CYPRESS=y | ||
1313 | CONFIG_HID_DRAGONRISE=y | ||
1314 | # CONFIG_DRAGONRISE_FF is not set | ||
1315 | CONFIG_HID_EZKEY=y | ||
1316 | CONFIG_HID_KYE=y | ||
1317 | CONFIG_HID_GYRATION=y | ||
1318 | CONFIG_HID_TWINHAN=y | ||
1319 | CONFIG_HID_KENSINGTON=y | ||
1320 | CONFIG_HID_LOGITECH=y | ||
1321 | # CONFIG_LOGITECH_FF is not set | ||
1322 | CONFIG_LOGIRUMBLEPAD2_FF=y | ||
1323 | CONFIG_HID_MICROSOFT=y | ||
1324 | CONFIG_HID_MONTEREY=y | ||
1325 | CONFIG_HID_NTRIG=y | ||
1326 | CONFIG_HID_PANTHERLORD=y | ||
1327 | # CONFIG_PANTHERLORD_FF is not set | ||
1328 | CONFIG_HID_PETALYNX=y | ||
1329 | CONFIG_HID_SAMSUNG=y | ||
1330 | CONFIG_HID_SONY=y | ||
1331 | CONFIG_HID_SUNPLUS=y | ||
1332 | CONFIG_HID_GREENASIA=y | ||
1333 | # CONFIG_GREENASIA_FF is not set | ||
1334 | CONFIG_HID_SMARTJOYPLUS=y | ||
1335 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
1336 | CONFIG_HID_TOPSEED=y | ||
1337 | CONFIG_HID_THRUSTMASTER=y | ||
1338 | # CONFIG_THRUSTMASTER_FF is not set | ||
1339 | CONFIG_HID_ZEROPLUS=y | ||
1340 | # CONFIG_ZEROPLUS_FF is not set | ||
1341 | CONFIG_USB_SUPPORT=y | ||
1342 | CONFIG_USB_ARCH_HAS_HCD=y | ||
1343 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
1344 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
1345 | CONFIG_USB=y | ||
1346 | # CONFIG_USB_DEBUG is not set | ||
1347 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | ||
1348 | |||
1349 | # | ||
1350 | # Miscellaneous USB options | ||
1351 | # | ||
1352 | # CONFIG_USB_DEVICEFS is not set | ||
1353 | # CONFIG_USB_DEVICE_CLASS is not set | ||
1354 | # CONFIG_USB_DYNAMIC_MINORS is not set | ||
1355 | # CONFIG_USB_SUSPEND is not set | ||
1356 | # CONFIG_USB_OTG is not set | ||
1357 | CONFIG_USB_MON=y | ||
1358 | # CONFIG_USB_WUSB is not set | ||
1359 | # CONFIG_USB_WUSB_CBAF is not set | ||
1360 | |||
1361 | # | ||
1362 | # USB Host Controller Drivers | ||
1363 | # | ||
1364 | # CONFIG_USB_C67X00_HCD is not set | ||
1365 | # CONFIG_USB_XHCI_HCD is not set | ||
1366 | CONFIG_USB_EHCI_HCD=y | ||
1367 | CONFIG_USB_EHCI_ROOT_HUB_TT=y | ||
1368 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||
1369 | # CONFIG_USB_OXU210HP_HCD is not set | ||
1370 | # CONFIG_USB_ISP116X_HCD is not set | ||
1371 | # CONFIG_USB_ISP1760_HCD is not set | ||
1372 | # CONFIG_USB_ISP1362_HCD is not set | ||
1373 | CONFIG_USB_OHCI_HCD=y | ||
1374 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | ||
1375 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | ||
1376 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
1377 | CONFIG_USB_UHCI_HCD=y | ||
1378 | # CONFIG_USB_SL811_HCD is not set | ||
1379 | # CONFIG_USB_R8A66597_HCD is not set | ||
1380 | # CONFIG_USB_WHCI_HCD is not set | ||
1381 | # CONFIG_USB_HWA_HCD is not set | ||
1382 | |||
1383 | # | ||
1384 | # USB Device Class drivers | ||
1385 | # | ||
1386 | # CONFIG_USB_ACM is not set | ||
1387 | # CONFIG_USB_PRINTER is not set | ||
1388 | # CONFIG_USB_WDM is not set | ||
1389 | # CONFIG_USB_TMC is not set | ||
1390 | |||
1391 | # | ||
1392 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may | ||
1393 | # | ||
1394 | |||
1395 | # | ||
1396 | # also be needed; see USB_STORAGE Help for more info | ||
1397 | # | ||
1398 | CONFIG_USB_STORAGE=y | ||
1399 | # CONFIG_USB_STORAGE_DEBUG is not set | ||
1400 | CONFIG_USB_STORAGE_DATAFAB=y | ||
1401 | CONFIG_USB_STORAGE_FREECOM=y | ||
1402 | CONFIG_USB_STORAGE_ISD200=y | ||
1403 | CONFIG_USB_STORAGE_USBAT=y | ||
1404 | CONFIG_USB_STORAGE_SDDR09=y | ||
1405 | CONFIG_USB_STORAGE_SDDR55=y | ||
1406 | CONFIG_USB_STORAGE_JUMPSHOT=y | ||
1407 | CONFIG_USB_STORAGE_ALAUDA=y | ||
1408 | CONFIG_USB_STORAGE_ONETOUCH=y | ||
1409 | CONFIG_USB_STORAGE_KARMA=y | ||
1410 | CONFIG_USB_STORAGE_CYPRESS_ATACB=y | ||
1411 | # CONFIG_USB_LIBUSUAL is not set | ||
1412 | |||
1413 | # | ||
1414 | # USB Imaging devices | ||
1415 | # | ||
1416 | # CONFIG_USB_MDC800 is not set | ||
1417 | # CONFIG_USB_MICROTEK is not set | ||
1418 | |||
1419 | # | ||
1420 | # USB port drivers | ||
1421 | # | ||
1422 | # CONFIG_USB_SERIAL is not set | ||
1423 | |||
1424 | # | ||
1425 | # USB Miscellaneous drivers | ||
1426 | # | ||
1427 | # CONFIG_USB_EMI62 is not set | ||
1428 | # CONFIG_USB_EMI26 is not set | ||
1429 | # CONFIG_USB_ADUTUX is not set | ||
1430 | # CONFIG_USB_SEVSEG is not set | ||
1431 | # CONFIG_USB_RIO500 is not set | ||
1432 | # CONFIG_USB_LEGOTOWER is not set | ||
1433 | # CONFIG_USB_LCD is not set | ||
1434 | # CONFIG_USB_BERRY_CHARGE is not set | ||
1435 | # CONFIG_USB_LED is not set | ||
1436 | # CONFIG_USB_CYPRESS_CY7C63 is not set | ||
1437 | # CONFIG_USB_CYTHERM is not set | ||
1438 | # CONFIG_USB_IDMOUSE is not set | ||
1439 | # CONFIG_USB_FTDI_ELAN is not set | ||
1440 | # CONFIG_USB_APPLEDISPLAY is not set | ||
1441 | # CONFIG_USB_SISUSBVGA is not set | ||
1442 | # CONFIG_USB_LD is not set | ||
1443 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1444 | # CONFIG_USB_IOWARRIOR is not set | ||
1445 | # CONFIG_USB_TEST is not set | ||
1446 | # CONFIG_USB_ISIGHTFW is not set | ||
1447 | # CONFIG_USB_VST is not set | ||
1448 | # CONFIG_USB_GADGET is not set | ||
1449 | |||
1450 | # | ||
1451 | # OTG and related infrastructure | ||
1452 | # | ||
1453 | # CONFIG_NOP_USB_XCEIV is not set | ||
1454 | # CONFIG_UWB is not set | ||
1455 | # CONFIG_MMC is not set | ||
1456 | # CONFIG_MEMSTICK is not set | ||
1457 | # CONFIG_NEW_LEDS is not set | ||
1458 | # CONFIG_ACCESSIBILITY is not set | ||
1459 | # CONFIG_INFINIBAND is not set | ||
1460 | # CONFIG_EDAC is not set | ||
1461 | # CONFIG_RTC_CLASS is not set | ||
1462 | # CONFIG_DMADEVICES is not set | ||
1463 | # CONFIG_AUXDISPLAY is not set | ||
1464 | # CONFIG_UIO is not set | ||
1465 | |||
1466 | # | ||
1467 | # TI VLYNQ | ||
1468 | # | ||
1469 | # CONFIG_STAGING is not set | ||
1470 | # CONFIG_X86_PLATFORM_DEVICES is not set | ||
1471 | |||
1472 | # | ||
1473 | # Firmware Drivers | ||
1474 | # | ||
1475 | CONFIG_EDD=y | ||
1476 | # CONFIG_EDD_OFF is not set | ||
1477 | CONFIG_FIRMWARE_MEMMAP=y | ||
1478 | # CONFIG_DELL_RBU is not set | ||
1479 | # CONFIG_DCDBAS is not set | ||
1480 | # CONFIG_DMIID is not set | ||
1481 | # CONFIG_ISCSI_IBFT_FIND is not set | ||
1482 | |||
1483 | # | ||
1484 | # File systems | ||
1485 | # | ||
1486 | CONFIG_EXT2_FS=y | ||
1487 | CONFIG_EXT2_FS_XATTR=y | ||
1488 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
1489 | CONFIG_EXT2_FS_SECURITY=y | ||
1490 | # CONFIG_EXT2_FS_XIP is not set | ||
1491 | CONFIG_EXT3_FS=y | ||
1492 | CONFIG_EXT3_DEFAULTS_TO_ORDERED=y | ||
1493 | CONFIG_EXT3_FS_XATTR=y | ||
1494 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
1495 | CONFIG_EXT3_FS_SECURITY=y | ||
1496 | # CONFIG_EXT4_FS is not set | ||
1497 | CONFIG_JBD=y | ||
1498 | # CONFIG_JBD_DEBUG is not set | ||
1499 | CONFIG_FS_MBCACHE=y | ||
1500 | # CONFIG_REISERFS_FS is not set | ||
1501 | # CONFIG_JFS_FS is not set | ||
1502 | CONFIG_FS_POSIX_ACL=y | ||
1503 | # CONFIG_XFS_FS is not set | ||
1504 | # CONFIG_GFS2_FS is not set | ||
1505 | # CONFIG_OCFS2_FS is not set | ||
1506 | # CONFIG_BTRFS_FS is not set | ||
1507 | # CONFIG_NILFS2_FS is not set | ||
1508 | CONFIG_FILE_LOCKING=y | ||
1509 | CONFIG_FSNOTIFY=y | ||
1510 | CONFIG_DNOTIFY=y | ||
1511 | # CONFIG_INOTIFY is not set | ||
1512 | CONFIG_INOTIFY_USER=y | ||
1513 | # CONFIG_QUOTA is not set | ||
1514 | # CONFIG_AUTOFS_FS is not set | ||
1515 | # CONFIG_AUTOFS4_FS is not set | ||
1516 | # CONFIG_FUSE_FS is not set | ||
1517 | |||
1518 | # | ||
1519 | # Caches | ||
1520 | # | ||
1521 | CONFIG_FSCACHE=y | ||
1522 | CONFIG_FSCACHE_STATS=y | ||
1523 | # CONFIG_FSCACHE_HISTOGRAM is not set | ||
1524 | # CONFIG_FSCACHE_DEBUG is not set | ||
1525 | # CONFIG_FSCACHE_OBJECT_LIST is not set | ||
1526 | CONFIG_CACHEFILES=y | ||
1527 | # CONFIG_CACHEFILES_DEBUG is not set | ||
1528 | # CONFIG_CACHEFILES_HISTOGRAM is not set | ||
1529 | |||
1530 | # | ||
1531 | # CD-ROM/DVD Filesystems | ||
1532 | # | ||
1533 | CONFIG_ISO9660_FS=y | ||
1534 | CONFIG_JOLIET=y | ||
1535 | CONFIG_ZISOFS=y | ||
1536 | CONFIG_UDF_FS=y | ||
1537 | CONFIG_UDF_NLS=y | ||
1538 | |||
1539 | # | ||
1540 | # DOS/FAT/NT Filesystems | ||
1541 | # | ||
1542 | CONFIG_FAT_FS=y | ||
1543 | CONFIG_MSDOS_FS=y | ||
1544 | CONFIG_VFAT_FS=y | ||
1545 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1546 | CONFIG_FAT_DEFAULT_IOCHARSET="utf8" | ||
1547 | CONFIG_NTFS_FS=y | ||
1548 | # CONFIG_NTFS_DEBUG is not set | ||
1549 | CONFIG_NTFS_RW=y | ||
1550 | |||
1551 | # | ||
1552 | # Pseudo filesystems | ||
1553 | # | ||
1554 | CONFIG_PROC_FS=y | ||
1555 | CONFIG_PROC_KCORE=y | ||
1556 | CONFIG_PROC_SYSCTL=y | ||
1557 | CONFIG_PROC_PAGE_MONITOR=y | ||
1558 | CONFIG_SYSFS=y | ||
1559 | CONFIG_TMPFS=y | ||
1560 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1561 | # CONFIG_HUGETLBFS is not set | ||
1562 | # CONFIG_HUGETLB_PAGE is not set | ||
1563 | CONFIG_CONFIGFS_FS=y | ||
1564 | # CONFIG_MISC_FILESYSTEMS is not set | ||
1565 | # CONFIG_NETWORK_FILESYSTEMS is not set | ||
1566 | |||
1567 | # | ||
1568 | # Partition Types | ||
1569 | # | ||
1570 | CONFIG_PARTITION_ADVANCED=y | ||
1571 | # CONFIG_ACORN_PARTITION is not set | ||
1572 | # CONFIG_OSF_PARTITION is not set | ||
1573 | # CONFIG_AMIGA_PARTITION is not set | ||
1574 | # CONFIG_ATARI_PARTITION is not set | ||
1575 | # CONFIG_MAC_PARTITION is not set | ||
1576 | CONFIG_MSDOS_PARTITION=y | ||
1577 | # CONFIG_BSD_DISKLABEL is not set | ||
1578 | # CONFIG_MINIX_SUBPARTITION is not set | ||
1579 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
1580 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
1581 | CONFIG_LDM_PARTITION=y | ||
1582 | CONFIG_LDM_DEBUG=y | ||
1583 | # CONFIG_SGI_PARTITION is not set | ||
1584 | # CONFIG_ULTRIX_PARTITION is not set | ||
1585 | # CONFIG_SUN_PARTITION is not set | ||
1586 | # CONFIG_KARMA_PARTITION is not set | ||
1587 | # CONFIG_EFI_PARTITION is not set | ||
1588 | # CONFIG_SYSV68_PARTITION is not set | ||
1589 | CONFIG_NLS=y | ||
1590 | CONFIG_NLS_DEFAULT="utf8" | ||
1591 | CONFIG_NLS_CODEPAGE_437=y | ||
1592 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1593 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1594 | CONFIG_NLS_CODEPAGE_850=y | ||
1595 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1596 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1597 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1598 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1599 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1600 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1601 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1602 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1603 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1604 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1605 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1606 | CONFIG_NLS_CODEPAGE_936=y | ||
1607 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1608 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1609 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1610 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1611 | # CONFIG_NLS_ISO8859_8 is not set | ||
1612 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1613 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1614 | CONFIG_NLS_ASCII=y | ||
1615 | CONFIG_NLS_ISO8859_1=y | ||
1616 | # CONFIG_NLS_ISO8859_2 is not set | ||
1617 | # CONFIG_NLS_ISO8859_3 is not set | ||
1618 | # CONFIG_NLS_ISO8859_4 is not set | ||
1619 | # CONFIG_NLS_ISO8859_5 is not set | ||
1620 | # CONFIG_NLS_ISO8859_6 is not set | ||
1621 | # CONFIG_NLS_ISO8859_7 is not set | ||
1622 | # CONFIG_NLS_ISO8859_9 is not set | ||
1623 | # CONFIG_NLS_ISO8859_13 is not set | ||
1624 | # CONFIG_NLS_ISO8859_14 is not set | ||
1625 | CONFIG_NLS_ISO8859_15=y | ||
1626 | # CONFIG_NLS_KOI8_R is not set | ||
1627 | # CONFIG_NLS_KOI8_U is not set | ||
1628 | CONFIG_NLS_UTF8=y | ||
1629 | # CONFIG_DLM is not set | ||
1630 | |||
1631 | # | ||
1632 | # Kernel hacking | ||
1633 | # | ||
1634 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
1635 | CONFIG_PRINTK_TIME=y | ||
1636 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
1637 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
1638 | CONFIG_FRAME_WARN=0 | ||
1639 | CONFIG_MAGIC_SYSRQ=y | ||
1640 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1641 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1642 | CONFIG_DEBUG_FS=y | ||
1643 | # CONFIG_HEADERS_CHECK is not set | ||
1644 | # CONFIG_DEBUG_KERNEL is not set | ||
1645 | # CONFIG_SLUB_DEBUG_ON is not set | ||
1646 | # CONFIG_SLUB_STATS is not set | ||
1647 | CONFIG_STACKTRACE=y | ||
1648 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1649 | CONFIG_DEBUG_MEMORY_INIT=y | ||
1650 | CONFIG_ARCH_WANT_FRAME_POINTERS=y | ||
1651 | CONFIG_FRAME_POINTER=y | ||
1652 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1653 | # CONFIG_LATENCYTOP is not set | ||
1654 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1655 | CONFIG_USER_STACKTRACE_SUPPORT=y | ||
1656 | CONFIG_NOP_TRACER=y | ||
1657 | CONFIG_HAVE_FTRACE_NMI_ENTER=y | ||
1658 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
1659 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | ||
1660 | CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y | ||
1661 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | ||
1662 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
1663 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | ||
1664 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y | ||
1665 | CONFIG_RING_BUFFER=y | ||
1666 | CONFIG_FTRACE_NMI_ENTER=y | ||
1667 | CONFIG_EVENT_TRACING=y | ||
1668 | CONFIG_CONTEXT_SWITCH_TRACER=y | ||
1669 | CONFIG_TRACING=y | ||
1670 | CONFIG_GENERIC_TRACER=y | ||
1671 | CONFIG_TRACING_SUPPORT=y | ||
1672 | CONFIG_FTRACE=y | ||
1673 | CONFIG_FUNCTION_TRACER=y | ||
1674 | # CONFIG_FUNCTION_GRAPH_TRACER is not set | ||
1675 | # CONFIG_IRQSOFF_TRACER is not set | ||
1676 | # CONFIG_PREEMPT_TRACER is not set | ||
1677 | # CONFIG_SYSPROF_TRACER is not set | ||
1678 | # CONFIG_SCHED_TRACER is not set | ||
1679 | # CONFIG_FTRACE_SYSCALLS is not set | ||
1680 | # CONFIG_BOOT_TRACER is not set | ||
1681 | CONFIG_BRANCH_PROFILE_NONE=y | ||
1682 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
1683 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
1684 | # CONFIG_POWER_TRACER is not set | ||
1685 | # CONFIG_STACK_TRACER is not set | ||
1686 | # CONFIG_KMEMTRACE is not set | ||
1687 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1688 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1689 | CONFIG_DYNAMIC_FTRACE=y | ||
1690 | # CONFIG_FUNCTION_PROFILER is not set | ||
1691 | CONFIG_FTRACE_MCOUNT_RECORD=y | ||
1692 | # CONFIG_FTRACE_STARTUP_TEST is not set | ||
1693 | # CONFIG_MMIOTRACE is not set | ||
1694 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
1695 | # CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set | ||
1696 | # CONFIG_DYNAMIC_DEBUG is not set | ||
1697 | # CONFIG_DMA_API_DEBUG is not set | ||
1698 | # CONFIG_SAMPLES is not set | ||
1699 | CONFIG_HAVE_ARCH_KGDB=y | ||
1700 | CONFIG_HAVE_ARCH_KMEMCHECK=y | ||
1701 | # CONFIG_STRICT_DEVMEM is not set | ||
1702 | CONFIG_X86_VERBOSE_BOOTUP=y | ||
1703 | CONFIG_EARLY_PRINTK=y | ||
1704 | # CONFIG_EARLY_PRINTK_DBGP is not set | ||
1705 | # CONFIG_IOMMU_STRESS is not set | ||
1706 | CONFIG_HAVE_MMIOTRACE_SUPPORT=y | ||
1707 | CONFIG_IO_DELAY_TYPE_0X80=0 | ||
1708 | CONFIG_IO_DELAY_TYPE_0XED=1 | ||
1709 | CONFIG_IO_DELAY_TYPE_UDELAY=2 | ||
1710 | CONFIG_IO_DELAY_TYPE_NONE=3 | ||
1711 | CONFIG_IO_DELAY_0X80=y | ||
1712 | # CONFIG_IO_DELAY_0XED is not set | ||
1713 | # CONFIG_IO_DELAY_UDELAY is not set | ||
1714 | # CONFIG_IO_DELAY_NONE is not set | ||
1715 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 | ||
1716 | # CONFIG_OPTIMIZE_INLINING is not set | ||
1717 | |||
1718 | # | ||
1719 | # Security options | ||
1720 | # | ||
1721 | # CONFIG_KEYS is not set | ||
1722 | # CONFIG_SECURITY is not set | ||
1723 | # CONFIG_SECURITYFS is not set | ||
1724 | # CONFIG_SECURITY_FILE_CAPABILITIES is not set | ||
1725 | # CONFIG_IMA is not set | ||
1726 | CONFIG_CRYPTO=y | ||
1727 | |||
1728 | # | ||
1729 | # Crypto core or helper | ||
1730 | # | ||
1731 | CONFIG_CRYPTO_FIPS=y | ||
1732 | CONFIG_CRYPTO_ALGAPI=y | ||
1733 | CONFIG_CRYPTO_ALGAPI2=y | ||
1734 | CONFIG_CRYPTO_AEAD=y | ||
1735 | CONFIG_CRYPTO_AEAD2=y | ||
1736 | CONFIG_CRYPTO_BLKCIPHER=y | ||
1737 | CONFIG_CRYPTO_BLKCIPHER2=y | ||
1738 | CONFIG_CRYPTO_HASH=y | ||
1739 | CONFIG_CRYPTO_HASH2=y | ||
1740 | CONFIG_CRYPTO_RNG=y | ||
1741 | CONFIG_CRYPTO_RNG2=y | ||
1742 | CONFIG_CRYPTO_PCOMP=y | ||
1743 | CONFIG_CRYPTO_MANAGER=y | ||
1744 | CONFIG_CRYPTO_MANAGER2=y | ||
1745 | CONFIG_CRYPTO_GF128MUL=y | ||
1746 | CONFIG_CRYPTO_NULL=y | ||
1747 | CONFIG_CRYPTO_WORKQUEUE=y | ||
1748 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1749 | CONFIG_CRYPTO_AUTHENC=y | ||
1750 | # CONFIG_CRYPTO_TEST is not set | ||
1751 | |||
1752 | # | ||
1753 | # Authenticated Encryption with Associated Data | ||
1754 | # | ||
1755 | CONFIG_CRYPTO_CCM=y | ||
1756 | CONFIG_CRYPTO_GCM=y | ||
1757 | CONFIG_CRYPTO_SEQIV=y | ||
1758 | |||
1759 | # | ||
1760 | # Block modes | ||
1761 | # | ||
1762 | CONFIG_CRYPTO_CBC=y | ||
1763 | CONFIG_CRYPTO_CTR=y | ||
1764 | CONFIG_CRYPTO_CTS=y | ||
1765 | CONFIG_CRYPTO_ECB=y | ||
1766 | # CONFIG_CRYPTO_LRW is not set | ||
1767 | CONFIG_CRYPTO_PCBC=y | ||
1768 | # CONFIG_CRYPTO_XTS is not set | ||
1769 | |||
1770 | # | ||
1771 | # Hash modes | ||
1772 | # | ||
1773 | CONFIG_CRYPTO_HMAC=y | ||
1774 | CONFIG_CRYPTO_XCBC=y | ||
1775 | # CONFIG_CRYPTO_VMAC is not set | ||
1776 | |||
1777 | # | ||
1778 | # Digest | ||
1779 | # | ||
1780 | CONFIG_CRYPTO_CRC32C=y | ||
1781 | CONFIG_CRYPTO_CRC32C_INTEL=y | ||
1782 | CONFIG_CRYPTO_GHASH=y | ||
1783 | # CONFIG_CRYPTO_MD4 is not set | ||
1784 | CONFIG_CRYPTO_MD5=y | ||
1785 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1786 | CONFIG_CRYPTO_RMD128=y | ||
1787 | CONFIG_CRYPTO_RMD160=y | ||
1788 | CONFIG_CRYPTO_RMD256=y | ||
1789 | CONFIG_CRYPTO_RMD320=y | ||
1790 | CONFIG_CRYPTO_SHA1=y | ||
1791 | CONFIG_CRYPTO_SHA256=y | ||
1792 | CONFIG_CRYPTO_SHA512=y | ||
1793 | CONFIG_CRYPTO_TGR192=y | ||
1794 | CONFIG_CRYPTO_WP512=y | ||
1795 | |||
1796 | # | ||
1797 | # Ciphers | ||
1798 | # | ||
1799 | CONFIG_CRYPTO_AES=y | ||
1800 | # CONFIG_CRYPTO_AES_X86_64 is not set | ||
1801 | # CONFIG_CRYPTO_AES_NI_INTEL is not set | ||
1802 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1803 | CONFIG_CRYPTO_ARC4=y | ||
1804 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1805 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1806 | CONFIG_CRYPTO_CAST5=y | ||
1807 | CONFIG_CRYPTO_CAST6=y | ||
1808 | CONFIG_CRYPTO_DES=y | ||
1809 | CONFIG_CRYPTO_FCRYPT=y | ||
1810 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1811 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1812 | # CONFIG_CRYPTO_SALSA20_X86_64 is not set | ||
1813 | # CONFIG_CRYPTO_SEED is not set | ||
1814 | CONFIG_CRYPTO_SERPENT=y | ||
1815 | CONFIG_CRYPTO_TEA=y | ||
1816 | CONFIG_CRYPTO_TWOFISH=y | ||
1817 | CONFIG_CRYPTO_TWOFISH_COMMON=y | ||
1818 | # CONFIG_CRYPTO_TWOFISH_X86_64 is not set | ||
1819 | |||
1820 | # | ||
1821 | # Compression | ||
1822 | # | ||
1823 | CONFIG_CRYPTO_DEFLATE=y | ||
1824 | CONFIG_CRYPTO_ZLIB=y | ||
1825 | CONFIG_CRYPTO_LZO=y | ||
1826 | |||
1827 | # | ||
1828 | # Random Number Generation | ||
1829 | # | ||
1830 | CONFIG_CRYPTO_ANSI_CPRNG=y | ||
1831 | # CONFIG_CRYPTO_HW is not set | ||
1832 | CONFIG_HAVE_KVM=y | ||
1833 | # CONFIG_VIRTUALIZATION is not set | ||
1834 | CONFIG_BINARY_PRINTF=y | ||
1835 | |||
1836 | # | ||
1837 | # Library routines | ||
1838 | # | ||
1839 | CONFIG_BITREVERSE=y | ||
1840 | CONFIG_GENERIC_FIND_FIRST_BIT=y | ||
1841 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
1842 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1843 | CONFIG_CRC_CCITT=y | ||
1844 | CONFIG_CRC16=y | ||
1845 | # CONFIG_CRC_T10DIF is not set | ||
1846 | CONFIG_CRC_ITU_T=y | ||
1847 | CONFIG_CRC32=y | ||
1848 | # CONFIG_CRC7 is not set | ||
1849 | CONFIG_LIBCRC32C=y | ||
1850 | CONFIG_ZLIB_INFLATE=y | ||
1851 | CONFIG_ZLIB_DEFLATE=y | ||
1852 | CONFIG_LZO_COMPRESS=y | ||
1853 | CONFIG_LZO_DECOMPRESS=y | ||
1854 | CONFIG_TEXTSEARCH=y | ||
1855 | CONFIG_TEXTSEARCH_KMP=y | ||
1856 | CONFIG_TEXTSEARCH_BM=y | ||
1857 | CONFIG_TEXTSEARCH_FSM=y | ||
1858 | CONFIG_HAS_IOMEM=y | ||
1859 | CONFIG_HAS_IOPORT=y | ||
1860 | CONFIG_HAS_DMA=y | ||
1861 | CONFIG_NLATTR=y | ||
1862 | |||
1863 | # | ||
1864 | # LITMUS^RT | ||
1865 | # | ||
1866 | |||
1867 | # | ||
1868 | # Real-Time Synchronization | ||
1869 | # | ||
1870 | CONFIG_SRP=y | ||
1871 | CONFIG_FMLP=y | ||
1872 | |||
1873 | # | ||
1874 | # Tracing | ||
1875 | # | ||
1876 | CONFIG_FEATHER_TRACE=y | ||
1877 | CONFIG_SCHED_TASK_TRACE=y | ||
1878 | CONFIG_SCHED_OVERHEAD_TRACE=y | ||
1879 | CONFIG_SCHED_DEBUG_TRACE=y | ||
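The LITMUS^RT block above lists the scheduler-related options introduced by the litmus-rt patch; they do not exist in a stock 2.6.32 kernel, so their presence is a quick sanity check that a build really used the patched tree. The short Python sketch below scans a kernel .config for exactly these options. It is illustrative only and not part of the 2010.1 release, and the default path /boot/config-2.6.32-litmus2010 is merely an assumption — pass your own config path as the first argument.

#!/usr/bin/env python
# Illustrative helper (not part of the 2010.1 release): check that a kernel
# .config enables the LITMUS^RT options listed in the reference config above.
# The default config path below is an assumption; pass your own as argv[1].

import sys

REQUIRED = [
    "CONFIG_SRP",
    "CONFIG_FMLP",
    "CONFIG_FEATHER_TRACE",
    "CONFIG_SCHED_TASK_TRACE",
    "CONFIG_SCHED_OVERHEAD_TRACE",
    "CONFIG_SCHED_DEBUG_TRACE",
]

def enabled_options(path):
    # Collect every option set to =y in the config file.
    opts = set()
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#") and line.endswith("=y"):
                opts.add(line[:-len("=y")])
    return opts

if __name__ == "__main__":
    config = sys.argv[1] if len(sys.argv) > 1 else "/boot/config-2.6.32-litmus2010"
    enabled = enabled_options(config)
    missing = [opt for opt in REQUIRED if opt not in enabled]
    if missing:
        print("missing LITMUS^RT options: " + ", ".join(missing))
        sys.exit(1)
    print("all LITMUS^RT options enabled in " + config)

A non-zero exit status means at least one of the options shown in the reference configuration is absent from the config file that was checked.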
diff --git a/download/2010.1/SHA256SUMS b/download/2010.1/SHA256SUMS new file mode 100644 index 0000000..6b67d5c --- /dev/null +++ b/download/2010.1/SHA256SUMS | |||
@@ -0,0 +1,4 @@ | |||
1 | 26b2aa111452e31acffbb866fd9b66058aa640220e8b7d30c103be8ed96b5751 32bit-config | ||
2 | 91fbdbd565c02cfb2f0d69f9dbdfde0b4b401fcaba04f4af24d8b6cf61046aa2 64bit-config | ||
3 | 9818b960c63df5ad694033f7a8782c555f9b90bc2507213e7d303105b1f6481a liblitmus-2010.1.tgz | ||
4 | 79753deefdfdb3f37341c95c92efc520fd4c840fc54c323c35c1f8fe65d8840e litmus-rt-2010.1.patch | ||
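The four sums above cover everything added under download/2010.1/: the two reference configs, the liblitmus userspace library, and the kernel patch. Verifying a download amounts to running sha256sum -c SHA256SUMS in the directory holding the files; the Python sketch below performs the same check. It is illustrative only, not part of the release, and assumes the four files sit in the current working directory next to SHA256SUMS.

#!/usr/bin/env python
# Illustrative helper (not part of the 2010.1 release): verify downloaded
# release files against the SHA256SUMS manifest above, the same check that
# "sha256sum -c SHA256SUMS" performs. Assumes the files are in the current
# directory next to SHA256SUMS.

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in 1 MiB chunks so large downloads need little memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while True:
            block = f.read(chunk_size)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()

if __name__ == "__main__":
    failures = 0
    with open("SHA256SUMS") as manifest:
        for entry in manifest:
            if not entry.strip():
                continue
            expected, name = entry.split()
            ok = sha256_of(name) == expected
            print("%s: %s" % (name, "OK" if ok else "FAILED"))
            if not ok:
                failures += 1
    raise SystemExit(1 if failures else 0)

Any file reported as FAILED should be re-downloaded before applying the patch or building against the library.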
diff --git a/download/2010.1/liblitmus-2010.1.tgz b/download/2010.1/liblitmus-2010.1.tgz new file mode 100644 index 0000000..67c0306 --- /dev/null +++ b/download/2010.1/liblitmus-2010.1.tgz | |||
Binary files differ | |||
diff --git a/download/2010.1/litmus-rt-2010.1.patch b/download/2010.1/litmus-rt-2010.1.patch new file mode 100644 index 0000000..9df4716 --- /dev/null +++ b/download/2010.1/litmus-rt-2010.1.patch | |||
@@ -0,0 +1,38980 @@ | |||
1 | diff --git a/Makefile b/Makefile | ||
2 | index ec932b2..2603066 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,7 +1,7 @@ | ||
6 | VERSION = 2 | ||
7 | PATCHLEVEL = 6 | ||
8 | SUBLEVEL = 32 | ||
9 | -EXTRAVERSION = .9 | ||
10 | +EXTRAVERSION =-litmus2010 | ||
11 | NAME = Man-Eating Seals of Antiquity | ||
12 | |||
13 | # *DOCUMENTATION* | ||
14 | @@ -644,7 +644,7 @@ export mod_strip_cmd | ||
15 | |||
16 | |||
17 | ifeq ($(KBUILD_EXTMOD),) | ||
18 | -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ | ||
19 | +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ litmus/ | ||
20 | |||
21 | vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ | ||
22 | $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ | ||
23 | diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c | ||
24 | index 62619f2..9a3334a 100644 | ||
25 | --- a/arch/alpha/kernel/osf_sys.c | ||
26 | +++ b/arch/alpha/kernel/osf_sys.c | ||
27 | @@ -178,18 +178,25 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len, | ||
28 | unsigned long, prot, unsigned long, flags, unsigned long, fd, | ||
29 | unsigned long, off) | ||
30 | { | ||
31 | - unsigned long ret = -EINVAL; | ||
32 | + struct file *file = NULL; | ||
33 | + unsigned long ret = -EBADF; | ||
34 | |||
35 | #if 0 | ||
36 | if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) | ||
37 | printk("%s: unimplemented OSF mmap flags %04lx\n", | ||
38 | current->comm, flags); | ||
39 | #endif | ||
40 | - if ((off + PAGE_ALIGN(len)) < off) | ||
41 | - goto out; | ||
42 | - if (off & ~PAGE_MASK) | ||
43 | - goto out; | ||
44 | - ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
45 | + if (!(flags & MAP_ANONYMOUS)) { | ||
46 | + file = fget(fd); | ||
47 | + if (!file) | ||
48 | + goto out; | ||
49 | + } | ||
50 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
51 | + down_write(¤t->mm->mmap_sem); | ||
52 | + ret = do_mmap(file, addr, len, prot, flags, off); | ||
53 | + up_write(¤t->mm->mmap_sem); | ||
54 | + if (file) | ||
55 | + fput(file); | ||
56 | out: | ||
57 | return ret; | ||
58 | } | ||
59 | diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h | ||
60 | index 41f99c5..8eebf89 100644 | ||
61 | --- a/arch/arm/include/asm/mman.h | ||
62 | +++ b/arch/arm/include/asm/mman.h | ||
63 | @@ -1,4 +1 @@ | ||
64 | #include <asm-generic/mman.h> | ||
65 | - | ||
66 | -#define arch_mmap_check(addr, len, flags) \ | ||
67 | - (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0) | ||
68 | diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S | ||
69 | index 4f07168..fafce1b 100644 | ||
70 | --- a/arch/arm/kernel/calls.S | ||
71 | +++ b/arch/arm/kernel/calls.S | ||
72 | @@ -172,7 +172,7 @@ | ||
73 | /* 160 */ CALL(sys_sched_get_priority_min) | ||
74 | CALL(sys_sched_rr_get_interval) | ||
75 | CALL(sys_nanosleep) | ||
76 | - CALL(sys_mremap) | ||
77 | + CALL(sys_arm_mremap) | ||
78 | CALL(sys_setresuid16) | ||
79 | /* 165 */ CALL(sys_getresuid16) | ||
80 | CALL(sys_ni_syscall) /* vm86 */ | ||
81 | diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S | ||
82 | index 2c1db77..f0fe95b 100644 | ||
83 | --- a/arch/arm/kernel/entry-common.S | ||
84 | +++ b/arch/arm/kernel/entry-common.S | ||
85 | @@ -416,12 +416,12 @@ sys_mmap2: | ||
86 | tst r5, #PGOFF_MASK | ||
87 | moveq r5, r5, lsr #PAGE_SHIFT - 12 | ||
88 | streq r5, [sp, #4] | ||
89 | - beq sys_mmap_pgoff | ||
90 | + beq do_mmap2 | ||
91 | mov r0, #-EINVAL | ||
92 | mov pc, lr | ||
93 | #else | ||
94 | str r5, [sp, #4] | ||
95 | - b sys_mmap_pgoff | ||
96 | + b do_mmap2 | ||
97 | #endif | ||
98 | ENDPROC(sys_mmap2) | ||
99 | |||
100 | diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c | ||
101 | index ae4027b..78ecaac 100644 | ||
102 | --- a/arch/arm/kernel/sys_arm.c | ||
103 | +++ b/arch/arm/kernel/sys_arm.c | ||
104 | @@ -28,6 +28,41 @@ | ||
105 | #include <linux/ipc.h> | ||
106 | #include <linux/uaccess.h> | ||
107 | |||
108 | +extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, | ||
109 | + unsigned long new_len, unsigned long flags, | ||
110 | + unsigned long new_addr); | ||
111 | + | ||
112 | +/* common code for old and new mmaps */ | ||
113 | +inline long do_mmap2( | ||
114 | + unsigned long addr, unsigned long len, | ||
115 | + unsigned long prot, unsigned long flags, | ||
116 | + unsigned long fd, unsigned long pgoff) | ||
117 | +{ | ||
118 | + int error = -EINVAL; | ||
119 | + struct file * file = NULL; | ||
120 | + | ||
121 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
122 | + | ||
123 | + if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS) | ||
124 | + goto out; | ||
125 | + | ||
126 | + error = -EBADF; | ||
127 | + if (!(flags & MAP_ANONYMOUS)) { | ||
128 | + file = fget(fd); | ||
129 | + if (!file) | ||
130 | + goto out; | ||
131 | + } | ||
132 | + | ||
133 | + down_write(¤t->mm->mmap_sem); | ||
134 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
135 | + up_write(¤t->mm->mmap_sem); | ||
136 | + | ||
137 | + if (file) | ||
138 | + fput(file); | ||
139 | +out: | ||
140 | + return error; | ||
141 | +} | ||
142 | + | ||
143 | struct mmap_arg_struct { | ||
144 | unsigned long addr; | ||
145 | unsigned long len; | ||
146 | @@ -49,11 +84,29 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | ||
147 | if (a.offset & ~PAGE_MASK) | ||
148 | goto out; | ||
149 | |||
150 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
151 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
152 | out: | ||
153 | return error; | ||
154 | } | ||
155 | |||
156 | +asmlinkage unsigned long | ||
157 | +sys_arm_mremap(unsigned long addr, unsigned long old_len, | ||
158 | + unsigned long new_len, unsigned long flags, | ||
159 | + unsigned long new_addr) | ||
160 | +{ | ||
161 | + unsigned long ret = -EINVAL; | ||
162 | + | ||
163 | + if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS) | ||
164 | + goto out; | ||
165 | + | ||
166 | + down_write(¤t->mm->mmap_sem); | ||
167 | + ret = do_mremap(addr, old_len, new_len, flags, new_addr); | ||
168 | + up_write(¤t->mm->mmap_sem); | ||
169 | + | ||
170 | +out: | ||
171 | + return ret; | ||
172 | +} | ||
173 | + | ||
174 | /* | ||
175 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
176 | * calls. | ||
177 | diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c | ||
178 | index 36e4fb4..0976049 100644 | ||
179 | --- a/arch/arm/mach-davinci/dm646x.c | ||
180 | +++ b/arch/arm/mach-davinci/dm646x.c | ||
181 | @@ -789,14 +789,7 @@ static struct davinci_id dm646x_ids[] = { | ||
182 | .part_no = 0xb770, | ||
183 | .manufacturer = 0x017, | ||
184 | .cpu_id = DAVINCI_CPU_ID_DM6467, | ||
185 | - .name = "dm6467_rev1.x", | ||
186 | - }, | ||
187 | - { | ||
188 | - .variant = 0x1, | ||
189 | - .part_no = 0xb770, | ||
190 | - .manufacturer = 0x017, | ||
191 | - .cpu_id = DAVINCI_CPU_ID_DM6467, | ||
192 | - .name = "dm6467_rev3.x", | ||
193 | + .name = "dm6467", | ||
194 | }, | ||
195 | }; | ||
196 | |||
197 | diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c | ||
198 | index 86a8732..aec7f42 100644 | ||
199 | --- a/arch/arm/mach-pxa/em-x270.c | ||
200 | +++ b/arch/arm/mach-pxa/em-x270.c | ||
201 | @@ -497,15 +497,16 @@ static int em_x270_usb_hub_init(void) | ||
202 | goto err_free_vbus_gpio; | ||
203 | |||
204 | /* USB Hub power-on and reset */ | ||
205 | - gpio_direction_output(usb_hub_reset, 1); | ||
206 | - gpio_direction_output(GPIO9_USB_VBUS_EN, 0); | ||
207 | + gpio_direction_output(usb_hub_reset, 0); | ||
208 | regulator_enable(em_x270_usb_ldo); | ||
209 | - gpio_set_value(usb_hub_reset, 0); | ||
210 | gpio_set_value(usb_hub_reset, 1); | ||
211 | + gpio_set_value(usb_hub_reset, 0); | ||
212 | regulator_disable(em_x270_usb_ldo); | ||
213 | regulator_enable(em_x270_usb_ldo); | ||
214 | - gpio_set_value(usb_hub_reset, 0); | ||
215 | - gpio_set_value(GPIO9_USB_VBUS_EN, 1); | ||
216 | + gpio_set_value(usb_hub_reset, 1); | ||
217 | + | ||
218 | + /* enable VBUS */ | ||
219 | + gpio_direction_output(GPIO9_USB_VBUS_EN, 1); | ||
220 | |||
221 | return 0; | ||
222 | |||
223 | diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c | ||
224 | index f5abc51..2b79964 100644 | ||
225 | --- a/arch/arm/mm/mmap.c | ||
226 | +++ b/arch/arm/mm/mmap.c | ||
227 | @@ -54,8 +54,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
228 | * We enforce the MAP_FIXED case. | ||
229 | */ | ||
230 | if (flags & MAP_FIXED) { | ||
231 | - if (aliasing && flags & MAP_SHARED && | ||
232 | - (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | ||
233 | + if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1)) | ||
234 | return -EINVAL; | ||
235 | return addr; | ||
236 | } | ||
237 | diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h | ||
238 | index 66a1972..483d666 100644 | ||
239 | --- a/arch/avr32/include/asm/syscalls.h | ||
240 | +++ b/arch/avr32/include/asm/syscalls.h | ||
241 | @@ -29,6 +29,10 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
242 | struct pt_regs *); | ||
243 | asmlinkage int sys_rt_sigreturn(struct pt_regs *); | ||
244 | |||
245 | +/* kernel/sys_avr32.c */ | ||
246 | +asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, | ||
247 | + unsigned long, unsigned long, off_t); | ||
248 | + | ||
249 | /* mm/cache.c */ | ||
250 | asmlinkage int sys_cacheflush(int, void __user *, size_t); | ||
251 | |||
252 | diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c | ||
253 | index 459349b..5d2daea 100644 | ||
254 | --- a/arch/avr32/kernel/sys_avr32.c | ||
255 | +++ b/arch/avr32/kernel/sys_avr32.c | ||
256 | @@ -5,8 +5,39 @@ | ||
257 | * it under the terms of the GNU General Public License version 2 as | ||
258 | * published by the Free Software Foundation. | ||
259 | */ | ||
260 | +#include <linux/errno.h> | ||
261 | +#include <linux/fs.h> | ||
262 | +#include <linux/file.h> | ||
263 | +#include <linux/mm.h> | ||
264 | #include <linux/unistd.h> | ||
265 | |||
266 | +#include <asm/mman.h> | ||
267 | +#include <asm/uaccess.h> | ||
268 | +#include <asm/syscalls.h> | ||
269 | + | ||
270 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
271 | + unsigned long prot, unsigned long flags, | ||
272 | + unsigned long fd, off_t offset) | ||
273 | +{ | ||
274 | + int error = -EBADF; | ||
275 | + struct file *file = NULL; | ||
276 | + | ||
277 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
278 | + if (!(flags & MAP_ANONYMOUS)) { | ||
279 | + file = fget(fd); | ||
280 | + if (!file) | ||
281 | + return error; | ||
282 | + } | ||
283 | + | ||
284 | + down_write(¤t->mm->mmap_sem); | ||
285 | + error = do_mmap_pgoff(file, addr, len, prot, flags, offset); | ||
286 | + up_write(¤t->mm->mmap_sem); | ||
287 | + | ||
288 | + if (file) | ||
289 | + fput(file); | ||
290 | + return error; | ||
291 | +} | ||
292 | + | ||
293 | int kernel_execve(const char *file, char **argv, char **envp) | ||
294 | { | ||
295 | register long scno asm("r8") = __NR_execve; | ||
296 | diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S | ||
297 | index 0447a3e..f7244cd 100644 | ||
298 | --- a/arch/avr32/kernel/syscall-stubs.S | ||
299 | +++ b/arch/avr32/kernel/syscall-stubs.S | ||
300 | @@ -61,7 +61,7 @@ __sys_execve: | ||
301 | __sys_mmap2: | ||
302 | pushm lr | ||
303 | st.w --sp, ARG6 | ||
304 | - call sys_mmap_pgoff | ||
305 | + call sys_mmap2 | ||
306 | sub sp, -4 | ||
307 | popm pc | ||
308 | |||
309 | diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h | ||
310 | index 1d04e40..944a07c 100644 | ||
311 | --- a/arch/blackfin/include/asm/page.h | ||
312 | +++ b/arch/blackfin/include/asm/page.h | ||
313 | @@ -10,9 +10,4 @@ | ||
314 | #include <asm-generic/page.h> | ||
315 | #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) | ||
316 | |||
317 | -#define VM_DATA_DEFAULT_FLAGS \ | ||
318 | - (VM_READ | VM_WRITE | \ | ||
319 | - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
320 | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
321 | - | ||
322 | #endif | ||
323 | diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c | ||
324 | index 2e7f8e1..afcef12 100644 | ||
325 | --- a/arch/blackfin/kernel/sys_bfin.c | ||
326 | +++ b/arch/blackfin/kernel/sys_bfin.c | ||
327 | @@ -22,6 +22,39 @@ | ||
328 | #include <asm/cacheflush.h> | ||
329 | #include <asm/dma.h> | ||
330 | |||
331 | +/* common code for old and new mmaps */ | ||
332 | +static inline long | ||
333 | +do_mmap2(unsigned long addr, unsigned long len, | ||
334 | + unsigned long prot, unsigned long flags, | ||
335 | + unsigned long fd, unsigned long pgoff) | ||
336 | +{ | ||
337 | + int error = -EBADF; | ||
338 | + struct file *file = NULL; | ||
339 | + | ||
340 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
341 | + if (!(flags & MAP_ANONYMOUS)) { | ||
342 | + file = fget(fd); | ||
343 | + if (!file) | ||
344 | + goto out; | ||
345 | + } | ||
346 | + | ||
347 | + down_write(¤t->mm->mmap_sem); | ||
348 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
349 | + up_write(¤t->mm->mmap_sem); | ||
350 | + | ||
351 | + if (file) | ||
352 | + fput(file); | ||
353 | + out: | ||
354 | + return error; | ||
355 | +} | ||
356 | + | ||
357 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
358 | + unsigned long prot, unsigned long flags, | ||
359 | + unsigned long fd, unsigned long pgoff) | ||
360 | +{ | ||
361 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
362 | +} | ||
363 | + | ||
364 | asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) | ||
365 | { | ||
366 | return sram_alloc_with_lsl(size, flags); | ||
367 | diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S | ||
368 | index 1d8f00a..94a0375 100644 | ||
369 | --- a/arch/blackfin/mach-common/entry.S | ||
370 | +++ b/arch/blackfin/mach-common/entry.S | ||
371 | @@ -1422,7 +1422,7 @@ ENTRY(_sys_call_table) | ||
372 | .long _sys_ni_syscall /* streams2 */ | ||
373 | .long _sys_vfork /* 190 */ | ||
374 | .long _sys_getrlimit | ||
375 | - .long _sys_mmap_pgoff | ||
376 | + .long _sys_mmap2 | ||
377 | .long _sys_truncate64 | ||
378 | .long _sys_ftruncate64 | ||
379 | .long _sys_stat64 /* 195 */ | ||
380 | diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c | ||
381 | index c2bbb1a..2ad962c 100644 | ||
382 | --- a/arch/cris/kernel/sys_cris.c | ||
383 | +++ b/arch/cris/kernel/sys_cris.c | ||
384 | @@ -26,6 +26,31 @@ | ||
385 | #include <asm/uaccess.h> | ||
386 | #include <asm/segment.h> | ||
387 | |||
388 | +/* common code for old and new mmaps */ | ||
389 | +static inline long | ||
390 | +do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
391 | + unsigned long flags, unsigned long fd, unsigned long pgoff) | ||
392 | +{ | ||
393 | + int error = -EBADF; | ||
394 | + struct file * file = NULL; | ||
395 | + | ||
396 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
397 | + if (!(flags & MAP_ANONYMOUS)) { | ||
398 | + file = fget(fd); | ||
399 | + if (!file) | ||
400 | + goto out; | ||
401 | + } | ||
402 | + | ||
403 | + down_write(¤t->mm->mmap_sem); | ||
404 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
405 | + up_write(¤t->mm->mmap_sem); | ||
406 | + | ||
407 | + if (file) | ||
408 | + fput(file); | ||
409 | +out: | ||
410 | + return error; | ||
411 | +} | ||
412 | + | ||
413 | asmlinkage unsigned long old_mmap(unsigned long __user *args) | ||
414 | { | ||
415 | unsigned long buffer[6]; | ||
416 | @@ -38,7 +63,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args) | ||
417 | if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */ | ||
418 | goto out; | ||
419 | |||
420 | - err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3], | ||
421 | + err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3], | ||
422 | buffer[4], buffer[5] >> PAGE_SHIFT); | ||
423 | out: | ||
424 | return err; | ||
425 | @@ -48,8 +73,7 @@ asmlinkage long | ||
426 | sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
427 | unsigned long flags, unsigned long fd, unsigned long pgoff) | ||
428 | { | ||
429 | - /* bug(?): 8Kb pages here */ | ||
430 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
431 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h | ||
436 | index 8c97068..25c6a50 100644 | ||
437 | --- a/arch/frv/include/asm/page.h | ||
438 | +++ b/arch/frv/include/asm/page.h | ||
439 | @@ -63,10 +63,12 @@ extern unsigned long max_pfn; | ||
440 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
441 | |||
442 | |||
443 | +#ifdef CONFIG_MMU | ||
444 | #define VM_DATA_DEFAULT_FLAGS \ | ||
445 | (VM_READ | VM_WRITE | \ | ||
446 | ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ | ||
447 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
448 | +#endif | ||
449 | |||
450 | #endif /* __ASSEMBLY__ */ | ||
451 | |||
452 | diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c | ||
453 | index 1d3d4c9..2b6b528 100644 | ||
454 | --- a/arch/frv/kernel/sys_frv.c | ||
455 | +++ b/arch/frv/kernel/sys_frv.c | ||
456 | @@ -31,6 +31,9 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
457 | unsigned long prot, unsigned long flags, | ||
458 | unsigned long fd, unsigned long pgoff) | ||
459 | { | ||
460 | + int error = -EBADF; | ||
461 | + struct file * file = NULL; | ||
462 | + | ||
463 | /* As with sparc32, make sure the shift for mmap2 is constant | ||
464 | (12), no matter what PAGE_SIZE we have.... */ | ||
465 | |||
466 | @@ -38,10 +41,69 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
467 | trying to map something we can't */ | ||
468 | if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) | ||
469 | return -EINVAL; | ||
470 | + pgoff >>= PAGE_SHIFT - 12; | ||
471 | + | ||
472 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
473 | + if (!(flags & MAP_ANONYMOUS)) { | ||
474 | + file = fget(fd); | ||
475 | + if (!file) | ||
476 | + goto out; | ||
477 | + } | ||
478 | + | ||
479 | + down_write(¤t->mm->mmap_sem); | ||
480 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
481 | + up_write(¤t->mm->mmap_sem); | ||
482 | + | ||
483 | + if (file) | ||
484 | + fput(file); | ||
485 | +out: | ||
486 | + return error; | ||
487 | +} | ||
488 | + | ||
489 | +#if 0 /* DAVIDM - do we want this */ | ||
490 | +struct mmap_arg_struct64 { | ||
491 | + __u32 addr; | ||
492 | + __u32 len; | ||
493 | + __u32 prot; | ||
494 | + __u32 flags; | ||
495 | + __u64 offset; /* 64 bits */ | ||
496 | + __u32 fd; | ||
497 | +}; | ||
498 | + | ||
499 | +asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) | ||
500 | +{ | ||
501 | + int error = -EFAULT; | ||
502 | + struct file * file = NULL; | ||
503 | + struct mmap_arg_struct64 a; | ||
504 | + unsigned long pgoff; | ||
505 | + | ||
506 | + if (copy_from_user(&a, arg, sizeof(a))) | ||
507 | + return -EFAULT; | ||
508 | + | ||
509 | + if ((long)a.offset & ~PAGE_MASK) | ||
510 | + return -EINVAL; | ||
511 | + | ||
512 | + pgoff = a.offset >> PAGE_SHIFT; | ||
513 | + if ((a.offset >> PAGE_SHIFT) != pgoff) | ||
514 | + return -EINVAL; | ||
515 | + | ||
516 | + if (!(a.flags & MAP_ANONYMOUS)) { | ||
517 | + error = -EBADF; | ||
518 | + file = fget(a.fd); | ||
519 | + if (!file) | ||
520 | + goto out; | ||
521 | + } | ||
522 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
523 | |||
524 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
525 | - pgoff >> (PAGE_SHIFT - 12)); | ||
526 | + down_write(¤t->mm->mmap_sem); | ||
527 | + error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); | ||
528 | + up_write(¤t->mm->mmap_sem); | ||
529 | + if (file) | ||
530 | + fput(file); | ||
531 | +out: | ||
532 | + return error; | ||
533 | } | ||
534 | +#endif | ||
535 | |||
536 | /* | ||
537 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
538 | diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c | ||
539 | index b5969db..8cb5d73 100644 | ||
540 | --- a/arch/h8300/kernel/sys_h8300.c | ||
541 | +++ b/arch/h8300/kernel/sys_h8300.c | ||
542 | @@ -26,6 +26,39 @@ | ||
543 | #include <asm/traps.h> | ||
544 | #include <asm/unistd.h> | ||
545 | |||
546 | +/* common code for old and new mmaps */ | ||
547 | +static inline long do_mmap2( | ||
548 | + unsigned long addr, unsigned long len, | ||
549 | + unsigned long prot, unsigned long flags, | ||
550 | + unsigned long fd, unsigned long pgoff) | ||
551 | +{ | ||
552 | + int error = -EBADF; | ||
553 | + struct file * file = NULL; | ||
554 | + | ||
555 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
556 | + if (!(flags & MAP_ANONYMOUS)) { | ||
557 | + file = fget(fd); | ||
558 | + if (!file) | ||
559 | + goto out; | ||
560 | + } | ||
561 | + | ||
562 | + down_write(¤t->mm->mmap_sem); | ||
563 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
564 | + up_write(¤t->mm->mmap_sem); | ||
565 | + | ||
566 | + if (file) | ||
567 | + fput(file); | ||
568 | +out: | ||
569 | + return error; | ||
570 | +} | ||
571 | + | ||
572 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
573 | + unsigned long prot, unsigned long flags, | ||
574 | + unsigned long fd, unsigned long pgoff) | ||
575 | +{ | ||
576 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
577 | +} | ||
578 | + | ||
579 | /* | ||
580 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
581 | * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to | ||
582 | @@ -54,11 +87,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) | ||
583 | if (a.offset & ~PAGE_MASK) | ||
584 | goto out; | ||
585 | |||
586 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | ||
587 | - a.offset >> PAGE_SHIFT); | ||
588 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
589 | + | ||
590 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
591 | +out: | ||
592 | + return error; | ||
593 | +} | ||
594 | + | ||
595 | +#if 0 /* DAVIDM - do we want this */ | ||
596 | +struct mmap_arg_struct64 { | ||
597 | + __u32 addr; | ||
598 | + __u32 len; | ||
599 | + __u32 prot; | ||
600 | + __u32 flags; | ||
601 | + __u64 offset; /* 64 bits */ | ||
602 | + __u32 fd; | ||
603 | +}; | ||
604 | + | ||
605 | +asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) | ||
606 | +{ | ||
607 | + int error = -EFAULT; | ||
608 | + struct file * file = NULL; | ||
609 | + struct mmap_arg_struct64 a; | ||
610 | + unsigned long pgoff; | ||
611 | + | ||
612 | + if (copy_from_user(&a, arg, sizeof(a))) | ||
613 | + return -EFAULT; | ||
614 | + | ||
615 | + if ((long)a.offset & ~PAGE_MASK) | ||
616 | + return -EINVAL; | ||
617 | + | ||
618 | + pgoff = a.offset >> PAGE_SHIFT; | ||
619 | + if ((a.offset >> PAGE_SHIFT) != pgoff) | ||
620 | + return -EINVAL; | ||
621 | + | ||
622 | + if (!(a.flags & MAP_ANONYMOUS)) { | ||
623 | + error = -EBADF; | ||
624 | + file = fget(a.fd); | ||
625 | + if (!file) | ||
626 | + goto out; | ||
627 | + } | ||
628 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
629 | + | ||
630 | + down_write(¤t->mm->mmap_sem); | ||
631 | + error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); | ||
632 | + up_write(¤t->mm->mmap_sem); | ||
633 | + if (file) | ||
634 | + fput(file); | ||
635 | out: | ||
636 | return error; | ||
637 | } | ||
638 | +#endif | ||
639 | |||
640 | struct sel_arg_struct { | ||
641 | unsigned long n; | ||
642 | diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S | ||
643 | index 2d69881..4eb67fa 100644 | ||
644 | --- a/arch/h8300/kernel/syscalls.S | ||
645 | +++ b/arch/h8300/kernel/syscalls.S | ||
646 | @@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table) | ||
647 | .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ | ||
648 | .long SYMBOL_NAME(sys_vfork) /* 190 */ | ||
649 | .long SYMBOL_NAME(sys_getrlimit) | ||
650 | - .long SYMBOL_NAME(sys_mmap_pgoff) | ||
651 | + .long SYMBOL_NAME(sys_mmap2) | ||
652 | .long SYMBOL_NAME(sys_truncate64) | ||
653 | .long SYMBOL_NAME(sys_ftruncate64) | ||
654 | .long SYMBOL_NAME(sys_stat64) /* 195 */ | ||
655 | diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c | ||
656 | index e031ee8..625ed8f 100644 | ||
657 | --- a/arch/ia64/ia32/sys_ia32.c | ||
658 | +++ b/arch/ia64/ia32/sys_ia32.c | ||
659 | @@ -858,9 +858,6 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot | ||
660 | |||
661 | prot = get_prot32(prot); | ||
662 | |||
663 | - if (flags & MAP_HUGETLB) | ||
664 | - return -ENOMEM; | ||
665 | - | ||
666 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | ||
667 | mutex_lock(&ia32_mmap_mutex); | ||
668 | { | ||
669 | diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h | ||
670 | index cc8335e..0d9d16e 100644 | ||
671 | --- a/arch/ia64/include/asm/io.h | ||
672 | +++ b/arch/ia64/include/asm/io.h | ||
673 | @@ -424,8 +424,6 @@ __writeq (unsigned long val, volatile void __iomem *addr) | ||
674 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); | ||
675 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); | ||
676 | extern void iounmap (volatile void __iomem *addr); | ||
677 | -extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); | ||
678 | -extern void early_iounmap (volatile void __iomem *addr, unsigned long size); | ||
679 | |||
680 | /* | ||
681 | * String version of IO memory access ops: | ||
682 | diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c | ||
683 | index 609d500..92ed83f 100644 | ||
684 | --- a/arch/ia64/kernel/sys_ia64.c | ||
685 | +++ b/arch/ia64/kernel/sys_ia64.c | ||
686 | @@ -100,7 +100,51 @@ sys_getpagesize (void) | ||
687 | asmlinkage unsigned long | ||
688 | ia64_brk (unsigned long brk) | ||
689 | { | ||
690 | - unsigned long retval = sys_brk(brk); | ||
691 | + unsigned long rlim, retval, newbrk, oldbrk; | ||
692 | + struct mm_struct *mm = current->mm; | ||
693 | + | ||
694 | + /* | ||
695 | + * Most of this replicates the code in sys_brk() except for an additional safety | ||
696 | + * check and the clearing of r8. However, we can't call sys_brk() because we need | ||
697 | + * to acquire the mmap_sem before we can do the test... | ||
698 | + */ | ||
699 | + down_write(&mm->mmap_sem); | ||
700 | + | ||
701 | + if (brk < mm->end_code) | ||
702 | + goto out; | ||
703 | + newbrk = PAGE_ALIGN(brk); | ||
704 | + oldbrk = PAGE_ALIGN(mm->brk); | ||
705 | + if (oldbrk == newbrk) | ||
706 | + goto set_brk; | ||
707 | + | ||
708 | + /* Always allow shrinking brk. */ | ||
709 | + if (brk <= mm->brk) { | ||
710 | + if (!do_munmap(mm, newbrk, oldbrk-newbrk)) | ||
711 | + goto set_brk; | ||
712 | + goto out; | ||
713 | + } | ||
714 | + | ||
715 | + /* Check against unimplemented/unmapped addresses: */ | ||
716 | + if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT) | ||
717 | + goto out; | ||
718 | + | ||
719 | + /* Check against rlimit.. */ | ||
720 | + rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; | ||
721 | + if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) | ||
722 | + goto out; | ||
723 | + | ||
724 | + /* Check against existing mmap mappings. */ | ||
725 | + if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) | ||
726 | + goto out; | ||
727 | + | ||
728 | + /* Ok, looks good - let it rip. */ | ||
729 | + if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk) | ||
730 | + goto out; | ||
731 | +set_brk: | ||
732 | + mm->brk = brk; | ||
733 | +out: | ||
734 | + retval = mm->brk; | ||
735 | + up_write(&mm->mmap_sem); | ||
736 | force_successful_syscall_return(); | ||
737 | return retval; | ||
738 | } | ||
739 | @@ -141,6 +185,39 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, | ||
740 | return 0; | ||
741 | } | ||
742 | |||
743 | +static inline unsigned long | ||
744 | +do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff) | ||
745 | +{ | ||
746 | + struct file *file = NULL; | ||
747 | + | ||
748 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
749 | + if (!(flags & MAP_ANONYMOUS)) { | ||
750 | + file = fget(fd); | ||
751 | + if (!file) | ||
752 | + return -EBADF; | ||
753 | + | ||
754 | + if (!file->f_op || !file->f_op->mmap) { | ||
755 | + addr = -ENODEV; | ||
756 | + goto out; | ||
757 | + } | ||
758 | + } | ||
759 | + | ||
760 | + /* Careful about overflows.. */ | ||
761 | + len = PAGE_ALIGN(len); | ||
762 | + if (!len || len > TASK_SIZE) { | ||
763 | + addr = -EINVAL; | ||
764 | + goto out; | ||
765 | + } | ||
766 | + | ||
767 | + down_write(¤t->mm->mmap_sem); | ||
768 | + addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
769 | + up_write(¤t->mm->mmap_sem); | ||
770 | + | ||
771 | +out: if (file) | ||
772 | + fput(file); | ||
773 | + return addr; | ||
774 | +} | ||
775 | + | ||
776 | /* | ||
777 | * mmap2() is like mmap() except that the offset is expressed in units | ||
778 | * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces | ||
779 | @@ -149,7 +226,7 @@ int ia64_mmap_check(unsigned long addr, unsigned long len, | ||
780 | asmlinkage unsigned long | ||
781 | sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) | ||
782 | { | ||
783 | - addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
784 | + addr = do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
785 | if (!IS_ERR((void *) addr)) | ||
786 | force_successful_syscall_return(); | ||
787 | return addr; | ||
788 | @@ -161,7 +238,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo | ||
789 | if (offset_in_page(off) != 0) | ||
790 | return -EINVAL; | ||
791 | |||
792 | - addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
793 | + addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
794 | if (!IS_ERR((void *) addr)) | ||
795 | force_successful_syscall_return(); | ||
796 | return addr; | ||
797 | diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c | ||
798 | index 3dccdd8..2a14062 100644 | ||
799 | --- a/arch/ia64/mm/ioremap.c | ||
800 | +++ b/arch/ia64/mm/ioremap.c | ||
801 | @@ -22,12 +22,6 @@ __ioremap (unsigned long phys_addr) | ||
802 | } | ||
803 | |||
804 | void __iomem * | ||
805 | -early_ioremap (unsigned long phys_addr, unsigned long size) | ||
806 | -{ | ||
807 | - return __ioremap(phys_addr); | ||
808 | -} | ||
809 | - | ||
810 | -void __iomem * | ||
811 | ioremap (unsigned long phys_addr, unsigned long size) | ||
812 | { | ||
813 | void __iomem *addr; | ||
814 | @@ -108,11 +102,6 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) | ||
815 | EXPORT_SYMBOL(ioremap_nocache); | ||
816 | |||
817 | void | ||
818 | -early_iounmap (volatile void __iomem *addr, unsigned long size) | ||
819 | -{ | ||
820 | -} | ||
821 | - | ||
822 | -void | ||
823 | iounmap (volatile void __iomem *addr) | ||
824 | { | ||
825 | if (REGION_NUMBER(addr) == RGN_GATE) | ||
826 | diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c | ||
827 | index d3c865c..305ac85 100644 | ||
828 | --- a/arch/m32r/kernel/sys_m32r.c | ||
829 | +++ b/arch/m32r/kernel/sys_m32r.c | ||
830 | @@ -76,6 +76,30 @@ asmlinkage int sys_tas(int __user *addr) | ||
831 | return oldval; | ||
832 | } | ||
833 | |||
834 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
835 | + unsigned long prot, unsigned long flags, | ||
836 | + unsigned long fd, unsigned long pgoff) | ||
837 | +{ | ||
838 | + int error = -EBADF; | ||
839 | + struct file *file = NULL; | ||
840 | + | ||
841 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
842 | + if (!(flags & MAP_ANONYMOUS)) { | ||
843 | + file = fget(fd); | ||
844 | + if (!file) | ||
845 | + goto out; | ||
846 | + } | ||
847 | + | ||
848 | + down_write(&current->mm->mmap_sem); | ||
849 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
850 | + up_write(&current->mm->mmap_sem); | ||
851 | + | ||
852 | + if (file) | ||
853 | + fput(file); | ||
854 | +out: | ||
855 | + return error; | ||
856 | +} | ||
857 | + | ||
858 | /* | ||
859 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | ||
860 | * | ||
861 | diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S | ||
862 | index 60536e2..aa3bf4c 100644 | ||
863 | --- a/arch/m32r/kernel/syscall_table.S | ||
864 | +++ b/arch/m32r/kernel/syscall_table.S | ||
865 | @@ -191,7 +191,7 @@ ENTRY(sys_call_table) | ||
866 | .long sys_ni_syscall /* streams2 */ | ||
867 | .long sys_vfork /* 190 */ | ||
868 | .long sys_getrlimit | ||
869 | - .long sys_mmap_pgoff | ||
870 | + .long sys_mmap2 | ||
871 | .long sys_truncate64 | ||
872 | .long sys_ftruncate64 | ||
873 | .long sys_stat64 /* 195 */ | ||
874 | diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c | ||
875 | index 218f441..7deb402 100644 | ||
876 | --- a/arch/m68k/kernel/sys_m68k.c | ||
877 | +++ b/arch/m68k/kernel/sys_m68k.c | ||
878 | @@ -29,16 +29,37 @@ | ||
879 | #include <asm/page.h> | ||
880 | #include <asm/unistd.h> | ||
881 | |||
882 | +/* common code for old and new mmaps */ | ||
883 | +static inline long do_mmap2( | ||
884 | + unsigned long addr, unsigned long len, | ||
885 | + unsigned long prot, unsigned long flags, | ||
886 | + unsigned long fd, unsigned long pgoff) | ||
887 | +{ | ||
888 | + int error = -EBADF; | ||
889 | + struct file * file = NULL; | ||
890 | + | ||
891 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
892 | + if (!(flags & MAP_ANONYMOUS)) { | ||
893 | + file = fget(fd); | ||
894 | + if (!file) | ||
895 | + goto out; | ||
896 | + } | ||
897 | + | ||
898 | + down_write(&current->mm->mmap_sem); | ||
899 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
900 | + up_write(&current->mm->mmap_sem); | ||
901 | + | ||
902 | + if (file) | ||
903 | + fput(file); | ||
904 | +out: | ||
905 | + return error; | ||
906 | +} | ||
907 | + | ||
908 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
909 | unsigned long prot, unsigned long flags, | ||
910 | unsigned long fd, unsigned long pgoff) | ||
911 | { | ||
912 | - /* | ||
913 | - * This is wrong for sun3 - there PAGE_SIZE is 8Kb, | ||
914 | - * so we need to shift the argument down by 1; m68k mmap64(3) | ||
915 | - * (in libc) expects the last argument of mmap2 in 4Kb units. | ||
916 | - */ | ||
917 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
918 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
919 | } | ||
920 | |||
921 | /* | ||
922 | @@ -69,11 +90,57 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | ||
923 | if (a.offset & ~PAGE_MASK) | ||
924 | goto out; | ||
925 | |||
926 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | ||
927 | - a.offset >> PAGE_SHIFT); | ||
928 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
929 | + | ||
930 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
931 | +out: | ||
932 | + return error; | ||
933 | +} | ||
934 | + | ||
935 | +#if 0 | ||
936 | +struct mmap_arg_struct64 { | ||
937 | + __u32 addr; | ||
938 | + __u32 len; | ||
939 | + __u32 prot; | ||
940 | + __u32 flags; | ||
941 | + __u64 offset; /* 64 bits */ | ||
942 | + __u32 fd; | ||
943 | +}; | ||
944 | + | ||
945 | +asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg) | ||
946 | +{ | ||
947 | + int error = -EFAULT; | ||
948 | + struct file * file = NULL; | ||
949 | + struct mmap_arg_struct64 a; | ||
950 | + unsigned long pgoff; | ||
951 | + | ||
952 | + if (copy_from_user(&a, arg, sizeof(a))) | ||
953 | + return -EFAULT; | ||
954 | + | ||
955 | + if ((long)a.offset & ~PAGE_MASK) | ||
956 | + return -EINVAL; | ||
957 | + | ||
958 | + pgoff = a.offset >> PAGE_SHIFT; | ||
959 | + if ((a.offset >> PAGE_SHIFT) != pgoff) | ||
960 | + return -EINVAL; | ||
961 | + | ||
962 | + if (!(a.flags & MAP_ANONYMOUS)) { | ||
963 | + error = -EBADF; | ||
964 | + file = fget(a.fd); | ||
965 | + if (!file) | ||
966 | + goto out; | ||
967 | + } | ||
968 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
969 | + | ||
970 | + down_write(&current->mm->mmap_sem); | ||
971 | + error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff); | ||
972 | + up_write(&current->mm->mmap_sem); | ||
973 | + if (file) | ||
974 | + fput(file); | ||
975 | out: | ||
976 | return error; | ||
977 | } | ||
978 | +#endif | ||
979 | |||
980 | struct sel_arg_struct { | ||
981 | unsigned long n; | ||
982 | diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c | ||
983 | index b67cbc7..efdd090 100644 | ||
984 | --- a/arch/m68knommu/kernel/sys_m68k.c | ||
985 | +++ b/arch/m68knommu/kernel/sys_m68k.c | ||
986 | @@ -27,6 +27,39 @@ | ||
987 | #include <asm/cacheflush.h> | ||
988 | #include <asm/unistd.h> | ||
989 | |||
990 | +/* common code for old and new mmaps */ | ||
991 | +static inline long do_mmap2( | ||
992 | + unsigned long addr, unsigned long len, | ||
993 | + unsigned long prot, unsigned long flags, | ||
994 | + unsigned long fd, unsigned long pgoff) | ||
995 | +{ | ||
996 | + int error = -EBADF; | ||
997 | + struct file * file = NULL; | ||
998 | + | ||
999 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1000 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1001 | + file = fget(fd); | ||
1002 | + if (!file) | ||
1003 | + goto out; | ||
1004 | + } | ||
1005 | + | ||
1006 | + down_write(&current->mm->mmap_sem); | ||
1007 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1008 | + up_write(&current->mm->mmap_sem); | ||
1009 | + | ||
1010 | + if (file) | ||
1011 | + fput(file); | ||
1012 | +out: | ||
1013 | + return error; | ||
1014 | +} | ||
1015 | + | ||
1016 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
1017 | + unsigned long prot, unsigned long flags, | ||
1018 | + unsigned long fd, unsigned long pgoff) | ||
1019 | +{ | ||
1020 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
1021 | +} | ||
1022 | + | ||
1023 | /* | ||
1024 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
1025 | * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to | ||
1026 | @@ -55,8 +88,9 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg) | ||
1027 | if (a.offset & ~PAGE_MASK) | ||
1028 | goto out; | ||
1029 | |||
1030 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | ||
1031 | - a.offset >> PAGE_SHIFT); | ||
1032 | + a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1033 | + | ||
1034 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
1035 | out: | ||
1036 | return error; | ||
1037 | } | ||
1038 | diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S | ||
1039 | index 486837e..23535cc 100644 | ||
1040 | --- a/arch/m68knommu/kernel/syscalltable.S | ||
1041 | +++ b/arch/m68knommu/kernel/syscalltable.S | ||
1042 | @@ -210,7 +210,7 @@ ENTRY(sys_call_table) | ||
1043 | .long sys_ni_syscall /* streams2 */ | ||
1044 | .long sys_vfork /* 190 */ | ||
1045 | .long sys_getrlimit | ||
1046 | - .long sys_mmap_pgoff | ||
1047 | + .long sys_mmap2 | ||
1048 | .long sys_truncate64 | ||
1049 | .long sys_ftruncate64 | ||
1050 | .long sys_stat64 /* 195 */ | ||
1051 | diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c | ||
1052 | index 9f3c205..07cabed 100644 | ||
1053 | --- a/arch/microblaze/kernel/sys_microblaze.c | ||
1054 | +++ b/arch/microblaze/kernel/sys_microblaze.c | ||
1055 | @@ -62,14 +62,46 @@ out: | ||
1056 | return error; | ||
1057 | } | ||
1058 | |||
1059 | +asmlinkage long | ||
1060 | +sys_mmap2(unsigned long addr, unsigned long len, | ||
1061 | + unsigned long prot, unsigned long flags, | ||
1062 | + unsigned long fd, unsigned long pgoff) | ||
1063 | +{ | ||
1064 | + struct file *file = NULL; | ||
1065 | + int ret = -EBADF; | ||
1066 | + | ||
1067 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1068 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1069 | + file = fget(fd); | ||
1070 | + if (!file) { | ||
1071 | + printk(KERN_INFO "no fd in mmap\r\n"); | ||
1072 | + goto out; | ||
1073 | + } | ||
1074 | + } | ||
1075 | + | ||
1076 | + down_write(&current->mm->mmap_sem); | ||
1077 | + ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1078 | + up_write(&current->mm->mmap_sem); | ||
1079 | + if (file) | ||
1080 | + fput(file); | ||
1081 | +out: | ||
1082 | + return ret; | ||
1083 | +} | ||
1084 | + | ||
1085 | asmlinkage long sys_mmap(unsigned long addr, unsigned long len, | ||
1086 | unsigned long prot, unsigned long flags, | ||
1087 | unsigned long fd, off_t pgoff) | ||
1088 | { | ||
1089 | - if (pgoff & ~PAGE_MASK) | ||
1090 | - return -EINVAL; | ||
1091 | + int err = -EINVAL; | ||
1092 | + | ||
1093 | + if (pgoff & ~PAGE_MASK) { | ||
1094 | + printk(KERN_INFO "no pagemask in mmap\r\n"); | ||
1095 | + goto out; | ||
1096 | + } | ||
1097 | |||
1098 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); | ||
1099 | + err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); | ||
1100 | +out: | ||
1101 | + return err; | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S | ||
1106 | index eb50ce5..ecec191 100644 | ||
1107 | --- a/arch/microblaze/kernel/syscall_table.S | ||
1108 | +++ b/arch/microblaze/kernel/syscall_table.S | ||
1109 | @@ -196,7 +196,7 @@ ENTRY(sys_call_table) | ||
1110 | .long sys_ni_syscall /* reserved for streams2 */ | ||
1111 | .long sys_vfork /* 190 */ | ||
1112 | .long sys_getrlimit | ||
1113 | - .long sys_mmap_pgoff /* mmap2 */ | ||
1114 | + .long sys_mmap2 /* mmap2 */ | ||
1115 | .long sys_truncate64 | ||
1116 | .long sys_ftruncate64 | ||
1117 | .long sys_stat64 /* 195 */ | ||
1118 | diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c | ||
1119 | index ea4a746..b77fefa 100644 | ||
1120 | --- a/arch/mips/kernel/linux32.c | ||
1121 | +++ b/arch/mips/kernel/linux32.c | ||
1122 | @@ -67,13 +67,28 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len, | ||
1123 | unsigned long, prot, unsigned long, flags, unsigned long, fd, | ||
1124 | unsigned long, pgoff) | ||
1125 | { | ||
1126 | + struct file * file = NULL; | ||
1127 | unsigned long error; | ||
1128 | |||
1129 | error = -EINVAL; | ||
1130 | if (pgoff & (~PAGE_MASK >> 12)) | ||
1131 | goto out; | ||
1132 | - error = sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
1133 | - pgoff >> (PAGE_SHIFT-12)); | ||
1134 | + pgoff >>= PAGE_SHIFT-12; | ||
1135 | + | ||
1136 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1137 | + error = -EBADF; | ||
1138 | + file = fget(fd); | ||
1139 | + if (!file) | ||
1140 | + goto out; | ||
1141 | + } | ||
1142 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1143 | + | ||
1144 | + down_write(&current->mm->mmap_sem); | ||
1145 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1146 | + up_write(&current->mm->mmap_sem); | ||
1147 | + if (file) | ||
1148 | + fput(file); | ||
1149 | + | ||
1150 | out: | ||
1151 | return error; | ||
1152 | } | ||
1153 | diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c | ||
1154 | index 3f7f466..fe0d798 100644 | ||
1155 | --- a/arch/mips/kernel/syscall.c | ||
1156 | +++ b/arch/mips/kernel/syscall.c | ||
1157 | @@ -93,8 +93,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
1158 | * We do not accept a shared mapping if it would violate | ||
1159 | * cache aliasing constraints. | ||
1160 | */ | ||
1161 | - if ((flags & MAP_SHARED) && | ||
1162 | - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
1163 | + if ((flags & MAP_SHARED) && (addr & shm_align_mask)) | ||
1164 | return -EINVAL; | ||
1165 | return addr; | ||
1166 | } | ||
1167 | @@ -130,6 +129,31 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
1168 | } | ||
1169 | } | ||
1170 | |||
1171 | +/* common code for old and new mmaps */ | ||
1172 | +static inline unsigned long | ||
1173 | +do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
1174 | + unsigned long flags, unsigned long fd, unsigned long pgoff) | ||
1175 | +{ | ||
1176 | + unsigned long error = -EBADF; | ||
1177 | + struct file * file = NULL; | ||
1178 | + | ||
1179 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1180 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1181 | + file = fget(fd); | ||
1182 | + if (!file) | ||
1183 | + goto out; | ||
1184 | + } | ||
1185 | + | ||
1186 | + down_write(&current->mm->mmap_sem); | ||
1187 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1188 | + up_write(&current->mm->mmap_sem); | ||
1189 | + | ||
1190 | + if (file) | ||
1191 | + fput(file); | ||
1192 | +out: | ||
1193 | + return error; | ||
1194 | +} | ||
1195 | + | ||
1196 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | ||
1197 | unsigned long, prot, unsigned long, flags, unsigned long, | ||
1198 | fd, off_t, offset) | ||
1199 | @@ -140,7 +164,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | ||
1200 | if (offset & ~PAGE_MASK) | ||
1201 | goto out; | ||
1202 | |||
1203 | - result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1204 | + result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1205 | |||
1206 | out: | ||
1207 | return result; | ||
1208 | @@ -153,7 +177,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, | ||
1209 | if (pgoff & (~PAGE_MASK >> 12)) | ||
1210 | return -EINVAL; | ||
1211 | |||
1212 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); | ||
1213 | + return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); | ||
1214 | } | ||
1215 | |||
1216 | save_static_function(sys_fork); | ||
1217 | diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h | ||
1218 | index db5c53d..8eebf89 100644 | ||
1219 | --- a/arch/mn10300/include/asm/mman.h | ||
1220 | +++ b/arch/mn10300/include/asm/mman.h | ||
1221 | @@ -1,6 +1 @@ | ||
1222 | #include <asm-generic/mman.h> | ||
1223 | - | ||
1224 | -#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */ | ||
1225 | - | ||
1226 | -#define arch_mmap_check(addr, len, flags) \ | ||
1227 | - (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0) | ||
1228 | diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S | ||
1229 | index c9ee6c0..a94e7ea 100644 | ||
1230 | --- a/arch/mn10300/kernel/entry.S | ||
1231 | +++ b/arch/mn10300/kernel/entry.S | ||
1232 | @@ -578,7 +578,7 @@ ENTRY(sys_call_table) | ||
1233 | .long sys_ni_syscall /* reserved for streams2 */ | ||
1234 | .long sys_vfork /* 190 */ | ||
1235 | .long sys_getrlimit | ||
1236 | - .long sys_mmap_pgoff | ||
1237 | + .long sys_mmap2 | ||
1238 | .long sys_truncate64 | ||
1239 | .long sys_ftruncate64 | ||
1240 | .long sys_stat64 /* 195 */ | ||
1241 | diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c | ||
1242 | index 17cc6ce..8ca5af0 100644 | ||
1243 | --- a/arch/mn10300/kernel/sys_mn10300.c | ||
1244 | +++ b/arch/mn10300/kernel/sys_mn10300.c | ||
1245 | @@ -23,13 +23,47 @@ | ||
1246 | |||
1247 | #include <asm/uaccess.h> | ||
1248 | |||
1249 | +#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */ | ||
1250 | + | ||
1251 | +/* | ||
1252 | + * memory mapping syscall | ||
1253 | + */ | ||
1254 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
1255 | + unsigned long prot, unsigned long flags, | ||
1256 | + unsigned long fd, unsigned long pgoff) | ||
1257 | +{ | ||
1258 | + struct file *file = NULL; | ||
1259 | + long error = -EINVAL; | ||
1260 | + | ||
1261 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1262 | + | ||
1263 | + if (flags & MAP_FIXED && addr < MIN_MAP_ADDR) | ||
1264 | + goto out; | ||
1265 | + | ||
1266 | + error = -EBADF; | ||
1267 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1268 | + file = fget(fd); | ||
1269 | + if (!file) | ||
1270 | + goto out; | ||
1271 | + } | ||
1272 | + | ||
1273 | + down_write(&current->mm->mmap_sem); | ||
1274 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1275 | + up_write(&current->mm->mmap_sem); | ||
1276 | + | ||
1277 | + if (file) | ||
1278 | + fput(file); | ||
1279 | +out: | ||
1280 | + return error; | ||
1281 | +} | ||
1282 | + | ||
1283 | asmlinkage long old_mmap(unsigned long addr, unsigned long len, | ||
1284 | unsigned long prot, unsigned long flags, | ||
1285 | unsigned long fd, unsigned long offset) | ||
1286 | { | ||
1287 | if (offset & ~PAGE_MASK) | ||
1288 | return -EINVAL; | ||
1289 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1290 | + return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1291 | } | ||
1292 | |||
1293 | struct sel_arg_struct { | ||
1294 | diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c | ||
1295 | index 9147391..71b3195 100644 | ||
1296 | --- a/arch/parisc/kernel/sys_parisc.c | ||
1297 | +++ b/arch/parisc/kernel/sys_parisc.c | ||
1298 | @@ -110,14 +110,37 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
1299 | return addr; | ||
1300 | } | ||
1301 | |||
1302 | +static unsigned long do_mmap2(unsigned long addr, unsigned long len, | ||
1303 | + unsigned long prot, unsigned long flags, unsigned long fd, | ||
1304 | + unsigned long pgoff) | ||
1305 | +{ | ||
1306 | + struct file * file = NULL; | ||
1307 | + unsigned long error = -EBADF; | ||
1308 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1309 | + file = fget(fd); | ||
1310 | + if (!file) | ||
1311 | + goto out; | ||
1312 | + } | ||
1313 | + | ||
1314 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1315 | + | ||
1316 | + down_write(&current->mm->mmap_sem); | ||
1317 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1318 | + up_write(&current->mm->mmap_sem); | ||
1319 | + | ||
1320 | + if (file != NULL) | ||
1321 | + fput(file); | ||
1322 | +out: | ||
1323 | + return error; | ||
1324 | +} | ||
1325 | + | ||
1326 | asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, | ||
1327 | unsigned long prot, unsigned long flags, unsigned long fd, | ||
1328 | unsigned long pgoff) | ||
1329 | { | ||
1330 | /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE | ||
1331 | we have. */ | ||
1332 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
1333 | - pgoff >> (PAGE_SHIFT - 12)); | ||
1334 | + return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); | ||
1335 | } | ||
1336 | |||
1337 | asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | ||
1338 | @@ -125,8 +148,7 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | ||
1339 | unsigned long offset) | ||
1340 | { | ||
1341 | if (!(offset & ~PAGE_MASK)) { | ||
1342 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
1343 | - offset >> PAGE_SHIFT); | ||
1344 | + return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1345 | } else { | ||
1346 | return -EINVAL; | ||
1347 | } | ||
1348 | diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h | ||
1349 | index 5698502..014a624 100644 | ||
1350 | --- a/arch/powerpc/include/asm/elf.h | ||
1351 | +++ b/arch/powerpc/include/asm/elf.h | ||
1352 | @@ -236,10 +236,14 @@ typedef elf_vrregset_t elf_fpxregset_t; | ||
1353 | #ifdef __powerpc64__ | ||
1354 | # define SET_PERSONALITY(ex) \ | ||
1355 | do { \ | ||
1356 | + unsigned long new_flags = 0; \ | ||
1357 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | ||
1358 | - set_thread_flag(TIF_32BIT); \ | ||
1359 | + new_flags = _TIF_32BIT; \ | ||
1360 | + if ((current_thread_info()->flags & _TIF_32BIT) \ | ||
1361 | + != new_flags) \ | ||
1362 | + set_thread_flag(TIF_ABI_PENDING); \ | ||
1363 | else \ | ||
1364 | - clear_thread_flag(TIF_32BIT); \ | ||
1365 | + clear_thread_flag(TIF_ABI_PENDING); \ | ||
1366 | if (personality(current->personality) != PER_LINUX32) \ | ||
1367 | set_personality(PER_LINUX | \ | ||
1368 | (current->personality & (~PER_MASK))); \ | ||
1369 | diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h | ||
1370 | index 0192a4e..0845488 100644 | ||
1371 | --- a/arch/powerpc/include/asm/module.h | ||
1372 | +++ b/arch/powerpc/include/asm/module.h | ||
1373 | @@ -87,10 +87,5 @@ struct exception_table_entry; | ||
1374 | void sort_ex_table(struct exception_table_entry *start, | ||
1375 | struct exception_table_entry *finish); | ||
1376 | |||
1377 | -#ifdef CONFIG_MODVERSIONS | ||
1378 | -#define ARCH_RELOCATES_KCRCTAB | ||
1379 | - | ||
1380 | -extern const unsigned long reloc_start[]; | ||
1381 | -#endif | ||
1382 | #endif /* __KERNEL__ */ | ||
1383 | #endif /* _ASM_POWERPC_MODULE_H */ | ||
1384 | diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h | ||
1385 | index aa9d383..c8b3292 100644 | ||
1386 | --- a/arch/powerpc/include/asm/thread_info.h | ||
1387 | +++ b/arch/powerpc/include/asm/thread_info.h | ||
1388 | @@ -111,6 +111,7 @@ static inline struct thread_info *current_thread_info(void) | ||
1389 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ | ||
1390 | #define TIF_FREEZE 14 /* Freezing for suspend */ | ||
1391 | #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ | ||
1392 | +#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */ | ||
1393 | |||
1394 | /* as above, but as bit values */ | ||
1395 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
1396 | @@ -128,6 +129,7 @@ static inline struct thread_info *current_thread_info(void) | ||
1397 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | ||
1398 | #define _TIF_FREEZE (1<<TIF_FREEZE) | ||
1399 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) | ||
1400 | +#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | ||
1401 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) | ||
1402 | |||
1403 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | ||
1404 | diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c | ||
1405 | index f0c624f..a5b632e 100644 | ||
1406 | --- a/arch/powerpc/kernel/align.c | ||
1407 | +++ b/arch/powerpc/kernel/align.c | ||
1408 | @@ -642,14 +642,10 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, | ||
1409 | */ | ||
1410 | static int emulate_vsx(unsigned char __user *addr, unsigned int reg, | ||
1411 | unsigned int areg, struct pt_regs *regs, | ||
1412 | - unsigned int flags, unsigned int length, | ||
1413 | - unsigned int elsize) | ||
1414 | + unsigned int flags, unsigned int length) | ||
1415 | { | ||
1416 | char *ptr; | ||
1417 | - unsigned long *lptr; | ||
1418 | int ret = 0; | ||
1419 | - int sw = 0; | ||
1420 | - int i, j; | ||
1421 | |||
1422 | flush_vsx_to_thread(current); | ||
1423 | |||
1424 | @@ -658,35 +654,19 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, | ||
1425 | else | ||
1426 | ptr = (char *) &current->thread.vr[reg - 32]; | ||
1427 | |||
1428 | - lptr = (unsigned long *) ptr; | ||
1429 | - | ||
1430 | - if (flags & SW) | ||
1431 | - sw = elsize-1; | ||
1432 | - | ||
1433 | - for (j = 0; j < length; j += elsize) { | ||
1434 | - for (i = 0; i < elsize; ++i) { | ||
1435 | - if (flags & ST) | ||
1436 | - ret |= __put_user(ptr[i^sw], addr + i); | ||
1437 | - else | ||
1438 | - ret |= __get_user(ptr[i^sw], addr + i); | ||
1439 | + if (flags & ST) | ||
1440 | + ret = __copy_to_user(addr, ptr, length); | ||
1441 | + else { | ||
1442 | + if (flags & SPLT){ | ||
1443 | + ret = __copy_from_user(ptr, addr, length); | ||
1444 | + ptr += length; | ||
1445 | } | ||
1446 | - ptr += elsize; | ||
1447 | - addr += elsize; | ||
1448 | + ret |= __copy_from_user(ptr, addr, length); | ||
1449 | } | ||
1450 | - | ||
1451 | - if (!ret) { | ||
1452 | - if (flags & U) | ||
1453 | - regs->gpr[areg] = regs->dar; | ||
1454 | - | ||
1455 | - /* Splat load copies the same data to top and bottom 8 bytes */ | ||
1456 | - if (flags & SPLT) | ||
1457 | - lptr[1] = lptr[0]; | ||
1458 | - /* For 8 byte loads, zero the top 8 bytes */ | ||
1459 | - else if (!(flags & ST) && (8 == length)) | ||
1460 | - lptr[1] = 0; | ||
1461 | - } else | ||
1462 | + if (flags & U) | ||
1463 | + regs->gpr[areg] = regs->dar; | ||
1464 | + if (ret) | ||
1465 | return -EFAULT; | ||
1466 | - | ||
1467 | return 1; | ||
1468 | } | ||
1469 | #endif | ||
1470 | @@ -787,25 +767,16 @@ int fix_alignment(struct pt_regs *regs) | ||
1471 | |||
1472 | #ifdef CONFIG_VSX | ||
1473 | if ((instruction & 0xfc00003e) == 0x7c000018) { | ||
1474 | - unsigned int elsize; | ||
1475 | - | ||
1476 | - /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ | ||
1477 | + /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */ | ||
1478 | reg |= (instruction & 0x1) << 5; | ||
1479 | /* Simple inline decoder instead of a table */ | ||
1480 | - /* VSX has only 8 and 16 byte memory accesses */ | ||
1481 | - nb = 8; | ||
1482 | if (instruction & 0x200) | ||
1483 | nb = 16; | ||
1484 | - | ||
1485 | - /* Vector stores in little-endian mode swap individual | ||
1486 | - elements, so process them separately */ | ||
1487 | - elsize = 4; | ||
1488 | - if (instruction & 0x80) | ||
1489 | - elsize = 8; | ||
1490 | - | ||
1491 | + else if (instruction & 0x080) | ||
1492 | + nb = 8; | ||
1493 | + else | ||
1494 | + nb = 4; | ||
1495 | flags = 0; | ||
1496 | - if (regs->msr & MSR_LE) | ||
1497 | - flags |= SW; | ||
1498 | if (instruction & 0x100) | ||
1499 | flags |= ST; | ||
1500 | if (instruction & 0x040) | ||
1501 | @@ -816,7 +787,7 @@ int fix_alignment(struct pt_regs *regs) | ||
1502 | nb = 8; | ||
1503 | } | ||
1504 | PPC_WARN_EMULATED(vsx); | ||
1505 | - return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize); | ||
1506 | + return emulate_vsx(addr, reg, areg, regs, flags, nb); | ||
1507 | } | ||
1508 | #endif | ||
1509 | /* A size of 0 indicates an instruction we don't support, with | ||
1510 | diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c | ||
1511 | index cadbed6..e8dfdbd 100644 | ||
1512 | --- a/arch/powerpc/kernel/pci-common.c | ||
1513 | +++ b/arch/powerpc/kernel/pci-common.c | ||
1514 | @@ -1107,12 +1107,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | ||
1515 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
1516 | struct dev_archdata *sd = &dev->dev.archdata; | ||
1517 | |||
1518 | - /* Cardbus can call us to add new devices to a bus, so ignore | ||
1519 | - * those who are already fully discovered | ||
1520 | - */ | ||
1521 | - if (dev->is_added) | ||
1522 | - continue; | ||
1523 | - | ||
1524 | /* Setup OF node pointer in archdata */ | ||
1525 | sd->of_node = pci_device_to_OF_node(dev); | ||
1526 | |||
1527 | @@ -1153,13 +1147,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | ||
1528 | } | ||
1529 | EXPORT_SYMBOL(pcibios_fixup_bus); | ||
1530 | |||
1531 | -void __devinit pci_fixup_cardbus(struct pci_bus *bus) | ||
1532 | -{ | ||
1533 | - /* Now fixup devices on that bus */ | ||
1534 | - pcibios_setup_bus_devices(bus); | ||
1535 | -} | ||
1536 | - | ||
1537 | - | ||
1538 | static int skip_isa_ioresource_align(struct pci_dev *dev) | ||
1539 | { | ||
1540 | if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) && | ||
1541 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c | ||
1542 | index 7b816da..c930ac3 100644 | ||
1543 | --- a/arch/powerpc/kernel/process.c | ||
1544 | +++ b/arch/powerpc/kernel/process.c | ||
1545 | @@ -554,6 +554,18 @@ void exit_thread(void) | ||
1546 | |||
1547 | void flush_thread(void) | ||
1548 | { | ||
1549 | +#ifdef CONFIG_PPC64 | ||
1550 | + struct thread_info *t = current_thread_info(); | ||
1551 | + | ||
1552 | + if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { | ||
1553 | + clear_ti_thread_flag(t, TIF_ABI_PENDING); | ||
1554 | + if (test_ti_thread_flag(t, TIF_32BIT)) | ||
1555 | + clear_ti_thread_flag(t, TIF_32BIT); | ||
1556 | + else | ||
1557 | + set_ti_thread_flag(t, TIF_32BIT); | ||
1558 | + } | ||
1559 | +#endif | ||
1560 | + | ||
1561 | discard_lazy_cpu_state(); | ||
1562 | |||
1563 | if (current->thread.dabr) { | ||
1564 | diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c | ||
1565 | index 3370e62..c04832c 100644 | ||
1566 | --- a/arch/powerpc/kernel/syscalls.c | ||
1567 | +++ b/arch/powerpc/kernel/syscalls.c | ||
1568 | @@ -140,6 +140,7 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, | ||
1569 | unsigned long prot, unsigned long flags, | ||
1570 | unsigned long fd, unsigned long off, int shift) | ||
1571 | { | ||
1572 | + struct file * file = NULL; | ||
1573 | unsigned long ret = -EINVAL; | ||
1574 | |||
1575 | if (!arch_validate_prot(prot)) | ||
1576 | @@ -150,8 +151,20 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len, | ||
1577 | goto out; | ||
1578 | off >>= shift; | ||
1579 | } | ||
1580 | + | ||
1581 | + ret = -EBADF; | ||
1582 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1583 | + if (!(file = fget(fd))) | ||
1584 | + goto out; | ||
1585 | + } | ||
1586 | + | ||
1587 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1588 | |||
1589 | - ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off); | ||
1590 | + down_write(&current->mm->mmap_sem); | ||
1591 | + ret = do_mmap_pgoff(file, addr, len, prot, flags, off); | ||
1592 | + up_write(&current->mm->mmap_sem); | ||
1593 | + if (file) | ||
1594 | + fput(file); | ||
1595 | out: | ||
1596 | return ret; | ||
1597 | } | ||
1598 | diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S | ||
1599 | index fe46048..67b6916 100644 | ||
1600 | --- a/arch/powerpc/kernel/vector.S | ||
1601 | +++ b/arch/powerpc/kernel/vector.S | ||
1602 | @@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec) | ||
1603 | * all 1's | ||
1604 | */ | ||
1605 | mfspr r4,SPRN_VRSAVE | ||
1606 | - cmpwi 0,r4,0 | ||
1607 | + cmpdi 0,r4,0 | ||
1608 | bne+ 1f | ||
1609 | li r4,-1 | ||
1610 | mtspr SPRN_VRSAVE,r4 | ||
1611 | diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S | ||
1612 | index dcd01c8..27735a7 100644 | ||
1613 | --- a/arch/powerpc/kernel/vmlinux.lds.S | ||
1614 | +++ b/arch/powerpc/kernel/vmlinux.lds.S | ||
1615 | @@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4; | ||
1616 | #endif | ||
1617 | SECTIONS | ||
1618 | { | ||
1619 | - . = 0; | ||
1620 | - reloc_start = .; | ||
1621 | - | ||
1622 | . = KERNELBASE; | ||
1623 | |||
1624 | /* | ||
1625 | diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c | ||
1626 | index e82749b..ae88b14 100644 | ||
1627 | --- a/arch/powerpc/sysdev/fsl_pci.c | ||
1628 | +++ b/arch/powerpc/sysdev/fsl_pci.c | ||
1629 | @@ -392,22 +392,8 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8536, quirk_fsl_pcie_header); | ||
1630 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641, quirk_fsl_pcie_header); | ||
1631 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8641D, quirk_fsl_pcie_header); | ||
1632 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_MPC8610, quirk_fsl_pcie_header); | ||
1633 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011E, quirk_fsl_pcie_header); | ||
1634 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1011, quirk_fsl_pcie_header); | ||
1635 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013E, quirk_fsl_pcie_header); | ||
1636 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1013, quirk_fsl_pcie_header); | ||
1637 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020E, quirk_fsl_pcie_header); | ||
1638 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1020, quirk_fsl_pcie_header); | ||
1639 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022E, quirk_fsl_pcie_header); | ||
1640 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P1022, quirk_fsl_pcie_header); | ||
1641 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); | ||
1642 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); | ||
1643 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); | ||
1644 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); | ||
1645 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); | ||
1646 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); | ||
1647 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); | ||
1648 | -DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); | ||
1649 | #endif /* CONFIG_PPC_85xx || CONFIG_PPC_86xx */ | ||
1650 | |||
1651 | #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) | ||
1652 | diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h | ||
1653 | index 82b32a1..3dfcaeb 100644 | ||
1654 | --- a/arch/s390/include/asm/kvm.h | ||
1655 | +++ b/arch/s390/include/asm/kvm.h | ||
1656 | @@ -1,5 +1,6 @@ | ||
1657 | #ifndef __LINUX_KVM_S390_H | ||
1658 | #define __LINUX_KVM_S390_H | ||
1659 | + | ||
1660 | /* | ||
1661 | * asm-s390/kvm.h - KVM s390 specific structures and definitions | ||
1662 | * | ||
1663 | @@ -14,8 +15,6 @@ | ||
1664 | */ | ||
1665 | #include <linux/types.h> | ||
1666 | |||
1667 | -#define __KVM_S390 | ||
1668 | - | ||
1669 | /* for KVM_GET_REGS and KVM_SET_REGS */ | ||
1670 | struct kvm_regs { | ||
1671 | /* general purpose regs for s390 */ | ||
1672 | diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c | ||
1673 | index 9c746c0..0debcec 100644 | ||
1674 | --- a/arch/s390/kernel/compat_linux.c | ||
1675 | +++ b/arch/s390/kernel/compat_linux.c | ||
1676 | @@ -683,6 +683,38 @@ struct mmap_arg_struct_emu31 { | ||
1677 | u32 offset; | ||
1678 | }; | ||
1679 | |||
1680 | +/* common code for old and new mmaps */ | ||
1681 | +static inline long do_mmap2( | ||
1682 | + unsigned long addr, unsigned long len, | ||
1683 | + unsigned long prot, unsigned long flags, | ||
1684 | + unsigned long fd, unsigned long pgoff) | ||
1685 | +{ | ||
1686 | + struct file * file = NULL; | ||
1687 | + unsigned long error = -EBADF; | ||
1688 | + | ||
1689 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1690 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1691 | + file = fget(fd); | ||
1692 | + if (!file) | ||
1693 | + goto out; | ||
1694 | + } | ||
1695 | + | ||
1696 | + down_write(&current->mm->mmap_sem); | ||
1697 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1698 | + if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) { | ||
1699 | + /* Result is out of bounds. */ | ||
1700 | + do_munmap(current->mm, addr, len); | ||
1701 | + error = -ENOMEM; | ||
1702 | + } | ||
1703 | + up_write(&current->mm->mmap_sem); | ||
1704 | + | ||
1705 | + if (file) | ||
1706 | + fput(file); | ||
1707 | +out: | ||
1708 | + return error; | ||
1709 | +} | ||
1710 | + | ||
1711 | + | ||
1712 | asmlinkage unsigned long | ||
1713 | old32_mmap(struct mmap_arg_struct_emu31 __user *arg) | ||
1714 | { | ||
1715 | @@ -696,8 +728,7 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg) | ||
1716 | if (a.offset & ~PAGE_MASK) | ||
1717 | goto out; | ||
1718 | |||
1719 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | ||
1720 | - a.offset >> PAGE_SHIFT); | ||
1721 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
1722 | out: | ||
1723 | return error; | ||
1724 | } | ||
1725 | @@ -710,7 +741,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) | ||
1726 | |||
1727 | if (copy_from_user(&a, arg, sizeof(a))) | ||
1728 | goto out; | ||
1729 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
1730 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
1731 | out: | ||
1732 | return error; | ||
1733 | } | ||
1734 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S | ||
1735 | index e8ef21c..48215d1 100644 | ||
1736 | --- a/arch/s390/kernel/entry.S | ||
1737 | +++ b/arch/s390/kernel/entry.S | ||
1738 | @@ -571,7 +571,6 @@ pgm_svcper: | ||
1739 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | ||
1740 | oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
1741 | TRACE_IRQS_ON | ||
1742 | - lm %r2,%r6,SP_R2(%r15) # load svc arguments | ||
1743 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
1744 | b BASED(sysc_do_svc) | ||
1745 | |||
1746 | diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S | ||
1747 | index f33658f..9aff1d4 100644 | ||
1748 | --- a/arch/s390/kernel/entry64.S | ||
1749 | +++ b/arch/s390/kernel/entry64.S | ||
1750 | @@ -549,7 +549,6 @@ pgm_svcper: | ||
1751 | mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID | ||
1752 | oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP | ||
1753 | TRACE_IRQS_ON | ||
1754 | - lmg %r2,%r6,SP_R2(%r15) # load svc arguments | ||
1755 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | ||
1756 | j sysc_do_svc | ||
1757 | |||
1758 | diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S | ||
1759 | index d984a2a..6a25080 100644 | ||
1760 | --- a/arch/s390/kernel/head64.S | ||
1761 | +++ b/arch/s390/kernel/head64.S | ||
1762 | @@ -83,8 +83,6 @@ startup_continue: | ||
1763 | slr %r0,%r0 # set cpuid to zero | ||
1764 | sigp %r1,%r0,0x12 # switch to esame mode | ||
1765 | sam64 # switch to 64 bit mode | ||
1766 | - llgfr %r13,%r13 # clear high-order half of base reg | ||
1767 | - lmh %r0,%r15,.Lzero64-.LPG1(%r13) # clear high-order half | ||
1768 | lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | ||
1769 | lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | ||
1770 | # move IPL device to lowcore | ||
1771 | @@ -129,7 +127,6 @@ startup_continue: | ||
1772 | .L4malign:.quad 0xffffffffffc00000 | ||
1773 | .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 | ||
1774 | .Lnop: .long 0x07000700 | ||
1775 | -.Lzero64:.fill 16,4,0x0 | ||
1776 | #ifdef CONFIG_ZFCPDUMP | ||
1777 | .Lcurrent_cpu: | ||
1778 | .long 0x0 | ||
1779 | diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c | ||
1780 | index 86a74c9..e9d94f6 100644 | ||
1781 | --- a/arch/s390/kernel/sys_s390.c | ||
1782 | +++ b/arch/s390/kernel/sys_s390.c | ||
1783 | @@ -32,6 +32,32 @@ | ||
1784 | #include <asm/uaccess.h> | ||
1785 | #include "entry.h" | ||
1786 | |||
1787 | +/* common code for old and new mmaps */ | ||
1788 | +static inline long do_mmap2( | ||
1789 | + unsigned long addr, unsigned long len, | ||
1790 | + unsigned long prot, unsigned long flags, | ||
1791 | + unsigned long fd, unsigned long pgoff) | ||
1792 | +{ | ||
1793 | + long error = -EBADF; | ||
1794 | + struct file * file = NULL; | ||
1795 | + | ||
1796 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1797 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1798 | + file = fget(fd); | ||
1799 | + if (!file) | ||
1800 | + goto out; | ||
1801 | + } | ||
1802 | + | ||
1803 | + down_write(&current->mm->mmap_sem); | ||
1804 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1805 | + up_write(&current->mm->mmap_sem); | ||
1806 | + | ||
1807 | + if (file) | ||
1808 | + fput(file); | ||
1809 | +out: | ||
1810 | + return error; | ||
1811 | +} | ||
1812 | + | ||
1813 | /* | ||
1814 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
1815 | * calls. Linux for S/390 isn't able to handle more than 5 | ||
1816 | @@ -55,7 +81,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg) | ||
1817 | |||
1818 | if (copy_from_user(&a, arg, sizeof(a))) | ||
1819 | goto out; | ||
1820 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
1821 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); | ||
1822 | out: | ||
1823 | return error; | ||
1824 | } | ||
1825 | @@ -72,7 +98,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg) | ||
1826 | if (a.offset & ~PAGE_MASK) | ||
1827 | goto out; | ||
1828 | |||
1829 | - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
1830 | + error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); | ||
1831 | out: | ||
1832 | return error; | ||
1833 | } | ||
1834 | diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c | ||
1835 | index b400964..ba9d8a7 100644 | ||
1836 | --- a/arch/s390/kvm/intercept.c | ||
1837 | +++ b/arch/s390/kvm/intercept.c | ||
1838 | @@ -213,7 +213,7 @@ static int handle_instruction_and_prog(struct kvm_vcpu *vcpu) | ||
1839 | return rc2; | ||
1840 | } | ||
1841 | |||
1842 | -static const intercept_handler_t intercept_funcs[] = { | ||
1843 | +static const intercept_handler_t intercept_funcs[0x48 >> 2] = { | ||
1844 | [0x00 >> 2] = handle_noop, | ||
1845 | [0x04 >> 2] = handle_instruction, | ||
1846 | [0x08 >> 2] = handle_prog, | ||
1847 | @@ -230,7 +230,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) | ||
1848 | intercept_handler_t func; | ||
1849 | u8 code = vcpu->arch.sie_block->icptcode; | ||
1850 | |||
1851 | - if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) | ||
1852 | + if (code & 3 || code > 0x48) | ||
1853 | return -ENOTSUPP; | ||
1854 | func = intercept_funcs[code >> 2]; | ||
1855 | if (func) | ||
1856 | diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c | ||
1857 | index ca2d312..07ced89 100644 | ||
1858 | --- a/arch/s390/kvm/kvm-s390.c | ||
1859 | +++ b/arch/s390/kvm/kvm-s390.c | ||
1860 | @@ -116,16 +116,10 @@ long kvm_arch_dev_ioctl(struct file *filp, | ||
1861 | |||
1862 | int kvm_dev_ioctl_check_extension(long ext) | ||
1863 | { | ||
1864 | - int r; | ||
1865 | - | ||
1866 | switch (ext) { | ||
1867 | - case KVM_CAP_S390_PSW: | ||
1868 | - r = 1; | ||
1869 | - break; | ||
1870 | default: | ||
1871 | - r = 0; | ||
1872 | + return 0; | ||
1873 | } | ||
1874 | - return r; | ||
1875 | } | ||
1876 | |||
1877 | /* Section: vm related */ | ||
1878 | @@ -425,10 +419,8 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) | ||
1879 | vcpu_load(vcpu); | ||
1880 | if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING) | ||
1881 | rc = -EBUSY; | ||
1882 | - else { | ||
1883 | - vcpu->run->psw_mask = psw.mask; | ||
1884 | - vcpu->run->psw_addr = psw.addr; | ||
1885 | - } | ||
1886 | + else | ||
1887 | + vcpu->arch.sie_block->gpsw = psw; | ||
1888 | vcpu_put(vcpu); | ||
1889 | return rc; | ||
1890 | } | ||
1891 | @@ -516,6 +508,9 @@ rerun_vcpu: | ||
1892 | |||
1893 | switch (kvm_run->exit_reason) { | ||
1894 | case KVM_EXIT_S390_SIEIC: | ||
1895 | + vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask; | ||
1896 | + vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr; | ||
1897 | + break; | ||
1898 | case KVM_EXIT_UNKNOWN: | ||
1899 | case KVM_EXIT_INTR: | ||
1900 | case KVM_EXIT_S390_RESET: | ||
1901 | @@ -524,9 +519,6 @@ rerun_vcpu: | ||
1902 | BUG(); | ||
1903 | } | ||
1904 | |||
1905 | - vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; | ||
1906 | - vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; | ||
1907 | - | ||
1908 | might_fault(); | ||
1909 | |||
1910 | do { | ||
1911 | @@ -546,6 +538,8 @@ rerun_vcpu: | ||
1912 | /* intercept cannot be handled in-kernel, prepare kvm-run */ | ||
1913 | kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; | ||
1914 | kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; | ||
1915 | + kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask; | ||
1916 | + kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr; | ||
1917 | kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; | ||
1918 | kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; | ||
1919 | rc = 0; | ||
1920 | @@ -557,9 +551,6 @@ rerun_vcpu: | ||
1921 | rc = 0; | ||
1922 | } | ||
1923 | |||
1924 | - kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; | ||
1925 | - kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; | ||
1926 | - | ||
1927 | if (vcpu->sigset_active) | ||
1928 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | ||
1929 | |||
1930 | diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c | ||
1931 | index 15ee111..40c8c67 100644 | ||
1932 | --- a/arch/s390/kvm/sigp.c | ||
1933 | +++ b/arch/s390/kvm/sigp.c | ||
1934 | @@ -188,9 +188,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | ||
1935 | |||
1936 | /* make sure that the new value is valid memory */ | ||
1937 | address = address & 0x7fffe000u; | ||
1938 | - if ((copy_from_user(&tmp, (void __user *) | ||
1939 | - (address + vcpu->arch.sie_block->gmsor) , 1)) || | ||
1940 | - (copy_from_user(&tmp, (void __user *)(address + | ||
1941 | + if ((copy_from_guest(vcpu, &tmp, | ||
1942 | + (u64) (address + vcpu->arch.sie_block->gmsor) , 1)) || | ||
1943 | + (copy_from_guest(vcpu, &tmp, (u64) (address + | ||
1944 | vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { | ||
1945 | *reg |= SIGP_STAT_INVALID_PARAMETER; | ||
1946 | return 1; /* invalid parameter */ | ||
1947 | diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c | ||
1948 | index 856ed68..0012494 100644 | ||
1949 | --- a/arch/score/kernel/sys_score.c | ||
1950 | +++ b/arch/score/kernel/sys_score.c | ||
1951 | @@ -36,16 +36,34 @@ asmlinkage long | ||
1952 | sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
1953 | unsigned long flags, unsigned long fd, unsigned long pgoff) | ||
1954 | { | ||
1955 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
1956 | + int error = -EBADF; | ||
1957 | + struct file *file = NULL; | ||
1958 | + | ||
1959 | + if (pgoff & (~PAGE_MASK >> 12)) | ||
1960 | + return -EINVAL; | ||
1961 | + | ||
1962 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
1963 | + if (!(flags & MAP_ANONYMOUS)) { | ||
1964 | + file = fget(fd); | ||
1965 | + if (!file) | ||
1966 | + return error; | ||
1967 | + } | ||
1968 | + | ||
1969 | + down_write(&current->mm->mmap_sem); | ||
1970 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
1971 | + up_write(&current->mm->mmap_sem); | ||
1972 | + | ||
1973 | + if (file) | ||
1974 | + fput(file); | ||
1975 | + | ||
1976 | + return error; | ||
1977 | } | ||
1978 | |||
1979 | asmlinkage long | ||
1980 | sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, | ||
1981 | - unsigned long flags, unsigned long fd, off_t offset) | ||
1982 | + unsigned long flags, unsigned long fd, off_t pgoff) | ||
1983 | { | ||
1984 | - if (unlikely(offset & ~PAGE_MASK)) | ||
1985 | - return -EINVAL; | ||
1986 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
1987 | + return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); | ||
1988 | } | ||
1989 | |||
1990 | asmlinkage long | ||
1991 | diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h | ||
1992 | index ba64e7f..c0d359c 100644 | ||
1993 | --- a/arch/sh/include/asm/pgtable_32.h | ||
1994 | +++ b/arch/sh/include/asm/pgtable_32.h | ||
1995 | @@ -344,8 +344,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) | ||
1996 | #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL) | ||
1997 | |||
1998 | #ifdef CONFIG_X2TLB | ||
1999 | -#define pte_write(pte) \ | ||
2000 | - ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)) | ||
2001 | +#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) | ||
2002 | #else | ||
2003 | #define pte_write(pte) ((pte).pte_low & _PAGE_RW) | ||
2004 | #endif | ||
2005 | @@ -359,7 +358,7 @@ static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } | ||
2006 | * individually toggled (and user permissions are entirely decoupled from | ||
2007 | * kernel permissions), we attempt to couple them a bit more sanely here. | ||
2008 | */ | ||
2009 | -PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE)); | ||
2010 | +PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); | ||
2011 | PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); | ||
2012 | PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); | ||
2013 | #else | ||
2014 | diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c | ||
2015 | index 44aa119..1192398 100644 | ||
2016 | --- a/arch/sh/kernel/process_64.c | ||
2017 | +++ b/arch/sh/kernel/process_64.c | ||
2018 | @@ -367,7 +367,7 @@ void exit_thread(void) | ||
2019 | void flush_thread(void) | ||
2020 | { | ||
2021 | |||
2022 | - /* Called by fs/exec.c (setup_new_exec) to remove traces of a | ||
2023 | + /* Called by fs/exec.c (flush_old_exec) to remove traces of a | ||
2024 | * previously running executable. */ | ||
2025 | #ifdef CONFIG_SH_FPU | ||
2026 | if (last_task_used_math == current) { | ||
2027 | diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c | ||
2028 | index 71399cd..8aa5d1c 100644 | ||
2029 | --- a/arch/sh/kernel/sys_sh.c | ||
2030 | +++ b/arch/sh/kernel/sys_sh.c | ||
2031 | @@ -28,13 +28,37 @@ | ||
2032 | #include <asm/cacheflush.h> | ||
2033 | #include <asm/cachectl.h> | ||
2034 | |||
2035 | +static inline long | ||
2036 | +do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | ||
2037 | + unsigned long flags, int fd, unsigned long pgoff) | ||
2038 | +{ | ||
2039 | + int error = -EBADF; | ||
2040 | + struct file *file = NULL; | ||
2041 | + | ||
2042 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
2043 | + if (!(flags & MAP_ANONYMOUS)) { | ||
2044 | + file = fget(fd); | ||
2045 | + if (!file) | ||
2046 | + goto out; | ||
2047 | + } | ||
2048 | + | ||
2049 | + down_write(&current->mm->mmap_sem); | ||
2050 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
2051 | + up_write(&current->mm->mmap_sem); | ||
2052 | + | ||
2053 | + if (file) | ||
2054 | + fput(file); | ||
2055 | +out: | ||
2056 | + return error; | ||
2057 | +} | ||
2058 | + | ||
2059 | asmlinkage int old_mmap(unsigned long addr, unsigned long len, | ||
2060 | unsigned long prot, unsigned long flags, | ||
2061 | int fd, unsigned long off) | ||
2062 | { | ||
2063 | if (off & ~PAGE_MASK) | ||
2064 | return -EINVAL; | ||
2065 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); | ||
2066 | + return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); | ||
2067 | } | ||
2068 | |||
2069 | asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
2070 | @@ -50,7 +74,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
2071 | |||
2072 | pgoff >>= PAGE_SHIFT - 12; | ||
2073 | |||
2074 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); | ||
2075 | + return do_mmap2(addr, len, prot, flags, fd, pgoff); | ||
2076 | } | ||
2077 | |||
2078 | /* | ||
2079 | diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c | ||
2080 | index afeb710..d2984fa 100644 | ||
2081 | --- a/arch/sh/mm/mmap.c | ||
2082 | +++ b/arch/sh/mm/mmap.c | ||
2083 | @@ -54,8 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | ||
2084 | /* We do not accept a shared mapping if it would violate | ||
2085 | * cache aliasing constraints. | ||
2086 | */ | ||
2087 | - if ((flags & MAP_SHARED) && | ||
2088 | - ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask)) | ||
2089 | + if ((flags & MAP_SHARED) && (addr & shm_align_mask)) | ||
2090 | return -EINVAL; | ||
2091 | return addr; | ||
2092 | } | ||
2093 | diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile | ||
2094 | index 113225b..dfe272d 100644 | ||
2095 | --- a/arch/sparc/Makefile | ||
2096 | +++ b/arch/sparc/Makefile | ||
2097 | @@ -27,7 +27,6 @@ AS := $(AS) -32 | ||
2098 | LDFLAGS := -m elf32_sparc | ||
2099 | CHECKFLAGS += -D__sparc__ | ||
2100 | export BITS := 32 | ||
2101 | -UTS_MACHINE := sparc | ||
2102 | |||
2103 | #KBUILD_CFLAGS += -g -pipe -fcall-used-g5 -fcall-used-g7 | ||
2104 | KBUILD_CFLAGS += -m32 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 | ||
2105 | @@ -47,7 +46,6 @@ CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64 | ||
2106 | |||
2107 | LDFLAGS := -m elf64_sparc | ||
2108 | export BITS := 64 | ||
2109 | -UTS_MACHINE := sparc64 | ||
2110 | |||
2111 | KBUILD_CFLAGS += -m64 -pipe -mno-fpu -mcpu=ultrasparc -mcmodel=medlow \ | ||
2112 | -ffixed-g4 -ffixed-g5 -fcall-used-g7 -Wno-sign-compare \ | ||
2113 | diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h | ||
2114 | index 9968085..d42e393 100644 | ||
2115 | --- a/arch/sparc/include/asm/elf_64.h | ||
2116 | +++ b/arch/sparc/include/asm/elf_64.h | ||
2117 | @@ -196,10 +196,17 @@ static inline unsigned int sparc64_elf_hwcap(void) | ||
2118 | #define ELF_PLATFORM (NULL) | ||
2119 | |||
2120 | #define SET_PERSONALITY(ex) \ | ||
2121 | -do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | ||
2122 | - set_thread_flag(TIF_32BIT); \ | ||
2123 | +do { unsigned long new_flags = current_thread_info()->flags; \ | ||
2124 | + new_flags &= _TIF_32BIT; \ | ||
2125 | + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | ||
2126 | + new_flags |= _TIF_32BIT; \ | ||
2127 | else \ | ||
2128 | - clear_thread_flag(TIF_32BIT); \ | ||
2129 | + new_flags &= ~_TIF_32BIT; \ | ||
2130 | + if ((current_thread_info()->flags & _TIF_32BIT) \ | ||
2131 | + != new_flags) \ | ||
2132 | + set_thread_flag(TIF_ABI_PENDING); \ | ||
2133 | + else \ | ||
2134 | + clear_thread_flag(TIF_ABI_PENDING); \ | ||
2135 | /* flush_thread will update pgd cache */ \ | ||
2136 | if (personality(current->personality) != PER_LINUX32) \ | ||
2137 | set_personality(PER_LINUX | \ | ||
2138 | diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h | ||
2139 | index f78ad9a..1b45a7b 100644 | ||
2140 | --- a/arch/sparc/include/asm/thread_info_64.h | ||
2141 | +++ b/arch/sparc/include/asm/thread_info_64.h | ||
2142 | @@ -227,11 +227,12 @@ register struct thread_info *current_thread_info_reg asm("g6"); | ||
2143 | /* flag bit 8 is available */ | ||
2144 | #define TIF_SECCOMP 9 /* secure computing */ | ||
2145 | #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ | ||
2146 | +/* flag bit 11 is available */ | ||
2147 | /* NOTE: Thread flags >= 12 should be ones we have no interest | ||
2148 | * in using in assembly, else we can't use the mask as | ||
2149 | * an immediate value in instructions such as andcc. | ||
2150 | */ | ||
2151 | -/* flag bit 12 is available */ | ||
2152 | +#define TIF_ABI_PENDING 12 | ||
2153 | #define TIF_MEMDIE 13 | ||
2154 | #define TIF_POLLING_NRFLAG 14 | ||
2155 | #define TIF_FREEZE 15 /* is freezing for suspend */ | ||
2156 | @@ -245,6 +246,7 @@ register struct thread_info *current_thread_info_reg asm("g6"); | ||
2157 | #define _TIF_32BIT (1<<TIF_32BIT) | ||
2158 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
2159 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
2160 | +#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | ||
2161 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | ||
2162 | #define _TIF_FREEZE (1<<TIF_FREEZE) | ||
2163 | |||
2164 | diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c | ||
2165 | index e0ba898..cb3c72c 100644 | ||
2166 | --- a/arch/sparc/kernel/ldc.c | ||
2167 | +++ b/arch/sparc/kernel/ldc.c | ||
2168 | @@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name) | ||
2169 | snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); | ||
2170 | |||
2171 | err = request_irq(lp->cfg.rx_irq, ldc_rx, | ||
2172 | - IRQF_SAMPLE_RANDOM | IRQF_DISABLED, | ||
2173 | + IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, | ||
2174 | lp->rx_irq_name, lp); | ||
2175 | if (err) | ||
2176 | return err; | ||
2177 | |||
2178 | err = request_irq(lp->cfg.tx_irq, ldc_tx, | ||
2179 | - IRQF_SAMPLE_RANDOM | IRQF_DISABLED, | ||
2180 | + IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED, | ||
2181 | lp->tx_irq_name, lp); | ||
2182 | if (err) { | ||
2183 | free_irq(lp->cfg.rx_irq, lp); | ||
2184 | diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c | ||
2185 | index 4771274..b129611 100644 | ||
2186 | --- a/arch/sparc/kernel/nmi.c | ||
2187 | +++ b/arch/sparc/kernel/nmi.c | ||
2188 | @@ -96,6 +96,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | ||
2189 | int cpu = smp_processor_id(); | ||
2190 | |||
2191 | clear_softint(1 << irq); | ||
2192 | + pcr_ops->write(PCR_PIC_PRIV); | ||
2193 | |||
2194 | local_cpu_data().__nmi_count++; | ||
2195 | |||
2196 | @@ -104,8 +105,6 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) | ||
2197 | if (notify_die(DIE_NMI, "nmi", regs, 0, | ||
2198 | pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) | ||
2199 | touched = 1; | ||
2200 | - else | ||
2201 | - pcr_ops->write(PCR_PIC_PRIV); | ||
2202 | |||
2203 | sum = kstat_irqs_cpu(0, cpu); | ||
2204 | if (__get_cpu_var(nmi_touch)) { | ||
2205 | diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c | ||
2206 | index 0a6f2d1..881947e 100644 | ||
2207 | --- a/arch/sparc/kernel/of_device_64.c | ||
2208 | +++ b/arch/sparc/kernel/of_device_64.c | ||
2209 | @@ -104,19 +104,9 @@ static int of_bus_pci_map(u32 *addr, const u32 *range, | ||
2210 | int i; | ||
2211 | |||
2212 | /* Check address type match */ | ||
2213 | - if (!((addr[0] ^ range[0]) & 0x03000000)) | ||
2214 | - goto type_match; | ||
2215 | - | ||
2216 | - /* Special exception, we can map a 64-bit address into | ||
2217 | - * a 32-bit range. | ||
2218 | - */ | ||
2219 | - if ((addr[0] & 0x03000000) == 0x03000000 && | ||
2220 | - (range[0] & 0x03000000) == 0x02000000) | ||
2221 | - goto type_match; | ||
2222 | - | ||
2223 | - return -EINVAL; | ||
2224 | + if ((addr[0] ^ range[0]) & 0x03000000) | ||
2225 | + return -EINVAL; | ||
2226 | |||
2227 | -type_match: | ||
2228 | if (of_out_of_range(addr + 1, range + 1, range + na + pna, | ||
2229 | na - 1, ns)) | ||
2230 | return -EINVAL; | ||
2231 | diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c | ||
2232 | index 198fb4e..fa5936e 100644 | ||
2233 | --- a/arch/sparc/kernel/perf_event.c | ||
2234 | +++ b/arch/sparc/kernel/perf_event.c | ||
2235 | @@ -986,17 +986,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | ||
2236 | data.addr = 0; | ||
2237 | |||
2238 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
2239 | - | ||
2240 | - /* If the PMU has the TOE IRQ enable bits, we need to do a | ||
2241 | - * dummy write to the %pcr to clear the overflow bits and thus | ||
2242 | - * the interrupt. | ||
2243 | - * | ||
2244 | - * Do this before we peek at the counters to determine | ||
2245 | - * overflow so we don't lose any events. | ||
2246 | - */ | ||
2247 | - if (sparc_pmu->irq_bit) | ||
2248 | - pcr_ops->write(cpuc->pcr); | ||
2249 | - | ||
2250 | for (idx = 0; idx < MAX_HWEVENTS; idx++) { | ||
2251 | struct perf_event *event = cpuc->events[idx]; | ||
2252 | struct hw_perf_event *hwc; | ||
2253 | diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c | ||
2254 | index c3f1cce..18d6785 100644 | ||
2255 | --- a/arch/sparc/kernel/process_64.c | ||
2256 | +++ b/arch/sparc/kernel/process_64.c | ||
2257 | @@ -365,6 +365,14 @@ void flush_thread(void) | ||
2258 | struct thread_info *t = current_thread_info(); | ||
2259 | struct mm_struct *mm; | ||
2260 | |||
2261 | + if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { | ||
2262 | + clear_ti_thread_flag(t, TIF_ABI_PENDING); | ||
2263 | + if (test_ti_thread_flag(t, TIF_32BIT)) | ||
2264 | + clear_ti_thread_flag(t, TIF_32BIT); | ||
2265 | + else | ||
2266 | + set_ti_thread_flag(t, TIF_32BIT); | ||
2267 | + } | ||
2268 | + | ||
2269 | mm = t->task->mm; | ||
2270 | if (mm) | ||
2271 | tsb_context_switch(mm); | ||
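Taken together, the SET_PERSONALITY and flush_thread() hunks above restore the deferred ABI switch on sparc64: the ELF loader only records that a 32/64-bit change is wanted (TIF_ABI_PENDING), and flush_thread() performs the actual TIF_32BIT flip once the old image is gone. A minimal stand-alone sketch of that handshake, with the thread-flag helpers reduced to plain C variables for illustration rather than the kernel implementation:

/* Sketch only: thread flags modeled as plain booleans. */
#include <stdio.h>
#include <stdbool.h>

static bool tif_32bit;          /* models TIF_32BIT       */
static bool tif_abi_pending;    /* models TIF_ABI_PENDING */

/* exec time: record the desired ABI, but do not flip TIF_32BIT yet */
static void set_personality_sketch(bool want_32bit)
{
	tif_abi_pending = (want_32bit != tif_32bit);
}

/* flush_thread(): the old image is gone, so the flip is now safe */
static void flush_thread_sketch(void)
{
	if (tif_abi_pending) {
		tif_abi_pending = false;
		tif_32bit = !tif_32bit;
	}
}

int main(void)
{
	tif_32bit = false;              /* 64-bit parent process     */
	set_personality_sketch(true);   /* exec() of a 32-bit binary */
	flush_thread_sketch();
	printf("TIF_32BIT is now %d\n", (int)tif_32bit);   /* prints 1 */
	return 0;
}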
2272 | diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c | ||
2273 | index 3a82e65..03035c8 100644 | ||
2274 | --- a/arch/sparc/kernel/sys_sparc_32.c | ||
2275 | +++ b/arch/sparc/kernel/sys_sparc_32.c | ||
2276 | @@ -45,8 +45,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | ||
2277 | /* We do not accept a shared mapping if it would violate | ||
2278 | * cache aliasing constraints. | ||
2279 | */ | ||
2280 | - if ((flags & MAP_SHARED) && | ||
2281 | - ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) | ||
2282 | + if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1))) | ||
2283 | return -EINVAL; | ||
2284 | return addr; | ||
2285 | } | ||
2286 | @@ -80,6 +79,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi | ||
2287 | } | ||
2288 | } | ||
2289 | |||
2290 | +asmlinkage unsigned long sparc_brk(unsigned long brk) | ||
2291 | +{ | ||
2292 | + if(ARCH_SUN4C) { | ||
2293 | + if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000)) | ||
2294 | + return current->mm->brk; | ||
2295 | + } | ||
2296 | + return sys_brk(brk); | ||
2297 | +} | ||
2298 | + | ||
2299 | /* | ||
2300 | * sys_pipe() is the normal C calling standard for creating | ||
2301 | * a pipe. It's not the way unix traditionally does this, though. | ||
2302 | @@ -226,6 +234,31 @@ int sparc_mmap_check(unsigned long addr, unsigned long len) | ||
2303 | } | ||
2304 | |||
2305 | /* Linux version of mmap */ | ||
2306 | +static unsigned long do_mmap2(unsigned long addr, unsigned long len, | ||
2307 | + unsigned long prot, unsigned long flags, unsigned long fd, | ||
2308 | + unsigned long pgoff) | ||
2309 | +{ | ||
2310 | + struct file * file = NULL; | ||
2311 | + unsigned long retval = -EBADF; | ||
2312 | + | ||
2313 | + if (!(flags & MAP_ANONYMOUS)) { | ||
2314 | + file = fget(fd); | ||
2315 | + if (!file) | ||
2316 | + goto out; | ||
2317 | + } | ||
2318 | + | ||
2319 | + len = PAGE_ALIGN(len); | ||
2320 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
2321 | + | ||
2322 | + down_write(&current->mm->mmap_sem); | ||
2323 | + retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
2324 | + up_write(&current->mm->mmap_sem); | ||
2325 | + | ||
2326 | + if (file) | ||
2327 | + fput(file); | ||
2328 | +out: | ||
2329 | + return retval; | ||
2330 | +} | ||
2331 | |||
2332 | asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, | ||
2333 | unsigned long prot, unsigned long flags, unsigned long fd, | ||
2334 | @@ -233,16 +266,14 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, | ||
2335 | { | ||
2336 | /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE | ||
2337 | we have. */ | ||
2338 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
2339 | - pgoff >> (PAGE_SHIFT - 12)); | ||
2340 | + return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); | ||
2341 | } | ||
2342 | |||
2343 | asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | ||
2344 | unsigned long prot, unsigned long flags, unsigned long fd, | ||
2345 | unsigned long off) | ||
2346 | { | ||
2347 | - /* no alignment check? */ | ||
2348 | - return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
2349 | + return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
2350 | } | ||
2351 | |||
2352 | long sparc_remap_file_pages(unsigned long start, unsigned long size, | ||
2353 | @@ -256,6 +287,27 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size, | ||
2354 | (pgoff >> (PAGE_SHIFT - 12)), flags); | ||
2355 | } | ||
2356 | |||
2357 | +extern unsigned long do_mremap(unsigned long addr, | ||
2358 | + unsigned long old_len, unsigned long new_len, | ||
2359 | + unsigned long flags, unsigned long new_addr); | ||
2360 | + | ||
2361 | +asmlinkage unsigned long sparc_mremap(unsigned long addr, | ||
2362 | + unsigned long old_len, unsigned long new_len, | ||
2363 | + unsigned long flags, unsigned long new_addr) | ||
2364 | +{ | ||
2365 | + unsigned long ret = -EINVAL; | ||
2366 | + | ||
2367 | + if (unlikely(sparc_mmap_check(addr, old_len))) | ||
2368 | + goto out; | ||
2369 | + if (unlikely(sparc_mmap_check(new_addr, new_len))) | ||
2370 | + goto out; | ||
2371 | + down_write(&current->mm->mmap_sem); | ||
2372 | + ret = do_mremap(addr, old_len, new_len, flags, new_addr); | ||
2373 | + up_write(&current->mm->mmap_sem); | ||
2374 | +out: | ||
2375 | + return ret; | ||
2376 | +} | ||
2377 | + | ||
2378 | /* we come to here via sys_nis_syscall so it can setup the regs argument */ | ||
2379 | asmlinkage unsigned long | ||
2380 | c_sys_nis_syscall (struct pt_regs *regs) | ||
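One detail worth noting in the sys_mmap2() path above: the user-visible offset unit is fixed at 4 KiB regardless of the kernel's PAGE_SIZE, which is why the page offset is rescaled with pgoff >> (PAGE_SHIFT - 12) before do_mmap2() sees it. A stand-alone sketch of that conversion, using an 8 KiB page size purely as an illustrative value:

/* Sketch of the mmap2 offset convention: callers pass the offset in
 * fixed 4 KiB units, the kernel rescales it to PAGE_SIZE pages. */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 13;   /* illustrative: 8 KiB pages */
	unsigned long pgoff_4k   = 6;    /* caller offset: 6 * 4 KiB  */

	/* same rescaling as sys_mmap2: pgoff >> (PAGE_SHIFT - 12) */
	unsigned long pgoff_pages = pgoff_4k >> (page_shift - 12);

	printf("offset %lu bytes = page offset %lu in %lu-byte pages\n",
	       pgoff_4k << 12, pgoff_pages, 1UL << page_shift);
	return 0;
}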
2381 | diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c | ||
2382 | index cfa0e19..e2d1024 100644 | ||
2383 | --- a/arch/sparc/kernel/sys_sparc_64.c | ||
2384 | +++ b/arch/sparc/kernel/sys_sparc_64.c | ||
2385 | @@ -317,14 +317,10 @@ bottomup: | ||
2386 | unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) | ||
2387 | { | ||
2388 | unsigned long align_goal, addr = -ENOMEM; | ||
2389 | - unsigned long (*get_area)(struct file *, unsigned long, | ||
2390 | - unsigned long, unsigned long, unsigned long); | ||
2391 | - | ||
2392 | - get_area = current->mm->get_unmapped_area; | ||
2393 | |||
2394 | if (flags & MAP_FIXED) { | ||
2395 | /* Ok, don't mess with it. */ | ||
2396 | - return get_area(NULL, orig_addr, len, pgoff, flags); | ||
2397 | + return get_unmapped_area(NULL, orig_addr, len, pgoff, flags); | ||
2398 | } | ||
2399 | flags &= ~MAP_SHARED; | ||
2400 | |||
2401 | @@ -337,7 +333,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u | ||
2402 | align_goal = (64UL * 1024); | ||
2403 | |||
2404 | do { | ||
2405 | - addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); | ||
2406 | + addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); | ||
2407 | if (!(addr & ~PAGE_MASK)) { | ||
2408 | addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); | ||
2409 | break; | ||
2410 | @@ -355,7 +351,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u | ||
2411 | * be obtained. | ||
2412 | */ | ||
2413 | if (addr & ~PAGE_MASK) | ||
2414 | - addr = get_area(NULL, orig_addr, len, pgoff, flags); | ||
2415 | + addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags); | ||
2416 | |||
2417 | return addr; | ||
2418 | } | ||
2419 | @@ -403,6 +399,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | ||
2420 | } | ||
2421 | } | ||
2422 | |||
2423 | +SYSCALL_DEFINE1(sparc_brk, unsigned long, brk) | ||
2424 | +{ | ||
2425 | + /* People could try to be nasty and use ta 0x6d in 32bit programs */ | ||
2426 | + if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32) | ||
2427 | + return current->mm->brk; | ||
2428 | + | ||
2429 | + if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk))) | ||
2430 | + return current->mm->brk; | ||
2431 | + | ||
2432 | + return sys_brk(brk); | ||
2433 | +} | ||
2434 | + | ||
2435 | /* | ||
2436 | * sys_pipe() is the normal C calling standard for creating | ||
2437 | * a pipe. It's not the way unix traditionally does this, though. | ||
2438 | @@ -560,13 +568,23 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | ||
2439 | unsigned long, prot, unsigned long, flags, unsigned long, fd, | ||
2440 | unsigned long, off) | ||
2441 | { | ||
2442 | - unsigned long retval = -EINVAL; | ||
2443 | + struct file * file = NULL; | ||
2444 | + unsigned long retval = -EBADF; | ||
2445 | |||
2446 | - if ((off + PAGE_ALIGN(len)) < off) | ||
2447 | - goto out; | ||
2448 | - if (off & ~PAGE_MASK) | ||
2449 | - goto out; | ||
2450 | - retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
2451 | + if (!(flags & MAP_ANONYMOUS)) { | ||
2452 | + file = fget(fd); | ||
2453 | + if (!file) | ||
2454 | + goto out; | ||
2455 | + } | ||
2456 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
2457 | + len = PAGE_ALIGN(len); | ||
2458 | + | ||
2459 | + down_write(&current->mm->mmap_sem); | ||
2460 | + retval = do_mmap(file, addr, len, prot, flags, off); | ||
2461 | + up_write(&current->mm->mmap_sem); | ||
2462 | + | ||
2463 | + if (file) | ||
2464 | + fput(file); | ||
2465 | out: | ||
2466 | return retval; | ||
2467 | } | ||
2468 | @@ -596,6 +614,12 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, | ||
2469 | |||
2470 | if (test_thread_flag(TIF_32BIT)) | ||
2471 | goto out; | ||
2472 | + if (unlikely(new_len >= VA_EXCLUDE_START)) | ||
2473 | + goto out; | ||
2474 | + if (unlikely(sparc_mmap_check(addr, old_len))) | ||
2475 | + goto out; | ||
2476 | + if (unlikely(sparc_mmap_check(new_addr, new_len))) | ||
2477 | + goto out; | ||
2478 | |||
2479 | down_write(&current->mm->mmap_sem); | ||
2480 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); | ||
2481 | diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h | ||
2482 | index d2f999a..a63c5d2 100644 | ||
2483 | --- a/arch/sparc/kernel/systbls.h | ||
2484 | +++ b/arch/sparc/kernel/systbls.h | ||
2485 | @@ -9,6 +9,7 @@ | ||
2486 | struct new_utsname; | ||
2487 | |||
2488 | extern asmlinkage unsigned long sys_getpagesize(void); | ||
2489 | +extern asmlinkage unsigned long sparc_brk(unsigned long brk); | ||
2490 | extern asmlinkage long sparc_pipe(struct pt_regs *regs); | ||
2491 | extern asmlinkage long sys_ipc(unsigned int call, int first, | ||
2492 | unsigned long second, | ||
2493 | diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S | ||
2494 | index 14f950a..0f1658d 100644 | ||
2495 | --- a/arch/sparc/kernel/systbls_32.S | ||
2496 | +++ b/arch/sparc/kernel/systbls_32.S | ||
2497 | @@ -19,7 +19,7 @@ sys_call_table: | ||
2498 | /*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write | ||
2499 | /*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link | ||
2500 | /*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod | ||
2501 | -/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek | ||
2502 | +/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek | ||
2503 | /*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 | ||
2504 | /*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause | ||
2505 | /*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice | ||
2506 | @@ -67,7 +67,7 @@ sys_call_table: | ||
2507 | /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall | ||
2508 | /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler | ||
2509 | /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep | ||
2510 | -/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl | ||
2511 | +/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl | ||
2512 | /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep | ||
2513 | /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun | ||
2514 | /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy | ||
2515 | diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S | ||
2516 | index f63c871..009825f 100644 | ||
2517 | --- a/arch/sparc/kernel/systbls_64.S | ||
2518 | +++ b/arch/sparc/kernel/systbls_64.S | ||
2519 | @@ -21,7 +21,7 @@ sys_call_table32: | ||
2520 | /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write | ||
2521 | /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link | ||
2522 | /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod | ||
2523 | -/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek | ||
2524 | +/*15*/ .word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek | ||
2525 | /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 | ||
2526 | /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause | ||
2527 | /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice | ||
2528 | @@ -96,7 +96,7 @@ sys_call_table: | ||
2529 | /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write | ||
2530 | /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link | ||
2531 | /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod | ||
2532 | -/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek | ||
2533 | +/*15*/ .word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek | ||
2534 | /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid | ||
2535 | /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall | ||
2536 | /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice | ||
2537 | diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S | ||
2538 | index 24b8b12..7ce9c65 100644 | ||
2539 | --- a/arch/sparc/lib/mcount.S | ||
2540 | +++ b/arch/sparc/lib/mcount.S | ||
2541 | @@ -64,9 +64,8 @@ mcount: | ||
2542 | 2: sethi %hi(softirq_stack), %g3 | ||
2543 | or %g3, %lo(softirq_stack), %g3 | ||
2544 | ldx [%g3 + %g1], %g7 | ||
2545 | - sub %g7, STACK_BIAS, %g7 | ||
2546 | cmp %sp, %g7 | ||
2547 | - bleu,pt %xcc, 3f | ||
2548 | + bleu,pt %xcc, 2f | ||
2549 | sethi %hi(THREAD_SIZE), %g3 | ||
2550 | add %g7, %g3, %g7 | ||
2551 | cmp %sp, %g7 | ||
2552 | @@ -76,7 +75,7 @@ mcount: | ||
2553 | * again, we are already trying to output the stack overflow | ||
2554 | * message. | ||
2555 | */ | ||
2556 | -3: sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough | ||
2557 | + sethi %hi(ovstack), %g7 ! cant move to panic stack fast enough | ||
2558 | or %g7, %lo(ovstack), %g7 | ||
2559 | add %g7, OVSTACKSIZE, %g3 | ||
2560 | sub %g3, STACK_BIAS + 192, %g3 | ||
2561 | diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c | ||
2562 | index cccab85..a4625c7 100644 | ||
2563 | --- a/arch/um/kernel/syscall.c | ||
2564 | +++ b/arch/um/kernel/syscall.c | ||
2565 | @@ -8,7 +8,6 @@ | ||
2566 | #include "linux/mm.h" | ||
2567 | #include "linux/sched.h" | ||
2568 | #include "linux/utsname.h" | ||
2569 | -#include "linux/syscalls.h" | ||
2570 | #include "asm/current.h" | ||
2571 | #include "asm/mman.h" | ||
2572 | #include "asm/uaccess.h" | ||
2573 | @@ -38,6 +37,31 @@ long sys_vfork(void) | ||
2574 | return ret; | ||
2575 | } | ||
2576 | |||
2577 | +/* common code for old and new mmaps */ | ||
2578 | +long sys_mmap2(unsigned long addr, unsigned long len, | ||
2579 | + unsigned long prot, unsigned long flags, | ||
2580 | + unsigned long fd, unsigned long pgoff) | ||
2581 | +{ | ||
2582 | + long error = -EBADF; | ||
2583 | + struct file * file = NULL; | ||
2584 | + | ||
2585 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
2586 | + if (!(flags & MAP_ANONYMOUS)) { | ||
2587 | + file = fget(fd); | ||
2588 | + if (!file) | ||
2589 | + goto out; | ||
2590 | + } | ||
2591 | + | ||
2592 | + down_write(&current->mm->mmap_sem); | ||
2593 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
2594 | + up_write(&current->mm->mmap_sem); | ||
2595 | + | ||
2596 | + if (file) | ||
2597 | + fput(file); | ||
2598 | + out: | ||
2599 | + return error; | ||
2600 | +} | ||
2601 | + | ||
2602 | long old_mmap(unsigned long addr, unsigned long len, | ||
2603 | unsigned long prot, unsigned long flags, | ||
2604 | unsigned long fd, unsigned long offset) | ||
2605 | @@ -46,7 +70,7 @@ long old_mmap(unsigned long addr, unsigned long len, | ||
2606 | if (offset & ~PAGE_MASK) | ||
2607 | goto out; | ||
2608 | |||
2609 | - err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
2610 | + err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); | ||
2611 | out: | ||
2612 | return err; | ||
2613 | } | ||
2614 | diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h | ||
2615 | index e778767..9056981 100644 | ||
2616 | --- a/arch/um/sys-i386/shared/sysdep/syscalls.h | ||
2617 | +++ b/arch/um/sys-i386/shared/sysdep/syscalls.h | ||
2618 | @@ -20,3 +20,7 @@ extern syscall_handler_t *sys_call_table[]; | ||
2619 | #define EXECUTE_SYSCALL(syscall, regs) \ | ||
2620 | ((long (*)(struct syscall_args)) \ | ||
2621 | (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs)) | ||
2622 | + | ||
2623 | +extern long sys_mmap2(unsigned long addr, unsigned long len, | ||
2624 | + unsigned long prot, unsigned long flags, | ||
2625 | + unsigned long fd, unsigned long pgoff); | ||
2626 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig | ||
2627 | index 4fdb669..e2cd95e 100644 | ||
2628 | --- a/arch/x86/Kconfig | ||
2629 | +++ b/arch/x86/Kconfig | ||
2630 | @@ -984,6 +984,12 @@ config X86_CPUID | ||
2631 | with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to | ||
2632 | /dev/cpu/31/cpuid. | ||
2633 | |||
2634 | +config X86_CPU_DEBUG | ||
2635 | + tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support" | ||
2636 | + ---help--- | ||
2637 | + If you select this option, this will provide various x86 CPUs | ||
2638 | + information through debugfs. | ||
2639 | + | ||
2640 | choice | ||
2641 | prompt "High Memory Support" | ||
2642 | default HIGHMEM4G if !X86_NUMAQ | ||
2643 | @@ -2086,3 +2092,5 @@ source "crypto/Kconfig" | ||
2644 | source "arch/x86/kvm/Kconfig" | ||
2645 | |||
2646 | source "lib/Kconfig" | ||
2647 | + | ||
2648 | +source "litmus/Kconfig" | ||
2649 | diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu | ||
2650 | index f2824fb..2649840 100644 | ||
2651 | --- a/arch/x86/Kconfig.cpu | ||
2652 | +++ b/arch/x86/Kconfig.cpu | ||
2653 | @@ -400,7 +400,7 @@ config X86_TSC | ||
2654 | |||
2655 | config X86_CMPXCHG64 | ||
2656 | def_bool y | ||
2657 | - depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM | ||
2658 | + depends on !M386 && !M486 | ||
2659 | |||
2660 | # this should be set for all -march=.. options where the compiler | ||
2661 | # generates cmov. | ||
2662 | diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu | ||
2663 | index 1937226..30e9a26 100644 | ||
2664 | --- a/arch/x86/Makefile_32.cpu | ||
2665 | +++ b/arch/x86/Makefile_32.cpu | ||
2666 | @@ -46,13 +46,6 @@ cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx | ||
2667 | # cpu entries | ||
2668 | cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686)) | ||
2669 | |||
2670 | -# Work around the pentium-mmx code generator madness of gcc4.4.x which | ||
2671 | -# does stack alignment by generating horrible code _before_ the mcount | ||
2672 | -# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph | ||
2673 | -# tracer assumptions. For i686, generic, core2 this is set by the | ||
2674 | -# compiler anyway | ||
2675 | -cflags-$(CONFIG_FUNCTION_GRAPH_TRACER) += $(call cc-option,-maccumulate-outgoing-args) | ||
2676 | - | ||
2677 | # Bug fix for binutils: this option is required in order to keep | ||
2678 | # binutils from generating NOPL instructions against our will. | ||
2679 | ifneq ($(CONFIG_X86_P6_NOP),y) | ||
2680 | diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c | ||
2681 | index f9f4724..2a4d073 100644 | ||
2682 | --- a/arch/x86/ia32/ia32_aout.c | ||
2683 | +++ b/arch/x86/ia32/ia32_aout.c | ||
2684 | @@ -308,16 +308,15 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) | ||
2685 | if (retval) | ||
2686 | return retval; | ||
2687 | |||
2688 | - /* OK, This is the point of no return */ | ||
2689 | - set_personality(PER_LINUX); | ||
2690 | - set_thread_flag(TIF_IA32); | ||
2691 | - | ||
2692 | - setup_new_exec(bprm); | ||
2693 | - | ||
2694 | regs->cs = __USER32_CS; | ||
2695 | regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r12 = | ||
2696 | regs->r13 = regs->r14 = regs->r15 = 0; | ||
2697 | |||
2698 | + /* OK, This is the point of no return */ | ||
2699 | + set_personality(PER_LINUX); | ||
2700 | + set_thread_flag(TIF_IA32); | ||
2701 | + clear_thread_flag(TIF_ABI_PENDING); | ||
2702 | + | ||
2703 | current->mm->end_code = ex.a_text + | ||
2704 | (current->mm->start_code = N_TXTADDR(ex)); | ||
2705 | current->mm->end_data = ex.a_data + | ||
2706 | diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S | ||
2707 | index 5294d84..581b056 100644 | ||
2708 | --- a/arch/x86/ia32/ia32entry.S | ||
2709 | +++ b/arch/x86/ia32/ia32entry.S | ||
2710 | @@ -696,7 +696,7 @@ ia32_sys_call_table: | ||
2711 | .quad quiet_ni_syscall /* streams2 */ | ||
2712 | .quad stub32_vfork /* 190 */ | ||
2713 | .quad compat_sys_getrlimit | ||
2714 | - .quad sys_mmap_pgoff | ||
2715 | + .quad sys32_mmap2 | ||
2716 | .quad sys32_truncate64 | ||
2717 | .quad sys32_ftruncate64 | ||
2718 | .quad sys32_stat64 /* 195 */ | ||
2719 | diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c | ||
2720 | index 016218c..9f55271 100644 | ||
2721 | --- a/arch/x86/ia32/sys_ia32.c | ||
2722 | +++ b/arch/x86/ia32/sys_ia32.c | ||
2723 | @@ -155,6 +155,9 @@ struct mmap_arg_struct { | ||
2724 | asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) | ||
2725 | { | ||
2726 | struct mmap_arg_struct a; | ||
2727 | + struct file *file = NULL; | ||
2728 | + unsigned long retval; | ||
2729 | + struct mm_struct *mm ; | ||
2730 | |||
2731 | if (copy_from_user(&a, arg, sizeof(a))) | ||
2732 | return -EFAULT; | ||
2733 | @@ -162,8 +165,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg) | ||
2734 | if (a.offset & ~PAGE_MASK) | ||
2735 | return -EINVAL; | ||
2736 | |||
2737 | - return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, | ||
2738 | + if (!(a.flags & MAP_ANONYMOUS)) { | ||
2739 | + file = fget(a.fd); | ||
2740 | + if (!file) | ||
2741 | + return -EBADF; | ||
2742 | + } | ||
2743 | + | ||
2744 | + mm = current->mm; | ||
2745 | + down_write(&mm->mmap_sem); | ||
2746 | + retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, | ||
2747 | a.offset>>PAGE_SHIFT); | ||
2748 | + if (file) | ||
2749 | + fput(file); | ||
2750 | + | ||
2751 | + up_write(&mm->mmap_sem); | ||
2752 | + | ||
2753 | + return retval; | ||
2754 | } | ||
2755 | |||
2756 | asmlinkage long sys32_mprotect(unsigned long start, size_t len, | ||
2757 | @@ -522,6 +539,30 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, | ||
2758 | return ret; | ||
2759 | } | ||
2760 | |||
2761 | +asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, | ||
2762 | + unsigned long prot, unsigned long flags, | ||
2763 | + unsigned long fd, unsigned long pgoff) | ||
2764 | +{ | ||
2765 | + struct mm_struct *mm = current->mm; | ||
2766 | + unsigned long error; | ||
2767 | + struct file *file = NULL; | ||
2768 | + | ||
2769 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
2770 | + if (!(flags & MAP_ANONYMOUS)) { | ||
2771 | + file = fget(fd); | ||
2772 | + if (!file) | ||
2773 | + return -EBADF; | ||
2774 | + } | ||
2775 | + | ||
2776 | + down_write(&mm->mmap_sem); | ||
2777 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
2778 | + up_write(&mm->mmap_sem); | ||
2779 | + | ||
2780 | + if (file) | ||
2781 | + fput(file); | ||
2782 | + return error; | ||
2783 | +} | ||
2784 | + | ||
2785 | asmlinkage long sys32_olduname(struct oldold_utsname __user *name) | ||
2786 | { | ||
2787 | char *arch = "x86_64"; | ||
2788 | diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h | ||
2789 | index 18aa3f8..4b18089 100644 | ||
2790 | --- a/arch/x86/include/asm/amd_iommu.h | ||
2791 | +++ b/arch/x86/include/asm/amd_iommu.h | ||
2792 | @@ -32,7 +32,6 @@ extern void amd_iommu_flush_all_domains(void); | ||
2793 | extern void amd_iommu_flush_all_devices(void); | ||
2794 | extern void amd_iommu_shutdown(void); | ||
2795 | extern void amd_iommu_apply_erratum_63(u16 devid); | ||
2796 | -extern void amd_iommu_init_api(void); | ||
2797 | #else | ||
2798 | static inline int amd_iommu_init(void) { return -ENODEV; } | ||
2799 | static inline void amd_iommu_detect(void) { } | ||
2800 | diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h | ||
2801 | new file mode 100644 | ||
2802 | index 0000000..d96c1ee | ||
2803 | --- /dev/null | ||
2804 | +++ b/arch/x86/include/asm/cpu_debug.h | ||
2805 | @@ -0,0 +1,127 @@ | ||
2806 | +#ifndef _ASM_X86_CPU_DEBUG_H | ||
2807 | +#define _ASM_X86_CPU_DEBUG_H | ||
2808 | + | ||
2809 | +/* | ||
2810 | + * CPU x86 architecture debug | ||
2811 | + * | ||
2812 | + * Copyright(C) 2009 Jaswinder Singh Rajput | ||
2813 | + */ | ||
2814 | + | ||
2815 | +/* Register flags */ | ||
2816 | +enum cpu_debug_bit { | ||
2817 | +/* Model Specific Registers (MSRs) */ | ||
2818 | + CPU_MC_BIT, /* Machine Check */ | ||
2819 | + CPU_MONITOR_BIT, /* Monitor */ | ||
2820 | + CPU_TIME_BIT, /* Time */ | ||
2821 | + CPU_PMC_BIT, /* Performance Monitor */ | ||
2822 | + CPU_PLATFORM_BIT, /* Platform */ | ||
2823 | + CPU_APIC_BIT, /* APIC */ | ||
2824 | + CPU_POWERON_BIT, /* Power-on */ | ||
2825 | + CPU_CONTROL_BIT, /* Control */ | ||
2826 | + CPU_FEATURES_BIT, /* Features control */ | ||
2827 | + CPU_LBRANCH_BIT, /* Last Branch */ | ||
2828 | + CPU_BIOS_BIT, /* BIOS */ | ||
2829 | + CPU_FREQ_BIT, /* Frequency */ | ||
2830 | + CPU_MTTR_BIT, /* MTRR */ | ||
2831 | + CPU_PERF_BIT, /* Performance */ | ||
2832 | + CPU_CACHE_BIT, /* Cache */ | ||
2833 | + CPU_SYSENTER_BIT, /* Sysenter */ | ||
2834 | + CPU_THERM_BIT, /* Thermal */ | ||
2835 | + CPU_MISC_BIT, /* Miscellaneous */ | ||
2836 | + CPU_DEBUG_BIT, /* Debug */ | ||
2837 | + CPU_PAT_BIT, /* PAT */ | ||
2838 | + CPU_VMX_BIT, /* VMX */ | ||
2839 | + CPU_CALL_BIT, /* System Call */ | ||
2840 | + CPU_BASE_BIT, /* BASE Address */ | ||
2841 | + CPU_VER_BIT, /* Version ID */ | ||
2842 | + CPU_CONF_BIT, /* Configuration */ | ||
2843 | + CPU_SMM_BIT, /* System mgmt mode */ | ||
2844 | + CPU_SVM_BIT, /*Secure Virtual Machine*/ | ||
2845 | + CPU_OSVM_BIT, /* OS-Visible Workaround*/ | ||
2846 | +/* Standard Registers */ | ||
2847 | + CPU_TSS_BIT, /* Task Stack Segment */ | ||
2848 | + CPU_CR_BIT, /* Control Registers */ | ||
2849 | + CPU_DT_BIT, /* Descriptor Table */ | ||
2850 | +/* End of Registers flags */ | ||
2851 | + CPU_REG_ALL_BIT, /* Select all Registers */ | ||
2852 | +}; | ||
2853 | + | ||
2854 | +#define CPU_REG_ALL (~0) /* Select all Registers */ | ||
2855 | + | ||
2856 | +#define CPU_MC (1 << CPU_MC_BIT) | ||
2857 | +#define CPU_MONITOR (1 << CPU_MONITOR_BIT) | ||
2858 | +#define CPU_TIME (1 << CPU_TIME_BIT) | ||
2859 | +#define CPU_PMC (1 << CPU_PMC_BIT) | ||
2860 | +#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT) | ||
2861 | +#define CPU_APIC (1 << CPU_APIC_BIT) | ||
2862 | +#define CPU_POWERON (1 << CPU_POWERON_BIT) | ||
2863 | +#define CPU_CONTROL (1 << CPU_CONTROL_BIT) | ||
2864 | +#define CPU_FEATURES (1 << CPU_FEATURES_BIT) | ||
2865 | +#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT) | ||
2866 | +#define CPU_BIOS (1 << CPU_BIOS_BIT) | ||
2867 | +#define CPU_FREQ (1 << CPU_FREQ_BIT) | ||
2868 | +#define CPU_MTRR (1 << CPU_MTTR_BIT) | ||
2869 | +#define CPU_PERF (1 << CPU_PERF_BIT) | ||
2870 | +#define CPU_CACHE (1 << CPU_CACHE_BIT) | ||
2871 | +#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT) | ||
2872 | +#define CPU_THERM (1 << CPU_THERM_BIT) | ||
2873 | +#define CPU_MISC (1 << CPU_MISC_BIT) | ||
2874 | +#define CPU_DEBUG (1 << CPU_DEBUG_BIT) | ||
2875 | +#define CPU_PAT (1 << CPU_PAT_BIT) | ||
2876 | +#define CPU_VMX (1 << CPU_VMX_BIT) | ||
2877 | +#define CPU_CALL (1 << CPU_CALL_BIT) | ||
2878 | +#define CPU_BASE (1 << CPU_BASE_BIT) | ||
2879 | +#define CPU_VER (1 << CPU_VER_BIT) | ||
2880 | +#define CPU_CONF (1 << CPU_CONF_BIT) | ||
2881 | +#define CPU_SMM (1 << CPU_SMM_BIT) | ||
2882 | +#define CPU_SVM (1 << CPU_SVM_BIT) | ||
2883 | +#define CPU_OSVM (1 << CPU_OSVM_BIT) | ||
2884 | +#define CPU_TSS (1 << CPU_TSS_BIT) | ||
2885 | +#define CPU_CR (1 << CPU_CR_BIT) | ||
2886 | +#define CPU_DT (1 << CPU_DT_BIT) | ||
2887 | + | ||
2888 | +/* Register file flags */ | ||
2889 | +enum cpu_file_bit { | ||
2890 | + CPU_INDEX_BIT, /* index */ | ||
2891 | + CPU_VALUE_BIT, /* value */ | ||
2892 | +}; | ||
2893 | + | ||
2894 | +#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT) | ||
2895 | + | ||
2896 | +#define MAX_CPU_FILES 512 | ||
2897 | + | ||
2898 | +struct cpu_private { | ||
2899 | + unsigned cpu; | ||
2900 | + unsigned type; | ||
2901 | + unsigned reg; | ||
2902 | + unsigned file; | ||
2903 | +}; | ||
2904 | + | ||
2905 | +struct cpu_debug_base { | ||
2906 | + char *name; /* Register name */ | ||
2907 | + unsigned flag; /* Register flag */ | ||
2908 | + unsigned write; /* Register write flag */ | ||
2909 | +}; | ||
2910 | + | ||
2911 | +/* | ||
2912 | + * Currently it looks similar to cpu_debug_base but once we add more files | ||
2913 | + * cpu_file_base will go in different direction | ||
2914 | + */ | ||
2915 | +struct cpu_file_base { | ||
2916 | + char *name; /* Register file name */ | ||
2917 | + unsigned flag; /* Register file flag */ | ||
2918 | + unsigned write; /* Register write flag */ | ||
2919 | +}; | ||
2920 | + | ||
2921 | +struct cpu_cpuX_base { | ||
2922 | + struct dentry *dentry; /* Register dentry */ | ||
2923 | + int init; /* Register index file */ | ||
2924 | +}; | ||
2925 | + | ||
2926 | +struct cpu_debug_range { | ||
2927 | + unsigned min; /* Register range min */ | ||
2928 | + unsigned max; /* Register range max */ | ||
2929 | + unsigned flag; /* Supported flags */ | ||
2930 | +}; | ||
2931 | + | ||
2932 | +#endif /* _ASM_X86_CPU_DEBUG_H */ | ||
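The CPU_* macros in the new cpu_debug.h are single-bit masks derived from cpu_debug_bit, so a register range can advertise several categories at once and a consumer just tests its own mask against the range's flags. A stand-alone sketch of that usage, reproducing only a few of the flags and using a made-up MSR range for illustration:

/* Sketch of the cpu_debug.h flag convention; the range values are made up. */
#include <stdio.h>

enum { CPU_MC_BIT, CPU_MONITOR_BIT, CPU_TIME_BIT, CPU_PMC_BIT };

#define CPU_MC		(1 << CPU_MC_BIT)
#define CPU_MONITOR	(1 << CPU_MONITOR_BIT)
#define CPU_TIME	(1 << CPU_TIME_BIT)
#define CPU_PMC		(1 << CPU_PMC_BIT)

struct cpu_debug_range { unsigned min, max, flag; };

int main(void)
{
	/* hypothetical MSR range reported by both MC and PMC dumps */
	struct cpu_debug_range r = { 0x400, 0x47f, CPU_MC | CPU_PMC };
	unsigned wanted = CPU_PMC;

	if (r.flag & wanted)
		printf("range %#x-%#x selected\n", r.min, r.max);
	return 0;
}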
2933 | diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h | ||
2934 | index 8ac9d9a..456a304 100644 | ||
2935 | --- a/arch/x86/include/asm/elf.h | ||
2936 | +++ b/arch/x86/include/asm/elf.h | ||
2937 | @@ -197,8 +197,14 @@ do { \ | ||
2938 | set_fs(USER_DS); \ | ||
2939 | } while (0) | ||
2940 | |||
2941 | -void set_personality_ia32(void); | ||
2942 | -#define COMPAT_SET_PERSONALITY(ex) set_personality_ia32() | ||
2943 | +#define COMPAT_SET_PERSONALITY(ex) \ | ||
2944 | +do { \ | ||
2945 | + if (test_thread_flag(TIF_IA32)) \ | ||
2946 | + clear_thread_flag(TIF_ABI_PENDING); \ | ||
2947 | + else \ | ||
2948 | + set_thread_flag(TIF_ABI_PENDING); \ | ||
2949 | + current->personality |= force_personality32; \ | ||
2950 | +} while (0) | ||
2951 | |||
2952 | #define COMPAT_ELF_PLATFORM ("i686") | ||
2953 | |||
2954 | diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h | ||
2955 | index f5693c8..19e22e3 100644 | ||
2956 | --- a/arch/x86/include/asm/entry_arch.h | ||
2957 | +++ b/arch/x86/include/asm/entry_arch.h | ||
2958 | @@ -13,6 +13,7 @@ | ||
2959 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | ||
2960 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | ||
2961 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) | ||
2962 | +BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR) | ||
2963 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) | ||
2964 | BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) | ||
2965 | |||
2966 | diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h | ||
2967 | new file mode 100644 | ||
2968 | index 0000000..4fd3163 | ||
2969 | --- /dev/null | ||
2970 | +++ b/arch/x86/include/asm/feather_trace.h | ||
2971 | @@ -0,0 +1,17 @@ | ||
2972 | +#ifndef _ARCH_FEATHER_TRACE_H | ||
2973 | +#define _ARCH_FEATHER_TRACE_H | ||
2974 | + | ||
2975 | +#include <asm/msr.h> | ||
2976 | + | ||
2977 | +static inline unsigned long long ft_timestamp(void) | ||
2978 | +{ | ||
2979 | + return __native_read_tsc(); | ||
2980 | +} | ||
2981 | + | ||
2982 | +#ifdef CONFIG_X86_32 | ||
2983 | +#include "feather_trace_32.h" | ||
2984 | +#else | ||
2985 | +#include "feather_trace_64.h" | ||
2986 | +#endif | ||
2987 | + | ||
2988 | +#endif | ||
2989 | diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h | ||
2990 | new file mode 100644 | ||
2991 | index 0000000..192cd09 | ||
2992 | --- /dev/null | ||
2993 | +++ b/arch/x86/include/asm/feather_trace_32.h | ||
2994 | @@ -0,0 +1,80 @@ | ||
2995 | +/* Do not directly include this file. Include feather_trace.h instead */ | ||
2996 | + | ||
2997 | +#define feather_callback __attribute__((regparm(0))) | ||
2998 | + | ||
2999 | +/* | ||
3000 | + * make the compiler reload any register that is not saved in | ||
3001 | + * a cdecl function call | ||
3002 | + */ | ||
3003 | +#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx" | ||
3004 | + | ||
3005 | +#define ft_event(id, callback) \ | ||
3006 | + __asm__ __volatile__( \ | ||
3007 | + "1: jmp 2f \n\t" \ | ||
3008 | + " call " #callback " \n\t" \ | ||
3009 | + ".section __event_table, \"aw\" \n\t" \ | ||
3010 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
3011 | + ".previous \n\t" \ | ||
3012 | + "2: \n\t" \ | ||
3013 | + : : : CLOBBER_LIST) | ||
3014 | + | ||
3015 | +#define ft_event0(id, callback) \ | ||
3016 | + __asm__ __volatile__( \ | ||
3017 | + "1: jmp 2f \n\t" \ | ||
3018 | + " subl $4, %%esp \n\t" \ | ||
3019 | + " movl $" #id ", (%%esp) \n\t" \ | ||
3020 | + " call " #callback " \n\t" \ | ||
3021 | + " addl $4, %%esp \n\t" \ | ||
3022 | + ".section __event_table, \"aw\" \n\t" \ | ||
3023 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
3024 | + ".previous \n\t" \ | ||
3025 | + "2: \n\t" \ | ||
3026 | + : : : CLOBBER_LIST) | ||
3027 | + | ||
3028 | +#define ft_event1(id, callback, param) \ | ||
3029 | + __asm__ __volatile__( \ | ||
3030 | + "1: jmp 2f \n\t" \ | ||
3031 | + " subl $8, %%esp \n\t" \ | ||
3032 | + " movl %0, 4(%%esp) \n\t" \ | ||
3033 | + " movl $" #id ", (%%esp) \n\t" \ | ||
3034 | + " call " #callback " \n\t" \ | ||
3035 | + " addl $8, %%esp \n\t" \ | ||
3036 | + ".section __event_table, \"aw\" \n\t" \ | ||
3037 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
3038 | + ".previous \n\t" \ | ||
3039 | + "2: \n\t" \ | ||
3040 | + : : "r" (param) : CLOBBER_LIST) | ||
3041 | + | ||
3042 | +#define ft_event2(id, callback, param, param2) \ | ||
3043 | + __asm__ __volatile__( \ | ||
3044 | + "1: jmp 2f \n\t" \ | ||
3045 | + " subl $12, %%esp \n\t" \ | ||
3046 | + " movl %1, 8(%%esp) \n\t" \ | ||
3047 | + " movl %0, 4(%%esp) \n\t" \ | ||
3048 | + " movl $" #id ", (%%esp) \n\t" \ | ||
3049 | + " call " #callback " \n\t" \ | ||
3050 | + " addl $12, %%esp \n\t" \ | ||
3051 | + ".section __event_table, \"aw\" \n\t" \ | ||
3052 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
3053 | + ".previous \n\t" \ | ||
3054 | + "2: \n\t" \ | ||
3055 | + : : "r" (param), "r" (param2) : CLOBBER_LIST) | ||
3056 | + | ||
3057 | + | ||
3058 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
3059 | + __asm__ __volatile__( \ | ||
3060 | + "1: jmp 2f \n\t" \ | ||
3061 | + " subl $16, %%esp \n\t" \ | ||
3062 | + " movl %2, 12(%%esp) \n\t" \ | ||
3063 | + " movl %1, 8(%%esp) \n\t" \ | ||
3064 | + " movl %0, 4(%%esp) \n\t" \ | ||
3065 | + " movl $" #id ", (%%esp) \n\t" \ | ||
3066 | + " call " #callback " \n\t" \ | ||
3067 | + " addl $16, %%esp \n\t" \ | ||
3068 | + ".section __event_table, \"aw\" \n\t" \ | ||
3069 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
3070 | + ".previous \n\t" \ | ||
3071 | + "2: \n\t" \ | ||
3072 | + : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) | ||
3073 | + | ||
3074 | +#define __ARCH_HAS_FEATHER_TRACE | ||
3075 | diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h | ||
3076 | new file mode 100644 | ||
3077 | index 0000000..1cffa4e | ||
3078 | --- /dev/null | ||
3079 | +++ b/arch/x86/include/asm/feather_trace_64.h | ||
3080 | @@ -0,0 +1,69 @@ | ||
3081 | +/* Do not directly include this file. Include feather_trace.h instead */ | ||
3082 | + | ||
3083 | +/* regparm is the default on x86_64 */ | ||
3084 | +#define feather_callback | ||
3085 | + | ||
3086 | +# define _EVENT_TABLE(id,from,to) \ | ||
3087 | + ".section __event_table, \"aw\"\n\t" \ | ||
3088 | + ".balign 8\n\t" \ | ||
3089 | + ".quad " #id ", 0, " #from ", " #to " \n\t" \ | ||
3090 | + ".previous \n\t" | ||
3091 | + | ||
3092 | +/* | ||
3093 | + * x86_64 callee only owns rbp, rbx, r12 -> r15 | ||
3094 | + * the called can freely modify the others | ||
3095 | + */ | ||
3096 | +#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \ | ||
3097 | + "r8", "r9", "r10", "r11", "rax" | ||
3098 | + | ||
3099 | +#define ft_event(id, callback) \ | ||
3100 | + __asm__ __volatile__( \ | ||
3101 | + "1: jmp 2f \n\t" \ | ||
3102 | + " call " #callback " \n\t" \ | ||
3103 | + _EVENT_TABLE(id,1b,2f) \ | ||
3104 | + "2: \n\t" \ | ||
3105 | + : : : CLOBBER_LIST) | ||
3106 | + | ||
3107 | +#define ft_event0(id, callback) \ | ||
3108 | + __asm__ __volatile__( \ | ||
3109 | + "1: jmp 2f \n\t" \ | ||
3110 | + " movq $" #id ", %%rdi \n\t" \ | ||
3111 | + " call " #callback " \n\t" \ | ||
3112 | + _EVENT_TABLE(id,1b,2f) \ | ||
3113 | + "2: \n\t" \ | ||
3114 | + : : : CLOBBER_LIST) | ||
3115 | + | ||
3116 | +#define ft_event1(id, callback, param) \ | ||
3117 | + __asm__ __volatile__( \ | ||
3118 | + "1: jmp 2f \n\t" \ | ||
3119 | + " movq %0, %%rsi \n\t" \ | ||
3120 | + " movq $" #id ", %%rdi \n\t" \ | ||
3121 | + " call " #callback " \n\t" \ | ||
3122 | + _EVENT_TABLE(id,1b,2f) \ | ||
3123 | + "2: \n\t" \ | ||
3124 | + : : "r" (param) : CLOBBER_LIST) | ||
3125 | + | ||
3126 | +#define ft_event2(id, callback, param, param2) \ | ||
3127 | + __asm__ __volatile__( \ | ||
3128 | + "1: jmp 2f \n\t" \ | ||
3129 | + " movq %1, %%rdx \n\t" \ | ||
3130 | + " movq %0, %%rsi \n\t" \ | ||
3131 | + " movq $" #id ", %%rdi \n\t" \ | ||
3132 | + " call " #callback " \n\t" \ | ||
3133 | + _EVENT_TABLE(id,1b,2f) \ | ||
3134 | + "2: \n\t" \ | ||
3135 | + : : "r" (param), "r" (param2) : CLOBBER_LIST) | ||
3136 | + | ||
3137 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
3138 | + __asm__ __volatile__( \ | ||
3139 | + "1: jmp 2f \n\t" \ | ||
3140 | + " movq %2, %%rcx \n\t" \ | ||
3141 | + " movq %1, %%rdx \n\t" \ | ||
3142 | + " movq %0, %%rsi \n\t" \ | ||
3143 | + " movq $" #id ", %%rdi \n\t" \ | ||
3144 | + " call " #callback " \n\t" \ | ||
3145 | + _EVENT_TABLE(id,1b,2f) \ | ||
3146 | + "2: \n\t" \ | ||
3147 | + : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) | ||
3148 | + | ||
3149 | +#define __ARCH_HAS_FEATHER_TRACE | ||
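Both feather_trace headers expand ft_event*() into a jump that initially skips the call ("1: jmp 2f") plus a __event_table record describing the patch site, so a tracing runtime can later rewrite the jump and have the callback invoked with the event id as its first argument. The following plain-C approximation only illustrates that idea: it tests an enabled flag where the real macros patch the jump instruction, and the event id and callback are invented for the example:

/* Plain-C approximation of the ft_event*() pattern; not the real runtime. */
#include <stdio.h>

typedef void (*ft_callback_t)(unsigned long id, unsigned long param);

struct ft_event_site {
	unsigned long id;
	int enabled;                  /* stands in for the patched jmp */
	ft_callback_t callback;
};

static void save_timestamp(unsigned long id, unsigned long param)
{
	printf("event %lu fired, param=%lu\n", id, param);
}

static struct ft_event_site event_table[] = {
	{ 100, 0, save_timestamp },   /* invented event id */
};

static void ft_event1_sketch(struct ft_event_site *site, unsigned long param)
{
	if (site->enabled)            /* disabled sites fall straight through */
		site->callback(site->id, param);
}

int main(void)
{
	ft_event1_sketch(&event_table[0], 42);   /* disabled: no output    */
	event_table[0].enabled = 1;              /* "patch" the event site */
	ft_event1_sketch(&event_table[0], 42);   /* now the callback fires */
	return 0;
}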
3150 | diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h | ||
3151 | index 3251e23..1c22cb0 100644 | ||
3152 | --- a/arch/x86/include/asm/hpet.h | ||
3153 | +++ b/arch/x86/include/asm/hpet.h | ||
3154 | @@ -66,7 +66,6 @@ | ||
3155 | extern unsigned long hpet_address; | ||
3156 | extern unsigned long force_hpet_address; | ||
3157 | extern int hpet_force_user; | ||
3158 | -extern u8 hpet_msi_disable; | ||
3159 | extern int is_hpet_enabled(void); | ||
3160 | extern int hpet_enable(void); | ||
3161 | extern void hpet_disable(void); | ||
3162 | diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h | ||
3163 | index ba180d9..68900e7 100644 | ||
3164 | --- a/arch/x86/include/asm/hw_irq.h | ||
3165 | +++ b/arch/x86/include/asm/hw_irq.h | ||
3166 | @@ -53,6 +53,8 @@ extern void threshold_interrupt(void); | ||
3167 | extern void call_function_interrupt(void); | ||
3168 | extern void call_function_single_interrupt(void); | ||
3169 | |||
3170 | +extern void pull_timers_interrupt(void); | ||
3171 | + | ||
3172 | /* PIC specific functions */ | ||
3173 | extern void disable_8259A_irq(unsigned int irq); | ||
3174 | extern void enable_8259A_irq(unsigned int irq); | ||
3175 | @@ -110,6 +112,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); | ||
3176 | extern void smp_reschedule_interrupt(struct pt_regs *); | ||
3177 | extern void smp_call_function_interrupt(struct pt_regs *); | ||
3178 | extern void smp_call_function_single_interrupt(struct pt_regs *); | ||
3179 | +extern void smp_pull_timers_interrupt(struct pt_regs *); | ||
3180 | #ifdef CONFIG_X86_32 | ||
3181 | extern void smp_invalidate_interrupt(struct pt_regs *); | ||
3182 | #else | ||
3183 | diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h | ||
3184 | index 6e90a04..28c3bf3 100644 | ||
3185 | --- a/arch/x86/include/asm/irq_vectors.h | ||
3186 | +++ b/arch/x86/include/asm/irq_vectors.h | ||
3187 | @@ -104,6 +104,11 @@ | ||
3188 | #define LOCAL_TIMER_VECTOR 0xef | ||
3189 | |||
3190 | /* | ||
3191 | + * LITMUS^RT pull timers IRQ vector | ||
3192 | + */ | ||
3193 | +#define PULL_TIMERS_VECTOR 0xee | ||
3194 | + | ||
3195 | +/* | ||
3196 | * Generic system vector for platform specific use | ||
3197 | */ | ||
3198 | #define GENERIC_INTERRUPT_VECTOR 0xed | ||
3199 | @@ -113,7 +118,7 @@ | ||
3200 | */ | ||
3201 | #define LOCAL_PENDING_VECTOR 0xec | ||
3202 | |||
3203 | -#define UV_BAU_MESSAGE 0xea | ||
3204 | +#define UV_BAU_MESSAGE 0xec | ||
3205 | |||
3206 | /* | ||
3207 | * Self IPI vector for machine checks | ||
3208 | diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h | ||
3209 | index 7c18e12..b7ed2c4 100644 | ||
3210 | --- a/arch/x86/include/asm/kvm_emulate.h | ||
3211 | +++ b/arch/x86/include/asm/kvm_emulate.h | ||
3212 | @@ -129,7 +129,7 @@ struct decode_cache { | ||
3213 | u8 seg_override; | ||
3214 | unsigned int d; | ||
3215 | unsigned long regs[NR_VCPU_REGS]; | ||
3216 | - unsigned long eip, eip_orig; | ||
3217 | + unsigned long eip; | ||
3218 | /* modrm */ | ||
3219 | u8 modrm; | ||
3220 | u8 modrm_mod; | ||
3221 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h | ||
3222 | index d759a1f..d838922 100644 | ||
3223 | --- a/arch/x86/include/asm/kvm_host.h | ||
3224 | +++ b/arch/x86/include/asm/kvm_host.h | ||
3225 | @@ -412,7 +412,6 @@ struct kvm_arch{ | ||
3226 | unsigned long irq_sources_bitmap; | ||
3227 | unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; | ||
3228 | u64 vm_init_tsc; | ||
3229 | - s64 kvmclock_offset; | ||
3230 | }; | ||
3231 | |||
3232 | struct kvm_vm_stat { | ||
3233 | diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h | ||
3234 | index a479023..f1363b7 100644 | ||
3235 | --- a/arch/x86/include/asm/mce.h | ||
3236 | +++ b/arch/x86/include/asm/mce.h | ||
3237 | @@ -214,11 +214,5 @@ void intel_init_thermal(struct cpuinfo_x86 *c); | ||
3238 | |||
3239 | void mce_log_therm_throt_event(__u64 status); | ||
3240 | |||
3241 | -#ifdef CONFIG_X86_THERMAL_VECTOR | ||
3242 | -extern void mcheck_intel_therm_init(void); | ||
3243 | -#else | ||
3244 | -static inline void mcheck_intel_therm_init(void) { } | ||
3245 | -#endif | ||
3246 | - | ||
3247 | #endif /* __KERNEL__ */ | ||
3248 | #endif /* _ASM_X86_MCE_H */ | ||
3249 | diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h | ||
3250 | index 0e3e728..7e2b6ba 100644 | ||
3251 | --- a/arch/x86/include/asm/msr.h | ||
3252 | +++ b/arch/x86/include/asm/msr.h | ||
3253 | @@ -27,18 +27,6 @@ struct msr { | ||
3254 | }; | ||
3255 | }; | ||
3256 | |||
3257 | -struct msr_info { | ||
3258 | - u32 msr_no; | ||
3259 | - struct msr reg; | ||
3260 | - struct msr *msrs; | ||
3261 | - int err; | ||
3262 | -}; | ||
3263 | - | ||
3264 | -struct msr_regs_info { | ||
3265 | - u32 *regs; | ||
3266 | - int err; | ||
3267 | -}; | ||
3268 | - | ||
3269 | static inline unsigned long long native_read_tscp(unsigned int *aux) | ||
3270 | { | ||
3271 | unsigned long low, high; | ||
3272 | @@ -256,14 +244,11 @@ do { \ | ||
3273 | |||
3274 | #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0) | ||
3275 | |||
3276 | -struct msr *msrs_alloc(void); | ||
3277 | -void msrs_free(struct msr *msrs); | ||
3278 | - | ||
3279 | #ifdef CONFIG_SMP | ||
3280 | int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | ||
3281 | int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | ||
3282 | -void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); | ||
3283 | -void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); | ||
3284 | +void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | ||
3285 | +void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); | ||
3286 | int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); | ||
3287 | int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); | ||
3288 | int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); | ||
3289 | diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h | ||
3290 | index 13b1885..c978648 100644 | ||
3291 | --- a/arch/x86/include/asm/processor.h | ||
3292 | +++ b/arch/x86/include/asm/processor.h | ||
3293 | @@ -180,7 +180,7 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | ||
3294 | unsigned int *ecx, unsigned int *edx) | ||
3295 | { | ||
3296 | /* ecx is often an input as well as an output. */ | ||
3297 | - asm volatile("cpuid" | ||
3298 | + asm("cpuid" | ||
3299 | : "=a" (*eax), | ||
3300 | "=b" (*ebx), | ||
3301 | "=c" (*ecx), | ||
3302 | diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h | ||
3303 | index 77c1184..72a6dcd 100644 | ||
3304 | --- a/arch/x86/include/asm/sys_ia32.h | ||
3305 | +++ b/arch/x86/include/asm/sys_ia32.h | ||
3306 | @@ -62,6 +62,9 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | ||
3307 | asmlinkage long sys32_personality(unsigned long); | ||
3308 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | ||
3309 | |||
3310 | +asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, | ||
3311 | + unsigned long, unsigned long, unsigned long); | ||
3312 | + | ||
3313 | struct oldold_utsname; | ||
3314 | struct old_utsname; | ||
3315 | asmlinkage long sys32_olduname(struct oldold_utsname __user *); | ||
3316 | diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h | ||
3317 | index 1bb6e39..372b76e 100644 | ||
3318 | --- a/arch/x86/include/asm/syscalls.h | ||
3319 | +++ b/arch/x86/include/asm/syscalls.h | ||
3320 | @@ -55,6 +55,8 @@ struct sel_arg_struct; | ||
3321 | struct oldold_utsname; | ||
3322 | struct old_utsname; | ||
3323 | |||
3324 | +asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long, | ||
3325 | + unsigned long, unsigned long, unsigned long); | ||
3326 | asmlinkage int old_mmap(struct mmap_arg_struct __user *); | ||
3327 | asmlinkage int old_select(struct sel_arg_struct __user *); | ||
3328 | asmlinkage int sys_ipc(uint, int, int, int, void __user *, long); | ||
3329 | diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h | ||
3330 | index 19c3ce4..d27d0a2 100644 | ||
3331 | --- a/arch/x86/include/asm/thread_info.h | ||
3332 | +++ b/arch/x86/include/asm/thread_info.h | ||
3333 | @@ -86,6 +86,7 @@ struct thread_info { | ||
3334 | #define TIF_NOTSC 16 /* TSC is not accessible in userland */ | ||
3335 | #define TIF_IA32 17 /* 32bit process */ | ||
3336 | #define TIF_FORK 18 /* ret_from_fork */ | ||
3337 | +#define TIF_ABI_PENDING 19 | ||
3338 | #define TIF_MEMDIE 20 | ||
3339 | #define TIF_DEBUG 21 /* uses debug registers */ | ||
3340 | #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ | ||
3341 | @@ -109,6 +110,7 @@ struct thread_info { | ||
3342 | #define _TIF_NOTSC (1 << TIF_NOTSC) | ||
3343 | #define _TIF_IA32 (1 << TIF_IA32) | ||
3344 | #define _TIF_FORK (1 << TIF_FORK) | ||
3345 | +#define _TIF_ABI_PENDING (1 << TIF_ABI_PENDING) | ||
3346 | #define _TIF_DEBUG (1 << TIF_DEBUG) | ||
3347 | #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) | ||
3348 | #define _TIF_FREEZE (1 << TIF_FREEZE) | ||
3349 | diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h | ||
3350 | index 6fb3c20..f9b507f 100644 | ||
3351 | --- a/arch/x86/include/asm/unistd_32.h | ||
3352 | +++ b/arch/x86/include/asm/unistd_32.h | ||
3353 | @@ -343,9 +343,13 @@ | ||
3354 | #define __NR_rt_tgsigqueueinfo 335 | ||
3355 | #define __NR_perf_event_open 336 | ||
3356 | |||
3357 | +#define __NR_LITMUS 337 | ||
3358 | + | ||
3359 | +#include "litmus/unistd_32.h" | ||
3360 | + | ||
3361 | #ifdef __KERNEL__ | ||
3362 | |||
3363 | -#define NR_syscalls 337 | ||
3364 | +#define NR_syscalls 336 + NR_litmus_syscalls | ||
3365 | |||
3366 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
3367 | #define __ARCH_WANT_OLD_READDIR | ||
3368 | diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h | ||
3369 | index 8d3ad0a..33b2003 100644 | ||
3370 | --- a/arch/x86/include/asm/unistd_64.h | ||
3371 | +++ b/arch/x86/include/asm/unistd_64.h | ||
3372 | @@ -662,6 +662,10 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | ||
3373 | #define __NR_perf_event_open 298 | ||
3374 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | ||
3375 | |||
3376 | +#define __NR_LITMUS 299 | ||
3377 | + | ||
3378 | +#include "litmus/unistd_64.h" | ||
3379 | + | ||
3380 | #ifndef __NO_STUBS | ||
3381 | #define __ARCH_WANT_OLD_READDIR | ||
3382 | #define __ARCH_WANT_OLD_STAT | ||
3383 | diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h | ||
3384 | index e90a8a9..d1414af 100644 | ||
3385 | --- a/arch/x86/include/asm/uv/uv_hub.h | ||
3386 | +++ b/arch/x86/include/asm/uv/uv_hub.h | ||
3387 | @@ -31,20 +31,20 @@ | ||
3388 | * contiguous (although various IO spaces may punch holes in | ||
3389 | * it).. | ||
3390 | * | ||
3391 | - * N - Number of bits in the node portion of a socket physical | ||
3392 | - * address. | ||
3393 | + * N - Number of bits in the node portion of a socket physical | ||
3394 | + * address. | ||
3395 | * | ||
3396 | - * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | ||
3397 | - * routers always have low bit of 1, C/MBricks have low bit | ||
3398 | - * equal to 0. Most addressing macros that target UV hub chips | ||
3399 | - * right shift the NASID by 1 to exclude the always-zero bit. | ||
3400 | - * NASIDs contain up to 15 bits. | ||
3401 | + * NASID - network ID of a router, Mbrick or Cbrick. Nasid values of | ||
3402 | + * routers always have low bit of 1, C/MBricks have low bit | ||
3403 | + * equal to 0. Most addressing macros that target UV hub chips | ||
3404 | + * right shift the NASID by 1 to exclude the always-zero bit. | ||
3405 | + * NASIDs contain up to 15 bits. | ||
3406 | * | ||
3407 | * GNODE - NASID right shifted by 1 bit. Most mmrs contain gnodes instead | ||
3408 | * of nasids. | ||
3409 | * | ||
3410 | - * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant | ||
3411 | - * of the nasid for socket usage. | ||
3412 | + * PNODE - the low N bits of the GNODE. The PNODE is the most useful variant | ||
3413 | + * of the nasid for socket usage. | ||
3414 | * | ||
3415 | * | ||
3416 | * NumaLink Global Physical Address Format: | ||
3417 | @@ -71,12 +71,12 @@ | ||
3418 | * | ||
3419 | * | ||
3420 | * APICID format | ||
3421 | - * NOTE!!!!!! This is the current format of the APICID. However, code | ||
3422 | - * should assume that this will change in the future. Use functions | ||
3423 | - * in this file for all APICID bit manipulations and conversion. | ||
3424 | + * NOTE!!!!!! This is the current format of the APICID. However, code | ||
3425 | + * should assume that this will change in the future. Use functions | ||
3426 | + * in this file for all APICID bit manipulations and conversion. | ||
3427 | * | ||
3428 | - * 1111110000000000 | ||
3429 | - * 5432109876543210 | ||
3430 | + * 1111110000000000 | ||
3431 | + * 5432109876543210 | ||
3432 | * pppppppppplc0cch | ||
3433 | * sssssssssss | ||
3434 | * | ||
3435 | @@ -89,9 +89,9 @@ | ||
3436 | * Note: Processor only supports 12 bits in the APICID register. The ACPI | ||
3437 | * tables hold all 16 bits. Software needs to be aware of this. | ||
3438 | * | ||
3439 | - * Unless otherwise specified, all references to APICID refer to | ||
3440 | - * the FULL value contained in ACPI tables, not the subset in the | ||
3441 | - * processor APICID register. | ||
3442 | + * Unless otherwise specified, all references to APICID refer to | ||
3443 | + * the FULL value contained in ACPI tables, not the subset in the | ||
3444 | + * processor APICID register. | ||
3445 | */ | ||
3446 | |||
3447 | |||
3448 | @@ -151,16 +151,16 @@ struct uv_hub_info_s { | ||
3449 | }; | ||
3450 | |||
3451 | DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | ||
3452 | -#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | ||
3453 | +#define uv_hub_info (&__get_cpu_var(__uv_hub_info)) | ||
3454 | #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu)) | ||
3455 | |||
3456 | /* | ||
3457 | * Local & Global MMR space macros. | ||
3458 | - * Note: macros are intended to be used ONLY by inline functions | ||
3459 | - * in this file - not by other kernel code. | ||
3460 | - * n - NASID (full 15-bit global nasid) | ||
3461 | - * g - GNODE (full 15-bit global nasid, right shifted 1) | ||
3462 | - * p - PNODE (local part of nsids, right shifted 1) | ||
3463 | + * Note: macros are intended to be used ONLY by inline functions | ||
3464 | + * in this file - not by other kernel code. | ||
3465 | + * n - NASID (full 15-bit global nasid) | ||
3466 | + * g - GNODE (full 15-bit global nasid, right shifted 1) | ||
3467 | + * p - PNODE (local part of nsids, right shifted 1) | ||
3468 | */ | ||
3469 | #define UV_NASID_TO_PNODE(n) (((n) >> 1) & uv_hub_info->pnode_mask) | ||
3470 | #define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra) | ||
3471 | @@ -213,8 +213,8 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | ||
3472 | /* | ||
3473 | * Macros for converting between kernel virtual addresses, socket local physical | ||
3474 | * addresses, and UV global physical addresses. | ||
3475 | - * Note: use the standard __pa() & __va() macros for converting | ||
3476 | - * between socket virtual and socket physical addresses. | ||
3477 | + * Note: use the standard __pa() & __va() macros for converting | ||
3478 | + * between socket virtual and socket physical addresses. | ||
3479 | */ | ||
3480 | |||
3481 | /* socket phys RAM --> UV global physical address */ | ||
3482 | @@ -265,18 +265,21 @@ static inline int uv_apicid_to_pnode(int apicid) | ||
3483 | * Access global MMRs using the low memory MMR32 space. This region supports | ||
3484 | * faster MMR access but not all MMRs are accessible in this space. | ||
3485 | */ | ||
3486 | -static inline unsigned long *uv_global_mmr32_address(int pnode, unsigned long offset) | ||
3487 | +static inline unsigned long *uv_global_mmr32_address(int pnode, | ||
3488 | + unsigned long offset) | ||
3489 | { | ||
3490 | return __va(UV_GLOBAL_MMR32_BASE | | ||
3491 | UV_GLOBAL_MMR32_PNODE_BITS(pnode) | offset); | ||
3492 | } | ||
3493 | |||
3494 | -static inline void uv_write_global_mmr32(int pnode, unsigned long offset, unsigned long val) | ||
3495 | +static inline void uv_write_global_mmr32(int pnode, unsigned long offset, | ||
3496 | + unsigned long val) | ||
3497 | { | ||
3498 | writeq(val, uv_global_mmr32_address(pnode, offset)); | ||
3499 | } | ||
3500 | |||
3501 | -static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset) | ||
3502 | +static inline unsigned long uv_read_global_mmr32(int pnode, | ||
3503 | + unsigned long offset) | ||
3504 | { | ||
3505 | return readq(uv_global_mmr32_address(pnode, offset)); | ||
3506 | } | ||
3507 | @@ -285,32 +288,25 @@ static inline unsigned long uv_read_global_mmr32(int pnode, unsigned long offset | ||
3508 | * Access Global MMR space using the MMR space located at the top of physical | ||
3509 | * memory. | ||
3510 | */ | ||
3511 | -static inline unsigned long *uv_global_mmr64_address(int pnode, unsigned long offset) | ||
3512 | +static inline unsigned long *uv_global_mmr64_address(int pnode, | ||
3513 | + unsigned long offset) | ||
3514 | { | ||
3515 | return __va(UV_GLOBAL_MMR64_BASE | | ||
3516 | UV_GLOBAL_MMR64_PNODE_BITS(pnode) | offset); | ||
3517 | } | ||
3518 | |||
3519 | -static inline void uv_write_global_mmr64(int pnode, unsigned long offset, unsigned long val) | ||
3520 | +static inline void uv_write_global_mmr64(int pnode, unsigned long offset, | ||
3521 | + unsigned long val) | ||
3522 | { | ||
3523 | writeq(val, uv_global_mmr64_address(pnode, offset)); | ||
3524 | } | ||
3525 | |||
3526 | -static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset) | ||
3527 | +static inline unsigned long uv_read_global_mmr64(int pnode, | ||
3528 | + unsigned long offset) | ||
3529 | { | ||
3530 | return readq(uv_global_mmr64_address(pnode, offset)); | ||
3531 | } | ||
3532 | |||
3533 | -static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val) | ||
3534 | -{ | ||
3535 | - writeb(val, uv_global_mmr64_address(pnode, offset)); | ||
3536 | -} | ||
3537 | - | ||
3538 | -static inline unsigned char uv_read_global_mmr8(int pnode, unsigned long offset) | ||
3539 | -{ | ||
3540 | - return readb(uv_global_mmr64_address(pnode, offset)); | ||
3541 | -} | ||
3542 | - | ||
3543 | /* | ||
3544 | * Access hub local MMRs. Faster than using global space but only local MMRs | ||
3545 | * are accessible. | ||
3546 | @@ -430,17 +426,11 @@ static inline void uv_set_scir_bits(unsigned char value) | ||
3547 | } | ||
3548 | } | ||
3549 | |||
3550 | -static inline unsigned long uv_scir_offset(int apicid) | ||
3551 | -{ | ||
3552 | - return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f); | ||
3553 | -} | ||
3554 | - | ||
3555 | static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value) | ||
3556 | { | ||
3557 | if (uv_cpu_hub_info(cpu)->scir.state != value) { | ||
3558 | - uv_write_global_mmr8(uv_cpu_to_pnode(cpu), | ||
3559 | - uv_cpu_hub_info(cpu)->scir.offset, value); | ||
3560 | uv_cpu_hub_info(cpu)->scir.state = value; | ||
3561 | + uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value); | ||
3562 | } | ||
3563 | } | ||
3564 | |||
3565 | diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile | ||
3566 | index d8e5d0c..a99b34d 100644 | ||
3567 | --- a/arch/x86/kernel/Makefile | ||
3568 | +++ b/arch/x86/kernel/Makefile | ||
3569 | @@ -117,6 +117,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | ||
3570 | |||
3571 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | ||
3572 | |||
3573 | +obj-$(CONFIG_FEATHER_TRACE) += ft_event.o | ||
3574 | + | ||
3575 | ### | ||
3576 | # 64 bit specific files | ||
3577 | ifeq ($(CONFIG_X86_64),y) | ||
3578 | diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c | ||
3579 | index 2e837f5..59cdfa4 100644 | ||
3580 | --- a/arch/x86/kernel/acpi/cstate.c | ||
3581 | +++ b/arch/x86/kernel/acpi/cstate.c | ||
3582 | @@ -48,7 +48,7 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, | ||
3583 | * P4, Core and beyond CPUs | ||
3584 | */ | ||
3585 | if (c->x86_vendor == X86_VENDOR_INTEL && | ||
3586 | - (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f))) | ||
3587 | + (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 14))) | ||
3588 | flags->bm_control = 0; | ||
3589 | } | ||
3590 | EXPORT_SYMBOL(acpi_processor_power_init_bm_check); | ||
3591 | diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c | ||
3592 | index 23fc9fe..0285521 100644 | ||
3593 | --- a/arch/x86/kernel/amd_iommu.c | ||
3594 | +++ b/arch/x86/kernel/amd_iommu.c | ||
3595 | @@ -540,7 +540,7 @@ static void flush_all_devices_for_iommu(struct amd_iommu *iommu) | ||
3596 | static void flush_devices_by_domain(struct protection_domain *domain) | ||
3597 | { | ||
3598 | struct amd_iommu *iommu; | ||
3599 | - unsigned long i; | ||
3600 | + int i; | ||
3601 | |||
3602 | for (i = 0; i <= amd_iommu_last_bdf; ++i) { | ||
3603 | if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || | ||
3604 | @@ -1230,10 +1230,9 @@ static void __detach_device(struct protection_domain *domain, u16 devid) | ||
3605 | |||
3606 | /* | ||
3607 | * If we run in passthrough mode the device must be assigned to the | ||
3608 | - * passthrough domain if it is detached from any other domain. | ||
3609 | - * Make sure we can deassign from the pt_domain itself. | ||
3610 | + * passthrough domain if it is detached from any other domain | ||
3611 | */ | ||
3612 | - if (iommu_pass_through && domain != pt_domain) { | ||
3613 | + if (iommu_pass_through) { | ||
3614 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | ||
3615 | __attach_device(iommu, pt_domain, devid); | ||
3616 | } | ||
3617 | @@ -2048,10 +2047,10 @@ static void prealloc_protection_domains(void) | ||
3618 | struct pci_dev *dev = NULL; | ||
3619 | struct dma_ops_domain *dma_dom; | ||
3620 | struct amd_iommu *iommu; | ||
3621 | - u16 devid, __devid; | ||
3622 | + u16 devid; | ||
3623 | |||
3624 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | ||
3625 | - __devid = devid = calc_devid(dev->bus->number, dev->devfn); | ||
3626 | + devid = calc_devid(dev->bus->number, dev->devfn); | ||
3627 | if (devid > amd_iommu_last_bdf) | ||
3628 | continue; | ||
3629 | devid = amd_iommu_alias_table[devid]; | ||
3630 | @@ -2066,10 +2065,6 @@ static void prealloc_protection_domains(void) | ||
3631 | init_unity_mappings_for_device(dma_dom, devid); | ||
3632 | dma_dom->target_dev = devid; | ||
3633 | |||
3634 | - attach_device(iommu, &dma_dom->domain, devid); | ||
3635 | - if (__devid != devid) | ||
3636 | - attach_device(iommu, &dma_dom->domain, __devid); | ||
3637 | - | ||
3638 | list_add_tail(&dma_dom->list, &iommu_pd_list); | ||
3639 | } | ||
3640 | } | ||
3641 | @@ -2084,11 +2079,6 @@ static struct dma_map_ops amd_iommu_dma_ops = { | ||
3642 | .dma_supported = amd_iommu_dma_supported, | ||
3643 | }; | ||
3644 | |||
3645 | -void __init amd_iommu_init_api(void) | ||
3646 | -{ | ||
3647 | - register_iommu(&amd_iommu_ops); | ||
3648 | -} | ||
3649 | - | ||
3650 | /* | ||
3651 | * The function which clues the AMD IOMMU driver into dma_ops. | ||
3652 | */ | ||
3653 | @@ -2130,6 +2120,8 @@ int __init amd_iommu_init_dma_ops(void) | ||
3654 | /* Make the driver finally visible to the drivers */ | ||
3655 | dma_ops = &amd_iommu_dma_ops; | ||
3656 | |||
3657 | + register_iommu(&amd_iommu_ops); | ||
3658 | + | ||
3659 | bus_register_notifier(&pci_bus_type, &device_nb); | ||
3660 | |||
3661 | amd_iommu_stats_init(); | ||
3662 | diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c | ||
3663 | index 362ab88..c20001e 100644 | ||
3664 | --- a/arch/x86/kernel/amd_iommu_init.c | ||
3665 | +++ b/arch/x86/kernel/amd_iommu_init.c | ||
3666 | @@ -136,11 +136,6 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the | ||
3667 | system */ | ||
3668 | |||
3669 | /* | ||
3670 | - * Set to true if ACPI table parsing and hardware intialization went properly | ||
3671 | - */ | ||
3672 | -static bool amd_iommu_initialized; | ||
3673 | - | ||
3674 | -/* | ||
3675 | * Pointer to the device table which is shared by all AMD IOMMUs | ||
3676 | * it is indexed by the PCI device id or the HT unit id and contains | ||
3677 | * information about the domain the device belongs to as well as the | ||
3678 | @@ -918,8 +913,6 @@ static int __init init_iommu_all(struct acpi_table_header *table) | ||
3679 | } | ||
3680 | WARN_ON(p != end); | ||
3681 | |||
3682 | - amd_iommu_initialized = true; | ||
3683 | - | ||
3684 | return 0; | ||
3685 | } | ||
3686 | |||
3687 | @@ -932,7 +925,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | ||
3688 | * | ||
3689 | ****************************************************************************/ | ||
3690 | |||
3691 | -static int iommu_setup_msi(struct amd_iommu *iommu) | ||
3692 | +static int __init iommu_setup_msi(struct amd_iommu *iommu) | ||
3693 | { | ||
3694 | int r; | ||
3695 | |||
3696 | @@ -1270,9 +1263,6 @@ int __init amd_iommu_init(void) | ||
3697 | if (acpi_table_parse("IVRS", init_iommu_all) != 0) | ||
3698 | goto free; | ||
3699 | |||
3700 | - if (!amd_iommu_initialized) | ||
3701 | - goto free; | ||
3702 | - | ||
3703 | if (acpi_table_parse("IVRS", init_memory_definitions) != 0) | ||
3704 | goto free; | ||
3705 | |||
3706 | @@ -1288,12 +1278,9 @@ int __init amd_iommu_init(void) | ||
3707 | ret = amd_iommu_init_passthrough(); | ||
3708 | else | ||
3709 | ret = amd_iommu_init_dma_ops(); | ||
3710 | - | ||
3711 | if (ret) | ||
3712 | goto free; | ||
3713 | |||
3714 | - amd_iommu_init_api(); | ||
3715 | - | ||
3716 | enable_iommus(); | ||
3717 | |||
3718 | if (iommu_pass_through) | ||
3719 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c | ||
3720 | index c86dbcf..894aa97 100644 | ||
3721 | --- a/arch/x86/kernel/apic/apic.c | ||
3722 | +++ b/arch/x86/kernel/apic/apic.c | ||
3723 | @@ -246,7 +246,7 @@ static int modern_apic(void) | ||
3724 | */ | ||
3725 | static void native_apic_write_dummy(u32 reg, u32 v) | ||
3726 | { | ||
3727 | - WARN_ON_ONCE(cpu_has_apic && !disable_apic); | ||
3728 | + WARN_ON_ONCE((cpu_has_apic || !disable_apic)); | ||
3729 | } | ||
3730 | |||
3731 | static u32 native_apic_read_dummy(u32 reg) | ||
3732 | diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c | ||
3733 | index 873f81f..d0c99ab 100644 | ||
3734 | --- a/arch/x86/kernel/apic/apic_flat_64.c | ||
3735 | +++ b/arch/x86/kernel/apic/apic_flat_64.c | ||
3736 | @@ -240,11 +240,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | ||
3737 | printk(KERN_DEBUG "system APIC only can use physical flat"); | ||
3738 | return 1; | ||
3739 | } | ||
3740 | - | ||
3741 | - if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) { | ||
3742 | - printk(KERN_DEBUG "IBM Summit detected, will use apic physical"); | ||
3743 | - return 1; | ||
3744 | - } | ||
3745 | #endif | ||
3746 | |||
3747 | return 0; | ||
3748 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c | ||
3749 | index c107e83..dc69f28 100644 | ||
3750 | --- a/arch/x86/kernel/apic/io_apic.c | ||
3751 | +++ b/arch/x86/kernel/apic/io_apic.c | ||
3752 | @@ -3157,7 +3157,6 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) | ||
3753 | continue; | ||
3754 | |||
3755 | desc_new = move_irq_desc(desc_new, node); | ||
3756 | - cfg_new = desc_new->chip_data; | ||
3757 | |||
3758 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | ||
3759 | irq = new; | ||
3760 | diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c | ||
3761 | index 9ee87cf..326c254 100644 | ||
3762 | --- a/arch/x86/kernel/apic/x2apic_uv_x.c | ||
3763 | +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | ||
3764 | @@ -364,13 +364,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) | ||
3765 | |||
3766 | enum map_type {map_wb, map_uc}; | ||
3767 | |||
3768 | -static __init void map_high(char *id, unsigned long base, int pshift, | ||
3769 | - int bshift, int max_pnode, enum map_type map_type) | ||
3770 | +static __init void map_high(char *id, unsigned long base, int shift, | ||
3771 | + int max_pnode, enum map_type map_type) | ||
3772 | { | ||
3773 | unsigned long bytes, paddr; | ||
3774 | |||
3775 | - paddr = base << pshift; | ||
3776 | - bytes = (1UL << bshift) * (max_pnode + 1); | ||
3777 | + paddr = base << shift; | ||
3778 | + bytes = (1UL << shift) * (max_pnode + 1); | ||
3779 | printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, | ||
3780 | paddr + bytes); | ||
3781 | if (map_type == map_uc) | ||
3782 | @@ -386,7 +386,7 @@ static __init void map_gru_high(int max_pnode) | ||
3783 | |||
3784 | gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); | ||
3785 | if (gru.s.enable) | ||
3786 | - map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); | ||
3787 | + map_high("GRU", gru.s.base, shift, max_pnode, map_wb); | ||
3788 | } | ||
3789 | |||
3790 | static __init void map_mmr_high(int max_pnode) | ||
3791 | @@ -396,7 +396,7 @@ static __init void map_mmr_high(int max_pnode) | ||
3792 | |||
3793 | mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); | ||
3794 | if (mmr.s.enable) | ||
3795 | - map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); | ||
3796 | + map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); | ||
3797 | } | ||
3798 | |||
3799 | static __init void map_mmioh_high(int max_pnode) | ||
3800 | @@ -406,8 +406,7 @@ static __init void map_mmioh_high(int max_pnode) | ||
3801 | |||
3802 | mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); | ||
3803 | if (mmioh.s.enable) | ||
3804 | - map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io, | ||
3805 | - max_pnode, map_uc); | ||
3806 | + map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); | ||
3807 | } | ||
3808 | |||
3809 | static __init void uv_rtc_init(void) | ||
3810 | @@ -608,10 +607,8 @@ void __init uv_system_init(void) | ||
3811 | uv_rtc_init(); | ||
3812 | |||
3813 | for_each_present_cpu(cpu) { | ||
3814 | - int apicid = per_cpu(x86_cpu_to_apicid, cpu); | ||
3815 | - | ||
3816 | nid = cpu_to_node(cpu); | ||
3817 | - pnode = uv_apicid_to_pnode(apicid); | ||
3818 | + pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu)); | ||
3819 | blade = boot_pnode_to_blade(pnode); | ||
3820 | lcpu = uv_blade_info[blade].nr_possible_cpus; | ||
3821 | uv_blade_info[blade].nr_possible_cpus++; | ||
3822 | @@ -632,13 +629,15 @@ void __init uv_system_init(void) | ||
3823 | uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra; | ||
3824 | uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; | ||
3825 | uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id; | ||
3826 | - uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid); | ||
3827 | + uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu; | ||
3828 | uv_node_to_blade[nid] = blade; | ||
3829 | uv_cpu_to_blade[cpu] = blade; | ||
3830 | max_pnode = max(pnode, max_pnode); | ||
3831 | |||
3832 | - printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n", | ||
3833 | - cpu, apicid, pnode, nid, lcpu, blade); | ||
3834 | + printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, " | ||
3835 | + "lcpu %d, blade %d\n", | ||
3836 | + cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid, | ||
3837 | + lcpu, blade); | ||
3838 | } | ||
3839 | |||
3840 | /* Add blade/pnode info for nodes without cpus */ | ||
3841 | diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile | ||
3842 | index ff502cc..68537e9 100644 | ||
3843 | --- a/arch/x86/kernel/cpu/Makefile | ||
3844 | +++ b/arch/x86/kernel/cpu/Makefile | ||
3845 | @@ -18,6 +18,8 @@ obj-y += vmware.o hypervisor.o sched.o | ||
3846 | obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o | ||
3847 | obj-$(CONFIG_X86_64) += bugs_64.o | ||
3848 | |||
3849 | +obj-$(CONFIG_X86_CPU_DEBUG) += cpu_debug.o | ||
3850 | + | ||
3851 | obj-$(CONFIG_CPU_SUP_INTEL) += intel.o | ||
3852 | obj-$(CONFIG_CPU_SUP_AMD) += amd.o | ||
3853 | obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o | ||
3854 | diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c | ||
3855 | new file mode 100644 | ||
3856 | index 0000000..dca325c | ||
3857 | --- /dev/null | ||
3858 | +++ b/arch/x86/kernel/cpu/cpu_debug.c | ||
3859 | @@ -0,0 +1,688 @@ | ||
3860 | +/* | ||
3861 | + * CPU x86 architecture debug code | ||
3862 | + * | ||
3863 | + * Copyright(C) 2009 Jaswinder Singh Rajput | ||
3864 | + * | ||
3865 | + * For licencing details see kernel-base/COPYING | ||
3866 | + */ | ||
3867 | + | ||
3868 | +#include <linux/interrupt.h> | ||
3869 | +#include <linux/compiler.h> | ||
3870 | +#include <linux/seq_file.h> | ||
3871 | +#include <linux/debugfs.h> | ||
3872 | +#include <linux/kprobes.h> | ||
3873 | +#include <linux/uaccess.h> | ||
3874 | +#include <linux/kernel.h> | ||
3875 | +#include <linux/module.h> | ||
3876 | +#include <linux/percpu.h> | ||
3877 | +#include <linux/signal.h> | ||
3878 | +#include <linux/errno.h> | ||
3879 | +#include <linux/sched.h> | ||
3880 | +#include <linux/types.h> | ||
3881 | +#include <linux/init.h> | ||
3882 | +#include <linux/slab.h> | ||
3883 | +#include <linux/smp.h> | ||
3884 | + | ||
3885 | +#include <asm/cpu_debug.h> | ||
3886 | +#include <asm/paravirt.h> | ||
3887 | +#include <asm/system.h> | ||
3888 | +#include <asm/traps.h> | ||
3889 | +#include <asm/apic.h> | ||
3890 | +#include <asm/desc.h> | ||
3891 | + | ||
3892 | +static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr); | ||
3893 | +static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr); | ||
3894 | +static DEFINE_PER_CPU(int, cpu_priv_count); | ||
3895 | + | ||
3896 | +static DEFINE_MUTEX(cpu_debug_lock); | ||
3897 | + | ||
3898 | +static struct dentry *cpu_debugfs_dir; | ||
3899 | + | ||
3900 | +static struct cpu_debug_base cpu_base[] = { | ||
3901 | + { "mc", CPU_MC, 0 }, | ||
3902 | + { "monitor", CPU_MONITOR, 0 }, | ||
3903 | + { "time", CPU_TIME, 0 }, | ||
3904 | + { "pmc", CPU_PMC, 1 }, | ||
3905 | + { "platform", CPU_PLATFORM, 0 }, | ||
3906 | + { "apic", CPU_APIC, 0 }, | ||
3907 | + { "poweron", CPU_POWERON, 0 }, | ||
3908 | + { "control", CPU_CONTROL, 0 }, | ||
3909 | + { "features", CPU_FEATURES, 0 }, | ||
3910 | + { "lastbranch", CPU_LBRANCH, 0 }, | ||
3911 | + { "bios", CPU_BIOS, 0 }, | ||
3912 | + { "freq", CPU_FREQ, 0 }, | ||
3913 | + { "mtrr", CPU_MTRR, 0 }, | ||
3914 | + { "perf", CPU_PERF, 0 }, | ||
3915 | + { "cache", CPU_CACHE, 0 }, | ||
3916 | + { "sysenter", CPU_SYSENTER, 0 }, | ||
3917 | + { "therm", CPU_THERM, 0 }, | ||
3918 | + { "misc", CPU_MISC, 0 }, | ||
3919 | + { "debug", CPU_DEBUG, 0 }, | ||
3920 | + { "pat", CPU_PAT, 0 }, | ||
3921 | + { "vmx", CPU_VMX, 0 }, | ||
3922 | + { "call", CPU_CALL, 0 }, | ||
3923 | + { "base", CPU_BASE, 0 }, | ||
3924 | + { "ver", CPU_VER, 0 }, | ||
3925 | + { "conf", CPU_CONF, 0 }, | ||
3926 | + { "smm", CPU_SMM, 0 }, | ||
3927 | + { "svm", CPU_SVM, 0 }, | ||
3928 | + { "osvm", CPU_OSVM, 0 }, | ||
3929 | + { "tss", CPU_TSS, 0 }, | ||
3930 | + { "cr", CPU_CR, 0 }, | ||
3931 | + { "dt", CPU_DT, 0 }, | ||
3932 | + { "registers", CPU_REG_ALL, 0 }, | ||
3933 | +}; | ||
3934 | + | ||
3935 | +static struct cpu_file_base cpu_file[] = { | ||
3936 | + { "index", CPU_REG_ALL, 0 }, | ||
3937 | + { "value", CPU_REG_ALL, 1 }, | ||
3938 | +}; | ||
3939 | + | ||
3940 | +/* CPU Registers Range */ | ||
3941 | +static struct cpu_debug_range cpu_reg_range[] = { | ||
3942 | + { 0x00000000, 0x00000001, CPU_MC, }, | ||
3943 | + { 0x00000006, 0x00000007, CPU_MONITOR, }, | ||
3944 | + { 0x00000010, 0x00000010, CPU_TIME, }, | ||
3945 | + { 0x00000011, 0x00000013, CPU_PMC, }, | ||
3946 | + { 0x00000017, 0x00000017, CPU_PLATFORM, }, | ||
3947 | + { 0x0000001B, 0x0000001B, CPU_APIC, }, | ||
3948 | + { 0x0000002A, 0x0000002B, CPU_POWERON, }, | ||
3949 | + { 0x0000002C, 0x0000002C, CPU_FREQ, }, | ||
3950 | + { 0x0000003A, 0x0000003A, CPU_CONTROL, }, | ||
3951 | + { 0x00000040, 0x00000047, CPU_LBRANCH, }, | ||
3952 | + { 0x00000060, 0x00000067, CPU_LBRANCH, }, | ||
3953 | + { 0x00000079, 0x00000079, CPU_BIOS, }, | ||
3954 | + { 0x00000088, 0x0000008A, CPU_CACHE, }, | ||
3955 | + { 0x0000008B, 0x0000008B, CPU_BIOS, }, | ||
3956 | + { 0x0000009B, 0x0000009B, CPU_MONITOR, }, | ||
3957 | + { 0x000000C1, 0x000000C4, CPU_PMC, }, | ||
3958 | + { 0x000000CD, 0x000000CD, CPU_FREQ, }, | ||
3959 | + { 0x000000E7, 0x000000E8, CPU_PERF, }, | ||
3960 | + { 0x000000FE, 0x000000FE, CPU_MTRR, }, | ||
3961 | + | ||
3962 | + { 0x00000116, 0x0000011E, CPU_CACHE, }, | ||
3963 | + { 0x00000174, 0x00000176, CPU_SYSENTER, }, | ||
3964 | + { 0x00000179, 0x0000017B, CPU_MC, }, | ||
3965 | + { 0x00000186, 0x00000189, CPU_PMC, }, | ||
3966 | + { 0x00000198, 0x00000199, CPU_PERF, }, | ||
3967 | + { 0x0000019A, 0x0000019A, CPU_TIME, }, | ||
3968 | + { 0x0000019B, 0x0000019D, CPU_THERM, }, | ||
3969 | + { 0x000001A0, 0x000001A0, CPU_MISC, }, | ||
3970 | + { 0x000001C9, 0x000001C9, CPU_LBRANCH, }, | ||
3971 | + { 0x000001D7, 0x000001D8, CPU_LBRANCH, }, | ||
3972 | + { 0x000001D9, 0x000001D9, CPU_DEBUG, }, | ||
3973 | + { 0x000001DA, 0x000001E0, CPU_LBRANCH, }, | ||
3974 | + | ||
3975 | + { 0x00000200, 0x0000020F, CPU_MTRR, }, | ||
3976 | + { 0x00000250, 0x00000250, CPU_MTRR, }, | ||
3977 | + { 0x00000258, 0x00000259, CPU_MTRR, }, | ||
3978 | + { 0x00000268, 0x0000026F, CPU_MTRR, }, | ||
3979 | + { 0x00000277, 0x00000277, CPU_PAT, }, | ||
3980 | + { 0x000002FF, 0x000002FF, CPU_MTRR, }, | ||
3981 | + | ||
3982 | + { 0x00000300, 0x00000311, CPU_PMC, }, | ||
3983 | + { 0x00000345, 0x00000345, CPU_PMC, }, | ||
3984 | + { 0x00000360, 0x00000371, CPU_PMC, }, | ||
3985 | + { 0x0000038D, 0x00000390, CPU_PMC, }, | ||
3986 | + { 0x000003A0, 0x000003BE, CPU_PMC, }, | ||
3987 | + { 0x000003C0, 0x000003CD, CPU_PMC, }, | ||
3988 | + { 0x000003E0, 0x000003E1, CPU_PMC, }, | ||
3989 | + { 0x000003F0, 0x000003F2, CPU_PMC, }, | ||
3990 | + | ||
3991 | + { 0x00000400, 0x00000417, CPU_MC, }, | ||
3992 | + { 0x00000480, 0x0000048B, CPU_VMX, }, | ||
3993 | + | ||
3994 | + { 0x00000600, 0x00000600, CPU_DEBUG, }, | ||
3995 | + { 0x00000680, 0x0000068F, CPU_LBRANCH, }, | ||
3996 | + { 0x000006C0, 0x000006CF, CPU_LBRANCH, }, | ||
3997 | + | ||
3998 | + { 0x000107CC, 0x000107D3, CPU_PMC, }, | ||
3999 | + | ||
4000 | + { 0xC0000080, 0xC0000080, CPU_FEATURES, }, | ||
4001 | + { 0xC0000081, 0xC0000084, CPU_CALL, }, | ||
4002 | + { 0xC0000100, 0xC0000102, CPU_BASE, }, | ||
4003 | + { 0xC0000103, 0xC0000103, CPU_TIME, }, | ||
4004 | + | ||
4005 | + { 0xC0010000, 0xC0010007, CPU_PMC, }, | ||
4006 | + { 0xC0010010, 0xC0010010, CPU_CONF, }, | ||
4007 | + { 0xC0010015, 0xC0010015, CPU_CONF, }, | ||
4008 | + { 0xC0010016, 0xC001001A, CPU_MTRR, }, | ||
4009 | + { 0xC001001D, 0xC001001D, CPU_MTRR, }, | ||
4010 | + { 0xC001001F, 0xC001001F, CPU_CONF, }, | ||
4011 | + { 0xC0010030, 0xC0010035, CPU_BIOS, }, | ||
4012 | + { 0xC0010044, 0xC0010048, CPU_MC, }, | ||
4013 | + { 0xC0010050, 0xC0010056, CPU_SMM, }, | ||
4014 | + { 0xC0010058, 0xC0010058, CPU_CONF, }, | ||
4015 | + { 0xC0010060, 0xC0010060, CPU_CACHE, }, | ||
4016 | + { 0xC0010061, 0xC0010068, CPU_SMM, }, | ||
4017 | + { 0xC0010069, 0xC001006B, CPU_SMM, }, | ||
4018 | + { 0xC0010070, 0xC0010071, CPU_SMM, }, | ||
4019 | + { 0xC0010111, 0xC0010113, CPU_SMM, }, | ||
4020 | + { 0xC0010114, 0xC0010118, CPU_SVM, }, | ||
4021 | + { 0xC0010140, 0xC0010141, CPU_OSVM, }, | ||
4022 | + { 0xC0011022, 0xC0011023, CPU_CONF, }, | ||
4023 | +}; | ||
4024 | + | ||
4025 | +static int is_typeflag_valid(unsigned cpu, unsigned flag) | ||
4026 | +{ | ||
4027 | + int i; | ||
4028 | + | ||
4029 | + /* Standard Registers should be always valid */ | ||
4030 | + if (flag >= CPU_TSS) | ||
4031 | + return 1; | ||
4032 | + | ||
4033 | + for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
4034 | + if (cpu_reg_range[i].flag == flag) | ||
4035 | + return 1; | ||
4036 | + } | ||
4037 | + | ||
4038 | + /* Invalid */ | ||
4039 | + return 0; | ||
4040 | +} | ||
4041 | + | ||
4042 | +static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max, | ||
4043 | + int index, unsigned flag) | ||
4044 | +{ | ||
4045 | + if (cpu_reg_range[index].flag == flag) { | ||
4046 | + *min = cpu_reg_range[index].min; | ||
4047 | + *max = cpu_reg_range[index].max; | ||
4048 | + } else | ||
4049 | + *max = 0; | ||
4050 | + | ||
4051 | + return *max; | ||
4052 | +} | ||
4053 | + | ||
4054 | +/* This function can also be called with seq = NULL for printk */ | ||
4055 | +static void print_cpu_data(struct seq_file *seq, unsigned type, | ||
4056 | + u32 low, u32 high) | ||
4057 | +{ | ||
4058 | + struct cpu_private *priv; | ||
4059 | + u64 val = high; | ||
4060 | + | ||
4061 | + if (seq) { | ||
4062 | + priv = seq->private; | ||
4063 | + if (priv->file) { | ||
4064 | + val = (val << 32) | low; | ||
4065 | + seq_printf(seq, "0x%llx\n", val); | ||
4066 | + } else | ||
4067 | + seq_printf(seq, " %08x: %08x_%08x\n", | ||
4068 | + type, high, low); | ||
4069 | + } else | ||
4070 | + printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low); | ||
4071 | +} | ||
4072 | + | ||
4073 | +/* This function can also be called with seq = NULL for printk */ | ||
4074 | +static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag) | ||
4075 | +{ | ||
4076 | + unsigned msr, msr_min, msr_max; | ||
4077 | + struct cpu_private *priv; | ||
4078 | + u32 low, high; | ||
4079 | + int i; | ||
4080 | + | ||
4081 | + if (seq) { | ||
4082 | + priv = seq->private; | ||
4083 | + if (priv->file) { | ||
4084 | + if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg, | ||
4085 | + &low, &high)) | ||
4086 | + print_cpu_data(seq, priv->reg, low, high); | ||
4087 | + return; | ||
4088 | + } | ||
4089 | + } | ||
4090 | + | ||
4091 | + for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
4092 | + if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag)) | ||
4093 | + continue; | ||
4094 | + | ||
4095 | + for (msr = msr_min; msr <= msr_max; msr++) { | ||
4096 | + if (rdmsr_safe_on_cpu(cpu, msr, &low, &high)) | ||
4097 | + continue; | ||
4098 | + print_cpu_data(seq, msr, low, high); | ||
4099 | + } | ||
4100 | + } | ||
4101 | +} | ||
4102 | + | ||
4103 | +static void print_tss(void *arg) | ||
4104 | +{ | ||
4105 | + struct pt_regs *regs = task_pt_regs(current); | ||
4106 | + struct seq_file *seq = arg; | ||
4107 | + unsigned int seg; | ||
4108 | + | ||
4109 | + seq_printf(seq, " RAX\t: %016lx\n", regs->ax); | ||
4110 | + seq_printf(seq, " RBX\t: %016lx\n", regs->bx); | ||
4111 | + seq_printf(seq, " RCX\t: %016lx\n", regs->cx); | ||
4112 | + seq_printf(seq, " RDX\t: %016lx\n", regs->dx); | ||
4113 | + | ||
4114 | + seq_printf(seq, " RSI\t: %016lx\n", regs->si); | ||
4115 | + seq_printf(seq, " RDI\t: %016lx\n", regs->di); | ||
4116 | + seq_printf(seq, " RBP\t: %016lx\n", regs->bp); | ||
4117 | + seq_printf(seq, " ESP\t: %016lx\n", regs->sp); | ||
4118 | + | ||
4119 | +#ifdef CONFIG_X86_64 | ||
4120 | + seq_printf(seq, " R08\t: %016lx\n", regs->r8); | ||
4121 | + seq_printf(seq, " R09\t: %016lx\n", regs->r9); | ||
4122 | + seq_printf(seq, " R10\t: %016lx\n", regs->r10); | ||
4123 | + seq_printf(seq, " R11\t: %016lx\n", regs->r11); | ||
4124 | + seq_printf(seq, " R12\t: %016lx\n", regs->r12); | ||
4125 | + seq_printf(seq, " R13\t: %016lx\n", regs->r13); | ||
4126 | + seq_printf(seq, " R14\t: %016lx\n", regs->r14); | ||
4127 | + seq_printf(seq, " R15\t: %016lx\n", regs->r15); | ||
4128 | +#endif | ||
4129 | + | ||
4130 | + asm("movl %%cs,%0" : "=r" (seg)); | ||
4131 | + seq_printf(seq, " CS\t: %04x\n", seg); | ||
4132 | + asm("movl %%ds,%0" : "=r" (seg)); | ||
4133 | + seq_printf(seq, " DS\t: %04x\n", seg); | ||
4134 | + seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff); | ||
4135 | + asm("movl %%es,%0" : "=r" (seg)); | ||
4136 | + seq_printf(seq, " ES\t: %04x\n", seg); | ||
4137 | + asm("movl %%fs,%0" : "=r" (seg)); | ||
4138 | + seq_printf(seq, " FS\t: %04x\n", seg); | ||
4139 | + asm("movl %%gs,%0" : "=r" (seg)); | ||
4140 | + seq_printf(seq, " GS\t: %04x\n", seg); | ||
4141 | + | ||
4142 | + seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags); | ||
4143 | + | ||
4144 | + seq_printf(seq, " EIP\t: %016lx\n", regs->ip); | ||
4145 | +} | ||
4146 | + | ||
4147 | +static void print_cr(void *arg) | ||
4148 | +{ | ||
4149 | + struct seq_file *seq = arg; | ||
4150 | + | ||
4151 | + seq_printf(seq, " cr0\t: %016lx\n", read_cr0()); | ||
4152 | + seq_printf(seq, " cr2\t: %016lx\n", read_cr2()); | ||
4153 | + seq_printf(seq, " cr3\t: %016lx\n", read_cr3()); | ||
4154 | + seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe()); | ||
4155 | +#ifdef CONFIG_X86_64 | ||
4156 | + seq_printf(seq, " cr8\t: %016lx\n", read_cr8()); | ||
4157 | +#endif | ||
4158 | +} | ||
4159 | + | ||
4160 | +static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt) | ||
4161 | +{ | ||
4162 | + seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size)); | ||
4163 | +} | ||
4164 | + | ||
4165 | +static void print_dt(void *seq) | ||
4166 | +{ | ||
4167 | + struct desc_ptr dt; | ||
4168 | + unsigned long ldt; | ||
4169 | + | ||
4170 | + /* IDT */ | ||
4171 | + store_idt((struct desc_ptr *)&dt); | ||
4172 | + print_desc_ptr("IDT", seq, dt); | ||
4173 | + | ||
4174 | + /* GDT */ | ||
4175 | + store_gdt((struct desc_ptr *)&dt); | ||
4176 | + print_desc_ptr("GDT", seq, dt); | ||
4177 | + | ||
4178 | + /* LDT */ | ||
4179 | + store_ldt(ldt); | ||
4180 | + seq_printf(seq, " LDT\t: %016lx\n", ldt); | ||
4181 | + | ||
4182 | + /* TR */ | ||
4183 | + store_tr(ldt); | ||
4184 | + seq_printf(seq, " TR\t: %016lx\n", ldt); | ||
4185 | +} | ||
4186 | + | ||
4187 | +static void print_dr(void *arg) | ||
4188 | +{ | ||
4189 | + struct seq_file *seq = arg; | ||
4190 | + unsigned long dr; | ||
4191 | + int i; | ||
4192 | + | ||
4193 | + for (i = 0; i < 8; i++) { | ||
4194 | + /* Ignore db4, db5 */ | ||
4195 | + if ((i == 4) || (i == 5)) | ||
4196 | + continue; | ||
4197 | + get_debugreg(dr, i); | ||
4198 | + seq_printf(seq, " dr%d\t: %016lx\n", i, dr); | ||
4199 | + } | ||
4200 | + | ||
4201 | + seq_printf(seq, "\n MSR\t:\n"); | ||
4202 | +} | ||
4203 | + | ||
4204 | +static void print_apic(void *arg) | ||
4205 | +{ | ||
4206 | + struct seq_file *seq = arg; | ||
4207 | + | ||
4208 | +#ifdef CONFIG_X86_LOCAL_APIC | ||
4209 | + seq_printf(seq, " LAPIC\t:\n"); | ||
4210 | + seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24); | ||
4211 | + seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR)); | ||
4212 | + seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI)); | ||
4213 | + seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI)); | ||
4214 | + seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI)); | ||
4215 | + seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR)); | ||
4216 | + seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR)); | ||
4217 | + seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV)); | ||
4218 | + seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR)); | ||
4219 | + seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR)); | ||
4220 | + seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR)); | ||
4221 | + seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2)); | ||
4222 | + seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT)); | ||
4223 | + seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR)); | ||
4224 | + seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC)); | ||
4225 | + seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0)); | ||
4226 | + seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1)); | ||
4227 | + seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR)); | ||
4228 | + seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT)); | ||
4229 | + seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT)); | ||
4230 | + seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR)); | ||
4231 | + if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { | ||
4232 | + unsigned int i, v, maxeilvt; | ||
4233 | + | ||
4234 | + v = apic_read(APIC_EFEAT); | ||
4235 | + maxeilvt = (v >> 16) & 0xff; | ||
4236 | + seq_printf(seq, " EFEAT\t\t: %08x\n", v); | ||
4237 | + seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL)); | ||
4238 | + | ||
4239 | + for (i = 0; i < maxeilvt; i++) { | ||
4240 | + v = apic_read(APIC_EILVTn(i)); | ||
4241 | + seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v); | ||
4242 | + } | ||
4243 | + } | ||
4244 | +#endif /* CONFIG_X86_LOCAL_APIC */ | ||
4245 | + seq_printf(seq, "\n MSR\t:\n"); | ||
4246 | +} | ||
4247 | + | ||
4248 | +static int cpu_seq_show(struct seq_file *seq, void *v) | ||
4249 | +{ | ||
4250 | + struct cpu_private *priv = seq->private; | ||
4251 | + | ||
4252 | + if (priv == NULL) | ||
4253 | + return -EINVAL; | ||
4254 | + | ||
4255 | + switch (cpu_base[priv->type].flag) { | ||
4256 | + case CPU_TSS: | ||
4257 | + smp_call_function_single(priv->cpu, print_tss, seq, 1); | ||
4258 | + break; | ||
4259 | + case CPU_CR: | ||
4260 | + smp_call_function_single(priv->cpu, print_cr, seq, 1); | ||
4261 | + break; | ||
4262 | + case CPU_DT: | ||
4263 | + smp_call_function_single(priv->cpu, print_dt, seq, 1); | ||
4264 | + break; | ||
4265 | + case CPU_DEBUG: | ||
4266 | + if (priv->file == CPU_INDEX_BIT) | ||
4267 | + smp_call_function_single(priv->cpu, print_dr, seq, 1); | ||
4268 | + print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
4269 | + break; | ||
4270 | + case CPU_APIC: | ||
4271 | + if (priv->file == CPU_INDEX_BIT) | ||
4272 | + smp_call_function_single(priv->cpu, print_apic, seq, 1); | ||
4273 | + print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
4274 | + break; | ||
4275 | + | ||
4276 | + default: | ||
4277 | + print_msr(seq, priv->cpu, cpu_base[priv->type].flag); | ||
4278 | + break; | ||
4279 | + } | ||
4280 | + seq_printf(seq, "\n"); | ||
4281 | + | ||
4282 | + return 0; | ||
4283 | +} | ||
4284 | + | ||
4285 | +static void *cpu_seq_start(struct seq_file *seq, loff_t *pos) | ||
4286 | +{ | ||
4287 | + if (*pos == 0) /* One time is enough ;-) */ | ||
4288 | + return seq; | ||
4289 | + | ||
4290 | + return NULL; | ||
4291 | +} | ||
4292 | + | ||
4293 | +static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
4294 | +{ | ||
4295 | + (*pos)++; | ||
4296 | + | ||
4297 | + return cpu_seq_start(seq, pos); | ||
4298 | +} | ||
4299 | + | ||
4300 | +static void cpu_seq_stop(struct seq_file *seq, void *v) | ||
4301 | +{ | ||
4302 | +} | ||
4303 | + | ||
4304 | +static const struct seq_operations cpu_seq_ops = { | ||
4305 | + .start = cpu_seq_start, | ||
4306 | + .next = cpu_seq_next, | ||
4307 | + .stop = cpu_seq_stop, | ||
4308 | + .show = cpu_seq_show, | ||
4309 | +}; | ||
4310 | + | ||
4311 | +static int cpu_seq_open(struct inode *inode, struct file *file) | ||
4312 | +{ | ||
4313 | + struct cpu_private *priv = inode->i_private; | ||
4314 | + struct seq_file *seq; | ||
4315 | + int err; | ||
4316 | + | ||
4317 | + err = seq_open(file, &cpu_seq_ops); | ||
4318 | + if (!err) { | ||
4319 | + seq = file->private_data; | ||
4320 | + seq->private = priv; | ||
4321 | + } | ||
4322 | + | ||
4323 | + return err; | ||
4324 | +} | ||
4325 | + | ||
4326 | +static int write_msr(struct cpu_private *priv, u64 val) | ||
4327 | +{ | ||
4328 | + u32 low, high; | ||
4329 | + | ||
4330 | + high = (val >> 32) & 0xffffffff; | ||
4331 | + low = val & 0xffffffff; | ||
4332 | + | ||
4333 | + if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high)) | ||
4334 | + return 0; | ||
4335 | + | ||
4336 | + return -EPERM; | ||
4337 | +} | ||
4338 | + | ||
4339 | +static int write_cpu_register(struct cpu_private *priv, const char *buf) | ||
4340 | +{ | ||
4341 | + int ret = -EPERM; | ||
4342 | + u64 val; | ||
4343 | + | ||
4344 | + ret = strict_strtoull(buf, 0, &val); | ||
4345 | + if (ret < 0) | ||
4346 | + return ret; | ||
4347 | + | ||
4348 | + /* Supporting only MSRs */ | ||
4349 | + if (priv->type < CPU_TSS_BIT) | ||
4350 | + return write_msr(priv, val); | ||
4351 | + | ||
4352 | + return ret; | ||
4353 | +} | ||
4354 | + | ||
4355 | +static ssize_t cpu_write(struct file *file, const char __user *ubuf, | ||
4356 | + size_t count, loff_t *off) | ||
4357 | +{ | ||
4358 | + struct seq_file *seq = file->private_data; | ||
4359 | + struct cpu_private *priv = seq->private; | ||
4360 | + char buf[19]; | ||
4361 | + | ||
4362 | + if ((priv == NULL) || (count >= sizeof(buf))) | ||
4363 | + return -EINVAL; | ||
4364 | + | ||
4365 | + if (copy_from_user(&buf, ubuf, count)) | ||
4366 | + return -EFAULT; | ||
4367 | + | ||
4368 | + buf[count] = 0; | ||
4369 | + | ||
4370 | + if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write)) | ||
4371 | + if (!write_cpu_register(priv, buf)) | ||
4372 | + return count; | ||
4373 | + | ||
4374 | + return -EACCES; | ||
4375 | +} | ||
4376 | + | ||
4377 | +static const struct file_operations cpu_fops = { | ||
4378 | + .owner = THIS_MODULE, | ||
4379 | + .open = cpu_seq_open, | ||
4380 | + .read = seq_read, | ||
4381 | + .write = cpu_write, | ||
4382 | + .llseek = seq_lseek, | ||
4383 | + .release = seq_release, | ||
4384 | +}; | ||
4385 | + | ||
4386 | +static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg, | ||
4387 | + unsigned file, struct dentry *dentry) | ||
4388 | +{ | ||
4389 | + struct cpu_private *priv = NULL; | ||
4390 | + | ||
4391 | + /* Already intialized */ | ||
4392 | + if (file == CPU_INDEX_BIT) | ||
4393 | + if (per_cpu(cpu_arr[type].init, cpu)) | ||
4394 | + return 0; | ||
4395 | + | ||
4396 | + priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
4397 | + if (priv == NULL) | ||
4398 | + return -ENOMEM; | ||
4399 | + | ||
4400 | + priv->cpu = cpu; | ||
4401 | + priv->type = type; | ||
4402 | + priv->reg = reg; | ||
4403 | + priv->file = file; | ||
4404 | + mutex_lock(&cpu_debug_lock); | ||
4405 | + per_cpu(priv_arr[type], cpu) = priv; | ||
4406 | + per_cpu(cpu_priv_count, cpu)++; | ||
4407 | + mutex_unlock(&cpu_debug_lock); | ||
4408 | + | ||
4409 | + if (file) | ||
4410 | + debugfs_create_file(cpu_file[file].name, S_IRUGO, | ||
4411 | + dentry, (void *)priv, &cpu_fops); | ||
4412 | + else { | ||
4413 | + debugfs_create_file(cpu_base[type].name, S_IRUGO, | ||
4414 | + per_cpu(cpu_arr[type].dentry, cpu), | ||
4415 | + (void *)priv, &cpu_fops); | ||
4416 | + mutex_lock(&cpu_debug_lock); | ||
4417 | + per_cpu(cpu_arr[type].init, cpu) = 1; | ||
4418 | + mutex_unlock(&cpu_debug_lock); | ||
4419 | + } | ||
4420 | + | ||
4421 | + return 0; | ||
4422 | +} | ||
4423 | + | ||
4424 | +static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg, | ||
4425 | + struct dentry *dentry) | ||
4426 | +{ | ||
4427 | + unsigned file; | ||
4428 | + int err = 0; | ||
4429 | + | ||
4430 | + for (file = 0; file < ARRAY_SIZE(cpu_file); file++) { | ||
4431 | + err = cpu_create_file(cpu, type, reg, file, dentry); | ||
4432 | + if (err) | ||
4433 | + return err; | ||
4434 | + } | ||
4435 | + | ||
4436 | + return err; | ||
4437 | +} | ||
4438 | + | ||
4439 | +static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry) | ||
4440 | +{ | ||
4441 | + struct dentry *cpu_dentry = NULL; | ||
4442 | + unsigned reg, reg_min, reg_max; | ||
4443 | + int i, err = 0; | ||
4444 | + char reg_dir[12]; | ||
4445 | + u32 low, high; | ||
4446 | + | ||
4447 | + for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) { | ||
4448 | + if (!get_cpu_range(cpu, ®_min, ®_max, i, | ||
4449 | + cpu_base[type].flag)) | ||
4450 | + continue; | ||
4451 | + | ||
4452 | + for (reg = reg_min; reg <= reg_max; reg++) { | ||
4453 | + if (rdmsr_safe_on_cpu(cpu, reg, &low, &high)) | ||
4454 | + continue; | ||
4455 | + | ||
4456 | + sprintf(reg_dir, "0x%x", reg); | ||
4457 | + cpu_dentry = debugfs_create_dir(reg_dir, dentry); | ||
4458 | + err = cpu_init_regfiles(cpu, type, reg, cpu_dentry); | ||
4459 | + if (err) | ||
4460 | + return err; | ||
4461 | + } | ||
4462 | + } | ||
4463 | + | ||
4464 | + return err; | ||
4465 | +} | ||
4466 | + | ||
4467 | +static int cpu_init_allreg(unsigned cpu, struct dentry *dentry) | ||
4468 | +{ | ||
4469 | + struct dentry *cpu_dentry = NULL; | ||
4470 | + unsigned type; | ||
4471 | + int err = 0; | ||
4472 | + | ||
4473 | + for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) { | ||
4474 | + if (!is_typeflag_valid(cpu, cpu_base[type].flag)) | ||
4475 | + continue; | ||
4476 | + cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry); | ||
4477 | + per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry; | ||
4478 | + | ||
4479 | + if (type < CPU_TSS_BIT) | ||
4480 | + err = cpu_init_msr(cpu, type, cpu_dentry); | ||
4481 | + else | ||
4482 | + err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT, | ||
4483 | + cpu_dentry); | ||
4484 | + if (err) | ||
4485 | + return err; | ||
4486 | + } | ||
4487 | + | ||
4488 | + return err; | ||
4489 | +} | ||
4490 | + | ||
4491 | +static int cpu_init_cpu(void) | ||
4492 | +{ | ||
4493 | + struct dentry *cpu_dentry = NULL; | ||
4494 | + struct cpuinfo_x86 *cpui; | ||
4495 | + char cpu_dir[12]; | ||
4496 | + unsigned cpu; | ||
4497 | + int err = 0; | ||
4498 | + | ||
4499 | + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { | ||
4500 | + cpui = &cpu_data(cpu); | ||
4501 | + if (!cpu_has(cpui, X86_FEATURE_MSR)) | ||
4502 | + continue; | ||
4503 | + | ||
4504 | + sprintf(cpu_dir, "cpu%d", cpu); | ||
4505 | + cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir); | ||
4506 | + err = cpu_init_allreg(cpu, cpu_dentry); | ||
4507 | + | ||
4508 | + pr_info("cpu%d(%d) debug files %d\n", | ||
4509 | + cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu)); | ||
4510 | + if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) { | ||
4511 | + pr_err("Register files count %d exceeds limit %d\n", | ||
4512 | + per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES); | ||
4513 | + per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES; | ||
4514 | + err = -ENFILE; | ||
4515 | + } | ||
4516 | + if (err) | ||
4517 | + return err; | ||
4518 | + } | ||
4519 | + | ||
4520 | + return err; | ||
4521 | +} | ||
4522 | + | ||
4523 | +static int __init cpu_debug_init(void) | ||
4524 | +{ | ||
4525 | + cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir); | ||
4526 | + | ||
4527 | + return cpu_init_cpu(); | ||
4528 | +} | ||
4529 | + | ||
4530 | +static void __exit cpu_debug_exit(void) | ||
4531 | +{ | ||
4532 | + int i, cpu; | ||
4533 | + | ||
4534 | + if (cpu_debugfs_dir) | ||
4535 | + debugfs_remove_recursive(cpu_debugfs_dir); | ||
4536 | + | ||
4537 | + for (cpu = 0; cpu < nr_cpu_ids; cpu++) | ||
4538 | + for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++) | ||
4539 | + kfree(per_cpu(priv_arr[i], cpu)); | ||
4540 | +} | ||
4541 | + | ||
4542 | +module_init(cpu_debug_init); | ||
4543 | +module_exit(cpu_debug_exit); | ||
4544 | + | ||
4545 | +MODULE_AUTHOR("Jaswinder Singh Rajput"); | ||
4546 | +MODULE_DESCRIPTION("CPU Debug module"); | ||
4547 | +MODULE_LICENSE("GPL"); | ||
4548 | diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | ||
4549 | index ab1cd30..3f12dab 100644 | ||
4550 | --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | ||
4551 | +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | ||
4552 | @@ -1351,7 +1351,6 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) | ||
4553 | |||
4554 | kfree(data->powernow_table); | ||
4555 | kfree(data); | ||
4556 | - per_cpu(powernow_data, pol->cpu) = NULL; | ||
4557 | |||
4558 | return 0; | ||
4559 | } | ||
4560 | @@ -1371,7 +1370,7 @@ static unsigned int powernowk8_get(unsigned int cpu) | ||
4561 | int err; | ||
4562 | |||
4563 | if (!data) | ||
4564 | - return 0; | ||
4565 | + return -EINVAL; | ||
4566 | |||
4567 | smp_call_function_single(cpu, query_values_on_cpu, &err, true); | ||
4568 | if (err) | ||
4569 | diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c | ||
4570 | index a2a03cf..40e1835 100644 | ||
4571 | --- a/arch/x86/kernel/cpu/intel.c | ||
4572 | +++ b/arch/x86/kernel/cpu/intel.c | ||
4573 | @@ -70,6 +70,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
4574 | if (c->x86_power & (1 << 8)) { | ||
4575 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
4576 | set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); | ||
4577 | + set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | ||
4578 | sched_clock_stable = 1; | ||
4579 | } | ||
4580 | |||
4581 | diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
4582 | index 8178d03..804c40e 100644 | ||
4583 | --- a/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
4584 | +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
4585 | @@ -94,7 +94,7 @@ static const struct _cache_table __cpuinitconst cache_table[] = | ||
4586 | { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */ | ||
4587 | { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */ | ||
4588 | { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */ | ||
4589 | - { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */ | ||
4590 | + { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */ | ||
4591 | { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | ||
4592 | { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */ | ||
4593 | { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */ | ||
4594 | @@ -102,9 +102,6 @@ static const struct _cache_table __cpuinitconst cache_table[] = | ||
4595 | { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */ | ||
4596 | { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */ | ||
4597 | { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */ | ||
4598 | - { 0xea, LVL_3, 12288 }, /* 24-way set assoc, 64 byte line size */ | ||
4599 | - { 0xeb, LVL_3, 18432 }, /* 24-way set assoc, 64 byte line size */ | ||
4600 | - { 0xec, LVL_3, 24576 }, /* 24-way set assoc, 64 byte line size */ | ||
4601 | { 0x00, 0, 0} | ||
4602 | }; | ||
4603 | |||
4604 | diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c | ||
4605 | index 0f16a2b..721a77c 100644 | ||
4606 | --- a/arch/x86/kernel/cpu/mcheck/mce.c | ||
4607 | +++ b/arch/x86/kernel/cpu/mcheck/mce.c | ||
4608 | @@ -1374,14 +1374,13 @@ static void mce_init_timer(void) | ||
4609 | struct timer_list *t = &__get_cpu_var(mce_timer); | ||
4610 | int *n = &__get_cpu_var(mce_next_interval); | ||
4611 | |||
4612 | - setup_timer(t, mcheck_timer, smp_processor_id()); | ||
4613 | - | ||
4614 | if (mce_ignore_ce) | ||
4615 | return; | ||
4616 | |||
4617 | *n = check_interval * HZ; | ||
4618 | if (!*n) | ||
4619 | return; | ||
4620 | + setup_timer(t, mcheck_timer, smp_processor_id()); | ||
4621 | t->expires = round_jiffies(jiffies + *n); | ||
4622 | add_timer_on(t, smp_processor_id()); | ||
4623 | } | ||
4624 | @@ -1992,11 +1991,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
4625 | break; | ||
4626 | case CPU_DOWN_FAILED: | ||
4627 | case CPU_DOWN_FAILED_FROZEN: | ||
4628 | - if (!mce_ignore_ce && check_interval) { | ||
4629 | - t->expires = round_jiffies(jiffies + | ||
4630 | + t->expires = round_jiffies(jiffies + | ||
4631 | __get_cpu_var(mce_next_interval)); | ||
4632 | - add_timer_on(t, cpu); | ||
4633 | - } | ||
4634 | + add_timer_on(t, cpu); | ||
4635 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
4636 | break; | ||
4637 | case CPU_POST_DEAD: | ||
4638 | diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c | ||
4639 | index 687638e..b3a1dba 100644 | ||
4640 | --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c | ||
4641 | +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | ||
4642 | @@ -49,8 +49,6 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state); | ||
4643 | |||
4644 | static atomic_t therm_throt_en = ATOMIC_INIT(0); | ||
4645 | |||
4646 | -static u32 lvtthmr_init __read_mostly; | ||
4647 | - | ||
4648 | #ifdef CONFIG_SYSFS | ||
4649 | #define define_therm_throt_sysdev_one_ro(_name) \ | ||
4650 | static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) | ||
4651 | @@ -256,27 +254,14 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) | ||
4652 | ack_APIC_irq(); | ||
4653 | } | ||
4654 | |||
4655 | -void __init mcheck_intel_therm_init(void) | ||
4656 | -{ | ||
4657 | - /* | ||
4658 | - * This function is only called on boot CPU. Save the init thermal | ||
4659 | - * LVT value on BSP and use that value to restore APs' thermal LVT | ||
4660 | - * entry BIOS programmed later | ||
4661 | - */ | ||
4662 | - if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) && | ||
4663 | - cpu_has(&boot_cpu_data, X86_FEATURE_ACC)) | ||
4664 | - lvtthmr_init = apic_read(APIC_LVTTHMR); | ||
4665 | -} | ||
4666 | - | ||
4667 | void intel_init_thermal(struct cpuinfo_x86 *c) | ||
4668 | { | ||
4669 | unsigned int cpu = smp_processor_id(); | ||
4670 | int tm2 = 0; | ||
4671 | u32 l, h; | ||
4672 | |||
4673 | - /* Thermal monitoring depends on APIC, ACPI and clock modulation */ | ||
4674 | - if (!cpu_has_apic || !cpu_has(c, X86_FEATURE_ACPI) || | ||
4675 | - !cpu_has(c, X86_FEATURE_ACC)) | ||
4676 | + /* Thermal monitoring depends on ACPI and clock modulation*/ | ||
4677 | + if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC)) | ||
4678 | return; | ||
4679 | |||
4680 | /* | ||
4681 | @@ -285,20 +270,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c) | ||
4682 | * since it might be delivered via SMI already: | ||
4683 | */ | ||
4684 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
4685 | - | ||
4686 | - /* | ||
4687 | - * The initial value of thermal LVT entries on all APs always reads | ||
4688 | - * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI | ||
4689 | - * sequence to them and LVT registers are reset to 0s except for | ||
4690 | - * the mask bits which are set to 1s when APs receive INIT IPI. | ||
4691 | - * Always restore the value that BIOS has programmed on AP based on | ||
4692 | - * BSP's info we saved since BIOS is always setting the same value | ||
4693 | - * for all threads/cores | ||
4694 | - */ | ||
4695 | - apic_write(APIC_LVTTHMR, lvtthmr_init); | ||
4696 | - | ||
4697 | - h = lvtthmr_init; | ||
4698 | - | ||
4699 | + h = apic_read(APIC_LVTTHMR); | ||
4700 | if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) { | ||
4701 | printk(KERN_DEBUG | ||
4702 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | ||
4703 | diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c | ||
4704 | index 898df97..fab786f 100644 | ||
4705 | --- a/arch/x86/kernel/cpu/perfctr-watchdog.c | ||
4706 | +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | ||
4707 | @@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void) | ||
4708 | switch (boot_cpu_data.x86_vendor) { | ||
4709 | case X86_VENDOR_AMD: | ||
4710 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | ||
4711 | - boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) | ||
4712 | + boot_cpu_data.x86 != 16) | ||
4713 | return; | ||
4714 | wd_ops = &k7_wd_ops; | ||
4715 | break; | ||
4716 | diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c | ||
4717 | index 0c91110..6a52d4b 100644 | ||
4718 | --- a/arch/x86/kernel/cpuid.c | ||
4719 | +++ b/arch/x86/kernel/cpuid.c | ||
4720 | @@ -192,8 +192,7 @@ static int __init cpuid_init(void) | ||
4721 | int i, err = 0; | ||
4722 | i = 0; | ||
4723 | |||
4724 | - if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS, | ||
4725 | - "cpu/cpuid", &cpuid_fops)) { | ||
4726 | + if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) { | ||
4727 | printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n", | ||
4728 | CPUID_MAJOR); | ||
4729 | err = -EBUSY; | ||
4730 | @@ -222,7 +221,7 @@ out_class: | ||
4731 | } | ||
4732 | class_destroy(cpuid_class); | ||
4733 | out_chrdev: | ||
4734 | - __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); | ||
4735 | + unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | ||
4736 | out: | ||
4737 | return err; | ||
4738 | } | ||
4739 | @@ -234,7 +233,7 @@ static void __exit cpuid_exit(void) | ||
4740 | for_each_online_cpu(cpu) | ||
4741 | cpuid_device_destroy(cpu); | ||
4742 | class_destroy(cpuid_class); | ||
4743 | - __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); | ||
4744 | + unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); | ||
4745 | unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); | ||
4746 | } | ||
4747 | |||
4748 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S | ||
4749 | index b5c061f..5e9b0e5 100644 | ||
4750 | --- a/arch/x86/kernel/entry_64.S | ||
4751 | +++ b/arch/x86/kernel/entry_64.S | ||
4752 | @@ -1008,6 +1008,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \ | ||
4753 | call_function_interrupt smp_call_function_interrupt | ||
4754 | apicinterrupt RESCHEDULE_VECTOR \ | ||
4755 | reschedule_interrupt smp_reschedule_interrupt | ||
4756 | +apicinterrupt PULL_TIMERS_VECTOR \ | ||
4757 | + pull_timers_interrupt smp_pull_timers_interrupt | ||
4758 | #endif | ||
4759 | |||
4760 | apicinterrupt ERROR_APIC_VECTOR \ | ||
4761 | diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c | ||
4762 | new file mode 100644 | ||
4763 | index 0000000..e07ee30 | ||
4764 | --- /dev/null | ||
4765 | +++ b/arch/x86/kernel/ft_event.c | ||
4766 | @@ -0,0 +1,112 @@ | ||
4767 | +#include <linux/types.h> | ||
4768 | + | ||
4769 | +#include <litmus/feather_trace.h> | ||
4770 | + | ||
4771 | +#ifdef __ARCH_HAS_FEATHER_TRACE | ||
4772 | +/* the feather trace management functions assume | ||
4773 | + * exclusive access to the event table | ||
4774 | + */ | ||
4775 | + | ||
4776 | + | ||
4777 | +#define BYTE_JUMP 0xeb | ||
4778 | +#define BYTE_JUMP_LEN 0x02 | ||
4779 | + | ||
4780 | +/* for each event, there is an entry in the event table */ | ||
4781 | +struct trace_event { | ||
4782 | + long id; | ||
4783 | + long count; | ||
4784 | + long start_addr; | ||
4785 | + long end_addr; | ||
4786 | +}; | ||
4787 | + | ||
4788 | +extern struct trace_event __start___event_table[]; | ||
4789 | +extern struct trace_event __stop___event_table[]; | ||
4790 | + | ||
4791 | +int ft_enable_event(unsigned long id) | ||
4792 | +{ | ||
4793 | + struct trace_event* te = __start___event_table; | ||
4794 | + int count = 0; | ||
4795 | + char* delta; | ||
4796 | + unsigned char* instr; | ||
4797 | + | ||
4798 | + while (te < __stop___event_table) { | ||
4799 | + if (te->id == id && ++te->count == 1) { | ||
4800 | + instr = (unsigned char*) te->start_addr; | ||
4801 | + /* make sure we don't clobber something wrong */ | ||
4802 | + if (*instr == BYTE_JUMP) { | ||
4803 | + delta = (((unsigned char*) te->start_addr) + 1); | ||
4804 | + *delta = 0; | ||
4805 | + } | ||
4806 | + } | ||
4807 | + if (te->id == id) | ||
4808 | + count++; | ||
4809 | + te++; | ||
4810 | + } | ||
4811 | + | ||
4812 | + printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count); | ||
4813 | + return count; | ||
4814 | +} | ||
4815 | + | ||
4816 | +int ft_disable_event(unsigned long id) | ||
4817 | +{ | ||
4818 | + struct trace_event* te = __start___event_table; | ||
4819 | + int count = 0; | ||
4820 | + char* delta; | ||
4821 | + unsigned char* instr; | ||
4822 | + | ||
4823 | + while (te < __stop___event_table) { | ||
4824 | + if (te->id == id && --te->count == 0) { | ||
4825 | + instr = (unsigned char*) te->start_addr; | ||
4826 | + if (*instr == BYTE_JUMP) { | ||
4827 | + delta = (((unsigned char*) te->start_addr) + 1); | ||
4828 | + *delta = te->end_addr - te->start_addr - | ||
4829 | + BYTE_JUMP_LEN; | ||
4830 | + } | ||
4831 | + } | ||
4832 | + if (te->id == id) | ||
4833 | + count++; | ||
4834 | + te++; | ||
4835 | + } | ||
4836 | + | ||
4837 | + printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count); | ||
4838 | + return count; | ||
4839 | +} | ||
4840 | + | ||
4841 | +int ft_disable_all_events(void) | ||
4842 | +{ | ||
4843 | + struct trace_event* te = __start___event_table; | ||
4844 | + int count = 0; | ||
4845 | + char* delta; | ||
4846 | + unsigned char* instr; | ||
4847 | + | ||
4848 | + while (te < __stop___event_table) { | ||
4849 | + if (te->count) { | ||
4850 | + instr = (unsigned char*) te->start_addr; | ||
4851 | + if (*instr == BYTE_JUMP) { | ||
4852 | + delta = (((unsigned char*) te->start_addr) | ||
4853 | + + 1); | ||
4854 | + *delta = te->end_addr - te->start_addr - | ||
4855 | + BYTE_JUMP_LEN; | ||
4856 | + te->count = 0; | ||
4857 | + count++; | ||
4858 | + } | ||
4859 | + } | ||
4860 | + te++; | ||
4861 | + } | ||
4862 | + return count; | ||
4863 | +} | ||
4864 | + | ||
4865 | +int ft_is_event_enabled(unsigned long id) | ||
4866 | +{ | ||
4867 | + struct trace_event* te = __start___event_table; | ||
4868 | + | ||
4869 | + while (te < __stop___event_table) { | ||
4870 | + if (te->id == id) | ||
4871 | + return te->count; | ||
4872 | + te++; | ||
4873 | + } | ||
4874 | + return 0; | ||
4875 | +} | ||
4876 | + | ||
4877 | +#endif | ||
4878 | + | ||
4879 | diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c | ||
4880 | index 5877873..dedc2bd 100644 | ||
4881 | --- a/arch/x86/kernel/hpet.c | ||
4882 | +++ b/arch/x86/kernel/hpet.c | ||
4883 | @@ -33,8 +33,6 @@ | ||
4884 | * HPET address is set in acpi/boot.c, when an ACPI entry exists | ||
4885 | */ | ||
4886 | unsigned long hpet_address; | ||
4887 | -u8 hpet_msi_disable; | ||
4888 | - | ||
4889 | #ifdef CONFIG_PCI_MSI | ||
4890 | static unsigned long hpet_num_timers; | ||
4891 | #endif | ||
4892 | @@ -586,9 +584,6 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) | ||
4893 | unsigned int num_timers_used = 0; | ||
4894 | int i; | ||
4895 | |||
4896 | - if (hpet_msi_disable) | ||
4897 | - return; | ||
4898 | - | ||
4899 | id = hpet_readl(HPET_ID); | ||
4900 | |||
4901 | num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); | ||
4902 | @@ -916,9 +911,6 @@ static __init int hpet_late_init(void) | ||
4903 | hpet_reserve_platform_timers(hpet_readl(HPET_ID)); | ||
4904 | hpet_print_config(); | ||
4905 | |||
4906 | - if (hpet_msi_disable) | ||
4907 | - return 0; | ||
4908 | - | ||
4909 | for_each_online_cpu(cpu) { | ||
4910 | hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); | ||
4911 | } | ||
4912 | diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c | ||
4913 | index 40f3077..f5fa64c 100644 | ||
4914 | --- a/arch/x86/kernel/irqinit.c | ||
4915 | +++ b/arch/x86/kernel/irqinit.c | ||
4916 | @@ -172,6 +172,9 @@ static void __init smp_intr_init(void) | ||
4917 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
4918 | call_function_single_interrupt); | ||
4919 | |||
4920 | + /* IPI for hrtimer pulling on remote cpus */ | ||
4921 | + alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt); | ||
4922 | + | ||
4923 | /* Low priority IPI to cleanup after moving an irq */ | ||
4924 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | ||
4925 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
4926 | diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c | ||
4927 | index 5eaeb5e..6a3cefc 100644 | ||
4928 | --- a/arch/x86/kernel/msr.c | ||
4929 | +++ b/arch/x86/kernel/msr.c | ||
4930 | @@ -251,7 +251,7 @@ static int __init msr_init(void) | ||
4931 | int i, err = 0; | ||
4932 | i = 0; | ||
4933 | |||
4934 | - if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { | ||
4935 | + if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) { | ||
4936 | printk(KERN_ERR "msr: unable to get major %d for msr\n", | ||
4937 | MSR_MAJOR); | ||
4938 | err = -EBUSY; | ||
4939 | @@ -279,7 +279,7 @@ out_class: | ||
4940 | msr_device_destroy(i); | ||
4941 | class_destroy(msr_class); | ||
4942 | out_chrdev: | ||
4943 | - __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); | ||
4944 | + unregister_chrdev(MSR_MAJOR, "cpu/msr"); | ||
4945 | out: | ||
4946 | return err; | ||
4947 | } | ||
4948 | @@ -290,7 +290,7 @@ static void __exit msr_exit(void) | ||
4949 | for_each_online_cpu(cpu) | ||
4950 | msr_device_destroy(cpu); | ||
4951 | class_destroy(msr_class); | ||
4952 | - __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); | ||
4953 | + unregister_chrdev(MSR_MAJOR, "cpu/msr"); | ||
4954 | unregister_hotcpu_notifier(&msr_class_cpu_notifier); | ||
4955 | } | ||
4956 | |||
4957 | diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c | ||
4958 | index e6ec8a2..971a3be 100644 | ||
4959 | --- a/arch/x86/kernel/pci-calgary_64.c | ||
4960 | +++ b/arch/x86/kernel/pci-calgary_64.c | ||
4961 | @@ -318,15 +318,13 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) | ||
4962 | |||
4963 | pdev = to_pci_dev(dev); | ||
4964 | |||
4965 | - /* search up the device tree for an iommu */ | ||
4966 | pbus = pdev->bus; | ||
4967 | - do { | ||
4968 | - tbl = pci_iommu(pbus); | ||
4969 | - if (tbl && tbl->it_busno == pbus->number) | ||
4970 | - break; | ||
4971 | - tbl = NULL; | ||
4972 | + | ||
4973 | + /* is the device behind a bridge? Look for the root bus */ | ||
4974 | + while (pbus->parent) | ||
4975 | pbus = pbus->parent; | ||
4976 | - } while (pbus); | ||
4977 | + | ||
4978 | + tbl = pci_iommu(pbus); | ||
4979 | |||
4980 | BUG_ON(tbl && (tbl->it_busno != pbus->number)); | ||
4981 | |||
4982 | diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c | ||
4983 | index 6ac3931..a6e804d 100644 | ||
4984 | --- a/arch/x86/kernel/pci-dma.c | ||
4985 | +++ b/arch/x86/kernel/pci-dma.c | ||
4986 | @@ -214,7 +214,7 @@ static __init int iommu_setup(char *p) | ||
4987 | if (!strncmp(p, "allowdac", 8)) | ||
4988 | forbid_dac = 0; | ||
4989 | if (!strncmp(p, "nodac", 5)) | ||
4990 | - forbid_dac = 1; | ||
4991 | + forbid_dac = -1; | ||
4992 | if (!strncmp(p, "usedac", 6)) { | ||
4993 | forbid_dac = -1; | ||
4994 | return 1; | ||
4995 | diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c | ||
4996 | index fcc0b5c..a7f1b64 100644 | ||
4997 | --- a/arch/x86/kernel/pci-gart_64.c | ||
4998 | +++ b/arch/x86/kernel/pci-gart_64.c | ||
4999 | @@ -856,7 +856,7 @@ void __init gart_parse_options(char *p) | ||
5000 | #endif | ||
5001 | if (isdigit(*p) && get_option(&p, &arg)) | ||
5002 | iommu_size = arg; | ||
5003 | - if (!strncmp(p, "fullflush", 9)) | ||
5004 | + if (!strncmp(p, "fullflush", 8)) | ||
5005 | iommu_fullflush = 1; | ||
5006 | if (!strncmp(p, "nofullflush", 11)) | ||
5007 | iommu_fullflush = 0; | ||
5008 | diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c | ||
5009 | index f010ab4..5284cd2 100644 | ||
5010 | --- a/arch/x86/kernel/process.c | ||
5011 | +++ b/arch/x86/kernel/process.c | ||
5012 | @@ -91,6 +91,18 @@ void flush_thread(void) | ||
5013 | { | ||
5014 | struct task_struct *tsk = current; | ||
5015 | |||
5016 | +#ifdef CONFIG_X86_64 | ||
5017 | + if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) { | ||
5018 | + clear_tsk_thread_flag(tsk, TIF_ABI_PENDING); | ||
5019 | + if (test_tsk_thread_flag(tsk, TIF_IA32)) { | ||
5020 | + clear_tsk_thread_flag(tsk, TIF_IA32); | ||
5021 | + } else { | ||
5022 | + set_tsk_thread_flag(tsk, TIF_IA32); | ||
5023 | + current_thread_info()->status |= TS_COMPAT; | ||
5024 | + } | ||
5025 | + } | ||
5026 | +#endif | ||
5027 | + | ||
5028 | clear_tsk_thread_flag(tsk, TIF_DEBUG); | ||
5029 | |||
5030 | tsk->thread.debugreg0 = 0; | ||
5031 | diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c | ||
5032 | index f9ce04f..eb62cbc 100644 | ||
5033 | --- a/arch/x86/kernel/process_64.c | ||
5034 | +++ b/arch/x86/kernel/process_64.c | ||
5035 | @@ -540,17 +540,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
5036 | return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); | ||
5037 | } | ||
5038 | |||
5039 | -void set_personality_ia32(void) | ||
5040 | -{ | ||
5041 | - /* inherit personality from parent */ | ||
5042 | - | ||
5043 | - /* Make sure to be in 32bit mode */ | ||
5044 | - set_thread_flag(TIF_IA32); | ||
5045 | - | ||
5046 | - /* Prepare the first "return" to user space */ | ||
5047 | - current_thread_info()->status |= TS_COMPAT; | ||
5048 | -} | ||
5049 | - | ||
5050 | unsigned long get_wchan(struct task_struct *p) | ||
5051 | { | ||
5052 | unsigned long stack; | ||
5053 | diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c | ||
5054 | index c06acdd..7b058a2 100644 | ||
5055 | --- a/arch/x86/kernel/ptrace.c | ||
5056 | +++ b/arch/x86/kernel/ptrace.c | ||
5057 | @@ -408,14 +408,14 @@ static int genregs_get(struct task_struct *target, | ||
5058 | { | ||
5059 | if (kbuf) { | ||
5060 | unsigned long *k = kbuf; | ||
5061 | - while (count >= sizeof(*k)) { | ||
5062 | + while (count > 0) { | ||
5063 | *k++ = getreg(target, pos); | ||
5064 | count -= sizeof(*k); | ||
5065 | pos += sizeof(*k); | ||
5066 | } | ||
5067 | } else { | ||
5068 | unsigned long __user *u = ubuf; | ||
5069 | - while (count >= sizeof(*u)) { | ||
5070 | + while (count > 0) { | ||
5071 | if (__put_user(getreg(target, pos), u++)) | ||
5072 | return -EFAULT; | ||
5073 | count -= sizeof(*u); | ||
5074 | @@ -434,14 +434,14 @@ static int genregs_set(struct task_struct *target, | ||
5075 | int ret = 0; | ||
5076 | if (kbuf) { | ||
5077 | const unsigned long *k = kbuf; | ||
5078 | - while (count >= sizeof(*k) && !ret) { | ||
5079 | + while (count > 0 && !ret) { | ||
5080 | ret = putreg(target, pos, *k++); | ||
5081 | count -= sizeof(*k); | ||
5082 | pos += sizeof(*k); | ||
5083 | } | ||
5084 | } else { | ||
5085 | const unsigned long __user *u = ubuf; | ||
5086 | - while (count >= sizeof(*u) && !ret) { | ||
5087 | + while (count > 0 && !ret) { | ||
5088 | unsigned long word; | ||
5089 | ret = __get_user(word, u++); | ||
5090 | if (ret) | ||
5091 | @@ -1219,14 +1219,14 @@ static int genregs32_get(struct task_struct *target, | ||
5092 | { | ||
5093 | if (kbuf) { | ||
5094 | compat_ulong_t *k = kbuf; | ||
5095 | - while (count >= sizeof(*k)) { | ||
5096 | + while (count > 0) { | ||
5097 | getreg32(target, pos, k++); | ||
5098 | count -= sizeof(*k); | ||
5099 | pos += sizeof(*k); | ||
5100 | } | ||
5101 | } else { | ||
5102 | compat_ulong_t __user *u = ubuf; | ||
5103 | - while (count >= sizeof(*u)) { | ||
5104 | + while (count > 0) { | ||
5105 | compat_ulong_t word; | ||
5106 | getreg32(target, pos, &word); | ||
5107 | if (__put_user(word, u++)) | ||
5108 | @@ -1247,14 +1247,14 @@ static int genregs32_set(struct task_struct *target, | ||
5109 | int ret = 0; | ||
5110 | if (kbuf) { | ||
5111 | const compat_ulong_t *k = kbuf; | ||
5112 | - while (count >= sizeof(*k) && !ret) { | ||
5113 | + while (count > 0 && !ret) { | ||
5114 | ret = putreg32(target, pos, *k++); | ||
5115 | count -= sizeof(*k); | ||
5116 | pos += sizeof(*k); | ||
5117 | } | ||
5118 | } else { | ||
5119 | const compat_ulong_t __user *u = ubuf; | ||
5120 | - while (count >= sizeof(*u) && !ret) { | ||
5121 | + while (count > 0 && !ret) { | ||
5122 | compat_ulong_t word; | ||
5123 | ret = __get_user(word, u++); | ||
5124 | if (ret) | ||
5125 | diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c | ||
5126 | index 0040164..6c3b2c6 100644 | ||
5127 | --- a/arch/x86/kernel/quirks.c | ||
5128 | +++ b/arch/x86/kernel/quirks.c | ||
5129 | @@ -491,19 +491,6 @@ void force_hpet_resume(void) | ||
5130 | break; | ||
5131 | } | ||
5132 | } | ||
5133 | - | ||
5134 | -/* | ||
5135 | - * HPET MSI on some boards (ATI SB700/SB800) has side effect on | ||
5136 | - * floppy DMA. Disable HPET MSI on such platforms. | ||
5137 | - */ | ||
5138 | -static void force_disable_hpet_msi(struct pci_dev *unused) | ||
5139 | -{ | ||
5140 | - hpet_msi_disable = 1; | ||
5141 | -} | ||
5142 | - | ||
5143 | -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, | ||
5144 | - force_disable_hpet_msi); | ||
5145 | - | ||
5146 | #endif | ||
5147 | |||
5148 | #if defined(CONFIG_PCI) && defined(CONFIG_NUMA) | ||
5149 | diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c | ||
5150 | index bff34d6..f930787 100644 | ||
5151 | --- a/arch/x86/kernel/reboot.c | ||
5152 | +++ b/arch/x86/kernel/reboot.c | ||
5153 | @@ -203,15 +203,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | ||
5154 | DMI_MATCH(DMI_BOARD_NAME, "0T656F"), | ||
5155 | }, | ||
5156 | }, | ||
5157 | - { /* Handle problems with rebooting on Dell OptiPlex 760 with 0G919G*/ | ||
5158 | - .callback = set_bios_reboot, | ||
5159 | - .ident = "Dell OptiPlex 760", | ||
5160 | - .matches = { | ||
5161 | - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
5162 | - DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 760"), | ||
5163 | - DMI_MATCH(DMI_BOARD_NAME, "0G919G"), | ||
5164 | - }, | ||
5165 | - }, | ||
5166 | { /* Handle problems with rebooting on Dell 2400's */ | ||
5167 | .callback = set_bios_reboot, | ||
5168 | .ident = "Dell PowerEdge 2400", | ||
5169 | @@ -268,14 +259,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | ||
5170 | DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"), | ||
5171 | }, | ||
5172 | }, | ||
5173 | - { /* Handle problems with rebooting on ASUS P4S800 */ | ||
5174 | - .callback = set_bios_reboot, | ||
5175 | - .ident = "ASUS P4S800", | ||
5176 | - .matches = { | ||
5177 | - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
5178 | - DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | ||
5179 | - }, | ||
5180 | - }, | ||
5181 | { } | ||
5182 | }; | ||
5183 | |||
5184 | diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c | ||
5185 | index 8425f7e..2a34f9c 100644 | ||
5186 | --- a/arch/x86/kernel/setup.c | ||
5187 | +++ b/arch/x86/kernel/setup.c | ||
5188 | @@ -109,7 +109,6 @@ | ||
5189 | #ifdef CONFIG_X86_64 | ||
5190 | #include <asm/numa_64.h> | ||
5191 | #endif | ||
5192 | -#include <asm/mce.h> | ||
5193 | |||
5194 | /* | ||
5195 | * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries. | ||
5196 | @@ -667,27 +666,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { | ||
5197 | DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix/MSC"), | ||
5198 | }, | ||
5199 | }, | ||
5200 | + { | ||
5201 | /* | ||
5202 | - * AMI BIOS with low memory corruption was found on Intel DG45ID and | ||
5203 | - * DG45FC boards. | ||
5204 | - * It has a different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | ||
5205 | + * AMI BIOS with low memory corruption was found on Intel DG45ID board. | ||
5206 | + * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will | ||
5207 | * match only DMI_BOARD_NAME and see if there is more bad products | ||
5208 | * with this vendor. | ||
5209 | */ | ||
5210 | - { | ||
5211 | .callback = dmi_low_memory_corruption, | ||
5212 | .ident = "AMI BIOS", | ||
5213 | .matches = { | ||
5214 | DMI_MATCH(DMI_BOARD_NAME, "DG45ID"), | ||
5215 | }, | ||
5216 | }, | ||
5217 | - { | ||
5218 | - .callback = dmi_low_memory_corruption, | ||
5219 | - .ident = "AMI BIOS", | ||
5220 | - .matches = { | ||
5221 | - DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), | ||
5222 | - }, | ||
5223 | - }, | ||
5224 | #endif | ||
5225 | {} | ||
5226 | }; | ||
5227 | @@ -1040,8 +1031,6 @@ void __init setup_arch(char **cmdline_p) | ||
5228 | #endif | ||
5229 | #endif | ||
5230 | x86_init.oem.banner(); | ||
5231 | - | ||
5232 | - mcheck_intel_therm_init(); | ||
5233 | } | ||
5234 | |||
5235 | #ifdef CONFIG_X86_32 | ||
5236 | diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c | ||
5237 | index ec1de97..a93528b 100644 | ||
5238 | --- a/arch/x86/kernel/smp.c | ||
5239 | +++ b/arch/x86/kernel/smp.c | ||
5240 | @@ -22,6 +22,9 @@ | ||
5241 | #include <linux/interrupt.h> | ||
5242 | #include <linux/cpu.h> | ||
5243 | |||
5244 | +#include <litmus/litmus.h> | ||
5245 | +#include <litmus/trace.h> | ||
5246 | + | ||
5247 | #include <asm/mtrr.h> | ||
5248 | #include <asm/tlbflush.h> | ||
5249 | #include <asm/mmu_context.h> | ||
5250 | @@ -117,6 +120,7 @@ static void native_smp_send_reschedule(int cpu) | ||
5251 | WARN_ON(1); | ||
5252 | return; | ||
5253 | } | ||
5254 | + TS_SEND_RESCHED_START(cpu); | ||
5255 | apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); | ||
5256 | } | ||
5257 | |||
5258 | @@ -146,6 +150,16 @@ void native_send_call_func_ipi(const struct cpumask *mask) | ||
5259 | free_cpumask_var(allbutself); | ||
5260 | } | ||
5261 | |||
5262 | +/* trigger timers on remote cpu */ | ||
5263 | +void smp_send_pull_timers(int cpu) | ||
5264 | +{ | ||
5265 | + if (unlikely(cpu_is_offline(cpu))) { | ||
5266 | + WARN_ON(1); | ||
5267 | + return; | ||
5268 | + } | ||
5269 | + apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR); | ||
5270 | +} | ||
5271 | + | ||
5272 | /* | ||
5273 | * this function calls the 'stop' function on all other CPUs in the system. | ||
5274 | */ | ||
5275 | @@ -197,7 +211,12 @@ static void native_smp_send_stop(void) | ||
5276 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
5277 | { | ||
5278 | ack_APIC_irq(); | ||
5279 | + /* LITMUS^RT needs this interrupt to proper reschedule | ||
5280 | + * on this cpu | ||
5281 | + */ | ||
5282 | + set_tsk_need_resched(current); | ||
5283 | inc_irq_stat(irq_resched_count); | ||
5284 | + TS_SEND_RESCHED_END; | ||
5285 | /* | ||
5286 | * KVM uses this interrupt to force a cpu out of guest mode | ||
5287 | */ | ||
5288 | @@ -221,6 +240,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) | ||
5289 | irq_exit(); | ||
5290 | } | ||
5291 | |||
5292 | +extern void hrtimer_pull(void); | ||
5293 | + | ||
5294 | +void smp_pull_timers_interrupt(struct pt_regs *regs) | ||
5295 | +{ | ||
5296 | + ack_APIC_irq(); | ||
5297 | + TRACE("pull timer interrupt\n"); | ||
5298 | + hrtimer_pull(); | ||
5299 | +} | ||
5300 | + | ||
5301 | struct smp_ops smp_ops = { | ||
5302 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
5303 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
5304 | diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c | ||
5305 | index dee1ff7..1884a8d 100644 | ||
5306 | --- a/arch/x86/kernel/sys_i386_32.c | ||
5307 | +++ b/arch/x86/kernel/sys_i386_32.c | ||
5308 | @@ -24,6 +24,31 @@ | ||
5309 | |||
5310 | #include <asm/syscalls.h> | ||
5311 | |||
5312 | +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, | ||
5313 | + unsigned long prot, unsigned long flags, | ||
5314 | + unsigned long fd, unsigned long pgoff) | ||
5315 | +{ | ||
5316 | + int error = -EBADF; | ||
5317 | + struct file *file = NULL; | ||
5318 | + struct mm_struct *mm = current->mm; | ||
5319 | + | ||
5320 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
5321 | + if (!(flags & MAP_ANONYMOUS)) { | ||
5322 | + file = fget(fd); | ||
5323 | + if (!file) | ||
5324 | + goto out; | ||
5325 | + } | ||
5326 | + | ||
5327 | + down_write(&mm->mmap_sem); | ||
5328 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
5329 | + up_write(&mm->mmap_sem); | ||
5330 | + | ||
5331 | + if (file) | ||
5332 | + fput(file); | ||
5333 | +out: | ||
5334 | + return error; | ||
5335 | +} | ||
5336 | + | ||
5337 | /* | ||
5338 | * Perform the select(nd, in, out, ex, tv) and mmap() system | ||
5339 | * calls. Linux/i386 didn't use to be able to handle more than | ||
5340 | @@ -52,7 +77,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) | ||
5341 | if (a.offset & ~PAGE_MASK) | ||
5342 | goto out; | ||
5343 | |||
5344 | - err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, | ||
5345 | + err = sys_mmap2(a.addr, a.len, a.prot, a.flags, | ||
5346 | a.fd, a.offset >> PAGE_SHIFT); | ||
5347 | out: | ||
5348 | return err; | ||
5349 | diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c | ||
5350 | index 8aa2057..45e00eb 100644 | ||
5351 | --- a/arch/x86/kernel/sys_x86_64.c | ||
5352 | +++ b/arch/x86/kernel/sys_x86_64.c | ||
5353 | @@ -23,11 +23,26 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | ||
5354 | unsigned long, fd, unsigned long, off) | ||
5355 | { | ||
5356 | long error; | ||
5357 | + struct file *file; | ||
5358 | + | ||
5359 | error = -EINVAL; | ||
5360 | if (off & ~PAGE_MASK) | ||
5361 | goto out; | ||
5362 | |||
5363 | - error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); | ||
5364 | + error = -EBADF; | ||
5365 | + file = NULL; | ||
5366 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
5367 | + if (!(flags & MAP_ANONYMOUS)) { | ||
5368 | + file = fget(fd); | ||
5369 | + if (!file) | ||
5370 | + goto out; | ||
5371 | + } | ||
5372 | + down_write(&current->mm->mmap_sem); | ||
5373 | + error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); | ||
5374 | + up_write(&current->mm->mmap_sem); | ||
5375 | + | ||
5376 | + if (file) | ||
5377 | + fput(file); | ||
5378 | out: | ||
5379 | return error; | ||
5380 | } | ||
5381 | diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S | ||
5382 | index 76d70a4..17fcb3a 100644 | ||
5383 | --- a/arch/x86/kernel/syscall_table_32.S | ||
5384 | +++ b/arch/x86/kernel/syscall_table_32.S | ||
5385 | @@ -191,7 +191,7 @@ ENTRY(sys_call_table) | ||
5386 | .long sys_ni_syscall /* reserved for streams2 */ | ||
5387 | .long ptregs_vfork /* 190 */ | ||
5388 | .long sys_getrlimit | ||
5389 | - .long sys_mmap_pgoff | ||
5390 | + .long sys_mmap2 | ||
5391 | .long sys_truncate64 | ||
5392 | .long sys_ftruncate64 | ||
5393 | .long sys_stat64 /* 195 */ | ||
5394 | @@ -336,3 +336,17 @@ ENTRY(sys_call_table) | ||
5395 | .long sys_pwritev | ||
5396 | .long sys_rt_tgsigqueueinfo /* 335 */ | ||
5397 | .long sys_perf_event_open | ||
5398 | + .long sys_set_rt_task_param /* LITMUS^RT 337 */ | ||
5399 | + .long sys_get_rt_task_param | ||
5400 | + .long sys_complete_job | ||
5401 | + .long sys_od_open | ||
5402 | + .long sys_od_close | ||
5403 | + .long sys_fmlp_down | ||
5404 | + .long sys_fmlp_up | ||
5405 | + .long sys_srp_down | ||
5406 | + .long sys_srp_up | ||
5407 | + .long sys_query_job_no | ||
5408 | + .long sys_wait_for_job_release | ||
5409 | + .long sys_wait_for_ts_release | ||
5410 | + .long sys_release_ts | ||
5411 | + .long sys_null_call | ||
5412 | diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c | ||
5413 | index 364d015..1740c85 100644 | ||
5414 | --- a/arch/x86/kernel/tlb_uv.c | ||
5415 | +++ b/arch/x86/kernel/tlb_uv.c | ||
5416 | @@ -817,8 +817,10 @@ static int __init uv_init_blade(int blade) | ||
5417 | */ | ||
5418 | apicid = blade_to_first_apicid(blade); | ||
5419 | pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG); | ||
5420 | - uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | ||
5421 | + if ((pa & 0xff) != UV_BAU_MESSAGE) { | ||
5422 | + uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | ||
5423 | ((apicid << 32) | UV_BAU_MESSAGE)); | ||
5424 | + } | ||
5425 | return 0; | ||
5426 | } | ||
5427 | |||
5428 | diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c | ||
5429 | index 597683a..cd982f4 100644 | ||
5430 | --- a/arch/x86/kernel/tsc.c | ||
5431 | +++ b/arch/x86/kernel/tsc.c | ||
5432 | @@ -763,7 +763,6 @@ void mark_tsc_unstable(char *reason) | ||
5433 | { | ||
5434 | if (!tsc_unstable) { | ||
5435 | tsc_unstable = 1; | ||
5436 | - sched_clock_stable = 0; | ||
5437 | printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); | ||
5438 | /* Change only the rating, when not registered */ | ||
5439 | if (clocksource_tsc.mult) | ||
5440 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c | ||
5441 | index e02dbb6..1be5cd6 100644 | ||
5442 | --- a/arch/x86/kvm/emulate.c | ||
5443 | +++ b/arch/x86/kvm/emulate.c | ||
5444 | @@ -613,9 +613,6 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, | ||
5445 | { | ||
5446 | int rc = 0; | ||
5447 | |||
5448 | - /* x86 instructions are limited to 15 bytes. */ | ||
5449 | - if (eip + size - ctxt->decode.eip_orig > 15) | ||
5450 | - return X86EMUL_UNHANDLEABLE; | ||
5451 | eip += ctxt->cs_base; | ||
5452 | while (size--) { | ||
5453 | rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); | ||
5454 | @@ -874,7 +871,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | ||
5455 | /* Shadow copy of register state. Committed on successful emulation. */ | ||
5456 | |||
5457 | memset(c, 0, sizeof(struct decode_cache)); | ||
5458 | - c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu); | ||
5459 | + c->eip = kvm_rip_read(ctxt->vcpu); | ||
5460 | ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); | ||
5461 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); | ||
5462 | |||
5463 | diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c | ||
5464 | index 88ad162..144e7f6 100644 | ||
5465 | --- a/arch/x86/kvm/i8254.c | ||
5466 | +++ b/arch/x86/kvm/i8254.c | ||
5467 | @@ -465,9 +465,6 @@ static int pit_ioport_read(struct kvm_io_device *this, | ||
5468 | return -EOPNOTSUPP; | ||
5469 | |||
5470 | addr &= KVM_PIT_CHANNEL_MASK; | ||
5471 | - if (addr == 3) | ||
5472 | - return 0; | ||
5473 | - | ||
5474 | s = &pit_state->channels[addr]; | ||
5475 | |||
5476 | mutex_lock(&pit_state->lock); | ||
5477 | diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c | ||
5478 | index 8dfeaaa..23c2176 100644 | ||
5479 | --- a/arch/x86/kvm/lapic.c | ||
5480 | +++ b/arch/x86/kvm/lapic.c | ||
5481 | @@ -374,12 +374,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | ||
5482 | if (unlikely(!apic_enabled(apic))) | ||
5483 | break; | ||
5484 | |||
5485 | - if (trig_mode) { | ||
5486 | - apic_debug("level trig mode for vector %d", vector); | ||
5487 | - apic_set_vector(vector, apic->regs + APIC_TMR); | ||
5488 | - } else | ||
5489 | - apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
5490 | - | ||
5491 | result = !apic_test_and_set_irr(vector, apic); | ||
5492 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, | ||
5493 | trig_mode, vector, !result); | ||
5494 | @@ -390,6 +384,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | ||
5495 | break; | ||
5496 | } | ||
5497 | |||
5498 | + if (trig_mode) { | ||
5499 | + apic_debug("level trig mode for vector %d", vector); | ||
5500 | + apic_set_vector(vector, apic->regs + APIC_TMR); | ||
5501 | + } else | ||
5502 | + apic_clear_vector(vector, apic->regs + APIC_TMR); | ||
5503 | kvm_vcpu_kick(vcpu); | ||
5504 | break; | ||
5505 | |||
5506 | @@ -1157,7 +1156,6 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu) | ||
5507 | hrtimer_cancel(&apic->lapic_timer.timer); | ||
5508 | update_divide_count(apic); | ||
5509 | start_apic_timer(apic); | ||
5510 | - apic->irr_pending = true; | ||
5511 | } | ||
5512 | |||
5513 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) | ||
5514 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c | ||
5515 | index 3a01519..818b92a 100644 | ||
5516 | --- a/arch/x86/kvm/mmu.c | ||
5517 | +++ b/arch/x86/kvm/mmu.c | ||
5518 | @@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn) | ||
5519 | |||
5520 | addr = gfn_to_hva(kvm, gfn); | ||
5521 | if (kvm_is_error_hva(addr)) | ||
5522 | - return PT_PAGE_TABLE_LEVEL; | ||
5523 | + return page_size; | ||
5524 | |||
5525 | down_read(&current->mm->mmap_sem); | ||
5526 | vma = find_vma(current->mm, addr); | ||
5527 | @@ -515,9 +515,11 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) | ||
5528 | if (host_level == PT_PAGE_TABLE_LEVEL) | ||
5529 | return host_level; | ||
5530 | |||
5531 | - for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) | ||
5532 | + for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) { | ||
5533 | + | ||
5534 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) | ||
5535 | break; | ||
5536 | + } | ||
5537 | |||
5538 | return level - 1; | ||
5539 | } | ||
5540 | diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h | ||
5541 | index 5fa3325..72558f8 100644 | ||
5542 | --- a/arch/x86/kvm/paging_tmpl.h | ||
5543 | +++ b/arch/x86/kvm/paging_tmpl.h | ||
5544 | @@ -150,9 +150,7 @@ walk: | ||
5545 | walker->table_gfn[walker->level - 1] = table_gfn; | ||
5546 | walker->pte_gpa[walker->level - 1] = pte_gpa; | ||
5547 | |||
5548 | - if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) | ||
5549 | - goto not_present; | ||
5550 | - | ||
5551 | + kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); | ||
5552 | trace_kvm_mmu_paging_element(pte, walker->level); | ||
5553 | |||
5554 | if (!is_present_gpte(pte)) | ||
5555 | @@ -457,6 +455,8 @@ out_unlock: | ||
5556 | static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | ||
5557 | { | ||
5558 | struct kvm_shadow_walk_iterator iterator; | ||
5559 | + pt_element_t gpte; | ||
5560 | + gpa_t pte_gpa = -1; | ||
5561 | int level; | ||
5562 | u64 *sptep; | ||
5563 | int need_flush = 0; | ||
5564 | @@ -471,6 +471,10 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | ||
5565 | if (level == PT_PAGE_TABLE_LEVEL || | ||
5566 | ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) || | ||
5567 | ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) { | ||
5568 | + struct kvm_mmu_page *sp = page_header(__pa(sptep)); | ||
5569 | + | ||
5570 | + pte_gpa = (sp->gfn << PAGE_SHIFT); | ||
5571 | + pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); | ||
5572 | |||
5573 | if (is_shadow_present_pte(*sptep)) { | ||
5574 | rmap_remove(vcpu->kvm, sptep); | ||
5575 | @@ -489,6 +493,18 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | ||
5576 | if (need_flush) | ||
5577 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
5578 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
5579 | + | ||
5580 | + if (pte_gpa == -1) | ||
5581 | + return; | ||
5582 | + if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, | ||
5583 | + sizeof(pt_element_t))) | ||
5584 | + return; | ||
5585 | + if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) { | ||
5586 | + if (mmu_topup_memory_caches(vcpu)) | ||
5587 | + return; | ||
5588 | + kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte, | ||
5589 | + sizeof(pt_element_t), 0); | ||
5590 | + } | ||
5591 | } | ||
5592 | |||
5593 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | ||
5594 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c | ||
5595 | index e78d990..ae07d26 100644 | ||
5596 | --- a/arch/x86/kvm/x86.c | ||
5597 | +++ b/arch/x86/kvm/x86.c | ||
5598 | @@ -484,19 +484,16 @@ static inline u32 bit(int bitno) | ||
5599 | * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST. | ||
5600 | * | ||
5601 | * This list is modified at module load time to reflect the | ||
5602 | - * capabilities of the host cpu. This capabilities test skips MSRs that are | ||
5603 | - * kvm-specific. Those are put in the beginning of the list. | ||
5604 | + * capabilities of the host cpu. | ||
5605 | */ | ||
5606 | - | ||
5607 | -#define KVM_SAVE_MSRS_BEGIN 2 | ||
5608 | static u32 msrs_to_save[] = { | ||
5609 | - MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, | ||
5610 | MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, | ||
5611 | MSR_K6_STAR, | ||
5612 | #ifdef CONFIG_X86_64 | ||
5613 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, | ||
5614 | #endif | ||
5615 | - MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA | ||
5616 | + MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, | ||
5617 | + MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA | ||
5618 | }; | ||
5619 | |||
5620 | static unsigned num_msrs_to_save; | ||
5621 | @@ -583,7 +580,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) | ||
5622 | { | ||
5623 | static int version; | ||
5624 | struct pvclock_wall_clock wc; | ||
5625 | - struct timespec boot; | ||
5626 | + struct timespec now, sys, boot; | ||
5627 | |||
5628 | if (!wall_clock) | ||
5629 | return; | ||
5630 | @@ -598,7 +595,9 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) | ||
5631 | * wall clock specified here. guest system time equals host | ||
5632 | * system time for us, thus we must fill in host boot time here. | ||
5633 | */ | ||
5634 | - getboottime(&boot); | ||
5635 | + now = current_kernel_time(); | ||
5636 | + ktime_get_ts(&sys); | ||
5637 | + boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys)); | ||
5638 | |||
5639 | wc.sec = boot.tv_sec; | ||
5640 | wc.nsec = boot.tv_nsec; | ||
5641 | @@ -673,14 +672,12 @@ static void kvm_write_guest_time(struct kvm_vcpu *v) | ||
5642 | local_irq_save(flags); | ||
5643 | kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); | ||
5644 | ktime_get_ts(&ts); | ||
5645 | - monotonic_to_bootbased(&ts); | ||
5646 | local_irq_restore(flags); | ||
5647 | |||
5648 | /* With all the info we got, fill in the values */ | ||
5649 | |||
5650 | vcpu->hv_clock.system_time = ts.tv_nsec + | ||
5651 | - (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset; | ||
5652 | - | ||
5653 | + (NSEC_PER_SEC * (u64)ts.tv_sec); | ||
5654 | /* | ||
5655 | * The interface expects us to write an even number signaling that the | ||
5656 | * update is finished. Since the guest won't see the intermediate | ||
5657 | @@ -1227,7 +1224,6 @@ int kvm_dev_ioctl_check_extension(long ext) | ||
5658 | case KVM_CAP_PIT2: | ||
5659 | case KVM_CAP_PIT_STATE2: | ||
5660 | case KVM_CAP_SET_IDENTITY_MAP_ADDR: | ||
5661 | - case KVM_CAP_ADJUST_CLOCK: | ||
5662 | r = 1; | ||
5663 | break; | ||
5664 | case KVM_CAP_COALESCED_MMIO: | ||
5665 | @@ -2425,44 +2421,6 @@ long kvm_arch_vm_ioctl(struct file *filp, | ||
5666 | r = 0; | ||
5667 | break; | ||
5668 | } | ||
5669 | - case KVM_SET_CLOCK: { | ||
5670 | - struct timespec now; | ||
5671 | - struct kvm_clock_data user_ns; | ||
5672 | - u64 now_ns; | ||
5673 | - s64 delta; | ||
5674 | - | ||
5675 | - r = -EFAULT; | ||
5676 | - if (copy_from_user(&user_ns, argp, sizeof(user_ns))) | ||
5677 | - goto out; | ||
5678 | - | ||
5679 | - r = -EINVAL; | ||
5680 | - if (user_ns.flags) | ||
5681 | - goto out; | ||
5682 | - | ||
5683 | - r = 0; | ||
5684 | - ktime_get_ts(&now); | ||
5685 | - now_ns = timespec_to_ns(&now); | ||
5686 | - delta = user_ns.clock - now_ns; | ||
5687 | - kvm->arch.kvmclock_offset = delta; | ||
5688 | - break; | ||
5689 | - } | ||
5690 | - case KVM_GET_CLOCK: { | ||
5691 | - struct timespec now; | ||
5692 | - struct kvm_clock_data user_ns; | ||
5693 | - u64 now_ns; | ||
5694 | - | ||
5695 | - ktime_get_ts(&now); | ||
5696 | - now_ns = timespec_to_ns(&now); | ||
5697 | - user_ns.clock = kvm->arch.kvmclock_offset + now_ns; | ||
5698 | - user_ns.flags = 0; | ||
5699 | - | ||
5700 | - r = -EFAULT; | ||
5701 | - if (copy_to_user(argp, &user_ns, sizeof(user_ns))) | ||
5702 | - goto out; | ||
5703 | - r = 0; | ||
5704 | - break; | ||
5705 | - } | ||
5706 | - | ||
5707 | default: | ||
5708 | ; | ||
5709 | } | ||
5710 | @@ -2475,8 +2433,7 @@ static void kvm_init_msr_list(void) | ||
5711 | u32 dummy[2]; | ||
5712 | unsigned i, j; | ||
5713 | |||
5714 | - /* skip the first msrs in the list. KVM-specific */ | ||
5715 | - for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { | ||
5716 | + for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) { | ||
5717 | if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) | ||
5718 | continue; | ||
5719 | if (j < i) | ||
5720 | @@ -4805,13 +4762,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | ||
5721 | GFP_KERNEL); | ||
5722 | if (!vcpu->arch.mce_banks) { | ||
5723 | r = -ENOMEM; | ||
5724 | - goto fail_free_lapic; | ||
5725 | + goto fail_mmu_destroy; | ||
5726 | } | ||
5727 | vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; | ||
5728 | |||
5729 | return 0; | ||
5730 | -fail_free_lapic: | ||
5731 | - kvm_free_lapic(vcpu); | ||
5732 | + | ||
5733 | fail_mmu_destroy: | ||
5734 | kvm_mmu_destroy(vcpu); | ||
5735 | fail_free_pio_data: | ||
5736 | @@ -4822,7 +4778,6 @@ fail: | ||
5737 | |||
5738 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
5739 | { | ||
5740 | - kfree(vcpu->arch.mce_banks); | ||
5741 | kvm_free_lapic(vcpu); | ||
5742 | down_read(&vcpu->kvm->slots_lock); | ||
5743 | kvm_mmu_destroy(vcpu); | ||
5744 | diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile | ||
5745 | index c2b6f39..85f5db9 100644 | ||
5746 | --- a/arch/x86/lib/Makefile | ||
5747 | +++ b/arch/x86/lib/Makefile | ||
5748 | @@ -2,14 +2,14 @@ | ||
5749 | # Makefile for x86 specific library files. | ||
5750 | # | ||
5751 | |||
5752 | -obj-$(CONFIG_SMP) += msr-smp.o | ||
5753 | +obj-$(CONFIG_SMP) := msr.o | ||
5754 | |||
5755 | lib-y := delay.o | ||
5756 | lib-y += thunk_$(BITS).o | ||
5757 | lib-y += usercopy_$(BITS).o getuser.o putuser.o | ||
5758 | lib-y += memcpy_$(BITS).o | ||
5759 | |||
5760 | -obj-y += msr.o msr-reg.o msr-reg-export.o | ||
5761 | +obj-y += msr-reg.o msr-reg-export.o | ||
5762 | |||
5763 | ifeq ($(CONFIG_X86_32),y) | ||
5764 | obj-y += atomic64_32.o | ||
5765 | diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c | ||
5766 | index 8f8eebd..33a1e3c 100644 | ||
5767 | --- a/arch/x86/lib/msr.c | ||
5768 | +++ b/arch/x86/lib/msr.c | ||
5769 | @@ -1,23 +1,226 @@ | ||
5770 | #include <linux/module.h> | ||
5771 | #include <linux/preempt.h> | ||
5772 | +#include <linux/smp.h> | ||
5773 | #include <asm/msr.h> | ||
5774 | |||
5775 | -struct msr *msrs_alloc(void) | ||
5776 | +struct msr_info { | ||
5777 | + u32 msr_no; | ||
5778 | + struct msr reg; | ||
5779 | + struct msr *msrs; | ||
5780 | + int off; | ||
5781 | + int err; | ||
5782 | +}; | ||
5783 | + | ||
5784 | +static void __rdmsr_on_cpu(void *info) | ||
5785 | +{ | ||
5786 | + struct msr_info *rv = info; | ||
5787 | + struct msr *reg; | ||
5788 | + int this_cpu = raw_smp_processor_id(); | ||
5789 | + | ||
5790 | + if (rv->msrs) | ||
5791 | + reg = &rv->msrs[this_cpu - rv->off]; | ||
5792 | + else | ||
5793 | + reg = &rv->reg; | ||
5794 | + | ||
5795 | + rdmsr(rv->msr_no, reg->l, reg->h); | ||
5796 | +} | ||
5797 | + | ||
5798 | +static void __wrmsr_on_cpu(void *info) | ||
5799 | +{ | ||
5800 | + struct msr_info *rv = info; | ||
5801 | + struct msr *reg; | ||
5802 | + int this_cpu = raw_smp_processor_id(); | ||
5803 | + | ||
5804 | + if (rv->msrs) | ||
5805 | + reg = &rv->msrs[this_cpu - rv->off]; | ||
5806 | + else | ||
5807 | + reg = &rv->reg; | ||
5808 | + | ||
5809 | + wrmsr(rv->msr_no, reg->l, reg->h); | ||
5810 | +} | ||
5811 | + | ||
5812 | +int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
5813 | +{ | ||
5814 | + int err; | ||
5815 | + struct msr_info rv; | ||
5816 | + | ||
5817 | + memset(&rv, 0, sizeof(rv)); | ||
5818 | + | ||
5819 | + rv.msr_no = msr_no; | ||
5820 | + err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); | ||
5821 | + *l = rv.reg.l; | ||
5822 | + *h = rv.reg.h; | ||
5823 | + | ||
5824 | + return err; | ||
5825 | +} | ||
5826 | +EXPORT_SYMBOL(rdmsr_on_cpu); | ||
5827 | + | ||
5828 | +int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | ||
5829 | +{ | ||
5830 | + int err; | ||
5831 | + struct msr_info rv; | ||
5832 | + | ||
5833 | + memset(&rv, 0, sizeof(rv)); | ||
5834 | + | ||
5835 | + rv.msr_no = msr_no; | ||
5836 | + rv.reg.l = l; | ||
5837 | + rv.reg.h = h; | ||
5838 | + err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); | ||
5839 | + | ||
5840 | + return err; | ||
5841 | +} | ||
5842 | +EXPORT_SYMBOL(wrmsr_on_cpu); | ||
5843 | + | ||
5844 | +/* rdmsr on a bunch of CPUs | ||
5845 | + * | ||
5846 | + * @mask: which CPUs | ||
5847 | + * @msr_no: which MSR | ||
5848 | + * @msrs: array of MSR values | ||
5849 | + * | ||
5850 | + */ | ||
5851 | +void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | ||
5852 | +{ | ||
5853 | + struct msr_info rv; | ||
5854 | + int this_cpu; | ||
5855 | + | ||
5856 | + memset(&rv, 0, sizeof(rv)); | ||
5857 | + | ||
5858 | + rv.off = cpumask_first(mask); | ||
5859 | + rv.msrs = msrs; | ||
5860 | + rv.msr_no = msr_no; | ||
5861 | + | ||
5862 | + this_cpu = get_cpu(); | ||
5863 | + | ||
5864 | + if (cpumask_test_cpu(this_cpu, mask)) | ||
5865 | + __rdmsr_on_cpu(&rv); | ||
5866 | + | ||
5867 | + smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); | ||
5868 | + put_cpu(); | ||
5869 | +} | ||
5870 | +EXPORT_SYMBOL(rdmsr_on_cpus); | ||
5871 | + | ||
5872 | +/* | ||
5873 | + * wrmsr on a bunch of CPUs | ||
5874 | + * | ||
5875 | + * @mask: which CPUs | ||
5876 | + * @msr_no: which MSR | ||
5877 | + * @msrs: array of MSR values | ||
5878 | + * | ||
5879 | + */ | ||
5880 | +void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) | ||
5881 | +{ | ||
5882 | + struct msr_info rv; | ||
5883 | + int this_cpu; | ||
5884 | + | ||
5885 | + memset(&rv, 0, sizeof(rv)); | ||
5886 | + | ||
5887 | + rv.off = cpumask_first(mask); | ||
5888 | + rv.msrs = msrs; | ||
5889 | + rv.msr_no = msr_no; | ||
5890 | + | ||
5891 | + this_cpu = get_cpu(); | ||
5892 | + | ||
5893 | + if (cpumask_test_cpu(this_cpu, mask)) | ||
5894 | + __wrmsr_on_cpu(&rv); | ||
5895 | + | ||
5896 | + smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); | ||
5897 | + put_cpu(); | ||
5898 | +} | ||
5899 | +EXPORT_SYMBOL(wrmsr_on_cpus); | ||
5900 | + | ||
5901 | +/* These "safe" variants are slower and should be used when the target MSR | ||
5902 | + may not actually exist. */ | ||
5903 | +static void __rdmsr_safe_on_cpu(void *info) | ||
5904 | +{ | ||
5905 | + struct msr_info *rv = info; | ||
5906 | + | ||
5907 | + rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h); | ||
5908 | +} | ||
5909 | + | ||
5910 | +static void __wrmsr_safe_on_cpu(void *info) | ||
5911 | +{ | ||
5912 | + struct msr_info *rv = info; | ||
5913 | + | ||
5914 | + rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h); | ||
5915 | +} | ||
5916 | + | ||
5917 | +int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) | ||
5918 | { | ||
5919 | - struct msr *msrs = NULL; | ||
5920 | + int err; | ||
5921 | + struct msr_info rv; | ||
5922 | |||
5923 | - msrs = alloc_percpu(struct msr); | ||
5924 | - if (!msrs) { | ||
5925 | - pr_warning("%s: error allocating msrs\n", __func__); | ||
5926 | - return NULL; | ||
5927 | - } | ||
5928 | + memset(&rv, 0, sizeof(rv)); | ||
5929 | |||
5930 | - return msrs; | ||
5931 | + rv.msr_no = msr_no; | ||
5932 | + err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); | ||
5933 | + *l = rv.reg.l; | ||
5934 | + *h = rv.reg.h; | ||
5935 | + | ||
5936 | + return err ? err : rv.err; | ||
5937 | } | ||
5938 | -EXPORT_SYMBOL(msrs_alloc); | ||
5939 | +EXPORT_SYMBOL(rdmsr_safe_on_cpu); | ||
5940 | |||
5941 | -void msrs_free(struct msr *msrs) | ||
5942 | +int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) | ||
5943 | { | ||
5944 | - free_percpu(msrs); | ||
5945 | + int err; | ||
5946 | + struct msr_info rv; | ||
5947 | + | ||
5948 | + memset(&rv, 0, sizeof(rv)); | ||
5949 | + | ||
5950 | + rv.msr_no = msr_no; | ||
5951 | + rv.reg.l = l; | ||
5952 | + rv.reg.h = h; | ||
5953 | + err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); | ||
5954 | + | ||
5955 | + return err ? err : rv.err; | ||
5956 | +} | ||
5957 | +EXPORT_SYMBOL(wrmsr_safe_on_cpu); | ||
5958 | + | ||
5959 | +/* | ||
5960 | + * These variants are significantly slower, but allows control over | ||
5961 | + * the entire 32-bit GPR set. | ||
5962 | + */ | ||
5963 | +struct msr_regs_info { | ||
5964 | + u32 *regs; | ||
5965 | + int err; | ||
5966 | +}; | ||
5967 | + | ||
5968 | +static void __rdmsr_safe_regs_on_cpu(void *info) | ||
5969 | +{ | ||
5970 | + struct msr_regs_info *rv = info; | ||
5971 | + | ||
5972 | + rv->err = rdmsr_safe_regs(rv->regs); | ||
5973 | +} | ||
5974 | + | ||
5975 | +static void __wrmsr_safe_regs_on_cpu(void *info) | ||
5976 | +{ | ||
5977 | + struct msr_regs_info *rv = info; | ||
5978 | + | ||
5979 | + rv->err = wrmsr_safe_regs(rv->regs); | ||
5980 | +} | ||
5981 | + | ||
5982 | +int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) | ||
5983 | +{ | ||
5984 | + int err; | ||
5985 | + struct msr_regs_info rv; | ||
5986 | + | ||
5987 | + rv.regs = regs; | ||
5988 | + rv.err = -EIO; | ||
5989 | + err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1); | ||
5990 | + | ||
5991 | + return err ? err : rv.err; | ||
5992 | +} | ||
5993 | +EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu); | ||
5994 | + | ||
5995 | +int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs) | ||
5996 | +{ | ||
5997 | + int err; | ||
5998 | + struct msr_regs_info rv; | ||
5999 | + | ||
6000 | + rv.regs = regs; | ||
6001 | + rv.err = -EIO; | ||
6002 | + err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1); | ||
6003 | + | ||
6004 | + return err ? err : rv.err; | ||
6005 | } | ||
6006 | -EXPORT_SYMBOL(msrs_free); | ||
6007 | +EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu); | ||
6008 | diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c | ||
6009 | index 3871c60..dbb5381 100644 | ||
6010 | --- a/arch/x86/mm/srat_64.c | ||
6011 | +++ b/arch/x86/mm/srat_64.c | ||
6012 | @@ -229,11 +229,9 @@ update_nodes_add(int node, unsigned long start, unsigned long end) | ||
6013 | printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n"); | ||
6014 | } | ||
6015 | |||
6016 | - if (changed) { | ||
6017 | - node_set(node, cpu_nodes_parsed); | ||
6018 | + if (changed) | ||
6019 | printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", | ||
6020 | nd->start, nd->end); | ||
6021 | - } | ||
6022 | } | ||
6023 | |||
6024 | /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ | ||
6025 | diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c | ||
6026 | index 3347f69..cb88b1a 100644 | ||
6027 | --- a/arch/x86/oprofile/nmi_int.c | ||
6028 | +++ b/arch/x86/oprofile/nmi_int.c | ||
6029 | @@ -222,7 +222,7 @@ static void nmi_cpu_switch(void *dummy) | ||
6030 | |||
6031 | /* move to next set */ | ||
6032 | si += model->num_counters; | ||
6033 | - if ((si >= model->num_virt_counters) || (counter_config[si].count == 0)) | ||
6034 | + if ((si > model->num_virt_counters) || (counter_config[si].count == 0)) | ||
6035 | per_cpu(switch_index, cpu) = 0; | ||
6036 | else | ||
6037 | per_cpu(switch_index, cpu) = si; | ||
6038 | @@ -598,7 +598,6 @@ static int __init ppro_init(char **cpu_type) | ||
6039 | case 15: case 23: | ||
6040 | *cpu_type = "i386/core_2"; | ||
6041 | break; | ||
6042 | - case 0x2e: | ||
6043 | case 26: | ||
6044 | spec = &op_arch_perfmon_spec; | ||
6045 | *cpu_type = "i386/core_i7"; | ||
6046 | diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c | ||
6047 | index a672f12..b22d13b 100644 | ||
6048 | --- a/arch/x86/pci/i386.c | ||
6049 | +++ b/arch/x86/pci/i386.c | ||
6050 | @@ -282,15 +282,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
6051 | return -EINVAL; | ||
6052 | |||
6053 | prot = pgprot_val(vma->vm_page_prot); | ||
6054 | - | ||
6055 | - /* | ||
6056 | - * Return error if pat is not enabled and write_combine is requested. | ||
6057 | - * Caller can followup with UC MINUS request and add a WC mtrr if there | ||
6058 | - * is a free mtrr slot. | ||
6059 | - */ | ||
6060 | - if (!pat_enabled && write_combine) | ||
6061 | - return -EINVAL; | ||
6062 | - | ||
6063 | if (pat_enabled && write_combine) | ||
6064 | prot |= _PAGE_CACHE_WC; | ||
6065 | else if (pat_enabled || boot_cpu_data.x86 > 3) | ||
6066 | diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c | ||
6067 | index 79f9738..dfbf70e 100644 | ||
6068 | --- a/arch/x86/xen/enlighten.c | ||
6069 | +++ b/arch/x86/xen/enlighten.c | ||
6070 | @@ -138,23 +138,24 @@ static void xen_vcpu_setup(int cpu) | ||
6071 | */ | ||
6072 | void xen_vcpu_restore(void) | ||
6073 | { | ||
6074 | - int cpu; | ||
6075 | + if (have_vcpu_info_placement) { | ||
6076 | + int cpu; | ||
6077 | |||
6078 | - for_each_online_cpu(cpu) { | ||
6079 | - bool other_cpu = (cpu != smp_processor_id()); | ||
6080 | + for_each_online_cpu(cpu) { | ||
6081 | + bool other_cpu = (cpu != smp_processor_id()); | ||
6082 | |||
6083 | - if (other_cpu && | ||
6084 | - HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) | ||
6085 | - BUG(); | ||
6086 | - | ||
6087 | - xen_setup_runstate_info(cpu); | ||
6088 | + if (other_cpu && | ||
6089 | + HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL)) | ||
6090 | + BUG(); | ||
6091 | |||
6092 | - if (have_vcpu_info_placement) | ||
6093 | xen_vcpu_setup(cpu); | ||
6094 | |||
6095 | - if (other_cpu && | ||
6096 | - HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) | ||
6097 | - BUG(); | ||
6098 | + if (other_cpu && | ||
6099 | + HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL)) | ||
6100 | + BUG(); | ||
6101 | + } | ||
6102 | + | ||
6103 | + BUG_ON(!have_vcpu_info_placement); | ||
6104 | } | ||
6105 | } | ||
6106 | |||
6107 | @@ -1181,8 +1182,6 @@ asmlinkage void __init xen_start_kernel(void) | ||
6108 | |||
6109 | xen_raw_console_write("about to get started...\n"); | ||
6110 | |||
6111 | - xen_setup_runstate_info(0); | ||
6112 | - | ||
6113 | /* Start the world */ | ||
6114 | #ifdef CONFIG_X86_32 | ||
6115 | i386_start_kernel(); | ||
6116 | diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c | ||
6117 | index bf4cd6b..3bf7b1d 100644 | ||
6118 | --- a/arch/x86/xen/mmu.c | ||
6119 | +++ b/arch/x86/xen/mmu.c | ||
6120 | @@ -185,7 +185,7 @@ static inline unsigned p2m_index(unsigned long pfn) | ||
6121 | } | ||
6122 | |||
6123 | /* Build the parallel p2m_top_mfn structures */ | ||
6124 | -void xen_build_mfn_list_list(void) | ||
6125 | +static void __init xen_build_mfn_list_list(void) | ||
6126 | { | ||
6127 | unsigned pfn, idx; | ||
6128 | |||
6129 | diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c | ||
6130 | index 360f8d8..fe03eee 100644 | ||
6131 | --- a/arch/x86/xen/smp.c | ||
6132 | +++ b/arch/x86/xen/smp.c | ||
6133 | @@ -295,7 +295,6 @@ static int __cpuinit xen_cpu_up(unsigned int cpu) | ||
6134 | (unsigned long)task_stack_page(idle) - | ||
6135 | KERNEL_STACK_OFFSET + THREAD_SIZE; | ||
6136 | #endif | ||
6137 | - xen_setup_runstate_info(cpu); | ||
6138 | xen_setup_timer(cpu); | ||
6139 | xen_init_lock_cpu(cpu); | ||
6140 | |||
6141 | diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c | ||
6142 | index 987267f..95be7b4 100644 | ||
6143 | --- a/arch/x86/xen/suspend.c | ||
6144 | +++ b/arch/x86/xen/suspend.c | ||
6145 | @@ -1,5 +1,4 @@ | ||
6146 | #include <linux/types.h> | ||
6147 | -#include <linux/clockchips.h> | ||
6148 | |||
6149 | #include <xen/interface/xen.h> | ||
6150 | #include <xen/grant_table.h> | ||
6151 | @@ -28,8 +27,6 @@ void xen_pre_suspend(void) | ||
6152 | |||
6153 | void xen_post_suspend(int suspend_cancelled) | ||
6154 | { | ||
6155 | - xen_build_mfn_list_list(); | ||
6156 | - | ||
6157 | xen_setup_shared_info(); | ||
6158 | |||
6159 | if (suspend_cancelled) { | ||
6160 | @@ -47,19 +44,7 @@ void xen_post_suspend(int suspend_cancelled) | ||
6161 | |||
6162 | } | ||
6163 | |||
6164 | -static void xen_vcpu_notify_restore(void *data) | ||
6165 | -{ | ||
6166 | - unsigned long reason = (unsigned long)data; | ||
6167 | - | ||
6168 | - /* Boot processor notified via generic timekeeping_resume() */ | ||
6169 | - if ( smp_processor_id() == 0) | ||
6170 | - return; | ||
6171 | - | ||
6172 | - clockevents_notify(reason, NULL); | ||
6173 | -} | ||
6174 | - | ||
6175 | void xen_arch_resume(void) | ||
6176 | { | ||
6177 | - smp_call_function(xen_vcpu_notify_restore, | ||
6178 | - (void *)CLOCK_EVT_NOTIFY_RESUME, 1); | ||
6179 | + /* nothing */ | ||
6180 | } | ||
6181 | diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c | ||
6182 | index 9d1f853..0a5aa44 100644 | ||
6183 | --- a/arch/x86/xen/time.c | ||
6184 | +++ b/arch/x86/xen/time.c | ||
6185 | @@ -100,7 +100,7 @@ bool xen_vcpu_stolen(int vcpu) | ||
6186 | return per_cpu(runstate, vcpu).state == RUNSTATE_runnable; | ||
6187 | } | ||
6188 | |||
6189 | -void xen_setup_runstate_info(int cpu) | ||
6190 | +static void setup_runstate_info(int cpu) | ||
6191 | { | ||
6192 | struct vcpu_register_runstate_memory_area area; | ||
6193 | |||
6194 | @@ -434,7 +434,7 @@ void xen_setup_timer(int cpu) | ||
6195 | name = "<timer kasprintf failed>"; | ||
6196 | |||
6197 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | ||
6198 | - IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, | ||
6199 | + IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | ||
6200 | name, NULL); | ||
6201 | |||
6202 | evt = &per_cpu(xen_clock_events, cpu); | ||
6203 | @@ -442,6 +442,8 @@ void xen_setup_timer(int cpu) | ||
6204 | |||
6205 | evt->cpumask = cpumask_of(cpu); | ||
6206 | evt->irq = irq; | ||
6207 | + | ||
6208 | + setup_runstate_info(cpu); | ||
6209 | } | ||
6210 | |||
6211 | void xen_teardown_timer(int cpu) | ||
6212 | @@ -492,7 +494,6 @@ __init void xen_time_init(void) | ||
6213 | |||
6214 | setup_force_cpu_cap(X86_FEATURE_TSC); | ||
6215 | |||
6216 | - xen_setup_runstate_info(cpu); | ||
6217 | xen_setup_timer(cpu); | ||
6218 | xen_setup_cpu_clockevents(); | ||
6219 | } | ||
6220 | diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S | ||
6221 | index 53adefd..02f496a 100644 | ||
6222 | --- a/arch/x86/xen/xen-asm_64.S | ||
6223 | +++ b/arch/x86/xen/xen-asm_64.S | ||
6224 | @@ -96,7 +96,7 @@ ENTRY(xen_sysret32) | ||
6225 | pushq $__USER32_CS | ||
6226 | pushq %rcx | ||
6227 | |||
6228 | - pushq $0 | ||
6229 | + pushq $VGCF_in_syscall | ||
6230 | 1: jmp hypercall_iret | ||
6231 | ENDPATCH(xen_sysret32) | ||
6232 | RELOC(xen_sysret32, 1b+1) | ||
6233 | @@ -151,7 +151,7 @@ ENTRY(xen_syscall32_target) | ||
6234 | ENTRY(xen_sysenter_target) | ||
6235 | lea 16(%rsp), %rsp /* strip %rcx, %r11 */ | ||
6236 | mov $-ENOSYS, %rax | ||
6237 | - pushq $0 | ||
6238 | + pushq $VGCF_in_syscall | ||
6239 | jmp hypercall_iret | ||
6240 | ENDPROC(xen_syscall32_target) | ||
6241 | ENDPROC(xen_sysenter_target) | ||
6242 | diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h | ||
6243 | index f9153a3..355fa6b 100644 | ||
6244 | --- a/arch/x86/xen/xen-ops.h | ||
6245 | +++ b/arch/x86/xen/xen-ops.h | ||
6246 | @@ -25,7 +25,6 @@ extern struct shared_info *HYPERVISOR_shared_info; | ||
6247 | |||
6248 | void xen_setup_mfn_list_list(void); | ||
6249 | void xen_setup_shared_info(void); | ||
6250 | -void xen_build_mfn_list_list(void); | ||
6251 | void xen_setup_machphys_mapping(void); | ||
6252 | pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); | ||
6253 | void xen_ident_map_ISA(void); | ||
6254 | @@ -42,7 +41,6 @@ void __init xen_build_dynamic_phys_to_machine(void); | ||
6255 | |||
6256 | void xen_init_irq_ops(void); | ||
6257 | void xen_setup_timer(int cpu); | ||
6258 | -void xen_setup_runstate_info(int cpu); | ||
6259 | void xen_teardown_timer(int cpu); | ||
6260 | cycle_t xen_clocksource_read(void); | ||
6261 | void xen_setup_cpu_clockevents(void); | ||
6262 | diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h | ||
6263 | index 4352dbe..05cebf8 100644 | ||
6264 | --- a/arch/xtensa/include/asm/syscall.h | ||
6265 | +++ b/arch/xtensa/include/asm/syscall.h | ||
6266 | @@ -13,6 +13,8 @@ struct sigaction; | ||
6267 | asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*); | ||
6268 | asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*); | ||
6269 | asmlinkage long xtensa_pipe(int __user *); | ||
6270 | +asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long, | ||
6271 | + unsigned long, unsigned long, unsigned long); | ||
6272 | asmlinkage long xtensa_ptrace(long, long, long, long); | ||
6273 | asmlinkage long xtensa_sigreturn(struct pt_regs*); | ||
6274 | asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); | ||
6275 | diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h | ||
6276 | index 9a5c354..c092c8f 100644 | ||
6277 | --- a/arch/xtensa/include/asm/unistd.h | ||
6278 | +++ b/arch/xtensa/include/asm/unistd.h | ||
6279 | @@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2) | ||
6280 | /* File Map / Shared Memory Operations */ | ||
6281 | |||
6282 | #define __NR_mmap2 80 | ||
6283 | -__SYSCALL( 80, sys_mmap_pgoff, 6) | ||
6284 | +__SYSCALL( 80, xtensa_mmap2, 6) | ||
6285 | #define __NR_munmap 81 | ||
6286 | __SYSCALL( 81, sys_munmap, 2) | ||
6287 | #define __NR_mprotect 82 | ||
6288 | diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c | ||
6289 | index 1e67bab..ac15ecb 100644 | ||
6290 | --- a/arch/xtensa/kernel/syscall.c | ||
6291 | +++ b/arch/xtensa/kernel/syscall.c | ||
6292 | @@ -57,6 +57,31 @@ asmlinkage long xtensa_pipe(int __user *userfds) | ||
6293 | return error; | ||
6294 | } | ||
6295 | |||
6296 | + | ||
6297 | +asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len, | ||
6298 | + unsigned long prot, unsigned long flags, | ||
6299 | + unsigned long fd, unsigned long pgoff) | ||
6300 | +{ | ||
6301 | + int error = -EBADF; | ||
6302 | + struct file * file = NULL; | ||
6303 | + | ||
6304 | + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | ||
6305 | + if (!(flags & MAP_ANONYMOUS)) { | ||
6306 | + file = fget(fd); | ||
6307 | + if (!file) | ||
6308 | + goto out; | ||
6309 | + } | ||
6310 | + | ||
6311 | + down_write(&current->mm->mmap_sem); | ||
6312 | + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); | ||
6313 | + up_write(&current->mm->mmap_sem); | ||
6314 | + | ||
6315 | + if (file) | ||
6316 | + fput(file); | ||
6317 | +out: | ||
6318 | + return error; | ||
6319 | +} | ||
6320 | + | ||
6321 | asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg) | ||
6322 | { | ||
6323 | unsigned long ret; | ||
6324 | diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c | ||
6325 | index 49f6ede..7411915 100644 | ||
6326 | --- a/drivers/acpi/bus.c | ||
6327 | +++ b/drivers/acpi/bus.c | ||
6328 | @@ -344,167 +344,6 @@ bool acpi_bus_can_wakeup(acpi_handle handle) | ||
6329 | |||
6330 | EXPORT_SYMBOL(acpi_bus_can_wakeup); | ||
6331 | |||
6332 | -static void acpi_print_osc_error(acpi_handle handle, | ||
6333 | - struct acpi_osc_context *context, char *error) | ||
6334 | -{ | ||
6335 | - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER}; | ||
6336 | - int i; | ||
6337 | - | ||
6338 | - if (ACPI_FAILURE(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) | ||
6339 | - printk(KERN_DEBUG "%s\n", error); | ||
6340 | - else { | ||
6341 | - printk(KERN_DEBUG "%s:%s\n", (char *)buffer.pointer, error); | ||
6342 | - kfree(buffer.pointer); | ||
6343 | - } | ||
6344 | - printk(KERN_DEBUG"_OSC request data:"); | ||
6345 | - for (i = 0; i < context->cap.length; i += sizeof(u32)) | ||
6346 | - printk("%x ", *((u32 *)(context->cap.pointer + i))); | ||
6347 | - printk("\n"); | ||
6348 | -} | ||
6349 | - | ||
6350 | -static u8 hex_val(unsigned char c) | ||
6351 | -{ | ||
6352 | - return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10; | ||
6353 | -} | ||
6354 | - | ||
6355 | -static acpi_status acpi_str_to_uuid(char *str, u8 *uuid) | ||
6356 | -{ | ||
6357 | - int i; | ||
6358 | - static int opc_map_to_uuid[16] = {6, 4, 2, 0, 11, 9, 16, 14, 19, 21, | ||
6359 | - 24, 26, 28, 30, 32, 34}; | ||
6360 | - | ||
6361 | - if (strlen(str) != 36) | ||
6362 | - return AE_BAD_PARAMETER; | ||
6363 | - for (i = 0; i < 36; i++) { | ||
6364 | - if (i == 8 || i == 13 || i == 18 || i == 23) { | ||
6365 | - if (str[i] != '-') | ||
6366 | - return AE_BAD_PARAMETER; | ||
6367 | - } else if (!isxdigit(str[i])) | ||
6368 | - return AE_BAD_PARAMETER; | ||
6369 | - } | ||
6370 | - for (i = 0; i < 16; i++) { | ||
6371 | - uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4; | ||
6372 | - uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]); | ||
6373 | - } | ||
6374 | - return AE_OK; | ||
6375 | -} | ||
6376 | - | ||
6377 | -acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context) | ||
6378 | -{ | ||
6379 | - acpi_status status; | ||
6380 | - struct acpi_object_list input; | ||
6381 | - union acpi_object in_params[4]; | ||
6382 | - union acpi_object *out_obj; | ||
6383 | - u8 uuid[16]; | ||
6384 | - u32 errors; | ||
6385 | - struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
6386 | - | ||
6387 | - if (!context) | ||
6388 | - return AE_ERROR; | ||
6389 | - if (ACPI_FAILURE(acpi_str_to_uuid(context->uuid_str, uuid))) | ||
6390 | - return AE_ERROR; | ||
6391 | - context->ret.length = ACPI_ALLOCATE_BUFFER; | ||
6392 | - context->ret.pointer = NULL; | ||
6393 | - | ||
6394 | - /* Setting up input parameters */ | ||
6395 | - input.count = 4; | ||
6396 | - input.pointer = in_params; | ||
6397 | - in_params[0].type = ACPI_TYPE_BUFFER; | ||
6398 | - in_params[0].buffer.length = 16; | ||
6399 | - in_params[0].buffer.pointer = uuid; | ||
6400 | - in_params[1].type = ACPI_TYPE_INTEGER; | ||
6401 | - in_params[1].integer.value = context->rev; | ||
6402 | - in_params[2].type = ACPI_TYPE_INTEGER; | ||
6403 | - in_params[2].integer.value = context->cap.length/sizeof(u32); | ||
6404 | - in_params[3].type = ACPI_TYPE_BUFFER; | ||
6405 | - in_params[3].buffer.length = context->cap.length; | ||
6406 | - in_params[3].buffer.pointer = context->cap.pointer; | ||
6407 | - | ||
6408 | - status = acpi_evaluate_object(handle, "_OSC", &input, &output); | ||
6409 | - if (ACPI_FAILURE(status)) | ||
6410 | - return status; | ||
6411 | - | ||
6412 | - if (!output.length) | ||
6413 | - return AE_NULL_OBJECT; | ||
6414 | - | ||
6415 | - out_obj = output.pointer; | ||
6416 | - if (out_obj->type != ACPI_TYPE_BUFFER | ||
6417 | - || out_obj->buffer.length != context->cap.length) { | ||
6418 | - acpi_print_osc_error(handle, context, | ||
6419 | - "_OSC evaluation returned wrong type"); | ||
6420 | - status = AE_TYPE; | ||
6421 | - goto out_kfree; | ||
6422 | - } | ||
6423 | - /* Need to ignore the bit0 in result code */ | ||
6424 | - errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); | ||
6425 | - if (errors) { | ||
6426 | - if (errors & OSC_REQUEST_ERROR) | ||
6427 | - acpi_print_osc_error(handle, context, | ||
6428 | - "_OSC request failed"); | ||
6429 | - if (errors & OSC_INVALID_UUID_ERROR) | ||
6430 | - acpi_print_osc_error(handle, context, | ||
6431 | - "_OSC invalid UUID"); | ||
6432 | - if (errors & OSC_INVALID_REVISION_ERROR) | ||
6433 | - acpi_print_osc_error(handle, context, | ||
6434 | - "_OSC invalid revision"); | ||
6435 | - if (errors & OSC_CAPABILITIES_MASK_ERROR) { | ||
6436 | - if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE] | ||
6437 | - & OSC_QUERY_ENABLE) | ||
6438 | - goto out_success; | ||
6439 | - status = AE_SUPPORT; | ||
6440 | - goto out_kfree; | ||
6441 | - } | ||
6442 | - status = AE_ERROR; | ||
6443 | - goto out_kfree; | ||
6444 | - } | ||
6445 | -out_success: | ||
6446 | - context->ret.length = out_obj->buffer.length; | ||
6447 | - context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL); | ||
6448 | - if (!context->ret.pointer) { | ||
6449 | - status = AE_NO_MEMORY; | ||
6450 | - goto out_kfree; | ||
6451 | - } | ||
6452 | - memcpy(context->ret.pointer, out_obj->buffer.pointer, | ||
6453 | - context->ret.length); | ||
6454 | - status = AE_OK; | ||
6455 | - | ||
6456 | -out_kfree: | ||
6457 | - kfree(output.pointer); | ||
6458 | - if (status != AE_OK) | ||
6459 | - context->ret.pointer = NULL; | ||
6460 | - return status; | ||
6461 | -} | ||
6462 | -EXPORT_SYMBOL(acpi_run_osc); | ||
6463 | - | ||
6464 | -static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; | ||
6465 | -static void acpi_bus_osc_support(void) | ||
6466 | -{ | ||
6467 | - u32 capbuf[2]; | ||
6468 | - struct acpi_osc_context context = { | ||
6469 | - .uuid_str = sb_uuid_str, | ||
6470 | - .rev = 1, | ||
6471 | - .cap.length = 8, | ||
6472 | - .cap.pointer = capbuf, | ||
6473 | - }; | ||
6474 | - acpi_handle handle; | ||
6475 | - | ||
6476 | - capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; | ||
6477 | - capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */ | ||
6478 | -#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\ | ||
6479 | - defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) | ||
6480 | - capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT; | ||
6481 | -#endif | ||
6482 | - | ||
6483 | -#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) | ||
6484 | - capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; | ||
6485 | -#endif | ||
6486 | - if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) | ||
6487 | - return; | ||
6488 | - if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) | ||
6489 | - kfree(context.ret.pointer); | ||
6490 | - /* do we need to check the returned cap? Sounds no */ | ||
6491 | -} | ||
6492 | - | ||
6493 | /* -------------------------------------------------------------------------- | ||
6494 | Event Management | ||
6495 | -------------------------------------------------------------------------- */ | ||
6496 | @@ -895,8 +734,6 @@ static int __init acpi_bus_init(void) | ||
6497 | status = acpi_ec_ecdt_probe(); | ||
6498 | /* Ignore result. Not having an ECDT is not fatal. */ | ||
6499 | |||
6500 | - acpi_bus_osc_support(); | ||
6501 | - | ||
6502 | status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); | ||
6503 | if (ACPI_FAILURE(status)) { | ||
6504 | printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); | ||
6505 | diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c | ||
6506 | index 8a95e83..0c9c6a9 100644 | ||
6507 | --- a/drivers/acpi/button.c | ||
6508 | +++ b/drivers/acpi/button.c | ||
6509 | @@ -282,13 +282,6 @@ static int acpi_lid_send_state(struct acpi_device *device) | ||
6510 | if (ret == NOTIFY_DONE) | ||
6511 | ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, | ||
6512 | device); | ||
6513 | - if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { | ||
6514 | - /* | ||
6515 | - * It is also regarded as success if the notifier_chain | ||
6516 | - * returns NOTIFY_OK or NOTIFY_DONE. | ||
6517 | - */ | ||
6518 | - ret = 0; | ||
6519 | - } | ||
6520 | return ret; | ||
6521 | } | ||
6522 | |||
6523 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c | ||
6524 | index f1670e0..baef28c 100644 | ||
6525 | --- a/drivers/acpi/ec.c | ||
6526 | +++ b/drivers/acpi/ec.c | ||
6527 | @@ -201,13 +201,14 @@ unlock: | ||
6528 | spin_unlock_irqrestore(&ec->curr_lock, flags); | ||
6529 | } | ||
6530 | |||
6531 | -static int acpi_ec_sync_query(struct acpi_ec *ec); | ||
6532 | +static void acpi_ec_gpe_query(void *ec_cxt); | ||
6533 | |||
6534 | -static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) | ||
6535 | +static int ec_check_sci(struct acpi_ec *ec, u8 state) | ||
6536 | { | ||
6537 | if (state & ACPI_EC_FLAG_SCI) { | ||
6538 | if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) | ||
6539 | - return acpi_ec_sync_query(ec); | ||
6540 | + return acpi_os_execute(OSL_EC_BURST_HANDLER, | ||
6541 | + acpi_ec_gpe_query, ec); | ||
6542 | } | ||
6543 | return 0; | ||
6544 | } | ||
6545 | @@ -248,6 +249,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, | ||
6546 | { | ||
6547 | unsigned long tmp; | ||
6548 | int ret = 0; | ||
6549 | + pr_debug(PREFIX "transaction start\n"); | ||
6550 | + /* disable GPE during transaction if storm is detected */ | ||
6551 | + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
6552 | + acpi_disable_gpe(NULL, ec->gpe); | ||
6553 | + } | ||
6554 | if (EC_FLAGS_MSI) | ||
6555 | udelay(ACPI_EC_MSI_UDELAY); | ||
6556 | /* start transaction */ | ||
6557 | @@ -259,9 +265,20 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, | ||
6558 | clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); | ||
6559 | spin_unlock_irqrestore(&ec->curr_lock, tmp); | ||
6560 | ret = ec_poll(ec); | ||
6561 | + pr_debug(PREFIX "transaction end\n"); | ||
6562 | spin_lock_irqsave(&ec->curr_lock, tmp); | ||
6563 | ec->curr = NULL; | ||
6564 | spin_unlock_irqrestore(&ec->curr_lock, tmp); | ||
6565 | + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
6566 | + /* check if we received SCI during transaction */ | ||
6567 | + ec_check_sci(ec, acpi_ec_read_status(ec)); | ||
6568 | + /* it is safe to enable GPE outside of transaction */ | ||
6569 | + acpi_enable_gpe(NULL, ec->gpe); | ||
6570 | + } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { | ||
6571 | + pr_info(PREFIX "GPE storm detected, " | ||
6572 | + "transactions will use polling mode\n"); | ||
6573 | + set_bit(EC_FLAGS_GPE_STORM, &ec->flags); | ||
6574 | + } | ||
6575 | return ret; | ||
6576 | } | ||
6577 | |||
6578 | @@ -304,26 +321,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) | ||
6579 | status = -ETIME; | ||
6580 | goto end; | ||
6581 | } | ||
6582 | - pr_debug(PREFIX "transaction start\n"); | ||
6583 | - /* disable GPE during transaction if storm is detected */ | ||
6584 | - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
6585 | - acpi_disable_gpe(NULL, ec->gpe); | ||
6586 | - } | ||
6587 | - | ||
6588 | status = acpi_ec_transaction_unlocked(ec, t); | ||
6589 | - | ||
6590 | - /* check if we received SCI during transaction */ | ||
6591 | - ec_check_sci_sync(ec, acpi_ec_read_status(ec)); | ||
6592 | - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { | ||
6593 | - msleep(1); | ||
6594 | - /* it is safe to enable GPE outside of transaction */ | ||
6595 | - acpi_enable_gpe(NULL, ec->gpe); | ||
6596 | - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { | ||
6597 | - pr_info(PREFIX "GPE storm detected, " | ||
6598 | - "transactions will use polling mode\n"); | ||
6599 | - set_bit(EC_FLAGS_GPE_STORM, &ec->flags); | ||
6600 | - } | ||
6601 | - pr_debug(PREFIX "transaction end\n"); | ||
6602 | end: | ||
6603 | if (ec->global_lock) | ||
6604 | acpi_release_global_lock(glk); | ||
6605 | @@ -445,7 +443,7 @@ int ec_transaction(u8 command, | ||
6606 | |||
6607 | EXPORT_SYMBOL(ec_transaction); | ||
6608 | |||
6609 | -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) | ||
6610 | +static int acpi_ec_query(struct acpi_ec *ec, u8 * data) | ||
6611 | { | ||
6612 | int result; | ||
6613 | u8 d; | ||
6614 | @@ -454,16 +452,20 @@ static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) | ||
6615 | .wlen = 0, .rlen = 1}; | ||
6616 | if (!ec || !data) | ||
6617 | return -EINVAL; | ||
6618 | + | ||
6619 | /* | ||
6620 | * Query the EC to find out which _Qxx method we need to evaluate. | ||
6621 | * Note that successful completion of the query causes the ACPI_EC_SCI | ||
6622 | * bit to be cleared (and thus clearing the interrupt source). | ||
6623 | */ | ||
6624 | - result = acpi_ec_transaction_unlocked(ec, &t); | ||
6625 | + | ||
6626 | + result = acpi_ec_transaction(ec, &t); | ||
6627 | if (result) | ||
6628 | return result; | ||
6629 | + | ||
6630 | if (!d) | ||
6631 | return -ENODATA; | ||
6632 | + | ||
6633 | *data = d; | ||
6634 | return 0; | ||
6635 | } | ||
6636 | @@ -507,78 +509,43 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) | ||
6637 | |||
6638 | EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); | ||
6639 | |||
6640 | -static void acpi_ec_run(void *cxt) | ||
6641 | -{ | ||
6642 | - struct acpi_ec_query_handler *handler = cxt; | ||
6643 | - if (!handler) | ||
6644 | - return; | ||
6645 | - pr_debug(PREFIX "start query execution\n"); | ||
6646 | - if (handler->func) | ||
6647 | - handler->func(handler->data); | ||
6648 | - else if (handler->handle) | ||
6649 | - acpi_evaluate_object(handler->handle, NULL, NULL, NULL); | ||
6650 | - pr_debug(PREFIX "stop query execution\n"); | ||
6651 | - kfree(handler); | ||
6652 | -} | ||
6653 | - | ||
6654 | -static int acpi_ec_sync_query(struct acpi_ec *ec) | ||
6655 | +static void acpi_ec_gpe_query(void *ec_cxt) | ||
6656 | { | ||
6657 | + struct acpi_ec *ec = ec_cxt; | ||
6658 | u8 value = 0; | ||
6659 | - int status; | ||
6660 | - struct acpi_ec_query_handler *handler, *copy; | ||
6661 | - if ((status = acpi_ec_query_unlocked(ec, &value))) | ||
6662 | - return status; | ||
6663 | + struct acpi_ec_query_handler *handler, copy; | ||
6664 | + | ||
6665 | + if (!ec || acpi_ec_query(ec, &value)) | ||
6666 | + return; | ||
6667 | + mutex_lock(&ec->lock); | ||
6668 | list_for_each_entry(handler, &ec->list, node) { | ||
6669 | if (value == handler->query_bit) { | ||
6670 | /* have custom handler for this bit */ | ||
6671 | - copy = kmalloc(sizeof(*handler), GFP_KERNEL); | ||
6672 | - if (!copy) | ||
6673 | - return -ENOMEM; | ||
6674 | - memcpy(copy, handler, sizeof(*copy)); | ||
6675 | - pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value); | ||
6676 | - return acpi_os_execute(OSL_GPE_HANDLER, | ||
6677 | - acpi_ec_run, copy); | ||
6678 | + memcpy(&copy, handler, sizeof(copy)); | ||
6679 | + mutex_unlock(&ec->lock); | ||
6680 | + if (copy.func) { | ||
6681 | + copy.func(copy.data); | ||
6682 | + } else if (copy.handle) { | ||
6683 | + acpi_evaluate_object(copy.handle, NULL, NULL, NULL); | ||
6684 | + } | ||
6685 | + return; | ||
6686 | } | ||
6687 | } | ||
6688 | - return 0; | ||
6689 | -} | ||
6690 | - | ||
6691 | -static void acpi_ec_gpe_query(void *ec_cxt) | ||
6692 | -{ | ||
6693 | - struct acpi_ec *ec = ec_cxt; | ||
6694 | - if (!ec) | ||
6695 | - return; | ||
6696 | - mutex_lock(&ec->lock); | ||
6697 | - acpi_ec_sync_query(ec); | ||
6698 | mutex_unlock(&ec->lock); | ||
6699 | } | ||
6700 | |||
6701 | -static void acpi_ec_gpe_query(void *ec_cxt); | ||
6702 | - | ||
6703 | -static int ec_check_sci(struct acpi_ec *ec, u8 state) | ||
6704 | -{ | ||
6705 | - if (state & ACPI_EC_FLAG_SCI) { | ||
6706 | - if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { | ||
6707 | - pr_debug(PREFIX "push gpe query to the queue\n"); | ||
6708 | - return acpi_os_execute(OSL_NOTIFY_HANDLER, | ||
6709 | - acpi_ec_gpe_query, ec); | ||
6710 | - } | ||
6711 | - } | ||
6712 | - return 0; | ||
6713 | -} | ||
6714 | - | ||
6715 | static u32 acpi_ec_gpe_handler(void *data) | ||
6716 | { | ||
6717 | struct acpi_ec *ec = data; | ||
6718 | + u8 status; | ||
6719 | |||
6720 | pr_debug(PREFIX "~~~> interrupt\n"); | ||
6721 | + status = acpi_ec_read_status(ec); | ||
6722 | |||
6723 | - advance_transaction(ec, acpi_ec_read_status(ec)); | ||
6724 | - if (ec_transaction_done(ec) && | ||
6725 | - (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) { | ||
6726 | + advance_transaction(ec, status); | ||
6727 | + if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0) | ||
6728 | wake_up(&ec->wait); | ||
6729 | - ec_check_sci(ec, acpi_ec_read_status(ec)); | ||
6730 | - } | ||
6731 | + ec_check_sci(ec, status); | ||
6732 | return ACPI_INTERRUPT_HANDLED; | ||
6733 | } | ||
6734 | |||
6735 | @@ -949,7 +916,6 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) | ||
6736 | /* MSI EC needs special treatment, enable it */ | ||
6737 | static int ec_flag_msi(const struct dmi_system_id *id) | ||
6738 | { | ||
6739 | - printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n"); | ||
6740 | EC_FLAGS_MSI = 1; | ||
6741 | EC_FLAGS_VALIDATE_ECDT = 1; | ||
6742 | return 0; | ||
6743 | @@ -962,13 +928,8 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { | ||
6744 | DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, | ||
6745 | { | ||
6746 | ec_flag_msi, "MSI hardware", { | ||
6747 | - DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL}, | ||
6748 | - { | ||
6749 | - ec_flag_msi, "MSI hardware", { | ||
6750 | - DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL}, | ||
6751 | - { | ||
6752 | - ec_flag_msi, "MSI hardware", { | ||
6753 | - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, | ||
6754 | + DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"), | ||
6755 | + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL}, | ||
6756 | { | ||
6757 | ec_validate_ecdt, "ASUS hardware", { | ||
6758 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, | ||
6759 | diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c | ||
6760 | index d9f78f6..bbd066e 100644 | ||
6761 | --- a/drivers/acpi/processor_idle.c | ||
6762 | +++ b/drivers/acpi/processor_idle.c | ||
6763 | @@ -110,14 +110,6 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = { | ||
6764 | DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), | ||
6765 | DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")}, | ||
6766 | (void *)2}, | ||
6767 | - { set_max_cstate, "Pavilion zv5000", { | ||
6768 | - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
6769 | - DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")}, | ||
6770 | - (void *)1}, | ||
6771 | - { set_max_cstate, "Asus L8400B", { | ||
6772 | - DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), | ||
6773 | - DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, | ||
6774 | - (void *)1}, | ||
6775 | {}, | ||
6776 | }; | ||
6777 | |||
6778 | @@ -307,17 +299,6 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) | ||
6779 | pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency; | ||
6780 | pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency; | ||
6781 | |||
6782 | - /* | ||
6783 | - * FADT specified C2 latency must be less than or equal to | ||
6784 | - * 100 microseconds. | ||
6785 | - */ | ||
6786 | - if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { | ||
6787 | - ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
6788 | - "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency)); | ||
6789 | - /* invalidate C2 */ | ||
6790 | - pr->power.states[ACPI_STATE_C2].address = 0; | ||
6791 | - } | ||
6792 | - | ||
6793 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
6794 | "lvl2[0x%08x] lvl3[0x%08x]\n", | ||
6795 | pr->power.states[ACPI_STATE_C2].address, | ||
6796 | @@ -514,6 +495,16 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx) | ||
6797 | return; | ||
6798 | |||
6799 | /* | ||
6800 | + * C2 latency must be less than or equal to 100 | ||
6801 | + * microseconds. | ||
6802 | + */ | ||
6803 | + else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) { | ||
6804 | + ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
6805 | + "latency too large [%d]\n", cx->latency)); | ||
6806 | + return; | ||
6807 | + } | ||
6808 | + | ||
6809 | + /* | ||
6810 | * Otherwise we've met all of our C2 requirements. | ||
6811 | * Normalize the C2 latency to expidite policy | ||
6812 | */ | ||
6813 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c | ||
6814 | index 0b09703..14a7481 100644 | ||
6815 | --- a/drivers/acpi/scan.c | ||
6816 | +++ b/drivers/acpi/scan.c | ||
6817 | @@ -1357,9 +1357,6 @@ int acpi_bus_start(struct acpi_device *device) | ||
6818 | { | ||
6819 | struct acpi_bus_ops ops; | ||
6820 | |||
6821 | - if (!device) | ||
6822 | - return -EINVAL; | ||
6823 | - | ||
6824 | memset(&ops, 0, sizeof(ops)); | ||
6825 | ops.acpi_op_start = 1; | ||
6826 | |||
6827 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c | ||
6828 | index 9b37502..a3241a1 100644 | ||
6829 | --- a/drivers/ata/ahci.c | ||
6830 | +++ b/drivers/ata/ahci.c | ||
6831 | @@ -113,7 +113,6 @@ enum { | ||
6832 | board_ahci_mcp65 = 6, | ||
6833 | board_ahci_nopmp = 7, | ||
6834 | board_ahci_yesncq = 8, | ||
6835 | - board_ahci_nosntf = 9, | ||
6836 | |||
6837 | /* global controller registers */ | ||
6838 | HOST_CAP = 0x00, /* host capabilities */ | ||
6839 | @@ -236,7 +235,6 @@ enum { | ||
6840 | AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ | ||
6841 | AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as | ||
6842 | link offline */ | ||
6843 | - AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */ | ||
6844 | |||
6845 | /* ap->flags bits */ | ||
6846 | |||
6847 | @@ -510,7 +508,7 @@ static const struct ata_port_info ahci_port_info[] = { | ||
6848 | .udma_mask = ATA_UDMA6, | ||
6849 | .port_ops = &ahci_ops, | ||
6850 | }, | ||
6851 | - [board_ahci_yesncq] = | ||
6852 | + /* board_ahci_yesncq */ | ||
6853 | { | ||
6854 | AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), | ||
6855 | .flags = AHCI_FLAG_COMMON, | ||
6856 | @@ -518,14 +516,6 @@ static const struct ata_port_info ahci_port_info[] = { | ||
6857 | .udma_mask = ATA_UDMA6, | ||
6858 | .port_ops = &ahci_ops, | ||
6859 | }, | ||
6860 | - [board_ahci_nosntf] = | ||
6861 | - { | ||
6862 | - AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF), | ||
6863 | - .flags = AHCI_FLAG_COMMON, | ||
6864 | - .pio_mask = ATA_PIO4, | ||
6865 | - .udma_mask = ATA_UDMA6, | ||
6866 | - .port_ops = &ahci_ops, | ||
6867 | - }, | ||
6868 | }; | ||
6869 | |||
6870 | static const struct pci_device_id ahci_pci_tbl[] = { | ||
6871 | @@ -541,7 +531,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { | ||
6872 | { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */ | ||
6873 | { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */ | ||
6874 | { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */ | ||
6875 | - { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */ | ||
6876 | + { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */ | ||
6877 | { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */ | ||
6878 | { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */ | ||
6879 | { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */ | ||
6880 | @@ -859,12 +849,6 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | ||
6881 | cap &= ~HOST_CAP_PMP; | ||
6882 | } | ||
6883 | |||
6884 | - if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) { | ||
6885 | - dev_printk(KERN_INFO, &pdev->dev, | ||
6886 | - "controller can't do SNTF, turning off CAP_SNTF\n"); | ||
6887 | - cap &= ~HOST_CAP_SNTF; | ||
6888 | - } | ||
6889 | - | ||
6890 | if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 && | ||
6891 | port_map != 1) { | ||
6892 | dev_printk(KERN_INFO, &pdev->dev, | ||
6893 | @@ -2868,21 +2852,6 @@ static bool ahci_broken_suspend(struct pci_dev *pdev) | ||
6894 | }, | ||
6895 | .driver_data = "F.23", /* cutoff BIOS version */ | ||
6896 | }, | ||
6897 | - /* | ||
6898 | - * Acer eMachines G725 has the same problem. BIOS | ||
6899 | - * V1.03 is known to be broken. V3.04 is known to | ||
6900 | - * work. Inbetween, there are V1.06, V2.06 and V3.03 | ||
6901 | - * that we don't have much idea about. For now, | ||
6902 | - * blacklist anything older than V3.04. | ||
6903 | - */ | ||
6904 | - { | ||
6905 | - .ident = "G725", | ||
6906 | - .matches = { | ||
6907 | - DMI_MATCH(DMI_SYS_VENDOR, "eMachines"), | ||
6908 | - DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"), | ||
6909 | - }, | ||
6910 | - .driver_data = "V3.04", /* cutoff BIOS version */ | ||
6911 | - }, | ||
6912 | { } /* terminate list */ | ||
6913 | }; | ||
6914 | const struct dmi_system_id *dmi = dmi_first_match(sysids); | ||
6915 | diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c | ||
6916 | index 0c6155f..9ac4e37 100644 | ||
6917 | --- a/drivers/ata/ata_piix.c | ||
6918 | +++ b/drivers/ata/ata_piix.c | ||
6919 | @@ -869,10 +869,10 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in | ||
6920 | (timings[pio][1] << 8); | ||
6921 | } | ||
6922 | |||
6923 | - if (ap->udma_mask) | ||
6924 | + if (ap->udma_mask) { | ||
6925 | udma_enable &= ~(1 << devid); | ||
6926 | - | ||
6927 | - pci_write_config_word(dev, master_port, master_data); | ||
6928 | + pci_write_config_word(dev, master_port, master_data); | ||
6929 | + } | ||
6930 | } | ||
6931 | /* Don't scribble on 0x48 if the controller does not support UDMA */ | ||
6932 | if (ap->udma_mask) | ||
6933 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c | ||
6934 | index 91fed3c..dc72690 100644 | ||
6935 | --- a/drivers/ata/libata-core.c | ||
6936 | +++ b/drivers/ata/libata-core.c | ||
6937 | @@ -3790,45 +3790,21 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, | ||
6938 | int sata_link_resume(struct ata_link *link, const unsigned long *params, | ||
6939 | unsigned long deadline) | ||
6940 | { | ||
6941 | - int tries = ATA_LINK_RESUME_TRIES; | ||
6942 | u32 scontrol, serror; | ||
6943 | int rc; | ||
6944 | |||
6945 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) | ||
6946 | return rc; | ||
6947 | |||
6948 | - /* | ||
6949 | - * Writes to SControl sometimes get ignored under certain | ||
6950 | - * controllers (ata_piix SIDPR). Make sure DET actually is | ||
6951 | - * cleared. | ||
6952 | - */ | ||
6953 | - do { | ||
6954 | - scontrol = (scontrol & 0x0f0) | 0x300; | ||
6955 | - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) | ||
6956 | - return rc; | ||
6957 | - /* | ||
6958 | - * Some PHYs react badly if SStatus is pounded | ||
6959 | - * immediately after resuming. Delay 200ms before | ||
6960 | - * debouncing. | ||
6961 | - */ | ||
6962 | - msleep(200); | ||
6963 | + scontrol = (scontrol & 0x0f0) | 0x300; | ||
6964 | |||
6965 | - /* is SControl restored correctly? */ | ||
6966 | - if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) | ||
6967 | - return rc; | ||
6968 | - } while ((scontrol & 0xf0f) != 0x300 && --tries); | ||
6969 | - | ||
6970 | - if ((scontrol & 0xf0f) != 0x300) { | ||
6971 | - ata_link_printk(link, KERN_ERR, | ||
6972 | - "failed to resume link (SControl %X)\n", | ||
6973 | - scontrol); | ||
6974 | - return 0; | ||
6975 | - } | ||
6976 | + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) | ||
6977 | + return rc; | ||
6978 | |||
6979 | - if (tries < ATA_LINK_RESUME_TRIES) | ||
6980 | - ata_link_printk(link, KERN_WARNING, | ||
6981 | - "link resume succeeded after %d retries\n", | ||
6982 | - ATA_LINK_RESUME_TRIES - tries); | ||
6983 | + /* Some PHYs react badly if SStatus is pounded immediately | ||
6984 | + * after resuming. Delay 200ms before debouncing. | ||
6985 | + */ | ||
6986 | + msleep(200); | ||
6987 | |||
6988 | if ((rc = sata_link_debounce(link, params, deadline))) | ||
6989 | return rc; | ||
6990 | diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c | ||
6991 | index 7d8d3c3..bba2ae5 100644 | ||
6992 | --- a/drivers/ata/libata-eh.c | ||
6993 | +++ b/drivers/ata/libata-eh.c | ||
6994 | @@ -2019,9 +2019,8 @@ static void ata_eh_link_autopsy(struct ata_link *link) | ||
6995 | qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); | ||
6996 | |||
6997 | /* determine whether the command is worth retrying */ | ||
6998 | - if (qc->flags & ATA_QCFLAG_IO || | ||
6999 | - (!(qc->err_mask & AC_ERR_INVALID) && | ||
7000 | - qc->err_mask != AC_ERR_DEV)) | ||
7001 | + if (!(qc->err_mask & AC_ERR_INVALID) && | ||
7002 | + ((qc->flags & ATA_QCFLAG_IO) || qc->err_mask != AC_ERR_DEV)) | ||
7003 | qc->flags |= ATA_QCFLAG_RETRY; | ||
7004 | |||
7005 | /* accumulate error info */ | ||
7006 | diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c | ||
7007 | index 2ae15c3..bbbb1fa 100644 | ||
7008 | --- a/drivers/ata/libata-sff.c | ||
7009 | +++ b/drivers/ata/libata-sff.c | ||
7010 | @@ -893,9 +893,6 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) | ||
7011 | do_write); | ||
7012 | } | ||
7013 | |||
7014 | - if (!do_write) | ||
7015 | - flush_dcache_page(page); | ||
7016 | - | ||
7017 | qc->curbytes += qc->sect_size; | ||
7018 | qc->cursg_ofs += qc->sect_size; | ||
7019 | |||
7020 | diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c | ||
7021 | index f0bad9b..f98dffe 100644 | ||
7022 | --- a/drivers/ata/pata_cmd64x.c | ||
7023 | +++ b/drivers/ata/pata_cmd64x.c | ||
7024 | @@ -219,7 +219,7 @@ static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7025 | regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift; | ||
7026 | /* Merge the control bits */ | ||
7027 | regU |= 1 << adev->devno; /* UDMA on */ | ||
7028 | - if (adev->dma_mode > XFER_UDMA_2) /* 15nS timing */ | ||
7029 | + if (adev->dma_mode > 2) /* 15nS timing */ | ||
7030 | regU |= 4 << adev->devno; | ||
7031 | } else { | ||
7032 | regU &= ~ (1 << adev->devno); /* UDMA off */ | ||
7033 | diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c | ||
7034 | index ec07c53..d0a7df2 100644 | ||
7035 | --- a/drivers/ata/pata_hpt37x.c | ||
7036 | +++ b/drivers/ata/pata_hpt37x.c | ||
7037 | @@ -24,7 +24,7 @@ | ||
7038 | #include <linux/libata.h> | ||
7039 | |||
7040 | #define DRV_NAME "pata_hpt37x" | ||
7041 | -#define DRV_VERSION "0.6.14" | ||
7042 | +#define DRV_VERSION "0.6.12" | ||
7043 | |||
7044 | struct hpt_clock { | ||
7045 | u8 xfer_speed; | ||
7046 | @@ -404,8 +404,9 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
7047 | |||
7048 | pci_read_config_dword(pdev, addr1, &reg); | ||
7049 | mode = hpt37x_find_mode(ap, adev->pio_mode); | ||
7050 | - mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ | ||
7051 | - reg &= ~0xCFC3FFFF; /* Strip timing bits */ | ||
7052 | + mode &= ~0x8000000; /* No FIFO in PIO */ | ||
7053 | + mode &= ~0x30070000; /* Leave config bits alone */ | ||
7054 | + reg &= 0x30070000; /* Strip timing bits */ | ||
7055 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7056 | } | ||
7057 | |||
7058 | @@ -422,7 +423,8 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7059 | { | ||
7060 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
7061 | u32 addr1, addr2; | ||
7062 | - u32 reg, mode, mask; | ||
7063 | + u32 reg; | ||
7064 | + u32 mode; | ||
7065 | u8 fast; | ||
7066 | |||
7067 | addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); | ||
7068 | @@ -434,12 +436,11 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7069 | fast |= 0x01; | ||
7070 | pci_write_config_byte(pdev, addr2, fast); | ||
7071 | |||
7072 | - mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000; | ||
7073 | - | ||
7074 | pci_read_config_dword(pdev, addr1, &reg); | ||
7075 | mode = hpt37x_find_mode(ap, adev->dma_mode); | ||
7076 | - mode &= mask; | ||
7077 | - reg &= ~mask; | ||
7078 | + mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ | ||
7079 | + mode &= ~0xC0000000; /* Leave config bits alone */ | ||
7080 | + reg &= 0xC0000000; /* Strip timing bits */ | ||
7081 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7082 | } | ||
7083 | |||
7084 | @@ -507,8 +508,9 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
7085 | mode = hpt37x_find_mode(ap, adev->pio_mode); | ||
7086 | |||
7087 | printk("Find mode for %d reports %X\n", adev->pio_mode, mode); | ||
7088 | - mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ | ||
7089 | - reg &= ~0xCFC3FFFF; /* Strip timing bits */ | ||
7090 | + mode &= ~0x80000000; /* No FIFO in PIO */ | ||
7091 | + mode &= ~0x30070000; /* Leave config bits alone */ | ||
7092 | + reg &= 0x30070000; /* Strip timing bits */ | ||
7093 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7094 | } | ||
7095 | |||
7096 | @@ -525,7 +527,8 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7097 | { | ||
7098 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
7099 | u32 addr1, addr2; | ||
7100 | - u32 reg, mode, mask; | ||
7101 | + u32 reg; | ||
7102 | + u32 mode; | ||
7103 | u8 fast; | ||
7104 | |||
7105 | addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); | ||
7106 | @@ -536,13 +539,12 @@ static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7107 | fast &= ~0x07; | ||
7108 | pci_write_config_byte(pdev, addr2, fast); | ||
7109 | |||
7110 | - mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000; | ||
7111 | - | ||
7112 | pci_read_config_dword(pdev, addr1, &reg); | ||
7113 | mode = hpt37x_find_mode(ap, adev->dma_mode); | ||
7114 | printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode); | ||
7115 | - mode &= mask; | ||
7116 | - reg &= ~mask; | ||
7117 | + mode &= ~0xC0000000; /* Leave config bits alone */ | ||
7118 | + mode |= 0x80000000; /* FIFO in MWDMA or UDMA */ | ||
7119 | + reg &= 0xC0000000; /* Strip timing bits */ | ||
7120 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7121 | } | ||
7122 | |||
7123 | diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c | ||
7124 | index d16e87e..3d59fe0 100644 | ||
7125 | --- a/drivers/ata/pata_hpt3x2n.c | ||
7126 | +++ b/drivers/ata/pata_hpt3x2n.c | ||
7127 | @@ -8,7 +8,7 @@ | ||
7128 | * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> | ||
7129 | * Portions Copyright (C) 2001 Sun Microsystems, Inc. | ||
7130 | * Portions Copyright (C) 2003 Red Hat Inc | ||
7131 | - * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. | ||
7132 | + * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. | ||
7133 | * | ||
7134 | * | ||
7135 | * TODO | ||
7136 | @@ -25,7 +25,7 @@ | ||
7137 | #include <linux/libata.h> | ||
7138 | |||
7139 | #define DRV_NAME "pata_hpt3x2n" | ||
7140 | -#define DRV_VERSION "0.3.8" | ||
7141 | +#define DRV_VERSION "0.3.4" | ||
7142 | |||
7143 | enum { | ||
7144 | HPT_PCI_FAST = (1 << 31), | ||
7145 | @@ -185,8 +185,9 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev) | ||
7146 | |||
7147 | pci_read_config_dword(pdev, addr1, &reg); | ||
7148 | mode = hpt3x2n_find_mode(ap, adev->pio_mode); | ||
7149 | - mode &= 0xCFC3FFFF; /* Leave DMA bits alone */ | ||
7150 | - reg &= ~0xCFC3FFFF; /* Strip timing bits */ | ||
7151 | + mode &= ~0x8000000; /* No FIFO in PIO */ | ||
7152 | + mode &= ~0x30070000; /* Leave config bits alone */ | ||
7153 | + reg &= 0x30070000; /* Strip timing bits */ | ||
7154 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7155 | } | ||
7156 | |||
7157 | @@ -203,7 +204,8 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7158 | { | ||
7159 | struct pci_dev *pdev = to_pci_dev(ap->host->dev); | ||
7160 | u32 addr1, addr2; | ||
7161 | - u32 reg, mode, mask; | ||
7162 | + u32 reg; | ||
7163 | + u32 mode; | ||
7164 | u8 fast; | ||
7165 | |||
7166 | addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); | ||
7167 | @@ -214,12 +216,11 @@ static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev) | ||
7168 | fast &= ~0x07; | ||
7169 | pci_write_config_byte(pdev, addr2, fast); | ||
7170 | |||
7171 | - mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000; | ||
7172 | - | ||
7173 | pci_read_config_dword(pdev, addr1, &reg); | ||
7174 | mode = hpt3x2n_find_mode(ap, adev->dma_mode); | ||
7175 | - mode &= mask; | ||
7176 | - reg &= ~mask; | ||
7177 | + mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ | ||
7178 | + mode &= ~0xC0000000; /* Leave config bits alone */ | ||
7179 | + reg &= 0xC0000000; /* Strip timing bits */ | ||
7180 | pci_write_config_dword(pdev, addr1, reg | mode); | ||
7181 | } | ||
7182 | |||
7183 | @@ -262,7 +263,7 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc) | ||
7184 | |||
7185 | static void hpt3x2n_set_clock(struct ata_port *ap, int source) | ||
7186 | { | ||
7187 | - void __iomem *bmdma = ap->ioaddr.bmdma_addr - ap->port_no * 8; | ||
7188 | + void __iomem *bmdma = ap->ioaddr.bmdma_addr; | ||
7189 | |||
7190 | /* Tristate the bus */ | ||
7191 | iowrite8(0x80, bmdma+0x73); | ||
7192 | @@ -272,9 +273,9 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source) | ||
7193 | iowrite8(source, bmdma+0x7B); | ||
7194 | iowrite8(0xC0, bmdma+0x79); | ||
7195 | |||
7196 | - /* Reset state machines, avoid enabling the disabled channels */ | ||
7197 | - iowrite8(ioread8(bmdma+0x70) | 0x32, bmdma+0x70); | ||
7198 | - iowrite8(ioread8(bmdma+0x74) | 0x32, bmdma+0x74); | ||
7199 | + /* Reset state machines */ | ||
7200 | + iowrite8(0x37, bmdma+0x70); | ||
7201 | + iowrite8(0x37, bmdma+0x74); | ||
7202 | |||
7203 | /* Complete reset */ | ||
7204 | iowrite8(0x00, bmdma+0x79); | ||
7205 | @@ -284,10 +285,21 @@ static void hpt3x2n_set_clock(struct ata_port *ap, int source) | ||
7206 | iowrite8(0x00, bmdma+0x77); | ||
7207 | } | ||
7208 | |||
7209 | +/* Check if our partner interface is busy */ | ||
7210 | + | ||
7211 | +static int hpt3x2n_pair_idle(struct ata_port *ap) | ||
7212 | +{ | ||
7213 | + struct ata_host *host = ap->host; | ||
7214 | + struct ata_port *pair = host->ports[ap->port_no ^ 1]; | ||
7215 | + | ||
7216 | + if (pair->hsm_task_state == HSM_ST_IDLE) | ||
7217 | + return 1; | ||
7218 | + return 0; | ||
7219 | +} | ||
7220 | + | ||
7221 | static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) | ||
7222 | { | ||
7223 | long flags = (long)ap->host->private_data; | ||
7224 | - | ||
7225 | /* See if we should use the DPLL */ | ||
7226 | if (writing) | ||
7227 | return USE_DPLL; /* Needed for write */ | ||
7228 | @@ -296,35 +308,20 @@ static int hpt3x2n_use_dpll(struct ata_port *ap, int writing) | ||
7229 | return 0; | ||
7230 | } | ||
7231 | |||
7232 | -static int hpt3x2n_qc_defer(struct ata_queued_cmd *qc) | ||
7233 | -{ | ||
7234 | - struct ata_port *ap = qc->ap; | ||
7235 | - struct ata_port *alt = ap->host->ports[ap->port_no ^ 1]; | ||
7236 | - int rc, flags = (long)ap->host->private_data; | ||
7237 | - int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); | ||
7238 | - | ||
7239 | - /* First apply the usual rules */ | ||
7240 | - rc = ata_std_qc_defer(qc); | ||
7241 | - if (rc != 0) | ||
7242 | - return rc; | ||
7243 | - | ||
7244 | - if ((flags & USE_DPLL) != dpll && alt->qc_active) | ||
7245 | - return ATA_DEFER_PORT; | ||
7246 | - return 0; | ||
7247 | -} | ||
7248 | - | ||
7249 | static unsigned int hpt3x2n_qc_issue(struct ata_queued_cmd *qc) | ||
7250 | { | ||
7251 | + struct ata_taskfile *tf = &qc->tf; | ||
7252 | struct ata_port *ap = qc->ap; | ||
7253 | int flags = (long)ap->host->private_data; | ||
7254 | - int dpll = hpt3x2n_use_dpll(ap, qc->tf.flags & ATA_TFLAG_WRITE); | ||
7255 | - | ||
7256 | - if ((flags & USE_DPLL) != dpll) { | ||
7257 | - flags &= ~USE_DPLL; | ||
7258 | - flags |= dpll; | ||
7259 | - ap->host->private_data = (void *)(long)flags; | ||
7260 | |||
7261 | - hpt3x2n_set_clock(ap, dpll ? 0x21 : 0x23); | ||
7262 | + if (hpt3x2n_pair_idle(ap)) { | ||
7263 | + int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE)); | ||
7264 | + if ((flags & USE_DPLL) != dpll) { | ||
7265 | + if (dpll == 1) | ||
7266 | + hpt3x2n_set_clock(ap, 0x21); | ||
7267 | + else | ||
7268 | + hpt3x2n_set_clock(ap, 0x23); | ||
7269 | + } | ||
7270 | } | ||
7271 | return ata_sff_qc_issue(qc); | ||
7272 | } | ||
7273 | @@ -341,8 +338,6 @@ static struct ata_port_operations hpt3x2n_port_ops = { | ||
7274 | .inherits = &ata_bmdma_port_ops, | ||
7275 | |||
7276 | .bmdma_stop = hpt3x2n_bmdma_stop, | ||
7277 | - | ||
7278 | - .qc_defer = hpt3x2n_qc_defer, | ||
7279 | .qc_issue = hpt3x2n_qc_issue, | ||
7280 | |||
7281 | .cable_detect = hpt3x2n_cable_detect, | ||
7282 | @@ -460,7 +455,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | ||
7283 | unsigned int f_low, f_high; | ||
7284 | int adjust; | ||
7285 | unsigned long iobase = pci_resource_start(dev, 4); | ||
7286 | - void *hpriv = (void *)USE_DPLL; | ||
7287 | + void *hpriv = NULL; | ||
7288 | int rc; | ||
7289 | |||
7290 | rc = pcim_enable_device(dev); | ||
7291 | @@ -548,7 +543,7 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id) | ||
7292 | /* Set our private data up. We only need a few flags so we use | ||
7293 | it directly */ | ||
7294 | if (pci_mhz > 60) { | ||
7295 | - hpriv = (void *)(PCI66 | USE_DPLL); | ||
7296 | + hpriv = (void *)PCI66; | ||
7297 | /* | ||
7298 | * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in | ||
7299 | * the MISC. register to stretch the UltraDMA Tss timing. | ||
7300 | diff --git a/drivers/base/class.c b/drivers/base/class.c | ||
7301 | index 6e2c3b0..161746d 100644 | ||
7302 | --- a/drivers/base/class.c | ||
7303 | +++ b/drivers/base/class.c | ||
7304 | @@ -59,8 +59,6 @@ static void class_release(struct kobject *kobj) | ||
7305 | else | ||
7306 | pr_debug("class '%s' does not have a release() function, " | ||
7307 | "be careful\n", class->name); | ||
7308 | - | ||
7309 | - kfree(cp); | ||
7310 | } | ||
7311 | |||
7312 | static struct sysfs_ops class_sysfs_ops = { | ||
7313 | diff --git a/drivers/base/core.c b/drivers/base/core.c | ||
7314 | index 1093179..6bee6af 100644 | ||
7315 | --- a/drivers/base/core.c | ||
7316 | +++ b/drivers/base/core.c | ||
7317 | @@ -56,14 +56,7 @@ static inline int device_is_not_partition(struct device *dev) | ||
7318 | */ | ||
7319 | const char *dev_driver_string(const struct device *dev) | ||
7320 | { | ||
7321 | - struct device_driver *drv; | ||
7322 | - | ||
7323 | - /* dev->driver can change to NULL underneath us because of unbinding, | ||
7324 | - * so be careful about accessing it. dev->bus and dev->class should | ||
7325 | - * never change once they are set, so they don't need special care. | ||
7326 | - */ | ||
7327 | - drv = ACCESS_ONCE(dev->driver); | ||
7328 | - return drv ? drv->name : | ||
7329 | + return dev->driver ? dev->driver->name : | ||
7330 | (dev->bus ? dev->bus->name : | ||
7331 | (dev->class ? dev->class->name : "")); | ||
7332 | } | ||
7333 | diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c | ||
7334 | index 33faaa2..a1cb5af 100644 | ||
7335 | --- a/drivers/base/devtmpfs.c | ||
7336 | +++ b/drivers/base/devtmpfs.c | ||
7337 | @@ -353,7 +353,6 @@ int __init devtmpfs_init(void) | ||
7338 | { | ||
7339 | int err; | ||
7340 | struct vfsmount *mnt; | ||
7341 | - char options[] = "mode=0755"; | ||
7342 | |||
7343 | err = register_filesystem(&dev_fs_type); | ||
7344 | if (err) { | ||
7345 | @@ -362,7 +361,7 @@ int __init devtmpfs_init(void) | ||
7346 | return err; | ||
7347 | } | ||
7348 | |||
7349 | - mnt = kern_mount_data(&dev_fs_type, options); | ||
7350 | + mnt = kern_mount(&dev_fs_type); | ||
7351 | if (IS_ERR(mnt)) { | ||
7352 | err = PTR_ERR(mnt); | ||
7353 | printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); | ||
7354 | diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c | ||
7355 | index 0a4b75f..846d89e 100644 | ||
7356 | --- a/drivers/base/power/runtime.c | ||
7357 | +++ b/drivers/base/power/runtime.c | ||
7358 | @@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | ||
7359 | } | ||
7360 | |||
7361 | if (parent) { | ||
7362 | - spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); | ||
7363 | + spin_lock(&parent->power.lock); | ||
7364 | |||
7365 | /* | ||
7366 | * It is invalid to put an active child under a parent that is | ||
7367 | diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c | ||
7368 | index ca9c548..92b1263 100644 | ||
7369 | --- a/drivers/block/cciss.c | ||
7370 | +++ b/drivers/block/cciss.c | ||
7371 | @@ -339,9 +339,6 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | ||
7372 | if (*pos > h->highest_lun) | ||
7373 | return 0; | ||
7374 | |||
7375 | - if (drv == NULL) /* it's possible for h->drv[] to have holes. */ | ||
7376 | - return 0; | ||
7377 | - | ||
7378 | if (drv->heads == 0) | ||
7379 | return 0; | ||
7380 | |||
7381 | diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c | ||
7382 | index 68b5957..2ddf03a 100644 | ||
7383 | --- a/drivers/block/pktcdvd.c | ||
7384 | +++ b/drivers/block/pktcdvd.c | ||
7385 | @@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) | ||
7386 | pkt_kobj_remove(pd->kobj_stat); | ||
7387 | pkt_kobj_remove(pd->kobj_wqueue); | ||
7388 | if (class_pktcdvd) | ||
7389 | - device_unregister(pd->dev); | ||
7390 | + device_destroy(class_pktcdvd, pd->pkt_dev); | ||
7391 | } | ||
7392 | |||
7393 | |||
7394 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c | ||
7395 | index 1be7631..44bc8bb 100644 | ||
7396 | --- a/drivers/bluetooth/btusb.c | ||
7397 | +++ b/drivers/bluetooth/btusb.c | ||
7398 | @@ -307,7 +307,6 @@ static void btusb_bulk_complete(struct urb *urb) | ||
7399 | return; | ||
7400 | |||
7401 | usb_anchor_urb(urb, &data->bulk_anchor); | ||
7402 | - usb_mark_last_busy(data->udev); | ||
7403 | |||
7404 | err = usb_submit_urb(urb, GFP_ATOMIC); | ||
7405 | if (err < 0) { | ||
7406 | diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c | ||
7407 | index 4dcfef0..3cb56a0 100644 | ||
7408 | --- a/drivers/char/agp/intel-agp.c | ||
7409 | +++ b/drivers/char/agp/intel-agp.c | ||
7410 | @@ -178,7 +178,6 @@ static struct _intel_private { | ||
7411 | * popup and for the GTT. | ||
7412 | */ | ||
7413 | int gtt_entries; /* i830+ */ | ||
7414 | - int gtt_total_size; | ||
7415 | union { | ||
7416 | void __iomem *i9xx_flush_page; | ||
7417 | void *i8xx_flush_page; | ||
7418 | @@ -1154,7 +1153,7 @@ static int intel_i915_configure(void) | ||
7419 | readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ | ||
7420 | |||
7421 | if (agp_bridge->driver->needs_scratch_page) { | ||
7422 | - for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) { | ||
7423 | + for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) { | ||
7424 | writel(agp_bridge->scratch_page, intel_private.gtt+i); | ||
7425 | } | ||
7426 | readl(intel_private.gtt+i-1); /* PCI Posting. */ | ||
7427 | @@ -1309,8 +1308,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) | ||
7428 | if (!intel_private.gtt) | ||
7429 | return -ENOMEM; | ||
7430 | |||
7431 | - intel_private.gtt_total_size = gtt_map_size / 4; | ||
7432 | - | ||
7433 | temp &= 0xfff80000; | ||
7434 | |||
7435 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
7436 | @@ -1398,8 +1395,6 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) | ||
7437 | if (!intel_private.gtt) | ||
7438 | return -ENOMEM; | ||
7439 | |||
7440 | - intel_private.gtt_total_size = gtt_size / 4; | ||
7441 | - | ||
7442 | intel_private.registers = ioremap(temp, 128 * 4096); | ||
7443 | if (!intel_private.registers) { | ||
7444 | iounmap(intel_private.gtt); | ||
7445 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c | ||
7446 | index aef3fb4..a074fce 100644 | ||
7447 | --- a/drivers/char/mem.c | ||
7448 | +++ b/drivers/char/mem.c | ||
7449 | @@ -35,19 +35,6 @@ | ||
7450 | # include <linux/efi.h> | ||
7451 | #endif | ||
7452 | |||
7453 | -static inline unsigned long size_inside_page(unsigned long start, | ||
7454 | - unsigned long size) | ||
7455 | -{ | ||
7456 | - unsigned long sz; | ||
7457 | - | ||
7458 | - if (-start & (PAGE_SIZE - 1)) | ||
7459 | - sz = -start & (PAGE_SIZE - 1); | ||
7460 | - else | ||
7461 | - sz = PAGE_SIZE; | ||
7462 | - | ||
7463 | - return min_t(unsigned long, sz, size); | ||
7464 | -} | ||
7465 | - | ||
7466 | /* | ||
7467 | * Architectures vary in how they handle caching for addresses | ||
7468 | * outside of main memory. | ||
7469 | @@ -421,7 +408,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | ||
7470 | unsigned long p = *ppos; | ||
7471 | ssize_t low_count, read, sz; | ||
7472 | char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ | ||
7473 | - int err = 0; | ||
7474 | |||
7475 | read = 0; | ||
7476 | if (p < (unsigned long) high_memory) { | ||
7477 | @@ -444,7 +430,15 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | ||
7478 | } | ||
7479 | #endif | ||
7480 | while (low_count > 0) { | ||
7481 | - sz = size_inside_page(p, low_count); | ||
7482 | + /* | ||
7483 | + * Handle first page in case it's not aligned | ||
7484 | + */ | ||
7485 | + if (-p & (PAGE_SIZE - 1)) | ||
7486 | + sz = -p & (PAGE_SIZE - 1); | ||
7487 | + else | ||
7488 | + sz = PAGE_SIZE; | ||
7489 | + | ||
7490 | + sz = min_t(unsigned long, sz, low_count); | ||
7491 | |||
7492 | /* | ||
7493 | * On ia64 if a page has been mapped somewhere as | ||
7494 | @@ -468,18 +462,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | ||
7495 | if (!kbuf) | ||
7496 | return -ENOMEM; | ||
7497 | while (count > 0) { | ||
7498 | - int len = size_inside_page(p, count); | ||
7499 | + int len = count; | ||
7500 | |||
7501 | - if (!is_vmalloc_or_module_addr((void *)p)) { | ||
7502 | - err = -ENXIO; | ||
7503 | - break; | ||
7504 | - } | ||
7505 | + if (len > PAGE_SIZE) | ||
7506 | + len = PAGE_SIZE; | ||
7507 | len = vread(kbuf, (char *)p, len); | ||
7508 | if (!len) | ||
7509 | break; | ||
7510 | if (copy_to_user(buf, kbuf, len)) { | ||
7511 | - err = -EFAULT; | ||
7512 | - break; | ||
7513 | + free_page((unsigned long)kbuf); | ||
7514 | + return -EFAULT; | ||
7515 | } | ||
7516 | count -= len; | ||
7517 | buf += len; | ||
7518 | @@ -488,8 +480,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf, | ||
7519 | } | ||
7520 | free_page((unsigned long)kbuf); | ||
7521 | } | ||
7522 | - *ppos = p; | ||
7523 | - return read ? read : err; | ||
7524 | + *ppos = p; | ||
7525 | + return read; | ||
7526 | } | ||
7527 | |||
7528 | |||
7529 | @@ -518,8 +510,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf, | ||
7530 | |||
7531 | while (count > 0) { | ||
7532 | char *ptr; | ||
7533 | + /* | ||
7534 | + * Handle first page in case it's not aligned | ||
7535 | + */ | ||
7536 | + if (-realp & (PAGE_SIZE - 1)) | ||
7537 | + sz = -realp & (PAGE_SIZE - 1); | ||
7538 | + else | ||
7539 | + sz = PAGE_SIZE; | ||
7540 | |||
7541 | - sz = size_inside_page(realp, count); | ||
7542 | + sz = min_t(unsigned long, sz, count); | ||
7543 | |||
7544 | /* | ||
7545 | * On ia64 if a page has been mapped somewhere as | ||
7546 | @@ -558,7 +557,6 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | ||
7547 | ssize_t virtr = 0; | ||
7548 | ssize_t written; | ||
7549 | char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ | ||
7550 | - int err = 0; | ||
7551 | |||
7552 | if (p < (unsigned long) high_memory) { | ||
7553 | |||
7554 | @@ -580,20 +578,20 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | ||
7555 | if (!kbuf) | ||
7556 | return wrote ? wrote : -ENOMEM; | ||
7557 | while (count > 0) { | ||
7558 | - int len = size_inside_page(p, count); | ||
7559 | + int len = count; | ||
7560 | |||
7561 | - if (!is_vmalloc_or_module_addr((void *)p)) { | ||
7562 | - err = -ENXIO; | ||
7563 | - break; | ||
7564 | - } | ||
7565 | + if (len > PAGE_SIZE) | ||
7566 | + len = PAGE_SIZE; | ||
7567 | if (len) { | ||
7568 | written = copy_from_user(kbuf, buf, len); | ||
7569 | if (written) { | ||
7570 | - err = -EFAULT; | ||
7571 | - break; | ||
7572 | + if (wrote + virtr) | ||
7573 | + break; | ||
7574 | + free_page((unsigned long)kbuf); | ||
7575 | + return -EFAULT; | ||
7576 | } | ||
7577 | } | ||
7578 | - vwrite(kbuf, (char *)p, len); | ||
7579 | + len = vwrite(kbuf, (char *)p, len); | ||
7580 | count -= len; | ||
7581 | buf += len; | ||
7582 | virtr += len; | ||
7583 | @@ -602,8 +600,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | ||
7584 | free_page((unsigned long)kbuf); | ||
7585 | } | ||
7586 | |||
7587 | - *ppos = p; | ||
7588 | - return virtr + wrote ? : err; | ||
7589 | + *ppos = p; | ||
7590 | + return virtr + wrote; | ||
7591 | } | ||
7592 | #endif | ||
7593 | |||
7594 | diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c | ||
7595 | index dc52f75..d3400b2 100644 | ||
7596 | --- a/drivers/char/nozomi.c | ||
7597 | +++ b/drivers/char/nozomi.c | ||
7598 | @@ -1629,10 +1629,10 @@ static void ntty_close(struct tty_struct *tty, struct file *file) | ||
7599 | |||
7600 | dc->open_ttys--; | ||
7601 | port->count--; | ||
7602 | + tty_port_tty_set(port, NULL); | ||
7603 | |||
7604 | if (port->count == 0) { | ||
7605 | DBG1("close: %d", nport->token_dl); | ||
7606 | - tty_port_tty_set(port, NULL); | ||
7607 | spin_lock_irqsave(&dc->spin_mutex, flags); | ||
7608 | dc->last_ier &= ~(nport->token_dl); | ||
7609 | writew(dc->last_ier, dc->reg_ier); | ||
7610 | diff --git a/drivers/char/random.c b/drivers/char/random.c | ||
7611 | index 908ac1f..04b505e 100644 | ||
7612 | --- a/drivers/char/random.c | ||
7613 | +++ b/drivers/char/random.c | ||
7614 | @@ -1051,6 +1051,12 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) | ||
7615 | /* like a named pipe */ | ||
7616 | } | ||
7617 | |||
7618 | + /* | ||
7619 | + * If we gave the user some bytes, update the access time. | ||
7620 | + */ | ||
7621 | + if (count) | ||
7622 | + file_accessed(file); | ||
7623 | + | ||
7624 | return (count ? count : retval); | ||
7625 | } | ||
7626 | |||
7627 | @@ -1101,6 +1107,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer, | ||
7628 | size_t count, loff_t *ppos) | ||
7629 | { | ||
7630 | size_t ret; | ||
7631 | + struct inode *inode = file->f_path.dentry->d_inode; | ||
7632 | |||
7633 | ret = write_pool(&blocking_pool, buffer, count); | ||
7634 | if (ret) | ||
7635 | @@ -1109,6 +1116,8 @@ static ssize_t random_write(struct file *file, const char __user *buffer, | ||
7636 | if (ret) | ||
7637 | return ret; | ||
7638 | |||
7639 | + inode->i_mtime = current_fs_time(inode->i_sb); | ||
7640 | + mark_inode_dirty(inode); | ||
7641 | return (ssize_t)count; | ||
7642 | } | ||
7643 | |||
7644 | diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c | ||
7645 | index f584407..ecba494 100644 | ||
7646 | --- a/drivers/char/tpm/tpm_infineon.c | ||
7647 | +++ b/drivers/char/tpm/tpm_infineon.c | ||
7648 | @@ -39,12 +39,12 @@ | ||
7649 | struct tpm_inf_dev { | ||
7650 | int iotype; | ||
7651 | |||
7652 | - void __iomem *mem_base; /* MMIO ioremap'd addr */ | ||
7653 | - unsigned long map_base; /* phys MMIO base */ | ||
7654 | - unsigned long map_size; /* MMIO region size */ | ||
7655 | - unsigned int index_off; /* index register offset */ | ||
7656 | + void __iomem *mem_base; /* MMIO ioremap'd addr */ | ||
7657 | + unsigned long map_base; /* phys MMIO base */ | ||
7658 | + unsigned long map_size; /* MMIO region size */ | ||
7659 | + unsigned int index_off; /* index register offset */ | ||
7660 | |||
7661 | - unsigned int data_regs; /* Data registers */ | ||
7662 | + unsigned int data_regs; /* Data registers */ | ||
7663 | unsigned int data_size; | ||
7664 | |||
7665 | unsigned int config_port; /* IO Port config index reg */ | ||
7666 | @@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = { | ||
7667 | .miscdev = {.fops = &inf_ops,}, | ||
7668 | }; | ||
7669 | |||
7670 | -static const struct pnp_device_id tpm_inf_pnp_tbl[] = { | ||
7671 | +static const struct pnp_device_id tpm_pnp_tbl[] = { | ||
7672 | /* Infineon TPMs */ | ||
7673 | {"IFX0101", 0}, | ||
7674 | {"IFX0102", 0}, | ||
7675 | {"", 0} | ||
7676 | }; | ||
7677 | |||
7678 | -MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl); | ||
7679 | +MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); | ||
7680 | |||
7681 | static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | ||
7682 | const struct pnp_device_id *dev_id) | ||
7683 | @@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | ||
7684 | if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && | ||
7685 | !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { | ||
7686 | |||
7687 | - tpm_dev.iotype = TPM_INF_IO_PORT; | ||
7688 | + tpm_dev.iotype = TPM_INF_IO_PORT; | ||
7689 | |||
7690 | tpm_dev.config_port = pnp_port_start(dev, 0); | ||
7691 | tpm_dev.config_size = pnp_port_len(dev, 0); | ||
7692 | @@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | ||
7693 | goto err_last; | ||
7694 | } | ||
7695 | } else if (pnp_mem_valid(dev, 0) && | ||
7696 | - !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { | ||
7697 | + !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { | ||
7698 | |||
7699 | - tpm_dev.iotype = TPM_INF_IO_MEM; | ||
7700 | + tpm_dev.iotype = TPM_INF_IO_MEM; | ||
7701 | |||
7702 | tpm_dev.map_base = pnp_mem_start(dev, 0); | ||
7703 | tpm_dev.map_size = pnp_mem_len(dev, 0); | ||
7704 | @@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, | ||
7705 | "product id 0x%02x%02x" | ||
7706 | "%s\n", | ||
7707 | tpm_dev.iotype == TPM_INF_IO_PORT ? | ||
7708 | - tpm_dev.config_port : | ||
7709 | - tpm_dev.map_base + tpm_dev.index_off, | ||
7710 | + tpm_dev.config_port : | ||
7711 | + tpm_dev.map_base + tpm_dev.index_off, | ||
7712 | tpm_dev.iotype == TPM_INF_IO_PORT ? | ||
7713 | - tpm_dev.data_regs : | ||
7714 | - tpm_dev.map_base + tpm_dev.data_regs, | ||
7715 | + tpm_dev.data_regs : | ||
7716 | + tpm_dev.map_base + tpm_dev.data_regs, | ||
7717 | version[0], version[1], | ||
7718 | vendorid[0], vendorid[1], | ||
7719 | productid[0], productid[1], chipname); | ||
7720 | @@ -607,55 +607,20 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev) | ||
7721 | iounmap(tpm_dev.mem_base); | ||
7722 | release_mem_region(tpm_dev.map_base, tpm_dev.map_size); | ||
7723 | } | ||
7724 | - tpm_dev_vendor_release(chip); | ||
7725 | tpm_remove_hardware(chip->dev); | ||
7726 | } | ||
7727 | } | ||
7728 | |||
7729 | -static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state) | ||
7730 | -{ | ||
7731 | - struct tpm_chip *chip = pnp_get_drvdata(dev); | ||
7732 | - int rc; | ||
7733 | - if (chip) { | ||
7734 | - u8 savestate[] = { | ||
7735 | - 0, 193, /* TPM_TAG_RQU_COMMAND */ | ||
7736 | - 0, 0, 0, 10, /* blob length (in bytes) */ | ||
7737 | - 0, 0, 0, 152 /* TPM_ORD_SaveState */ | ||
7738 | - }; | ||
7739 | - dev_info(&dev->dev, "saving TPM state\n"); | ||
7740 | - rc = tpm_inf_send(chip, savestate, sizeof(savestate)); | ||
7741 | - if (rc < 0) { | ||
7742 | - dev_err(&dev->dev, "error while saving TPM state\n"); | ||
7743 | - return rc; | ||
7744 | - } | ||
7745 | - } | ||
7746 | - return 0; | ||
7747 | -} | ||
7748 | - | ||
7749 | -static int tpm_inf_pnp_resume(struct pnp_dev *dev) | ||
7750 | -{ | ||
7751 | - /* Re-configure TPM after suspending */ | ||
7752 | - tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR); | ||
7753 | - tpm_config_out(IOLIMH, TPM_INF_ADDR); | ||
7754 | - tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA); | ||
7755 | - tpm_config_out(IOLIML, TPM_INF_ADDR); | ||
7756 | - tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA); | ||
7757 | - /* activate register */ | ||
7758 | - tpm_config_out(TPM_DAR, TPM_INF_ADDR); | ||
7759 | - tpm_config_out(0x01, TPM_INF_DATA); | ||
7760 | - tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR); | ||
7761 | - /* disable RESET, LP and IRQC */ | ||
7762 | - tpm_data_out(RESET_LP_IRQC_DISABLE, CMD); | ||
7763 | - return tpm_pm_resume(&dev->dev); | ||
7764 | -} | ||
7765 | - | ||
7766 | static struct pnp_driver tpm_inf_pnp_driver = { | ||
7767 | .name = "tpm_inf_pnp", | ||
7768 | - .id_table = tpm_inf_pnp_tbl, | ||
7769 | + .driver = { | ||
7770 | + .owner = THIS_MODULE, | ||
7771 | + .suspend = tpm_pm_suspend, | ||
7772 | + .resume = tpm_pm_resume, | ||
7773 | + }, | ||
7774 | + .id_table = tpm_pnp_tbl, | ||
7775 | .probe = tpm_inf_pnp_probe, | ||
7776 | - .suspend = tpm_inf_pnp_suspend, | ||
7777 | - .resume = tpm_inf_pnp_resume, | ||
7778 | - .remove = __devexit_p(tpm_inf_pnp_remove) | ||
7779 | + .remove = __devexit_p(tpm_inf_pnp_remove), | ||
7780 | }; | ||
7781 | |||
7782 | static int __init init_inf(void) | ||
7783 | @@ -673,5 +638,5 @@ module_exit(cleanup_inf); | ||
7784 | |||
7785 | MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); | ||
7786 | MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); | ||
7787 | -MODULE_VERSION("1.9.2"); | ||
7788 | +MODULE_VERSION("1.9"); | ||
7789 | MODULE_LICENSE("GPL"); | ||
7790 | diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c | ||
7791 | index 05cab2c..59499ee 100644 | ||
7792 | --- a/drivers/char/tty_io.c | ||
7793 | +++ b/drivers/char/tty_io.c | ||
7794 | @@ -1930,10 +1930,8 @@ static int tty_fasync(int fd, struct file *filp, int on) | ||
7795 | pid = task_pid(current); | ||
7796 | type = PIDTYPE_PID; | ||
7797 | } | ||
7798 | - get_pid(pid); | ||
7799 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
7800 | retval = __f_setown(filp, pid, type, 0); | ||
7801 | - put_pid(pid); | ||
7802 | if (retval) | ||
7803 | goto out; | ||
7804 | } else { | ||
7805 | diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c | ||
7806 | index 537c29a..f060246 100644 | ||
7807 | --- a/drivers/connector/connector.c | ||
7808 | +++ b/drivers/connector/connector.c | ||
7809 | @@ -36,6 +36,17 @@ MODULE_LICENSE("GPL"); | ||
7810 | MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); | ||
7811 | MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); | ||
7812 | |||
7813 | +static u32 cn_idx = CN_IDX_CONNECTOR; | ||
7814 | +static u32 cn_val = CN_VAL_CONNECTOR; | ||
7815 | + | ||
7816 | +module_param(cn_idx, uint, 0); | ||
7817 | +module_param(cn_val, uint, 0); | ||
7818 | +MODULE_PARM_DESC(cn_idx, "Connector's main device idx."); | ||
7819 | +MODULE_PARM_DESC(cn_val, "Connector's main device val."); | ||
7820 | + | ||
7821 | +static DEFINE_MUTEX(notify_lock); | ||
7822 | +static LIST_HEAD(notify_list); | ||
7823 | + | ||
7824 | static struct cn_dev cdev; | ||
7825 | |||
7826 | static int cn_already_initialized; | ||
7827 | @@ -199,6 +210,54 @@ static void cn_rx_skb(struct sk_buff *__skb) | ||
7828 | } | ||
7829 | |||
7830 | /* | ||
7831 | + * Notification routing. | ||
7832 | + * | ||
7833 | + * Gets id and checks if there are notification request for it's idx | ||
7834 | + * and val. If there are such requests notify the listeners with the | ||
7835 | + * given notify event. | ||
7836 | + * | ||
7837 | + */ | ||
7838 | +static void cn_notify(struct cb_id *id, u32 notify_event) | ||
7839 | +{ | ||
7840 | + struct cn_ctl_entry *ent; | ||
7841 | + | ||
7842 | + mutex_lock(¬ify_lock); | ||
7843 | + list_for_each_entry(ent, ¬ify_list, notify_entry) { | ||
7844 | + int i; | ||
7845 | + struct cn_notify_req *req; | ||
7846 | + struct cn_ctl_msg *ctl = ent->msg; | ||
7847 | + int idx_found, val_found; | ||
7848 | + | ||
7849 | + idx_found = val_found = 0; | ||
7850 | + | ||
7851 | + req = (struct cn_notify_req *)ctl->data; | ||
7852 | + for (i = 0; i < ctl->idx_notify_num; ++i, ++req) { | ||
7853 | + if (id->idx >= req->first && | ||
7854 | + id->idx < req->first + req->range) { | ||
7855 | + idx_found = 1; | ||
7856 | + break; | ||
7857 | + } | ||
7858 | + } | ||
7859 | + | ||
7860 | + for (i = 0; i < ctl->val_notify_num; ++i, ++req) { | ||
7861 | + if (id->val >= req->first && | ||
7862 | + id->val < req->first + req->range) { | ||
7863 | + val_found = 1; | ||
7864 | + break; | ||
7865 | + } | ||
7866 | + } | ||
7867 | + | ||
7868 | + if (idx_found && val_found) { | ||
7869 | + struct cn_msg m = { .ack = notify_event, }; | ||
7870 | + | ||
7871 | + memcpy(&m.id, id, sizeof(m.id)); | ||
7872 | + cn_netlink_send(&m, ctl->group, GFP_KERNEL); | ||
7873 | + } | ||
7874 | + } | ||
7875 | + mutex_unlock(¬ify_lock); | ||
7876 | +} | ||
7877 | + | ||
7878 | +/* | ||
7879 | * Callback add routing - adds callback with given ID and name. | ||
7880 | * If there is registered callback with the same ID it will not be added. | ||
7881 | * | ||
7882 | @@ -217,6 +276,8 @@ int cn_add_callback(struct cb_id *id, char *name, | ||
7883 | if (err) | ||
7884 | return err; | ||
7885 | |||
7886 | + cn_notify(id, 0); | ||
7887 | + | ||
7888 | return 0; | ||
7889 | } | ||
7890 | EXPORT_SYMBOL_GPL(cn_add_callback); | ||
7891 | @@ -234,9 +295,111 @@ void cn_del_callback(struct cb_id *id) | ||
7892 | struct cn_dev *dev = &cdev; | ||
7893 | |||
7894 | cn_queue_del_callback(dev->cbdev, id); | ||
7895 | + cn_notify(id, 1); | ||
7896 | } | ||
7897 | EXPORT_SYMBOL_GPL(cn_del_callback); | ||
7898 | |||
7899 | +/* | ||
7900 | + * Checks two connector's control messages to be the same. | ||
7901 | + * Returns 1 if they are the same or if the first one is corrupted. | ||
7902 | + */ | ||
7903 | +static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2) | ||
7904 | +{ | ||
7905 | + int i; | ||
7906 | + struct cn_notify_req *req1, *req2; | ||
7907 | + | ||
7908 | + if (m1->idx_notify_num != m2->idx_notify_num) | ||
7909 | + return 0; | ||
7910 | + | ||
7911 | + if (m1->val_notify_num != m2->val_notify_num) | ||
7912 | + return 0; | ||
7913 | + | ||
7914 | + if (m1->len != m2->len) | ||
7915 | + return 0; | ||
7916 | + | ||
7917 | + if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) != | ||
7918 | + m1->len) | ||
7919 | + return 1; | ||
7920 | + | ||
7921 | + req1 = (struct cn_notify_req *)m1->data; | ||
7922 | + req2 = (struct cn_notify_req *)m2->data; | ||
7923 | + | ||
7924 | + for (i = 0; i < m1->idx_notify_num; ++i) { | ||
7925 | + if (req1->first != req2->first || req1->range != req2->range) | ||
7926 | + return 0; | ||
7927 | + req1++; | ||
7928 | + req2++; | ||
7929 | + } | ||
7930 | + | ||
7931 | + for (i = 0; i < m1->val_notify_num; ++i) { | ||
7932 | + if (req1->first != req2->first || req1->range != req2->range) | ||
7933 | + return 0; | ||
7934 | + req1++; | ||
7935 | + req2++; | ||
7936 | + } | ||
7937 | + | ||
7938 | + return 1; | ||
7939 | +} | ||
7940 | + | ||
7941 | +/* | ||
7942 | + * Main connector device's callback. | ||
7943 | + * | ||
7944 | + * Used for notification of a request's processing. | ||
7945 | + */ | ||
7946 | +static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) | ||
7947 | +{ | ||
7948 | + struct cn_ctl_msg *ctl; | ||
7949 | + struct cn_ctl_entry *ent; | ||
7950 | + u32 size; | ||
7951 | + | ||
7952 | + if (msg->len < sizeof(*ctl)) | ||
7953 | + return; | ||
7954 | + | ||
7955 | + ctl = (struct cn_ctl_msg *)msg->data; | ||
7956 | + | ||
7957 | + size = (sizeof(*ctl) + ((ctl->idx_notify_num + | ||
7958 | + ctl->val_notify_num) * | ||
7959 | + sizeof(struct cn_notify_req))); | ||
7960 | + | ||
7961 | + if (msg->len != size) | ||
7962 | + return; | ||
7963 | + | ||
7964 | + if (ctl->len + sizeof(*ctl) != msg->len) | ||
7965 | + return; | ||
7966 | + | ||
7967 | + /* | ||
7968 | + * Remove notification. | ||
7969 | + */ | ||
7970 | + if (ctl->group == 0) { | ||
7971 | + struct cn_ctl_entry *n; | ||
7972 | + | ||
7973 | + mutex_lock(¬ify_lock); | ||
7974 | + list_for_each_entry_safe(ent, n, ¬ify_list, notify_entry) { | ||
7975 | + if (cn_ctl_msg_equals(ent->msg, ctl)) { | ||
7976 | + list_del(&ent->notify_entry); | ||
7977 | + kfree(ent); | ||
7978 | + } | ||
7979 | + } | ||
7980 | + mutex_unlock(¬ify_lock); | ||
7981 | + | ||
7982 | + return; | ||
7983 | + } | ||
7984 | + | ||
7985 | + size += sizeof(*ent); | ||
7986 | + | ||
7987 | + ent = kzalloc(size, GFP_KERNEL); | ||
7988 | + if (!ent) | ||
7989 | + return; | ||
7990 | + | ||
7991 | + ent->msg = (struct cn_ctl_msg *)(ent + 1); | ||
7992 | + | ||
7993 | + memcpy(ent->msg, ctl, size - sizeof(*ent)); | ||
7994 | + | ||
7995 | + mutex_lock(¬ify_lock); | ||
7996 | + list_add(&ent->notify_entry, ¬ify_list); | ||
7997 | + mutex_unlock(¬ify_lock); | ||
7998 | +} | ||
7999 | + | ||
8000 | static int cn_proc_show(struct seq_file *m, void *v) | ||
8001 | { | ||
8002 | struct cn_queue_dev *dev = cdev.cbdev; | ||
8003 | @@ -274,8 +437,11 @@ static const struct file_operations cn_file_ops = { | ||
8004 | static int __devinit cn_init(void) | ||
8005 | { | ||
8006 | struct cn_dev *dev = &cdev; | ||
8007 | + int err; | ||
8008 | |||
8009 | dev->input = cn_rx_skb; | ||
8010 | + dev->id.idx = cn_idx; | ||
8011 | + dev->id.val = cn_val; | ||
8012 | |||
8013 | dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, | ||
8014 | CN_NETLINK_USERS + 0xf, | ||
8015 | @@ -291,6 +457,14 @@ static int __devinit cn_init(void) | ||
8016 | |||
8017 | cn_already_initialized = 1; | ||
8018 | |||
8019 | + err = cn_add_callback(&dev->id, "connector", &cn_callback); | ||
8020 | + if (err) { | ||
8021 | + cn_already_initialized = 0; | ||
8022 | + cn_queue_free_dev(dev->cbdev); | ||
8023 | + netlink_kernel_release(dev->nls); | ||
8024 | + return -EINVAL; | ||
8025 | + } | ||
8026 | + | ||
8027 | proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); | ||
8028 | |||
8029 | return 0; | ||
8030 | @@ -304,6 +478,7 @@ static void __devexit cn_fini(void) | ||
8031 | |||
8032 | proc_net_remove(&init_net, "connector"); | ||
8033 | |||
8034 | + cn_del_callback(&dev->id); | ||
8035 | cn_queue_free_dev(dev->cbdev); | ||
8036 | netlink_kernel_release(dev->nls); | ||
8037 | } | ||
8038 | diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c | ||
8039 | index 73655ae..6810443 100644 | ||
8040 | --- a/drivers/cpuidle/governors/menu.c | ||
8041 | +++ b/drivers/cpuidle/governors/menu.c | ||
8042 | @@ -18,7 +18,6 @@ | ||
8043 | #include <linux/hrtimer.h> | ||
8044 | #include <linux/tick.h> | ||
8045 | #include <linux/sched.h> | ||
8046 | -#include <linux/math64.h> | ||
8047 | |||
8048 | #define BUCKETS 12 | ||
8049 | #define RESOLUTION 1024 | ||
8050 | @@ -170,12 +169,6 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices); | ||
8051 | |||
8052 | static void menu_update(struct cpuidle_device *dev); | ||
8053 | |||
8054 | -/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ | ||
8055 | -static u64 div_round64(u64 dividend, u32 divisor) | ||
8056 | -{ | ||
8057 | - return div_u64(dividend + (divisor / 2), divisor); | ||
8058 | -} | ||
8059 | - | ||
8060 | /** | ||
8061 | * menu_select - selects the next idle state to enter | ||
8062 | * @dev: the CPU | ||
8063 | @@ -216,8 +209,9 @@ static int menu_select(struct cpuidle_device *dev) | ||
8064 | data->correction_factor[data->bucket] = RESOLUTION * DECAY; | ||
8065 | |||
8066 | /* Make sure to round up for half microseconds */ | ||
8067 | - data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket], | ||
8068 | - RESOLUTION * DECAY); | ||
8069 | + data->predicted_us = DIV_ROUND_CLOSEST( | ||
8070 | + data->expected_us * data->correction_factor[data->bucket], | ||
8071 | + RESOLUTION * DECAY); | ||
8072 | |||
8073 | /* | ||
8074 | * We want to default to C1 (hlt), not to busy polling | ||
8075 | diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c | ||
8076 | index d3a27e0..0af8057 100644 | ||
8077 | --- a/drivers/crypto/padlock-sha.c | ||
8078 | +++ b/drivers/crypto/padlock-sha.c | ||
8079 | @@ -57,23 +57,6 @@ static int padlock_sha_update(struct shash_desc *desc, | ||
8080 | return crypto_shash_update(&dctx->fallback, data, length); | ||
8081 | } | ||
8082 | |||
8083 | -static int padlock_sha_export(struct shash_desc *desc, void *out) | ||
8084 | -{ | ||
8085 | - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); | ||
8086 | - | ||
8087 | - return crypto_shash_export(&dctx->fallback, out); | ||
8088 | -} | ||
8089 | - | ||
8090 | -static int padlock_sha_import(struct shash_desc *desc, const void *in) | ||
8091 | -{ | ||
8092 | - struct padlock_sha_desc *dctx = shash_desc_ctx(desc); | ||
8093 | - struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); | ||
8094 | - | ||
8095 | - dctx->fallback.tfm = ctx->fallback; | ||
8096 | - dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; | ||
8097 | - return crypto_shash_import(&dctx->fallback, in); | ||
8098 | -} | ||
8099 | - | ||
8100 | static inline void padlock_output_block(uint32_t *src, | ||
8101 | uint32_t *dst, size_t count) | ||
8102 | { | ||
8103 | @@ -252,10 +235,7 @@ static struct shash_alg sha1_alg = { | ||
8104 | .update = padlock_sha_update, | ||
8105 | .finup = padlock_sha1_finup, | ||
8106 | .final = padlock_sha1_final, | ||
8107 | - .export = padlock_sha_export, | ||
8108 | - .import = padlock_sha_import, | ||
8109 | .descsize = sizeof(struct padlock_sha_desc), | ||
8110 | - .statesize = sizeof(struct sha1_state), | ||
8111 | .base = { | ||
8112 | .cra_name = "sha1", | ||
8113 | .cra_driver_name = "sha1-padlock", | ||
8114 | @@ -276,10 +256,7 @@ static struct shash_alg sha256_alg = { | ||
8115 | .update = padlock_sha_update, | ||
8116 | .finup = padlock_sha256_finup, | ||
8117 | .final = padlock_sha256_final, | ||
8118 | - .export = padlock_sha_export, | ||
8119 | - .import = padlock_sha_import, | ||
8120 | .descsize = sizeof(struct padlock_sha_desc), | ||
8121 | - .statesize = sizeof(struct sha256_state), | ||
8122 | .base = { | ||
8123 | .cra_name = "sha256", | ||
8124 | .cra_driver_name = "sha256-padlock", | ||
8125 | diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c | ||
8126 | index c558fa1..7585c41 100644 | ||
8127 | --- a/drivers/dma/at_hdmac.c | ||
8128 | +++ b/drivers/dma/at_hdmac.c | ||
8129 | @@ -815,7 +815,7 @@ atc_is_tx_complete(struct dma_chan *chan, | ||
8130 | dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", | ||
8131 | cookie, done ? *done : 0, used ? *used : 0); | ||
8132 | |||
8133 | - spin_lock_bh(&atchan->lock); | ||
8134 | + spin_lock_bh(atchan->lock); | ||
8135 | |||
8136 | last_complete = atchan->completed_cookie; | ||
8137 | last_used = chan->cookie; | ||
8138 | @@ -830,7 +830,7 @@ atc_is_tx_complete(struct dma_chan *chan, | ||
8139 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
8140 | } | ||
8141 | |||
8142 | - spin_unlock_bh(&atchan->lock); | ||
8143 | + spin_unlock_bh(atchan->lock); | ||
8144 | |||
8145 | if (done) | ||
8146 | *done = last_complete; | ||
8147 | diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c | ||
8148 | index dcc4ab7..c524d36 100644 | ||
8149 | --- a/drivers/dma/ioat/dma.c | ||
8150 | +++ b/drivers/dma/ioat/dma.c | ||
8151 | @@ -1032,7 +1032,7 @@ int __devinit ioat_probe(struct ioatdma_device *device) | ||
8152 | dma->dev = &pdev->dev; | ||
8153 | |||
8154 | if (!dma->chancnt) { | ||
8155 | - dev_err(dev, "channel enumeration error\n"); | ||
8156 | + dev_err(dev, "zero channels detected\n"); | ||
8157 | goto err_setup_interrupts; | ||
8158 | } | ||
8159 | |||
8160 | diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h | ||
8161 | index bbc3e78..45edde9 100644 | ||
8162 | --- a/drivers/dma/ioat/dma.h | ||
8163 | +++ b/drivers/dma/ioat/dma.h | ||
8164 | @@ -60,7 +60,6 @@ | ||
8165 | * @dca: direct cache access context | ||
8166 | * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) | ||
8167 | * @enumerate_channels: hw version specific channel enumeration | ||
8168 | - * @reset_hw: hw version specific channel (re)initialization | ||
8169 | * @cleanup_tasklet: select between the v2 and v3 cleanup routines | ||
8170 | * @timer_fn: select between the v2 and v3 timer watchdog routines | ||
8171 | * @self_test: hardware version specific self test for each supported op type | ||
8172 | @@ -79,7 +78,6 @@ struct ioatdma_device { | ||
8173 | struct dca_provider *dca; | ||
8174 | void (*intr_quirk)(struct ioatdma_device *device); | ||
8175 | int (*enumerate_channels)(struct ioatdma_device *device); | ||
8176 | - int (*reset_hw)(struct ioat_chan_common *chan); | ||
8177 | void (*cleanup_tasklet)(unsigned long data); | ||
8178 | void (*timer_fn)(unsigned long data); | ||
8179 | int (*self_test)(struct ioatdma_device *device); | ||
8180 | @@ -266,22 +264,6 @@ static inline void ioat_suspend(struct ioat_chan_common *chan) | ||
8181 | writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
8182 | } | ||
8183 | |||
8184 | -static inline void ioat_reset(struct ioat_chan_common *chan) | ||
8185 | -{ | ||
8186 | - u8 ver = chan->device->version; | ||
8187 | - | ||
8188 | - writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
8189 | -} | ||
8190 | - | ||
8191 | -static inline bool ioat_reset_pending(struct ioat_chan_common *chan) | ||
8192 | -{ | ||
8193 | - u8 ver = chan->device->version; | ||
8194 | - u8 cmd; | ||
8195 | - | ||
8196 | - cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | ||
8197 | - return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; | ||
8198 | -} | ||
8199 | - | ||
8200 | static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) | ||
8201 | { | ||
8202 | struct ioat_chan_common *chan = &ioat->base; | ||
8203 | diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c | ||
8204 | index 5cc37af..8f1f7f0 100644 | ||
8205 | --- a/drivers/dma/ioat/dma_v2.c | ||
8206 | +++ b/drivers/dma/ioat/dma_v2.c | ||
8207 | @@ -239,50 +239,20 @@ void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) | ||
8208 | __ioat2_start_null_desc(ioat); | ||
8209 | } | ||
8210 | |||
8211 | -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) | ||
8212 | +static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
8213 | { | ||
8214 | - unsigned long end = jiffies + tmo; | ||
8215 | - int err = 0; | ||
8216 | + struct ioat_chan_common *chan = &ioat->base; | ||
8217 | + unsigned long phys_complete; | ||
8218 | u32 status; | ||
8219 | |||
8220 | status = ioat_chansts(chan); | ||
8221 | if (is_ioat_active(status) || is_ioat_idle(status)) | ||
8222 | ioat_suspend(chan); | ||
8223 | while (is_ioat_active(status) || is_ioat_idle(status)) { | ||
8224 | - if (tmo && time_after(jiffies, end)) { | ||
8225 | - err = -ETIMEDOUT; | ||
8226 | - break; | ||
8227 | - } | ||
8228 | status = ioat_chansts(chan); | ||
8229 | cpu_relax(); | ||
8230 | } | ||
8231 | |||
8232 | - return err; | ||
8233 | -} | ||
8234 | - | ||
8235 | -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo) | ||
8236 | -{ | ||
8237 | - unsigned long end = jiffies + tmo; | ||
8238 | - int err = 0; | ||
8239 | - | ||
8240 | - ioat_reset(chan); | ||
8241 | - while (ioat_reset_pending(chan)) { | ||
8242 | - if (end && time_after(jiffies, end)) { | ||
8243 | - err = -ETIMEDOUT; | ||
8244 | - break; | ||
8245 | - } | ||
8246 | - cpu_relax(); | ||
8247 | - } | ||
8248 | - | ||
8249 | - return err; | ||
8250 | -} | ||
8251 | - | ||
8252 | -static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) | ||
8253 | -{ | ||
8254 | - struct ioat_chan_common *chan = &ioat->base; | ||
8255 | - unsigned long phys_complete; | ||
8256 | - | ||
8257 | - ioat2_quiesce(chan, 0); | ||
8258 | if (ioat_cleanup_preamble(chan, &phys_complete)) | ||
8259 | __cleanup(ioat, phys_complete); | ||
8260 | |||
8261 | @@ -348,19 +318,6 @@ void ioat2_timer_event(unsigned long data) | ||
8262 | spin_unlock_bh(&chan->cleanup_lock); | ||
8263 | } | ||
8264 | |||
8265 | -static int ioat2_reset_hw(struct ioat_chan_common *chan) | ||
8266 | -{ | ||
8267 | - /* throw away whatever the channel was doing and get it initialized */ | ||
8268 | - u32 chanerr; | ||
8269 | - | ||
8270 | - ioat2_quiesce(chan, msecs_to_jiffies(100)); | ||
8271 | - | ||
8272 | - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8273 | - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8274 | - | ||
8275 | - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | ||
8276 | -} | ||
8277 | - | ||
8278 | /** | ||
8279 | * ioat2_enumerate_channels - find and initialize the device's channels | ||
8280 | * @device: the device to be enumerated | ||
8281 | @@ -403,10 +360,6 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) | ||
8282 | (unsigned long) ioat); | ||
8283 | ioat->xfercap_log = xfercap_log; | ||
8284 | spin_lock_init(&ioat->ring_lock); | ||
8285 | - if (device->reset_hw(&ioat->base)) { | ||
8286 | - i = 0; | ||
8287 | - break; | ||
8288 | - } | ||
8289 | } | ||
8290 | dma->chancnt = i; | ||
8291 | return i; | ||
8292 | @@ -514,6 +467,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | ||
8293 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
8294 | struct ioat_chan_common *chan = &ioat->base; | ||
8295 | struct ioat_ring_ent **ring; | ||
8296 | + u32 chanerr; | ||
8297 | int order; | ||
8298 | |||
8299 | /* have we already been set up? */ | ||
8300 | @@ -523,6 +477,12 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) | ||
8301 | /* Setup register to interrupt and write completion status on error */ | ||
8302 | writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); | ||
8303 | |||
8304 | + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8305 | + if (chanerr) { | ||
8306 | + dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); | ||
8307 | + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8308 | + } | ||
8309 | + | ||
8310 | /* allocate a completion writeback area */ | ||
8311 | /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ | ||
8312 | chan->completion = pci_pool_alloc(chan->device->completion_pool, | ||
8313 | @@ -786,7 +746,13 @@ void ioat2_free_chan_resources(struct dma_chan *c) | ||
8314 | tasklet_disable(&chan->cleanup_task); | ||
8315 | del_timer_sync(&chan->timer); | ||
8316 | device->cleanup_tasklet((unsigned long) ioat); | ||
8317 | - device->reset_hw(chan); | ||
8318 | + | ||
8319 | + /* Delay 100ms after reset to allow internal DMA logic to quiesce | ||
8320 | + * before removing DMA descriptor resources. | ||
8321 | + */ | ||
8322 | + writeb(IOAT_CHANCMD_RESET, | ||
8323 | + chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); | ||
8324 | + mdelay(100); | ||
8325 | |||
8326 | spin_lock_bh(&ioat->ring_lock); | ||
8327 | descs = ioat2_ring_space(ioat); | ||
8328 | @@ -873,7 +839,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) | ||
8329 | int err; | ||
8330 | |||
8331 | device->enumerate_channels = ioat2_enumerate_channels; | ||
8332 | - device->reset_hw = ioat2_reset_hw; | ||
8333 | device->cleanup_tasklet = ioat2_cleanup_tasklet; | ||
8334 | device->timer_fn = ioat2_timer_event; | ||
8335 | device->self_test = ioat_dma_self_test; | ||
8336 | diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h | ||
8337 | index 3afad8d..1d849ef 100644 | ||
8338 | --- a/drivers/dma/ioat/dma_v2.h | ||
8339 | +++ b/drivers/dma/ioat/dma_v2.h | ||
8340 | @@ -185,8 +185,6 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order); | ||
8341 | void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); | ||
8342 | void ioat2_cleanup_tasklet(unsigned long data); | ||
8343 | void ioat2_timer_event(unsigned long data); | ||
8344 | -int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); | ||
8345 | -int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); | ||
8346 | extern struct kobj_type ioat2_ktype; | ||
8347 | extern struct kmem_cache *ioat2_cache; | ||
8348 | #endif /* IOATDMA_V2_H */ | ||
8349 | diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c | ||
8350 | index 9908c9e..42f6f10 100644 | ||
8351 | --- a/drivers/dma/ioat/dma_v3.c | ||
8352 | +++ b/drivers/dma/ioat/dma_v3.c | ||
8353 | @@ -650,11 +650,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
8354 | |||
8355 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
8356 | /* we need 2x the number of descriptors to cover greater than 3 | ||
8357 | - * sources (we need 1 extra source in the q-only continuation | ||
8358 | - * case and 3 extra sources in the p+q continuation case. | ||
8359 | + * sources | ||
8360 | */ | ||
8361 | - if (src_cnt + dmaf_p_disabled_continue(flags) > 3 || | ||
8362 | - (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) { | ||
8363 | + if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { | ||
8364 | with_ext = 1; | ||
8365 | num_descs *= 2; | ||
8366 | } else | ||
8367 | @@ -1130,45 +1128,6 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) | ||
8368 | return 0; | ||
8369 | } | ||
8370 | |||
8371 | -static int ioat3_reset_hw(struct ioat_chan_common *chan) | ||
8372 | -{ | ||
8373 | - /* throw away whatever the channel was doing and get it | ||
8374 | - * initialized, with ioat3 specific workarounds | ||
8375 | - */ | ||
8376 | - struct ioatdma_device *device = chan->device; | ||
8377 | - struct pci_dev *pdev = device->pdev; | ||
8378 | - u32 chanerr; | ||
8379 | - u16 dev_id; | ||
8380 | - int err; | ||
8381 | - | ||
8382 | - ioat2_quiesce(chan, msecs_to_jiffies(100)); | ||
8383 | - | ||
8384 | - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8385 | - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
8386 | - | ||
8387 | - /* -= IOAT ver.3 workarounds =- */ | ||
8388 | - /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
8389 | - * that can cause stability issues for IOAT ver.3, and clear any | ||
8390 | - * pending errors | ||
8391 | - */ | ||
8392 | - pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
8393 | - err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | ||
8394 | - if (err) { | ||
8395 | - dev_err(&pdev->dev, "channel error register unreachable\n"); | ||
8396 | - return err; | ||
8397 | - } | ||
8398 | - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
8399 | - | ||
8400 | - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
8401 | - * (workaround for spurious config parity error after restart) | ||
8402 | - */ | ||
8403 | - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
8404 | - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
8405 | - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
8406 | - | ||
8407 | - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | ||
8408 | -} | ||
8409 | - | ||
8410 | int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
8411 | { | ||
8412 | struct pci_dev *pdev = device->pdev; | ||
8413 | @@ -1178,10 +1137,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
8414 | struct ioat_chan_common *chan; | ||
8415 | bool is_raid_device = false; | ||
8416 | int err; | ||
8417 | + u16 dev_id; | ||
8418 | u32 cap; | ||
8419 | |||
8420 | device->enumerate_channels = ioat2_enumerate_channels; | ||
8421 | - device->reset_hw = ioat3_reset_hw; | ||
8422 | device->self_test = ioat3_dma_self_test; | ||
8423 | dma = &device->common; | ||
8424 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | ||
8425 | @@ -1257,6 +1216,19 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) | ||
8426 | dma->device_prep_dma_xor_val = NULL; | ||
8427 | #endif | ||
8428 | |||
8429 | + /* -= IOAT ver.3 workarounds =- */ | ||
8430 | + /* Write CHANERRMSK_INT with 3E07h to mask out the errors | ||
8431 | + * that can cause stability issues for IOAT ver.3 | ||
8432 | + */ | ||
8433 | + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); | ||
8434 | + | ||
8435 | + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
8436 | + * (workaround for spurious config parity error after restart) | ||
8437 | + */ | ||
8438 | + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
8439 | + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
8440 | + pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
8441 | + | ||
8442 | err = ioat_probe(device); | ||
8443 | if (err) | ||
8444 | return err; | ||
8445 | diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h | ||
8446 | index e8ae63b..f015ec1 100644 | ||
8447 | --- a/drivers/dma/ioat/registers.h | ||
8448 | +++ b/drivers/dma/ioat/registers.h | ||
8449 | @@ -27,7 +27,6 @@ | ||
8450 | |||
8451 | #define IOAT_PCI_DEVICE_ID_OFFSET 0x02 | ||
8452 | #define IOAT_PCI_DMAUNCERRSTS_OFFSET 0x148 | ||
8453 | -#define IOAT_PCI_CHANERR_INT_OFFSET 0x180 | ||
8454 | #define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184 | ||
8455 | |||
8456 | /* MMIO Device Registers */ | ||
8457 | diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c | ||
8458 | index 01bc8e2..a38831c 100644 | ||
8459 | --- a/drivers/edac/amd64_edac.c | ||
8460 | +++ b/drivers/edac/amd64_edac.c | ||
8461 | @@ -13,8 +13,6 @@ module_param(report_gart_errors, int, 0644); | ||
8462 | static int ecc_enable_override; | ||
8463 | module_param(ecc_enable_override, int, 0644); | ||
8464 | |||
8465 | -static struct msr *msrs; | ||
8466 | - | ||
8467 | /* Lookup table for all possible MC control instances */ | ||
8468 | struct amd64_pvt; | ||
8469 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; | ||
8470 | @@ -2620,90 +2618,6 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) | ||
8471 | return empty; | ||
8472 | } | ||
8473 | |||
8474 | -/* get all cores on this DCT */ | ||
8475 | -static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | ||
8476 | -{ | ||
8477 | - int cpu; | ||
8478 | - | ||
8479 | - for_each_online_cpu(cpu) | ||
8480 | - if (amd_get_nb_id(cpu) == nid) | ||
8481 | - cpumask_set_cpu(cpu, mask); | ||
8482 | -} | ||
8483 | - | ||
8484 | -/* check MCG_CTL on all the cpus on this node */ | ||
8485 | -static bool amd64_nb_mce_bank_enabled_on_node(int nid) | ||
8486 | -{ | ||
8487 | - cpumask_var_t mask; | ||
8488 | - int cpu, nbe; | ||
8489 | - bool ret = false; | ||
8490 | - | ||
8491 | - if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
8492 | - amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | ||
8493 | - __func__); | ||
8494 | - return false; | ||
8495 | - } | ||
8496 | - | ||
8497 | - get_cpus_on_this_dct_cpumask(mask, nid); | ||
8498 | - | ||
8499 | - rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); | ||
8500 | - | ||
8501 | - for_each_cpu(cpu, mask) { | ||
8502 | - struct msr *reg = per_cpu_ptr(msrs, cpu); | ||
8503 | - nbe = reg->l & K8_MSR_MCGCTL_NBE; | ||
8504 | - | ||
8505 | - debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | ||
8506 | - cpu, reg->q, | ||
8507 | - (nbe ? "enabled" : "disabled")); | ||
8508 | - | ||
8509 | - if (!nbe) | ||
8510 | - goto out; | ||
8511 | - } | ||
8512 | - ret = true; | ||
8513 | - | ||
8514 | -out: | ||
8515 | - free_cpumask_var(mask); | ||
8516 | - return ret; | ||
8517 | -} | ||
8518 | - | ||
8519 | -static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | ||
8520 | -{ | ||
8521 | - cpumask_var_t cmask; | ||
8522 | - int cpu; | ||
8523 | - | ||
8524 | - if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | ||
8525 | - amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | ||
8526 | - __func__); | ||
8527 | - return false; | ||
8528 | - } | ||
8529 | - | ||
8530 | - get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | ||
8531 | - | ||
8532 | - rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | ||
8533 | - | ||
8534 | - for_each_cpu(cpu, cmask) { | ||
8535 | - | ||
8536 | - struct msr *reg = per_cpu_ptr(msrs, cpu); | ||
8537 | - | ||
8538 | - if (on) { | ||
8539 | - if (reg->l & K8_MSR_MCGCTL_NBE) | ||
8540 | - pvt->flags.ecc_report = 1; | ||
8541 | - | ||
8542 | - reg->l |= K8_MSR_MCGCTL_NBE; | ||
8543 | - } else { | ||
8544 | - /* | ||
8545 | - * Turn off ECC reporting only when it was off before | ||
8546 | - */ | ||
8547 | - if (!pvt->flags.ecc_report) | ||
8548 | - reg->l &= ~K8_MSR_MCGCTL_NBE; | ||
8549 | - } | ||
8550 | - } | ||
8551 | - wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | ||
8552 | - | ||
8553 | - free_cpumask_var(cmask); | ||
8554 | - | ||
8555 | - return 0; | ||
8556 | -} | ||
8557 | - | ||
8558 | /* | ||
8559 | * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" | ||
8560 | * enable it. | ||
8561 | @@ -2711,12 +2625,17 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | ||
8562 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | ||
8563 | { | ||
8564 | struct amd64_pvt *pvt = mci->pvt_info; | ||
8565 | - int err = 0; | ||
8566 | - u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
8567 | + const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); | ||
8568 | + int cpu, idx = 0, err = 0; | ||
8569 | + struct msr msrs[cpumask_weight(cpumask)]; | ||
8570 | + u32 value; | ||
8571 | + u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
8572 | |||
8573 | if (!ecc_enable_override) | ||
8574 | return; | ||
8575 | |||
8576 | + memset(msrs, 0, sizeof(msrs)); | ||
8577 | + | ||
8578 | amd64_printk(KERN_WARNING, | ||
8579 | "'ecc_enable_override' parameter is active, " | ||
8580 | "Enabling AMD ECC hardware now: CAUTION\n"); | ||
8581 | @@ -2732,9 +2651,16 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | ||
8582 | value |= mask; | ||
8583 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | ||
8584 | |||
8585 | - if (amd64_toggle_ecc_err_reporting(pvt, ON)) | ||
8586 | - amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | ||
8587 | - "MCGCTL!\n"); | ||
8588 | + rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
8589 | + | ||
8590 | + for_each_cpu(cpu, cpumask) { | ||
8591 | + if (msrs[idx].l & K8_MSR_MCGCTL_NBE) | ||
8592 | + set_bit(idx, &pvt->old_mcgctl); | ||
8593 | + | ||
8594 | + msrs[idx].l |= K8_MSR_MCGCTL_NBE; | ||
8595 | + idx++; | ||
8596 | + } | ||
8597 | + wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
8598 | |||
8599 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); | ||
8600 | if (err) | ||
8601 | @@ -2775,12 +2701,17 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | ||
8602 | |||
8603 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | ||
8604 | { | ||
8605 | - int err = 0; | ||
8606 | - u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
8607 | + const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); | ||
8608 | + int cpu, idx = 0, err = 0; | ||
8609 | + struct msr msrs[cpumask_weight(cpumask)]; | ||
8610 | + u32 value; | ||
8611 | + u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; | ||
8612 | |||
8613 | if (!pvt->nbctl_mcgctl_saved) | ||
8614 | return; | ||
8615 | |||
8616 | + memset(msrs, 0, sizeof(msrs)); | ||
8617 | + | ||
8618 | err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); | ||
8619 | if (err) | ||
8620 | debugf0("Reading K8_NBCTL failed\n"); | ||
8621 | @@ -2790,9 +2721,66 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | ||
8622 | /* restore the NB Enable MCGCTL bit */ | ||
8623 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | ||
8624 | |||
8625 | - if (amd64_toggle_ecc_err_reporting(pvt, OFF)) | ||
8626 | - amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " | ||
8627 | - "MCGCTL!\n"); | ||
8628 | + rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
8629 | + | ||
8630 | + for_each_cpu(cpu, cpumask) { | ||
8631 | + msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; | ||
8632 | + msrs[idx].l |= | ||
8633 | + test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE; | ||
8634 | + idx++; | ||
8635 | + } | ||
8636 | + | ||
8637 | + wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); | ||
8638 | +} | ||
8639 | + | ||
8640 | +/* get all cores on this DCT */ | ||
8641 | +static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid) | ||
8642 | +{ | ||
8643 | + int cpu; | ||
8644 | + | ||
8645 | + for_each_online_cpu(cpu) | ||
8646 | + if (amd_get_nb_id(cpu) == nid) | ||
8647 | + cpumask_set_cpu(cpu, mask); | ||
8648 | +} | ||
8649 | + | ||
8650 | +/* check MCG_CTL on all the cpus on this node */ | ||
8651 | +static bool amd64_nb_mce_bank_enabled_on_node(int nid) | ||
8652 | +{ | ||
8653 | + cpumask_t mask; | ||
8654 | + struct msr *msrs; | ||
8655 | + int cpu, nbe, idx = 0; | ||
8656 | + bool ret = false; | ||
8657 | + | ||
8658 | + cpumask_clear(&mask); | ||
8659 | + | ||
8660 | + get_cpus_on_this_dct_cpumask(&mask, nid); | ||
8661 | + | ||
8662 | + msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL); | ||
8663 | + if (!msrs) { | ||
8664 | + amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | ||
8665 | + __func__); | ||
8666 | + return false; | ||
8667 | + } | ||
8668 | + | ||
8669 | + rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs); | ||
8670 | + | ||
8671 | + for_each_cpu(cpu, &mask) { | ||
8672 | + nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; | ||
8673 | + | ||
8674 | + debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | ||
8675 | + cpu, msrs[idx].q, | ||
8676 | + (nbe ? "enabled" : "disabled")); | ||
8677 | + | ||
8678 | + if (!nbe) | ||
8679 | + goto out; | ||
8680 | + | ||
8681 | + idx++; | ||
8682 | + } | ||
8683 | + ret = true; | ||
8684 | + | ||
8685 | +out: | ||
8686 | + kfree(msrs); | ||
8687 | + return ret; | ||
8688 | } | ||
8689 | |||
8690 | /* | ||
8691 | @@ -2801,11 +2789,10 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | ||
8692 | * the memory system completely. A command line option allows to force-enable | ||
8693 | * hardware ECC later in amd64_enable_ecc_error_reporting(). | ||
8694 | */ | ||
8695 | -static const char *ecc_msg = | ||
8696 | - "ECC disabled in the BIOS or no ECC capability, module will not load.\n" | ||
8697 | - " Either enable ECC checking or force module loading by setting " | ||
8698 | - "'ecc_enable_override'.\n" | ||
8699 | - " (Note that use of the override may cause unknown side effects.)\n"; | ||
8700 | +static const char *ecc_warning = | ||
8701 | + "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" | ||
8702 | + " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" | ||
8703 | + " Also, use of the override can cause unknown side effects.\n"; | ||
8704 | |||
8705 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | ||
8706 | { | ||
8707 | @@ -2820,7 +2807,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | ||
8708 | |||
8709 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | ||
8710 | if (!ecc_enabled) | ||
8711 | - amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " | ||
8712 | + amd64_printk(KERN_WARNING, "This node reports that Memory ECC " | ||
8713 | "is currently disabled, set F3x%x[22] (%s).\n", | ||
8714 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | ||
8715 | else | ||
8716 | @@ -2828,17 +2815,18 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) | ||
8717 | |||
8718 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); | ||
8719 | if (!nb_mce_en) | ||
8720 | - amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " | ||
8721 | + amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " | ||
8722 | "0x%08x[4] on node %d to enable.\n", | ||
8723 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | ||
8724 | |||
8725 | if (!ecc_enabled || !nb_mce_en) { | ||
8726 | if (!ecc_enable_override) { | ||
8727 | - amd64_printk(KERN_NOTICE, "%s", ecc_msg); | ||
8728 | + amd64_printk(KERN_WARNING, "%s", ecc_warning); | ||
8729 | return -ENODEV; | ||
8730 | } | ||
8731 | + } else | ||
8732 | + /* CLEAR the override, since BIOS controlled it */ | ||
8733 | ecc_enable_override = 0; | ||
8734 | - } | ||
8735 | |||
8736 | return 0; | ||
8737 | } | ||
8738 | @@ -2921,6 +2909,7 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | ||
8739 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | ||
8740 | pvt->mc_type_index = mc_type_index; | ||
8741 | pvt->ops = family_ops(mc_type_index); | ||
8742 | + pvt->old_mcgctl = 0; | ||
8743 | |||
8744 | /* | ||
8745 | * We have the dram_f2_ctl device as an argument, now go reserve its | ||
8746 | @@ -3082,15 +3071,16 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | ||
8747 | |||
8748 | amd64_free_mc_sibling_devices(pvt); | ||
8749 | |||
8750 | + kfree(pvt); | ||
8751 | + mci->pvt_info = NULL; | ||
8752 | + | ||
8753 | + mci_lookup[pvt->mc_node_id] = NULL; | ||
8754 | + | ||
8755 | /* unregister from EDAC MCE */ | ||
8756 | amd_report_gart_errors(false); | ||
8757 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | ||
8758 | |||
8759 | /* Free the EDAC CORE resources */ | ||
8760 | - mci->pvt_info = NULL; | ||
8761 | - mci_lookup[pvt->mc_node_id] = NULL; | ||
8762 | - | ||
8763 | - kfree(pvt); | ||
8764 | edac_mc_free(mci); | ||
8765 | } | ||
8766 | |||
8767 | @@ -3167,29 +3157,23 @@ static void amd64_setup_pci_device(void) | ||
8768 | static int __init amd64_edac_init(void) | ||
8769 | { | ||
8770 | int nb, err = -ENODEV; | ||
8771 | - bool load_ok = false; | ||
8772 | |||
8773 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | ||
8774 | |||
8775 | opstate_init(); | ||
8776 | |||
8777 | if (cache_k8_northbridges() < 0) | ||
8778 | - goto err_ret; | ||
8779 | - | ||
8780 | - msrs = msrs_alloc(); | ||
8781 | - if (!msrs) | ||
8782 | - goto err_ret; | ||
8783 | + return err; | ||
8784 | |||
8785 | err = pci_register_driver(&amd64_pci_driver); | ||
8786 | if (err) | ||
8787 | - goto err_pci; | ||
8788 | + return err; | ||
8789 | |||
8790 | /* | ||
8791 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | ||
8792 | * amd64_pvt structs. These will be used in the 2nd stage init function | ||
8793 | * to finish initialization of the MC instances. | ||
8794 | */ | ||
8795 | - err = -ENODEV; | ||
8796 | for (nb = 0; nb < num_k8_northbridges; nb++) { | ||
8797 | if (!pvt_lookup[nb]) | ||
8798 | continue; | ||
8799 | @@ -3197,21 +3181,16 @@ static int __init amd64_edac_init(void) | ||
8800 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | ||
8801 | if (err) | ||
8802 | goto err_2nd_stage; | ||
8803 | - | ||
8804 | - load_ok = true; | ||
8805 | } | ||
8806 | |||
8807 | - if (load_ok) { | ||
8808 | - amd64_setup_pci_device(); | ||
8809 | - return 0; | ||
8810 | - } | ||
8811 | + amd64_setup_pci_device(); | ||
8812 | + | ||
8813 | + return 0; | ||
8814 | |||
8815 | err_2nd_stage: | ||
8816 | + debugf0("2nd stage failed\n"); | ||
8817 | pci_unregister_driver(&amd64_pci_driver); | ||
8818 | -err_pci: | ||
8819 | - msrs_free(msrs); | ||
8820 | - msrs = NULL; | ||
8821 | -err_ret: | ||
8822 | + | ||
8823 | return err; | ||
8824 | } | ||
8825 | |||
8826 | @@ -3221,9 +3200,6 @@ static void __exit amd64_edac_exit(void) | ||
8827 | edac_pci_release_generic_ctl(amd64_ctl_pci); | ||
8828 | |||
8829 | pci_unregister_driver(&amd64_pci_driver); | ||
8830 | - | ||
8831 | - msrs_free(msrs); | ||
8832 | - msrs = NULL; | ||
8833 | } | ||
8834 | |||
8835 | module_init(amd64_edac_init); | ||
8836 | diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h | ||
8837 | index bba6c94..c6f359a 100644 | ||
8838 | --- a/drivers/edac/amd64_edac.h | ||
8839 | +++ b/drivers/edac/amd64_edac.h | ||
8840 | @@ -147,8 +147,6 @@ | ||
8841 | #define MAX_CS_COUNT 8 | ||
8842 | #define DRAM_REG_COUNT 8 | ||
8843 | |||
8844 | -#define ON true | ||
8845 | -#define OFF false | ||
8846 | |||
8847 | /* | ||
8848 | * PCI-defined configuration space registers | ||
8849 | @@ -388,7 +386,10 @@ enum { | ||
8850 | #define K8_NBCAP_DUAL_NODE BIT(1) | ||
8851 | #define K8_NBCAP_DCT_DUAL BIT(0) | ||
8852 | |||
8853 | -/* MSRs */ | ||
8854 | +/* | ||
8855 | + * MSR Regs | ||
8856 | + */ | ||
8857 | +#define K8_MSR_MCGCTL 0x017b | ||
8858 | #define K8_MSR_MCGCTL_NBE BIT(4) | ||
8859 | |||
8860 | #define K8_MSR_MC4CTL 0x0410 | ||
8861 | @@ -486,6 +487,7 @@ struct amd64_pvt { | ||
8862 | /* Save old hw registers' values before we modified them */ | ||
8863 | u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ | ||
8864 | u32 old_nbctl; | ||
8865 | + unsigned long old_mcgctl; /* per core on this node */ | ||
8866 | |||
8867 | /* MC Type Index value: socket F vs Family 10h */ | ||
8868 | u32 mc_type_index; | ||
8869 | @@ -493,7 +495,6 @@ struct amd64_pvt { | ||
8870 | /* misc settings */ | ||
8871 | struct flags { | ||
8872 | unsigned long cf8_extcfg:1; | ||
8873 | - unsigned long ecc_report:1; | ||
8874 | } flags; | ||
8875 | }; | ||
8876 | |||
8877 | diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c | ||
8878 | index adc10a2..77a9579 100644 | ||
8879 | --- a/drivers/edac/i5000_edac.c | ||
8880 | +++ b/drivers/edac/i5000_edac.c | ||
8881 | @@ -577,13 +577,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, | ||
8882 | debugf0("\tUncorrected bits= 0x%x\n", ue_errors); | ||
8883 | |||
8884 | branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); | ||
8885 | - | ||
8886 | - /* | ||
8887 | - * According with i5000 datasheet, bit 28 has no significance | ||
8888 | - * for errors M4Err-M12Err and M17Err-M21Err, on FERR_NF_FBD | ||
8889 | - */ | ||
8890 | - channel = branch & 2; | ||
8891 | - | ||
8892 | + channel = branch; | ||
8893 | bank = NREC_BANK(info->nrecmema); | ||
8894 | rank = NREC_RANK(info->nrecmema); | ||
8895 | rdwr = NREC_RDWR(info->nrecmema); | ||
8896 | diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c | ||
8897 | index ed635ae..e4864e8 100644 | ||
8898 | --- a/drivers/firewire/core-card.c | ||
8899 | +++ b/drivers/firewire/core-card.c | ||
8900 | @@ -57,9 +57,6 @@ static LIST_HEAD(card_list); | ||
8901 | static LIST_HEAD(descriptor_list); | ||
8902 | static int descriptor_count; | ||
8903 | |||
8904 | -/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */ | ||
8905 | -static size_t config_rom_length = 1 + 4 + 1 + 1; | ||
8906 | - | ||
8907 | #define BIB_CRC(v) ((v) << 0) | ||
8908 | #define BIB_CRC_LENGTH(v) ((v) << 16) | ||
8909 | #define BIB_INFO_LENGTH(v) ((v) << 24) | ||
8910 | @@ -75,7 +72,7 @@ static size_t config_rom_length = 1 + 4 + 1 + 1; | ||
8911 | #define BIB_CMC ((1) << 30) | ||
8912 | #define BIB_IMC ((1) << 31) | ||
8913 | |||
8914 | -static u32 *generate_config_rom(struct fw_card *card) | ||
8915 | +static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length) | ||
8916 | { | ||
8917 | struct fw_descriptor *desc; | ||
8918 | static u32 config_rom[256]; | ||
8919 | @@ -134,7 +131,7 @@ static u32 *generate_config_rom(struct fw_card *card) | ||
8920 | for (i = 0; i < j; i += length + 1) | ||
8921 | length = fw_compute_block_crc(config_rom + i); | ||
8922 | |||
8923 | - WARN_ON(j != config_rom_length); | ||
8924 | + *config_rom_length = j; | ||
8925 | |||
8926 | return config_rom; | ||
8927 | } | ||
8928 | @@ -143,24 +140,17 @@ static void update_config_roms(void) | ||
8929 | { | ||
8930 | struct fw_card *card; | ||
8931 | u32 *config_rom; | ||
8932 | + size_t length; | ||
8933 | |||
8934 | list_for_each_entry (card, &card_list, link) { | ||
8935 | - config_rom = generate_config_rom(card); | ||
8936 | - card->driver->set_config_rom(card, config_rom, | ||
8937 | - config_rom_length); | ||
8938 | + config_rom = generate_config_rom(card, &length); | ||
8939 | + card->driver->set_config_rom(card, config_rom, length); | ||
8940 | } | ||
8941 | } | ||
8942 | |||
8943 | -static size_t required_space(struct fw_descriptor *desc) | ||
8944 | -{ | ||
8945 | - /* descriptor + entry into root dir + optional immediate entry */ | ||
8946 | - return desc->length + 1 + (desc->immediate > 0 ? 1 : 0); | ||
8947 | -} | ||
8948 | - | ||
8949 | int fw_core_add_descriptor(struct fw_descriptor *desc) | ||
8950 | { | ||
8951 | size_t i; | ||
8952 | - int ret; | ||
8953 | |||
8954 | /* | ||
8955 | * Check descriptor is valid; the length of all blocks in the | ||
8956 | @@ -176,21 +166,15 @@ int fw_core_add_descriptor(struct fw_descriptor *desc) | ||
8957 | |||
8958 | mutex_lock(&card_mutex); | ||
8959 | |||
8960 | - if (config_rom_length + required_space(desc) > 256) { | ||
8961 | - ret = -EBUSY; | ||
8962 | - } else { | ||
8963 | - list_add_tail(&desc->link, &descriptor_list); | ||
8964 | - config_rom_length += required_space(desc); | ||
8965 | + list_add_tail(&desc->link, &descriptor_list); | ||
8966 | + descriptor_count++; | ||
8967 | + if (desc->immediate > 0) | ||
8968 | descriptor_count++; | ||
8969 | - if (desc->immediate > 0) | ||
8970 | - descriptor_count++; | ||
8971 | - update_config_roms(); | ||
8972 | - ret = 0; | ||
8973 | - } | ||
8974 | + update_config_roms(); | ||
8975 | |||
8976 | mutex_unlock(&card_mutex); | ||
8977 | |||
8978 | - return ret; | ||
8979 | + return 0; | ||
8980 | } | ||
8981 | EXPORT_SYMBOL(fw_core_add_descriptor); | ||
8982 | |||
8983 | @@ -199,7 +183,6 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc) | ||
8984 | mutex_lock(&card_mutex); | ||
8985 | |||
8986 | list_del(&desc->link); | ||
8987 | - config_rom_length -= required_space(desc); | ||
8988 | descriptor_count--; | ||
8989 | if (desc->immediate > 0) | ||
8990 | descriptor_count--; | ||
8991 | @@ -453,6 +436,7 @@ int fw_card_add(struct fw_card *card, | ||
8992 | u32 max_receive, u32 link_speed, u64 guid) | ||
8993 | { | ||
8994 | u32 *config_rom; | ||
8995 | + size_t length; | ||
8996 | int ret; | ||
8997 | |||
8998 | card->max_receive = max_receive; | ||
8999 | @@ -461,8 +445,8 @@ int fw_card_add(struct fw_card *card, | ||
9000 | |||
9001 | mutex_lock(&card_mutex); | ||
9002 | |||
9003 | - config_rom = generate_config_rom(card); | ||
9004 | - ret = card->driver->enable(card, config_rom, config_rom_length); | ||
9005 | + config_rom = generate_config_rom(card, &length); | ||
9006 | + ret = card->driver->enable(card, config_rom, length); | ||
9007 | if (ret == 0) | ||
9008 | list_add_tail(&card->link, &card_list); | ||
9009 | |||
9010 | diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c | ||
9011 | index 720b39b..94260aa 100644 | ||
9012 | --- a/drivers/firewire/ohci.c | ||
9013 | +++ b/drivers/firewire/ohci.c | ||
9014 | @@ -2209,13 +2209,6 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, | ||
9015 | page = payload >> PAGE_SHIFT; | ||
9016 | offset = payload & ~PAGE_MASK; | ||
9017 | rest = p->payload_length; | ||
9018 | - /* | ||
9019 | - * The controllers I've tested have not worked correctly when | ||
9020 | - * second_req_count is zero. Rather than do something we know won't | ||
9021 | - * work, return an error | ||
9022 | - */ | ||
9023 | - if (rest == 0) | ||
9024 | - return -EINVAL; | ||
9025 | |||
9026 | /* FIXME: make packet-per-buffer/dual-buffer a context option */ | ||
9027 | while (rest > 0) { | ||
9028 | @@ -2269,7 +2262,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | ||
9029 | unsigned long payload) | ||
9030 | { | ||
9031 | struct iso_context *ctx = container_of(base, struct iso_context, base); | ||
9032 | - struct descriptor *d, *pd; | ||
9033 | + struct descriptor *d = NULL, *pd = NULL; | ||
9034 | struct fw_iso_packet *p = packet; | ||
9035 | dma_addr_t d_bus, page_bus; | ||
9036 | u32 z, header_z, rest; | ||
9037 | @@ -2307,9 +2300,8 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, | ||
9038 | d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); | ||
9039 | |||
9040 | rest = payload_per_buffer; | ||
9041 | - pd = d; | ||
9042 | for (j = 1; j < z; j++) { | ||
9043 | - pd++; | ||
9044 | + pd = d + j; | ||
9045 | pd->control = cpu_to_le16(DESCRIPTOR_STATUS | | ||
9046 | DESCRIPTOR_INPUT_MORE); | ||
9047 | |||
9048 | @@ -2412,7 +2404,6 @@ static void ohci_pmac_off(struct pci_dev *dev) | ||
9049 | |||
9050 | #define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT | ||
9051 | #define PCI_DEVICE_ID_AGERE_FW643 0x5901 | ||
9052 | -#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024 | ||
9053 | |||
9054 | static int __devinit pci_probe(struct pci_dev *dev, | ||
9055 | const struct pci_device_id *ent) | ||
9056 | @@ -2478,8 +2469,7 @@ static int __devinit pci_probe(struct pci_dev *dev, | ||
9057 | #if !defined(CONFIG_X86_32) | ||
9058 | /* dual-buffer mode is broken with descriptor addresses above 2G */ | ||
9059 | if (dev->vendor == PCI_VENDOR_ID_TI && | ||
9060 | - (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 || | ||
9061 | - dev->device == PCI_DEVICE_ID_TI_TSB43AB23)) | ||
9062 | + dev->device == PCI_DEVICE_ID_TI_TSB43AB22) | ||
9063 | ohci->use_dualbuffer = false; | ||
9064 | #endif | ||
9065 | |||
9066 | diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c | ||
9067 | index 3a2ccb0..938100f 100644 | ||
9068 | --- a/drivers/firmware/dmi_scan.c | ||
9069 | +++ b/drivers/firmware/dmi_scan.c | ||
9070 | @@ -429,7 +429,7 @@ static bool dmi_matches(const struct dmi_system_id *dmi) | ||
9071 | for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) { | ||
9072 | int s = dmi->matches[i].slot; | ||
9073 | if (s == DMI_NONE) | ||
9074 | - break; | ||
9075 | + continue; | ||
9076 | if (dmi_ident[s] | ||
9077 | && strstr(dmi_ident[s], dmi->matches[i].substr)) | ||
9078 | continue; | ||
9079 | @@ -440,15 +440,6 @@ static bool dmi_matches(const struct dmi_system_id *dmi) | ||
9080 | } | ||
9081 | |||
9082 | /** | ||
9083 | - * dmi_is_end_of_table - check for end-of-table marker | ||
9084 | - * @dmi: pointer to the dmi_system_id structure to check | ||
9085 | - */ | ||
9086 | -static bool dmi_is_end_of_table(const struct dmi_system_id *dmi) | ||
9087 | -{ | ||
9088 | - return dmi->matches[0].slot == DMI_NONE; | ||
9089 | -} | ||
9090 | - | ||
9091 | -/** | ||
9092 | * dmi_check_system - check system DMI data | ||
9093 | * @list: array of dmi_system_id structures to match against | ||
9094 | * All non-null elements of the list must match | ||
9095 | @@ -466,7 +457,7 @@ int dmi_check_system(const struct dmi_system_id *list) | ||
9096 | int count = 0; | ||
9097 | const struct dmi_system_id *d; | ||
9098 | |||
9099 | - for (d = list; !dmi_is_end_of_table(d); d++) | ||
9100 | + for (d = list; d->ident; d++) | ||
9101 | if (dmi_matches(d)) { | ||
9102 | count++; | ||
9103 | if (d->callback && d->callback(d)) | ||
9104 | @@ -493,7 +484,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list) | ||
9105 | { | ||
9106 | const struct dmi_system_id *d; | ||
9107 | |||
9108 | - for (d = list; !dmi_is_end_of_table(d); d++) | ||
9109 | + for (d = list; d->ident; d++) | ||
9110 | if (dmi_matches(d)) | ||
9111 | return d; | ||
9112 | |||
9113 | diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c | ||
9114 | index a1fce68..628eae3 100644 | ||
9115 | --- a/drivers/gpu/drm/ati_pcigart.c | ||
9116 | +++ b/drivers/gpu/drm/ati_pcigart.c | ||
9117 | @@ -39,7 +39,8 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev, | ||
9118 | struct drm_ati_pcigart_info *gart_info) | ||
9119 | { | ||
9120 | gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size, | ||
9121 | - PAGE_SIZE); | ||
9122 | + PAGE_SIZE, | ||
9123 | + gart_info->table_mask); | ||
9124 | if (gart_info->table_handle == NULL) | ||
9125 | return -ENOMEM; | ||
9126 | |||
9127 | @@ -111,13 +112,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga | ||
9128 | if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) { | ||
9129 | DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n"); | ||
9130 | |||
9131 | - if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { | ||
9132 | - DRM_ERROR("fail to set dma mask to 0x%Lx\n", | ||
9133 | - gart_info->table_mask); | ||
9134 | - ret = 1; | ||
9135 | - goto done; | ||
9136 | - } | ||
9137 | - | ||
9138 | ret = drm_ati_alloc_pcigart_table(dev, gart_info); | ||
9139 | if (ret) { | ||
9140 | DRM_ERROR("cannot allocate PCI GART page!\n"); | ||
9141 | diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c | ||
9142 | index 8417cc4..3d09e30 100644 | ||
9143 | --- a/drivers/gpu/drm/drm_bufs.c | ||
9144 | +++ b/drivers/gpu/drm/drm_bufs.c | ||
9145 | @@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, | ||
9146 | * As we're limiting the address to 2^32-1 (or less), | ||
9147 | * casting it down to 32 bits is no problem, but we | ||
9148 | * need to point to a 64bit variable first. */ | ||
9149 | - dmah = drm_pci_alloc(dev, map->size, map->size); | ||
9150 | + dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL); | ||
9151 | if (!dmah) { | ||
9152 | kfree(map); | ||
9153 | return -ENOMEM; | ||
9154 | @@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request) | ||
9155 | |||
9156 | while (entry->buf_count < count) { | ||
9157 | |||
9158 | - dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000); | ||
9159 | + dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); | ||
9160 | |||
9161 | if (!dmah) { | ||
9162 | /* Set count correctly so we free the proper amount. */ | ||
9163 | diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c | ||
9164 | index afed886..bbfd110 100644 | ||
9165 | --- a/drivers/gpu/drm/drm_crtc_helper.c | ||
9166 | +++ b/drivers/gpu/drm/drm_crtc_helper.c | ||
9167 | @@ -1020,9 +1020,6 @@ bool drm_helper_initial_config(struct drm_device *dev) | ||
9168 | { | ||
9169 | int count = 0; | ||
9170 | |||
9171 | - /* disable all the possible outputs/crtcs before entering KMS mode */ | ||
9172 | - drm_helper_disable_unused_functions(dev); | ||
9173 | - | ||
9174 | drm_fb_helper_parse_command_line(dev); | ||
9175 | |||
9176 | count = drm_helper_probe_connector_modes(dev, | ||
9177 | diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c | ||
9178 | index 8bf3770..e9dbb48 100644 | ||
9179 | --- a/drivers/gpu/drm/drm_gem.c | ||
9180 | +++ b/drivers/gpu/drm/drm_gem.c | ||
9181 | @@ -142,6 +142,19 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) | ||
9182 | if (IS_ERR(obj->filp)) | ||
9183 | goto free; | ||
9184 | |||
9185 | + /* Basically we want to disable the OOM killer and handle ENOMEM | ||
9186 | + * ourselves by sacrificing pages from cached buffers. | ||
9187 | + * XXX shmem_file_[gs]et_gfp_mask() | ||
9188 | + */ | ||
9189 | + mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, | ||
9190 | + GFP_HIGHUSER | | ||
9191 | + __GFP_COLD | | ||
9192 | + __GFP_FS | | ||
9193 | + __GFP_RECLAIMABLE | | ||
9194 | + __GFP_NORETRY | | ||
9195 | + __GFP_NOWARN | | ||
9196 | + __GFP_NOMEMALLOC); | ||
9197 | + | ||
9198 | kref_init(&obj->refcount); | ||
9199 | kref_init(&obj->handlecount); | ||
9200 | obj->size = size; | ||
9201 | diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c | ||
9202 | index 332d743..0a6f0b3 100644 | ||
9203 | --- a/drivers/gpu/drm/drm_irq.c | ||
9204 | +++ b/drivers/gpu/drm/drm_irq.c | ||
9205 | @@ -429,21 +429,15 @@ int drm_vblank_get(struct drm_device *dev, int crtc) | ||
9206 | |||
9207 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
9208 | /* Going from 0->1 means we have to enable interrupts again */ | ||
9209 | - if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { | ||
9210 | - if (!dev->vblank_enabled[crtc]) { | ||
9211 | - ret = dev->driver->enable_vblank(dev, crtc); | ||
9212 | - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); | ||
9213 | - if (ret) | ||
9214 | - atomic_dec(&dev->vblank_refcount[crtc]); | ||
9215 | - else { | ||
9216 | - dev->vblank_enabled[crtc] = 1; | ||
9217 | - drm_update_vblank_count(dev, crtc); | ||
9218 | - } | ||
9219 | - } | ||
9220 | - } else { | ||
9221 | - if (!dev->vblank_enabled[crtc]) { | ||
9222 | + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && | ||
9223 | + !dev->vblank_enabled[crtc]) { | ||
9224 | + ret = dev->driver->enable_vblank(dev, crtc); | ||
9225 | + DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); | ||
9226 | + if (ret) | ||
9227 | atomic_dec(&dev->vblank_refcount[crtc]); | ||
9228 | - ret = -EINVAL; | ||
9229 | + else { | ||
9230 | + dev->vblank_enabled[crtc] = 1; | ||
9231 | + drm_update_vblank_count(dev, crtc); | ||
9232 | } | ||
9233 | } | ||
9234 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
9235 | @@ -470,18 +464,6 @@ void drm_vblank_put(struct drm_device *dev, int crtc) | ||
9236 | } | ||
9237 | EXPORT_SYMBOL(drm_vblank_put); | ||
9238 | |||
9239 | -void drm_vblank_off(struct drm_device *dev, int crtc) | ||
9240 | -{ | ||
9241 | - unsigned long irqflags; | ||
9242 | - | ||
9243 | - spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
9244 | - DRM_WAKEUP(&dev->vbl_queue[crtc]); | ||
9245 | - dev->vblank_enabled[crtc] = 0; | ||
9246 | - dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); | ||
9247 | - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
9248 | -} | ||
9249 | -EXPORT_SYMBOL(drm_vblank_off); | ||
9250 | - | ||
9251 | /** | ||
9252 | * drm_vblank_pre_modeset - account for vblanks across mode sets | ||
9253 | * @dev: DRM device | ||
9254 | diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c | ||
9255 | index e68ebf9..577094f 100644 | ||
9256 | --- a/drivers/gpu/drm/drm_pci.c | ||
9257 | +++ b/drivers/gpu/drm/drm_pci.c | ||
9258 | @@ -47,7 +47,8 @@ | ||
9259 | /** | ||
9260 | * \brief Allocate a PCI consistent memory block, for DMA. | ||
9261 | */ | ||
9262 | -drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) | ||
9263 | +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, | ||
9264 | + dma_addr_t maxaddr) | ||
9265 | { | ||
9266 | drm_dma_handle_t *dmah; | ||
9267 | #if 1 | ||
9268 | @@ -62,6 +63,11 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali | ||
9269 | if (align > size) | ||
9270 | return NULL; | ||
9271 | |||
9272 | + if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) { | ||
9273 | + DRM_ERROR("Setting pci dma mask failed\n"); | ||
9274 | + return NULL; | ||
9275 | + } | ||
9276 | + | ||
9277 | dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL); | ||
9278 | if (!dmah) | ||
9279 | return NULL; | ||
9280 | diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c | ||
9281 | index 7e859d6..26bf055 100644 | ||
9282 | --- a/drivers/gpu/drm/i915/i915_debugfs.c | ||
9283 | +++ b/drivers/gpu/drm/i915/i915_debugfs.c | ||
9284 | @@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) | ||
9285 | list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { | ||
9286 | obj = obj_priv->obj; | ||
9287 | if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { | ||
9288 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9289 | + ret = i915_gem_object_get_pages(obj); | ||
9290 | if (ret) { | ||
9291 | DRM_ERROR("Failed to get pages: %d\n", ret); | ||
9292 | spin_unlock(&dev_priv->mm.active_list_lock); | ||
9293 | @@ -384,7 +384,37 @@ out: | ||
9294 | return 0; | ||
9295 | } | ||
9296 | |||
9297 | +static int i915_registers_info(struct seq_file *m, void *data) { | ||
9298 | + struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
9299 | + struct drm_device *dev = node->minor->dev; | ||
9300 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
9301 | + uint32_t reg; | ||
9302 | + | ||
9303 | +#define DUMP_RANGE(start, end) \ | ||
9304 | + for (reg=start; reg < end; reg += 4) \ | ||
9305 | + seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg)); | ||
9306 | + | ||
9307 | + DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */ | ||
9308 | + DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */ | ||
9309 | + DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */ | ||
9310 | + DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */ | ||
9311 | + DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */ | ||
9312 | + DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */ | ||
9313 | + DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */ | ||
9314 | + DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */ | ||
9315 | + DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */ | ||
9316 | + DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */ | ||
9317 | + DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */ | ||
9318 | + DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */ | ||
9319 | + DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */ | ||
9320 | + DUMP_RANGE(0x73000, 0x73fff); /* performance counters */ | ||
9321 | + | ||
9322 | + return 0; | ||
9323 | +} | ||
9324 | + | ||
9325 | + | ||
9326 | static struct drm_info_list i915_debugfs_list[] = { | ||
9327 | + {"i915_regs", i915_registers_info, 0}, | ||
9328 | {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, | ||
9329 | {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, | ||
9330 | {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, | ||
9331 | diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c | ||
9332 | index eaa1893..e5b138b 100644 | ||
9333 | --- a/drivers/gpu/drm/i915/i915_dma.c | ||
9334 | +++ b/drivers/gpu/drm/i915/i915_dma.c | ||
9335 | @@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev) | ||
9336 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
9337 | /* Program Hardware Status Page */ | ||
9338 | dev_priv->status_page_dmah = | ||
9339 | - drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); | ||
9340 | + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); | ||
9341 | |||
9342 | if (!dev_priv->status_page_dmah) { | ||
9343 | DRM_ERROR("Can not allocate hardware status page\n"); | ||
9344 | @@ -1111,8 +1111,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) | ||
9345 | { | ||
9346 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9347 | struct drm_mm_node *compressed_fb, *compressed_llb; | ||
9348 | - unsigned long cfb_base; | ||
9349 | - unsigned long ll_base = 0; | ||
9350 | + unsigned long cfb_base, ll_base; | ||
9351 | |||
9352 | /* Leave 1M for line length buffer & misc. */ | ||
9353 | compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); | ||
9354 | @@ -1252,8 +1251,6 @@ static int i915_load_modeset_init(struct drm_device *dev, | ||
9355 | if (ret) | ||
9356 | goto destroy_ringbuffer; | ||
9357 | |||
9358 | - intel_modeset_init(dev); | ||
9359 | - | ||
9360 | ret = drm_irq_install(dev); | ||
9361 | if (ret) | ||
9362 | goto destroy_ringbuffer; | ||
9363 | @@ -1268,6 +1265,8 @@ static int i915_load_modeset_init(struct drm_device *dev, | ||
9364 | |||
9365 | I915_WRITE(INSTPM, (1 << 5) | (1 << 21)); | ||
9366 | |||
9367 | + intel_modeset_init(dev); | ||
9368 | + | ||
9369 | drm_helper_initial_config(dev); | ||
9370 | |||
9371 | return 0; | ||
9372 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h | ||
9373 | index f5d49a7..a725f65 100644 | ||
9374 | --- a/drivers/gpu/drm/i915/i915_drv.h | ||
9375 | +++ b/drivers/gpu/drm/i915/i915_drv.h | ||
9376 | @@ -467,15 +467,6 @@ typedef struct drm_i915_private { | ||
9377 | struct list_head flushing_list; | ||
9378 | |||
9379 | /** | ||
9380 | - * List of objects currently pending a GPU write flush. | ||
9381 | - * | ||
9382 | - * All elements on this list will belong to either the | ||
9383 | - * active_list or flushing_list, last_rendering_seqno can | ||
9384 | - * be used to differentiate between the two elements. | ||
9385 | - */ | ||
9386 | - struct list_head gpu_write_list; | ||
9387 | - | ||
9388 | - /** | ||
9389 | * LRU list of objects which are not in the ringbuffer and | ||
9390 | * are ready to unbind, but are still in the GTT. | ||
9391 | * | ||
9392 | @@ -555,7 +546,6 @@ typedef struct drm_i915_private { | ||
9393 | struct timer_list idle_timer; | ||
9394 | bool busy; | ||
9395 | u16 orig_clock; | ||
9396 | - struct drm_connector *int_lvds_connector; | ||
9397 | } drm_i915_private_t; | ||
9398 | |||
9399 | /** driver private structure attached to each drm_gem_object */ | ||
9400 | @@ -567,8 +557,6 @@ struct drm_i915_gem_object { | ||
9401 | |||
9402 | /** This object's place on the active/flushing/inactive lists */ | ||
9403 | struct list_head list; | ||
9404 | - /** This object's place on GPU write list */ | ||
9405 | - struct list_head gpu_write_list; | ||
9406 | |||
9407 | /** This object's place on the fenced object LRU */ | ||
9408 | struct list_head fence_list; | ||
9409 | @@ -825,17 +813,15 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | ||
9410 | int i915_gem_do_init(struct drm_device *dev, unsigned long start, | ||
9411 | unsigned long end); | ||
9412 | int i915_gem_idle(struct drm_device *dev); | ||
9413 | -int i915_lp_ring_sync(struct drm_device *dev); | ||
9414 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
9415 | int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, | ||
9416 | int write); | ||
9417 | -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj); | ||
9418 | int i915_gem_attach_phys_object(struct drm_device *dev, | ||
9419 | struct drm_gem_object *obj, int id); | ||
9420 | void i915_gem_detach_phys_object(struct drm_device *dev, | ||
9421 | struct drm_gem_object *obj); | ||
9422 | void i915_gem_free_all_phys_object(struct drm_device *dev); | ||
9423 | -int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); | ||
9424 | +int i915_gem_object_get_pages(struct drm_gem_object *obj); | ||
9425 | void i915_gem_object_put_pages(struct drm_gem_object *obj); | ||
9426 | void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); | ||
9427 | |||
9428 | @@ -971,7 +957,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
9429 | #define IS_I85X(dev) ((dev)->pci_device == 0x3582) | ||
9430 | #define IS_I855(dev) ((dev)->pci_device == 0x3582) | ||
9431 | #define IS_I865G(dev) ((dev)->pci_device == 0x2572) | ||
9432 | -#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev)) | ||
9433 | |||
9434 | #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a) | ||
9435 | #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) | ||
9436 | @@ -1033,12 +1018,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
9437 | */ | ||
9438 | #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ | ||
9439 | IS_I915GM(dev))) | ||
9440 | -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_IGD(dev)) | ||
9441 | #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | ||
9442 | #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | ||
9443 | #define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev)) | ||
9444 | -#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \ | ||
9445 | - !IS_IGDNG(dev) && !IS_IGD(dev)) | ||
9446 | #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev)) | ||
9447 | /* dsparb controlled by hw only */ | ||
9448 | #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev)) | ||
9449 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c | ||
9450 | index 04da731..abfc27b 100644 | ||
9451 | --- a/drivers/gpu/drm/i915/i915_gem.c | ||
9452 | +++ b/drivers/gpu/drm/i915/i915_gem.c | ||
9453 | @@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, | ||
9454 | |||
9455 | mutex_lock(&dev->struct_mutex); | ||
9456 | |||
9457 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9458 | + ret = i915_gem_object_get_pages(obj); | ||
9459 | if (ret != 0) | ||
9460 | goto fail_unlock; | ||
9461 | |||
9462 | @@ -321,24 +321,40 @@ fail_unlock: | ||
9463 | return ret; | ||
9464 | } | ||
9465 | |||
9466 | +static inline gfp_t | ||
9467 | +i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj) | ||
9468 | +{ | ||
9469 | + return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping); | ||
9470 | +} | ||
9471 | + | ||
9472 | +static inline void | ||
9473 | +i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp) | ||
9474 | +{ | ||
9475 | + mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp); | ||
9476 | +} | ||
9477 | + | ||
9478 | static int | ||
9479 | i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) | ||
9480 | { | ||
9481 | int ret; | ||
9482 | |||
9483 | - ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); | ||
9484 | + ret = i915_gem_object_get_pages(obj); | ||
9485 | |||
9486 | /* If we've insufficient memory to map in the pages, attempt | ||
9487 | * to make some space by throwing out some old buffers. | ||
9488 | */ | ||
9489 | if (ret == -ENOMEM) { | ||
9490 | struct drm_device *dev = obj->dev; | ||
9491 | + gfp_t gfp; | ||
9492 | |||
9493 | ret = i915_gem_evict_something(dev, obj->size); | ||
9494 | if (ret) | ||
9495 | return ret; | ||
9496 | |||
9497 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9498 | + gfp = i915_gem_object_get_page_gfp_mask(obj); | ||
9499 | + i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY); | ||
9500 | + ret = i915_gem_object_get_pages(obj); | ||
9501 | + i915_gem_object_set_page_gfp_mask (obj, gfp); | ||
9502 | } | ||
9503 | |||
9504 | return ret; | ||
9505 | @@ -774,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | ||
9506 | |||
9507 | mutex_lock(&dev->struct_mutex); | ||
9508 | |||
9509 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9510 | + ret = i915_gem_object_get_pages(obj); | ||
9511 | if (ret != 0) | ||
9512 | goto fail_unlock; | ||
9513 | |||
9514 | @@ -1272,7 +1288,6 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) | ||
9515 | list->hash.key = list->file_offset_node->start; | ||
9516 | if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { | ||
9517 | DRM_ERROR("failed to add to map hash\n"); | ||
9518 | - ret = -ENOMEM; | ||
9519 | goto out_free_mm; | ||
9520 | } | ||
9521 | |||
9522 | @@ -1552,8 +1567,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | ||
9523 | else | ||
9524 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
9525 | |||
9526 | - BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
9527 | - | ||
9528 | obj_priv->last_rendering_seqno = 0; | ||
9529 | if (obj_priv->active) { | ||
9530 | obj_priv->active = 0; | ||
9531 | @@ -1624,8 +1637,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | ||
9532 | struct drm_i915_gem_object *obj_priv, *next; | ||
9533 | |||
9534 | list_for_each_entry_safe(obj_priv, next, | ||
9535 | - &dev_priv->mm.gpu_write_list, | ||
9536 | - gpu_write_list) { | ||
9537 | + &dev_priv->mm.flushing_list, list) { | ||
9538 | struct drm_gem_object *obj = obj_priv->obj; | ||
9539 | |||
9540 | if ((obj->write_domain & flush_domains) == | ||
9541 | @@ -1633,7 +1645,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | ||
9542 | uint32_t old_write_domain = obj->write_domain; | ||
9543 | |||
9544 | obj->write_domain = 0; | ||
9545 | - list_del_init(&obj_priv->gpu_write_list); | ||
9546 | i915_gem_object_move_to_active(obj, seqno); | ||
9547 | |||
9548 | trace_i915_gem_object_change_domain(obj, | ||
9549 | @@ -1809,8 +1820,12 @@ i915_gem_retire_work_handler(struct work_struct *work) | ||
9550 | mutex_unlock(&dev->struct_mutex); | ||
9551 | } | ||
9552 | |||
9553 | +/** | ||
9554 | + * Waits for a sequence number to be signaled, and cleans up the | ||
9555 | + * request and object lists appropriately for that event. | ||
9556 | + */ | ||
9557 | static int | ||
9558 | -i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | ||
9559 | +i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
9560 | { | ||
9561 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
9562 | u32 ier; | ||
9563 | @@ -1837,15 +1852,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | ||
9564 | |||
9565 | dev_priv->mm.waiting_gem_seqno = seqno; | ||
9566 | i915_user_irq_get(dev); | ||
9567 | - if (interruptible) | ||
9568 | - ret = wait_event_interruptible(dev_priv->irq_queue, | ||
9569 | - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | ||
9570 | - atomic_read(&dev_priv->mm.wedged)); | ||
9571 | - else | ||
9572 | - wait_event(dev_priv->irq_queue, | ||
9573 | - i915_seqno_passed(i915_get_gem_seqno(dev), seqno) || | ||
9574 | - atomic_read(&dev_priv->mm.wedged)); | ||
9575 | - | ||
9576 | + ret = wait_event_interruptible(dev_priv->irq_queue, | ||
9577 | + i915_seqno_passed(i915_get_gem_seqno(dev), | ||
9578 | + seqno) || | ||
9579 | + atomic_read(&dev_priv->mm.wedged)); | ||
9580 | i915_user_irq_put(dev); | ||
9581 | dev_priv->mm.waiting_gem_seqno = 0; | ||
9582 | |||
9583 | @@ -1869,34 +1879,6 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) | ||
9584 | return ret; | ||
9585 | } | ||
9586 | |||
9587 | -/** | ||
9588 | - * Waits for a sequence number to be signaled, and cleans up the | ||
9589 | - * request and object lists appropriately for that event. | ||
9590 | - */ | ||
9591 | -static int | ||
9592 | -i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
9593 | -{ | ||
9594 | - return i915_do_wait_request(dev, seqno, 1); | ||
9595 | -} | ||
9596 | - | ||
9597 | -/** | ||
9598 | - * Waits for the ring to finish up to the latest request. Usefull for waiting | ||
9599 | - * for flip events, e.g for the overlay support. */ | ||
9600 | -int i915_lp_ring_sync(struct drm_device *dev) | ||
9601 | -{ | ||
9602 | - uint32_t seqno; | ||
9603 | - int ret; | ||
9604 | - | ||
9605 | - seqno = i915_add_request(dev, NULL, 0); | ||
9606 | - | ||
9607 | - if (seqno == 0) | ||
9608 | - return -ENOMEM; | ||
9609 | - | ||
9610 | - ret = i915_do_wait_request(dev, seqno, 0); | ||
9611 | - BUG_ON(ret == -ERESTARTSYS); | ||
9612 | - return ret; | ||
9613 | -} | ||
9614 | - | ||
9615 | static void | ||
9616 | i915_gem_flush(struct drm_device *dev, | ||
9617 | uint32_t invalidate_domains, | ||
9618 | @@ -1965,7 +1947,7 @@ i915_gem_flush(struct drm_device *dev, | ||
9619 | #endif | ||
9620 | BEGIN_LP_RING(2); | ||
9621 | OUT_RING(cmd); | ||
9622 | - OUT_RING(MI_NOOP); | ||
9623 | + OUT_RING(0); /* noop */ | ||
9624 | ADVANCE_LP_RING(); | ||
9625 | } | ||
9626 | } | ||
9627 | @@ -2027,6 +2009,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | ||
9628 | /* blow away mappings if mapped through GTT */ | ||
9629 | i915_gem_release_mmap(obj); | ||
9630 | |||
9631 | + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
9632 | + i915_gem_clear_fence_reg(obj); | ||
9633 | + | ||
9634 | /* Move the object to the CPU domain to ensure that | ||
9635 | * any possible CPU writes while it's not in the GTT | ||
9636 | * are flushed when we go to remap it. This will | ||
9637 | @@ -2042,10 +2027,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | ||
9638 | |||
9639 | BUG_ON(obj_priv->active); | ||
9640 | |||
9641 | - /* release the fence reg _after_ flushing */ | ||
9642 | - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
9643 | - i915_gem_clear_fence_reg(obj); | ||
9644 | - | ||
9645 | if (obj_priv->agp_mem != NULL) { | ||
9646 | drm_unbind_agp(obj_priv->agp_mem); | ||
9647 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | ||
9648 | @@ -2106,8 +2087,8 @@ static int | ||
9649 | i915_gem_evict_everything(struct drm_device *dev) | ||
9650 | { | ||
9651 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
9652 | - int ret; | ||
9653 | uint32_t seqno; | ||
9654 | + int ret; | ||
9655 | bool lists_empty; | ||
9656 | |||
9657 | spin_lock(&dev_priv->mm.active_list_lock); | ||
9658 | @@ -2129,8 +2110,6 @@ i915_gem_evict_everything(struct drm_device *dev) | ||
9659 | if (ret) | ||
9660 | return ret; | ||
9661 | |||
9662 | - BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
9663 | - | ||
9664 | ret = i915_gem_evict_from_inactive_list(dev); | ||
9665 | if (ret) | ||
9666 | return ret; | ||
9667 | @@ -2238,8 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | ||
9668 | } | ||
9669 | |||
9670 | int | ||
9671 | -i915_gem_object_get_pages(struct drm_gem_object *obj, | ||
9672 | - gfp_t gfpmask) | ||
9673 | +i915_gem_object_get_pages(struct drm_gem_object *obj) | ||
9674 | { | ||
9675 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
9676 | int page_count, i; | ||
9677 | @@ -2265,10 +2243,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, | ||
9678 | inode = obj->filp->f_path.dentry->d_inode; | ||
9679 | mapping = inode->i_mapping; | ||
9680 | for (i = 0; i < page_count; i++) { | ||
9681 | - page = read_cache_page_gfp(mapping, i, | ||
9682 | - mapping_gfp_mask (mapping) | | ||
9683 | - __GFP_COLD | | ||
9684 | - gfpmask); | ||
9685 | + page = read_mapping_page(mapping, i, NULL); | ||
9686 | if (IS_ERR(page)) { | ||
9687 | ret = PTR_ERR(page); | ||
9688 | i915_gem_object_put_pages(obj); | ||
9689 | @@ -2591,9 +2566,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | ||
9690 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
9691 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
9692 | struct drm_mm_node *free_space; | ||
9693 | - gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; | ||
9694 | + bool retry_alloc = false; | ||
9695 | int ret; | ||
9696 | |||
9697 | + if (dev_priv->mm.suspended) | ||
9698 | + return -EBUSY; | ||
9699 | + | ||
9700 | if (obj_priv->madv != I915_MADV_WILLNEED) { | ||
9701 | DRM_ERROR("Attempting to bind a purgeable object\n"); | ||
9702 | return -EINVAL; | ||
9703 | @@ -2635,7 +2613,15 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | ||
9704 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | ||
9705 | obj->size, obj_priv->gtt_offset); | ||
9706 | #endif | ||
9707 | - ret = i915_gem_object_get_pages(obj, gfpmask); | ||
9708 | + if (retry_alloc) { | ||
9709 | + i915_gem_object_set_page_gfp_mask (obj, | ||
9710 | + i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
9711 | + } | ||
9712 | + ret = i915_gem_object_get_pages(obj); | ||
9713 | + if (retry_alloc) { | ||
9714 | + i915_gem_object_set_page_gfp_mask (obj, | ||
9715 | + i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
9716 | + } | ||
9717 | if (ret) { | ||
9718 | drm_mm_put_block(obj_priv->gtt_space); | ||
9719 | obj_priv->gtt_space = NULL; | ||
9720 | @@ -2645,9 +2631,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | ||
9721 | ret = i915_gem_evict_something(dev, obj->size); | ||
9722 | if (ret) { | ||
9723 | /* now try to shrink everyone else */ | ||
9724 | - if (gfpmask) { | ||
9725 | - gfpmask = 0; | ||
9726 | - goto search_free; | ||
9727 | + if (! retry_alloc) { | ||
9728 | + retry_alloc = true; | ||
9729 | + goto search_free; | ||
9730 | } | ||
9731 | |||
9732 | return ret; | ||
9733 | @@ -2725,7 +2711,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | ||
9734 | old_write_domain = obj->write_domain; | ||
9735 | i915_gem_flush(dev, 0, obj->write_domain); | ||
9736 | seqno = i915_add_request(dev, NULL, obj->write_domain); | ||
9737 | - BUG_ON(obj->write_domain); | ||
9738 | + obj->write_domain = 0; | ||
9739 | i915_gem_object_move_to_active(obj, seqno); | ||
9740 | |||
9741 | trace_i915_gem_object_change_domain(obj, | ||
9742 | @@ -2825,57 +2811,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | ||
9743 | return 0; | ||
9744 | } | ||
9745 | |||
9746 | -/* | ||
9747 | - * Prepare buffer for display plane. Use uninterruptible for possible flush | ||
9748 | - * wait, as in modesetting process we're not supposed to be interrupted. | ||
9749 | - */ | ||
9750 | -int | ||
9751 | -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | ||
9752 | -{ | ||
9753 | - struct drm_device *dev = obj->dev; | ||
9754 | - struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
9755 | - uint32_t old_write_domain, old_read_domains; | ||
9756 | - int ret; | ||
9757 | - | ||
9758 | - /* Not valid to be called on unbound objects. */ | ||
9759 | - if (obj_priv->gtt_space == NULL) | ||
9760 | - return -EINVAL; | ||
9761 | - | ||
9762 | - i915_gem_object_flush_gpu_write_domain(obj); | ||
9763 | - | ||
9764 | - /* Wait on any GPU rendering and flushing to occur. */ | ||
9765 | - if (obj_priv->active) { | ||
9766 | -#if WATCH_BUF | ||
9767 | - DRM_INFO("%s: object %p wait for seqno %08x\n", | ||
9768 | - __func__, obj, obj_priv->last_rendering_seqno); | ||
9769 | -#endif | ||
9770 | - ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); | ||
9771 | - if (ret != 0) | ||
9772 | - return ret; | ||
9773 | - } | ||
9774 | - | ||
9775 | - old_write_domain = obj->write_domain; | ||
9776 | - old_read_domains = obj->read_domains; | ||
9777 | - | ||
9778 | - obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
9779 | - | ||
9780 | - i915_gem_object_flush_cpu_write_domain(obj); | ||
9781 | - | ||
9782 | - /* It should now be out of any other write domains, and we can update | ||
9783 | - * the domain values for our changes. | ||
9784 | - */ | ||
9785 | - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
9786 | - obj->read_domains |= I915_GEM_DOMAIN_GTT; | ||
9787 | - obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
9788 | - obj_priv->dirty = 1; | ||
9789 | - | ||
9790 | - trace_i915_gem_object_change_domain(obj, | ||
9791 | - old_read_domains, | ||
9792 | - old_write_domain); | ||
9793 | - | ||
9794 | - return 0; | ||
9795 | -} | ||
9796 | - | ||
9797 | /** | ||
9798 | * Moves a single object to the CPU read, and possibly write domain. | ||
9799 | * | ||
9800 | @@ -3796,23 +3731,16 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
9801 | i915_gem_flush(dev, | ||
9802 | dev->invalidate_domains, | ||
9803 | dev->flush_domains); | ||
9804 | - if (dev->flush_domains & I915_GEM_GPU_DOMAINS) | ||
9805 | + if (dev->flush_domains) | ||
9806 | (void)i915_add_request(dev, file_priv, | ||
9807 | dev->flush_domains); | ||
9808 | } | ||
9809 | |||
9810 | for (i = 0; i < args->buffer_count; i++) { | ||
9811 | struct drm_gem_object *obj = object_list[i]; | ||
9812 | - struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
9813 | uint32_t old_write_domain = obj->write_domain; | ||
9814 | |||
9815 | obj->write_domain = obj->pending_write_domain; | ||
9816 | - if (obj->write_domain) | ||
9817 | - list_move_tail(&obj_priv->gpu_write_list, | ||
9818 | - &dev_priv->mm.gpu_write_list); | ||
9819 | - else | ||
9820 | - list_del_init(&obj_priv->gpu_write_list); | ||
9821 | - | ||
9822 | trace_i915_gem_object_change_domain(obj, | ||
9823 | obj->read_domains, | ||
9824 | old_write_domain); | ||
9825 | @@ -4205,7 +4133,6 @@ int i915_gem_init_object(struct drm_gem_object *obj) | ||
9826 | obj_priv->obj = obj; | ||
9827 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | ||
9828 | INIT_LIST_HEAD(&obj_priv->list); | ||
9829 | - INIT_LIST_HEAD(&obj_priv->gpu_write_list); | ||
9830 | INIT_LIST_HEAD(&obj_priv->fence_list); | ||
9831 | obj_priv->madv = I915_MADV_WILLNEED; | ||
9832 | |||
9833 | @@ -4657,7 +4584,6 @@ i915_gem_load(struct drm_device *dev) | ||
9834 | spin_lock_init(&dev_priv->mm.active_list_lock); | ||
9835 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | ||
9836 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | ||
9837 | - INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
9838 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | ||
9839 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | ||
9840 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | ||
9841 | @@ -4712,7 +4638,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, | ||
9842 | |||
9843 | phys_obj->id = id; | ||
9844 | |||
9845 | - phys_obj->handle = drm_pci_alloc(dev, size, 0); | ||
9846 | + phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | ||
9847 | if (!phys_obj->handle) { | ||
9848 | ret = -ENOMEM; | ||
9849 | goto kfree_obj; | ||
9850 | @@ -4770,7 +4696,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | ||
9851 | if (!obj_priv->phys_obj) | ||
9852 | return; | ||
9853 | |||
9854 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9855 | + ret = i915_gem_object_get_pages(obj); | ||
9856 | if (ret) | ||
9857 | goto out; | ||
9858 | |||
9859 | @@ -4828,7 +4754,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | ||
9860 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | ||
9861 | obj_priv->phys_obj->cur_obj = obj; | ||
9862 | |||
9863 | - ret = i915_gem_object_get_pages(obj, 0); | ||
9864 | + ret = i915_gem_object_get_pages(obj); | ||
9865 | if (ret) { | ||
9866 | DRM_ERROR("failed to get page list\n"); | ||
9867 | goto out; | ||
9868 | diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c | ||
9869 | index 63f28ad..aa7fd82 100644 | ||
9870 | --- a/drivers/gpu/drm/i915/i915_irq.c | ||
9871 | +++ b/drivers/gpu/drm/i915/i915_irq.c | ||
9872 | @@ -255,6 +255,7 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | ||
9873 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
9874 | int ret = IRQ_NONE; | ||
9875 | u32 de_iir, gt_iir, de_ier; | ||
9876 | + u32 new_de_iir, new_gt_iir; | ||
9877 | struct drm_i915_master_private *master_priv; | ||
9878 | |||
9879 | /* disable master interrupt before clearing iir */ | ||
9880 | @@ -265,31 +266,35 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev) | ||
9881 | de_iir = I915_READ(DEIIR); | ||
9882 | gt_iir = I915_READ(GTIIR); | ||
9883 | |||
9884 | - if (de_iir == 0 && gt_iir == 0) | ||
9885 | - goto done; | ||
9886 | + for (;;) { | ||
9887 | + if (de_iir == 0 && gt_iir == 0) | ||
9888 | + break; | ||
9889 | |||
9890 | - ret = IRQ_HANDLED; | ||
9891 | + ret = IRQ_HANDLED; | ||
9892 | |||
9893 | - if (dev->primary->master) { | ||
9894 | - master_priv = dev->primary->master->driver_priv; | ||
9895 | - if (master_priv->sarea_priv) | ||
9896 | - master_priv->sarea_priv->last_dispatch = | ||
9897 | - READ_BREADCRUMB(dev_priv); | ||
9898 | - } | ||
9899 | + I915_WRITE(DEIIR, de_iir); | ||
9900 | + new_de_iir = I915_READ(DEIIR); | ||
9901 | + I915_WRITE(GTIIR, gt_iir); | ||
9902 | + new_gt_iir = I915_READ(GTIIR); | ||
9903 | |||
9904 | - if (gt_iir & GT_USER_INTERRUPT) { | ||
9905 | - u32 seqno = i915_get_gem_seqno(dev); | ||
9906 | - dev_priv->mm.irq_gem_seqno = seqno; | ||
9907 | - trace_i915_gem_request_complete(dev, seqno); | ||
9908 | - DRM_WAKEUP(&dev_priv->irq_queue); | ||
9909 | - dev_priv->hangcheck_count = 0; | ||
9910 | - mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
9911 | - } | ||
9912 | + if (dev->primary->master) { | ||
9913 | + master_priv = dev->primary->master->driver_priv; | ||
9914 | + if (master_priv->sarea_priv) | ||
9915 | + master_priv->sarea_priv->last_dispatch = | ||
9916 | + READ_BREADCRUMB(dev_priv); | ||
9917 | + } | ||
9918 | + | ||
9919 | + if (gt_iir & GT_USER_INTERRUPT) { | ||
9920 | + u32 seqno = i915_get_gem_seqno(dev); | ||
9921 | + dev_priv->mm.irq_gem_seqno = seqno; | ||
9922 | + trace_i915_gem_request_complete(dev, seqno); | ||
9923 | + DRM_WAKEUP(&dev_priv->irq_queue); | ||
9924 | + } | ||
9925 | |||
9926 | - I915_WRITE(GTIIR, gt_iir); | ||
9927 | - I915_WRITE(DEIIR, de_iir); | ||
9928 | + de_iir = new_de_iir; | ||
9929 | + gt_iir = new_gt_iir; | ||
9930 | + } | ||
9931 | |||
9932 | -done: | ||
9933 | I915_WRITE(DEIER, de_ier); | ||
9934 | (void)I915_READ(DEIER); | ||
9935 | |||
9936 | @@ -1044,10 +1049,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | ||
9937 | (void) I915_READ(IER); | ||
9938 | } | ||
9939 | |||
9940 | -/* | ||
9941 | - * Must be called after intel_modeset_init or hotplug interrupts won't be | ||
9942 | - * enabled correctly. | ||
9943 | - */ | ||
9944 | int i915_driver_irq_postinstall(struct drm_device *dev) | ||
9945 | { | ||
9946 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
9947 | @@ -1070,23 +1071,19 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | ||
9948 | if (I915_HAS_HOTPLUG(dev)) { | ||
9949 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | ||
9950 | |||
9951 | - /* Note HDMI and DP share bits */ | ||
9952 | - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) | ||
9953 | - hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
9954 | - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
9955 | - hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
9956 | - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
9957 | - hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
9958 | - if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
9959 | - hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
9960 | - if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
9961 | - hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
9962 | - if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
9963 | - hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
9964 | - /* Ignore TV since it's buggy */ | ||
9965 | - | ||
9966 | + /* Leave other bits alone */ | ||
9967 | + hotplug_en |= HOTPLUG_EN_MASK; | ||
9968 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | ||
9969 | |||
9970 | + dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | | ||
9971 | + TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | | ||
9972 | + SDVOB_HOTPLUG_INT_STATUS; | ||
9973 | + if (IS_G4X(dev)) { | ||
9974 | + dev_priv->hotplug_supported_mask |= | ||
9975 | + HDMIB_HOTPLUG_INT_STATUS | | ||
9976 | + HDMIC_HOTPLUG_INT_STATUS | | ||
9977 | + HDMID_HOTPLUG_INT_STATUS; | ||
9978 | + } | ||
9979 | /* Enable in IER... */ | ||
9980 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | ||
9981 | /* and unmask in IMR */ | ||
9982 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h | ||
9983 | index cc9b49a..1687edf 100644 | ||
9984 | --- a/drivers/gpu/drm/i915/i915_reg.h | ||
9985 | +++ b/drivers/gpu/drm/i915/i915_reg.h | ||
9986 | @@ -329,7 +329,6 @@ | ||
9987 | #define FBC_CTL_PERIODIC (1<<30) | ||
9988 | #define FBC_CTL_INTERVAL_SHIFT (16) | ||
9989 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | ||
9990 | -#define FBC_C3_IDLE (1<<13) | ||
9991 | #define FBC_CTL_STRIDE_SHIFT (5) | ||
9992 | #define FBC_CTL_FENCENO (1<<0) | ||
9993 | #define FBC_COMMAND 0x0320c | ||
9994 | @@ -406,13 +405,6 @@ | ||
9995 | # define GPIO_DATA_VAL_IN (1 << 12) | ||
9996 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) | ||
9997 | |||
9998 | -#define GMBUS0 0x5100 | ||
9999 | -#define GMBUS1 0x5104 | ||
10000 | -#define GMBUS2 0x5108 | ||
10001 | -#define GMBUS3 0x510c | ||
10002 | -#define GMBUS4 0x5110 | ||
10003 | -#define GMBUS5 0x5120 | ||
10004 | - | ||
10005 | /* | ||
10006 | * Clock control & power management | ||
10007 | */ | ||
10008 | @@ -871,6 +863,14 @@ | ||
10009 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | ||
10010 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | ||
10011 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | ||
10012 | +#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ | ||
10013 | + HDMIC_HOTPLUG_INT_EN | \ | ||
10014 | + HDMID_HOTPLUG_INT_EN | \ | ||
10015 | + SDVOB_HOTPLUG_INT_EN | \ | ||
10016 | + SDVOC_HOTPLUG_INT_EN | \ | ||
10017 | + TV_HOTPLUG_INT_EN | \ | ||
10018 | + CRT_HOTPLUG_INT_EN) | ||
10019 | + | ||
10020 | |||
10021 | #define PORT_HOTPLUG_STAT 0x61114 | ||
10022 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | ||
10023 | @@ -968,8 +968,6 @@ | ||
10024 | #define LVDS_PORT_EN (1 << 31) | ||
10025 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | ||
10026 | #define LVDS_PIPEB_SELECT (1 << 30) | ||
10027 | -/* LVDS dithering flag on 965/g4x platform */ | ||
10028 | -#define LVDS_ENABLE_DITHER (1 << 25) | ||
10029 | /* Enable border for unscaled (or aspect-scaled) display */ | ||
10030 | #define LVDS_BORDER_ENABLE (1 << 15) | ||
10031 | /* | ||
10032 | @@ -1739,8 +1737,6 @@ | ||
10033 | |||
10034 | /* Display & cursor control */ | ||
10035 | |||
10036 | -/* dithering flag on Ironlake */ | ||
10037 | -#define PIPE_ENABLE_DITHER (1 << 4) | ||
10038 | /* Pipe A */ | ||
10039 | #define PIPEADSL 0x70000 | ||
10040 | #define PIPEACONF 0x70008 | ||
10041 | @@ -2161,13 +2157,6 @@ | ||
10042 | #define PCH_GPIOE 0xc5020 | ||
10043 | #define PCH_GPIOF 0xc5024 | ||
10044 | |||
10045 | -#define PCH_GMBUS0 0xc5100 | ||
10046 | -#define PCH_GMBUS1 0xc5104 | ||
10047 | -#define PCH_GMBUS2 0xc5108 | ||
10048 | -#define PCH_GMBUS3 0xc510c | ||
10049 | -#define PCH_GMBUS4 0xc5110 | ||
10050 | -#define PCH_GMBUS5 0xc5120 | ||
10051 | - | ||
10052 | #define PCH_DPLL_A 0xc6014 | ||
10053 | #define PCH_DPLL_B 0xc6018 | ||
10054 | |||
10055 | diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c | ||
10056 | index 7ad742f..6eec817 100644 | ||
10057 | --- a/drivers/gpu/drm/i915/i915_suspend.c | ||
10058 | +++ b/drivers/gpu/drm/i915/i915_suspend.c | ||
10059 | @@ -27,7 +27,7 @@ | ||
10060 | #include "drmP.h" | ||
10061 | #include "drm.h" | ||
10062 | #include "i915_drm.h" | ||
10063 | -#include "intel_drv.h" | ||
10064 | +#include "i915_drv.h" | ||
10065 | |||
10066 | static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) | ||
10067 | { | ||
10068 | @@ -846,9 +846,6 @@ int i915_restore_state(struct drm_device *dev) | ||
10069 | for (i = 0; i < 3; i++) | ||
10070 | I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); | ||
10071 | |||
10072 | - /* I2C state */ | ||
10073 | - intel_i2c_reset_gmbus(dev); | ||
10074 | - | ||
10075 | return 0; | ||
10076 | } | ||
10077 | |||
10078 | diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c | ||
10079 | index 5e730e6..e505144 100644 | ||
10080 | --- a/drivers/gpu/drm/i915/intel_crt.c | ||
10081 | +++ b/drivers/gpu/drm/i915/intel_crt.c | ||
10082 | @@ -185,9 +185,6 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) | ||
10083 | adpa = I915_READ(PCH_ADPA); | ||
10084 | |||
10085 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
10086 | - /* disable HPD first */ | ||
10087 | - I915_WRITE(PCH_ADPA, adpa); | ||
10088 | - (void)I915_READ(PCH_ADPA); | ||
10089 | |||
10090 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | ||
10091 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | ||
10092 | @@ -579,6 +576,4 @@ void intel_crt_init(struct drm_device *dev) | ||
10093 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | ||
10094 | |||
10095 | drm_sysfs_connector_add(connector); | ||
10096 | - | ||
10097 | - dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | ||
10098 | } | ||
10099 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c | ||
10100 | index b00a1aa..099f420 100644 | ||
10101 | --- a/drivers/gpu/drm/i915/intel_display.c | ||
10102 | +++ b/drivers/gpu/drm/i915/intel_display.c | ||
10103 | @@ -988,8 +988,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | ||
10104 | |||
10105 | /* enable it... */ | ||
10106 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
10107 | - if (IS_I945GM(dev)) | ||
10108 | - fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ | ||
10109 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | ||
10110 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | ||
10111 | if (obj_priv->tiling_mode != I915_TILING_NONE) | ||
10112 | @@ -1253,7 +1251,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
10113 | return ret; | ||
10114 | } | ||
10115 | |||
10116 | - ret = i915_gem_object_set_to_display_plane(obj); | ||
10117 | + ret = i915_gem_object_set_to_gtt_domain(obj, 1); | ||
10118 | if (ret != 0) { | ||
10119 | i915_gem_object_unpin(obj); | ||
10120 | mutex_unlock(&dev->struct_mutex); | ||
10121 | @@ -1475,10 +1473,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10122 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | ||
10123 | u32 temp; | ||
10124 | int tries = 5, j, n; | ||
10125 | - u32 pipe_bpc; | ||
10126 | - | ||
10127 | - temp = I915_READ(pipeconf_reg); | ||
10128 | - pipe_bpc = temp & PIPE_BPC_MASK; | ||
10129 | |||
10130 | /* XXX: When our outputs are all unaware of DPMS modes other than off | ||
10131 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | ||
10132 | @@ -1488,15 +1482,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10133 | case DRM_MODE_DPMS_STANDBY: | ||
10134 | case DRM_MODE_DPMS_SUSPEND: | ||
10135 | DRM_DEBUG("crtc %d dpms on\n", pipe); | ||
10136 | - | ||
10137 | - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
10138 | - temp = I915_READ(PCH_LVDS); | ||
10139 | - if ((temp & LVDS_PORT_EN) == 0) { | ||
10140 | - I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | ||
10141 | - POSTING_READ(PCH_LVDS); | ||
10142 | - } | ||
10143 | - } | ||
10144 | - | ||
10145 | if (HAS_eDP) { | ||
10146 | /* enable eDP PLL */ | ||
10147 | igdng_enable_pll_edp(crtc); | ||
10148 | @@ -1510,12 +1495,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10149 | |||
10150 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | ||
10151 | temp = I915_READ(fdi_rx_reg); | ||
10152 | - /* | ||
10153 | - * make the BPC in FDI Rx be consistent with that in | ||
10154 | - * pipeconf reg. | ||
10155 | - */ | ||
10156 | - temp &= ~(0x7 << 16); | ||
10157 | - temp |= (pipe_bpc << 11); | ||
10158 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | ||
10159 | FDI_SEL_PCDCLK | | ||
10160 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | ||
10161 | @@ -1656,12 +1635,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10162 | |||
10163 | /* enable PCH transcoder */ | ||
10164 | temp = I915_READ(transconf_reg); | ||
10165 | - /* | ||
10166 | - * make the BPC in transcoder be consistent with | ||
10167 | - * that in pipeconf reg. | ||
10168 | - */ | ||
10169 | - temp &= ~PIPE_BPC_MASK; | ||
10170 | - temp |= pipe_bpc; | ||
10171 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | ||
10172 | I915_READ(transconf_reg); | ||
10173 | |||
10174 | @@ -1693,6 +1666,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10175 | case DRM_MODE_DPMS_OFF: | ||
10176 | DRM_DEBUG("crtc %d dpms off\n", pipe); | ||
10177 | |||
10178 | + i915_disable_vga(dev); | ||
10179 | + | ||
10180 | /* Disable display plane */ | ||
10181 | temp = I915_READ(dspcntr_reg); | ||
10182 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | ||
10183 | @@ -1702,8 +1677,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10184 | I915_READ(dspbase_reg); | ||
10185 | } | ||
10186 | |||
10187 | - i915_disable_vga(dev); | ||
10188 | - | ||
10189 | /* disable cpu pipe, disable after all planes disabled */ | ||
10190 | temp = I915_READ(pipeconf_reg); | ||
10191 | if ((temp & PIPEACONF_ENABLE) != 0) { | ||
10192 | @@ -1724,15 +1697,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10193 | } else | ||
10194 | DRM_DEBUG("crtc %d is disabled\n", pipe); | ||
10195 | |||
10196 | - udelay(100); | ||
10197 | - | ||
10198 | - /* Disable PF */ | ||
10199 | - temp = I915_READ(pf_ctl_reg); | ||
10200 | - if ((temp & PF_ENABLE) != 0) { | ||
10201 | - I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
10202 | - I915_READ(pf_ctl_reg); | ||
10203 | + if (HAS_eDP) { | ||
10204 | + igdng_disable_pll_edp(crtc); | ||
10205 | } | ||
10206 | - I915_WRITE(pf_win_size, 0); | ||
10207 | |||
10208 | /* disable CPU FDI tx and PCH FDI rx */ | ||
10209 | temp = I915_READ(fdi_tx_reg); | ||
10210 | @@ -1740,9 +1707,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10211 | I915_READ(fdi_tx_reg); | ||
10212 | |||
10213 | temp = I915_READ(fdi_rx_reg); | ||
10214 | - /* BPC in FDI rx is consistent with that in pipeconf */ | ||
10215 | - temp &= ~(0x07 << 16); | ||
10216 | - temp |= (pipe_bpc << 11); | ||
10217 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | ||
10218 | I915_READ(fdi_rx_reg); | ||
10219 | |||
10220 | @@ -1761,13 +1725,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10221 | |||
10222 | udelay(100); | ||
10223 | |||
10224 | - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
10225 | - temp = I915_READ(PCH_LVDS); | ||
10226 | - I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN); | ||
10227 | - I915_READ(PCH_LVDS); | ||
10228 | - udelay(100); | ||
10229 | - } | ||
10230 | - | ||
10231 | /* disable PCH transcoder */ | ||
10232 | temp = I915_READ(transconf_reg); | ||
10233 | if ((temp & TRANS_ENABLE) != 0) { | ||
10234 | @@ -1786,13 +1743,6 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10235 | } | ||
10236 | } | ||
10237 | } | ||
10238 | - temp = I915_READ(transconf_reg); | ||
10239 | - /* BPC in transcoder is consistent with that in pipeconf */ | ||
10240 | - temp &= ~PIPE_BPC_MASK; | ||
10241 | - temp |= pipe_bpc; | ||
10242 | - I915_WRITE(transconf_reg, temp); | ||
10243 | - I915_READ(transconf_reg); | ||
10244 | - udelay(100); | ||
10245 | |||
10246 | /* disable PCH DPLL */ | ||
10247 | temp = I915_READ(pch_dpll_reg); | ||
10248 | @@ -1801,19 +1751,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10249 | I915_READ(pch_dpll_reg); | ||
10250 | } | ||
10251 | |||
10252 | - if (HAS_eDP) { | ||
10253 | - igdng_disable_pll_edp(crtc); | ||
10254 | - } | ||
10255 | - | ||
10256 | temp = I915_READ(fdi_rx_reg); | ||
10257 | - temp &= ~FDI_SEL_PCDCLK; | ||
10258 | - I915_WRITE(fdi_rx_reg, temp); | ||
10259 | - I915_READ(fdi_rx_reg); | ||
10260 | - | ||
10261 | - temp = I915_READ(fdi_rx_reg); | ||
10262 | - temp &= ~FDI_RX_PLL_ENABLE; | ||
10263 | - I915_WRITE(fdi_rx_reg, temp); | ||
10264 | - I915_READ(fdi_rx_reg); | ||
10265 | + if ((temp & FDI_RX_PLL_ENABLE) != 0) { | ||
10266 | + temp &= ~FDI_SEL_PCDCLK; | ||
10267 | + temp &= ~FDI_RX_PLL_ENABLE; | ||
10268 | + I915_WRITE(fdi_rx_reg, temp); | ||
10269 | + I915_READ(fdi_rx_reg); | ||
10270 | + } | ||
10271 | |||
10272 | /* Disable CPU FDI TX PLL */ | ||
10273 | temp = I915_READ(fdi_tx_reg); | ||
10274 | @@ -1823,8 +1767,16 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10275 | udelay(100); | ||
10276 | } | ||
10277 | |||
10278 | + /* Disable PF */ | ||
10279 | + temp = I915_READ(pf_ctl_reg); | ||
10280 | + if ((temp & PF_ENABLE) != 0) { | ||
10281 | + I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE); | ||
10282 | + I915_READ(pf_ctl_reg); | ||
10283 | + } | ||
10284 | + I915_WRITE(pf_win_size, 0); | ||
10285 | + | ||
10286 | /* Wait for the clocks to turn off. */ | ||
10287 | - udelay(100); | ||
10288 | + udelay(150); | ||
10289 | break; | ||
10290 | } | ||
10291 | } | ||
10292 | @@ -1893,7 +1845,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
10293 | intel_update_watermarks(dev); | ||
10294 | /* Give the overlay scaler a chance to disable if it's on this pipe */ | ||
10295 | //intel_crtc_dpms_video(crtc, FALSE); TODO | ||
10296 | - drm_vblank_off(dev, pipe); | ||
10297 | |||
10298 | if (dev_priv->cfb_plane == plane && | ||
10299 | dev_priv->display.disable_fbc) | ||
10300 | @@ -2540,10 +2491,6 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | ||
10301 | sr_entries = roundup(sr_entries / cacheline_size, 1); | ||
10302 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
10303 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
10304 | - } else { | ||
10305 | - /* Turn off self refresh if both pipes are enabled */ | ||
10306 | - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
10307 | - & ~FW_BLC_SELF_EN); | ||
10308 | } | ||
10309 | |||
10310 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | ||
10311 | @@ -2562,43 +2509,15 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | ||
10312 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | ||
10313 | } | ||
10314 | |||
10315 | -static void i965_update_wm(struct drm_device *dev, int planea_clock, | ||
10316 | - int planeb_clock, int sr_hdisplay, int pixel_size) | ||
10317 | +static void i965_update_wm(struct drm_device *dev, int unused, int unused2, | ||
10318 | + int unused3, int unused4) | ||
10319 | { | ||
10320 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
10321 | - unsigned long line_time_us; | ||
10322 | - int sr_clock, sr_entries, srwm = 1; | ||
10323 | - | ||
10324 | - /* Calc sr entries for one plane configs */ | ||
10325 | - if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | ||
10326 | - /* self-refresh has much higher latency */ | ||
10327 | - const static int sr_latency_ns = 12000; | ||
10328 | - | ||
10329 | - sr_clock = planea_clock ? planea_clock : planeb_clock; | ||
10330 | - line_time_us = ((sr_hdisplay * 1000) / sr_clock); | ||
10331 | - | ||
10332 | - /* Use ns/us then divide to preserve precision */ | ||
10333 | - sr_entries = (((sr_latency_ns / line_time_us) + 1) * | ||
10334 | - pixel_size * sr_hdisplay) / 1000; | ||
10335 | - sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); | ||
10336 | - DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | ||
10337 | - srwm = I945_FIFO_SIZE - sr_entries; | ||
10338 | - if (srwm < 0) | ||
10339 | - srwm = 1; | ||
10340 | - srwm &= 0x3f; | ||
10341 | - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | ||
10342 | - } else { | ||
10343 | - /* Turn off self refresh if both pipes are enabled */ | ||
10344 | - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
10345 | - & ~FW_BLC_SELF_EN); | ||
10346 | - } | ||
10347 | |||
10348 | - DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | ||
10349 | - srwm); | ||
10350 | + DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); | ||
10351 | |||
10352 | /* 965 has limitations... */ | ||
10353 | - I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | | ||
10354 | - (8 << 0)); | ||
10355 | + I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); | ||
10356 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); | ||
10357 | } | ||
10358 | |||
10359 | @@ -2659,10 +2578,6 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | ||
10360 | if (srwm < 0) | ||
10361 | srwm = 1; | ||
10362 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | ||
10363 | - } else { | ||
10364 | - /* Turn off self refresh if both pipes are enabled */ | ||
10365 | - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
10366 | - & ~FW_BLC_SELF_EN); | ||
10367 | } | ||
10368 | |||
10369 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | ||
10370 | @@ -2939,18 +2854,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | ||
10371 | |||
10372 | /* determine panel color depth */ | ||
10373 | temp = I915_READ(pipeconf_reg); | ||
10374 | - temp &= ~PIPE_BPC_MASK; | ||
10375 | - if (is_lvds) { | ||
10376 | - int lvds_reg = I915_READ(PCH_LVDS); | ||
10377 | - /* the BPC will be 6 if it is 18-bit LVDS panel */ | ||
10378 | - if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
10379 | - temp |= PIPE_8BPC; | ||
10380 | - else | ||
10381 | - temp |= PIPE_6BPC; | ||
10382 | - } else | ||
10383 | - temp |= PIPE_8BPC; | ||
10384 | - I915_WRITE(pipeconf_reg, temp); | ||
10385 | - I915_READ(pipeconf_reg); | ||
10386 | |||
10387 | switch (temp & PIPE_BPC_MASK) { | ||
10388 | case PIPE_8BPC: | ||
10389 | @@ -3178,20 +3081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | ||
10390 | * appropriately here, but we need to look more thoroughly into how | ||
10391 | * panels behave in the two modes. | ||
10392 | */ | ||
10393 | - /* set the dithering flag */ | ||
10394 | - if (IS_I965G(dev)) { | ||
10395 | - if (dev_priv->lvds_dither) { | ||
10396 | - if (IS_IGDNG(dev)) | ||
10397 | - pipeconf |= PIPE_ENABLE_DITHER; | ||
10398 | - else | ||
10399 | - lvds |= LVDS_ENABLE_DITHER; | ||
10400 | - } else { | ||
10401 | - if (IS_IGDNG(dev)) | ||
10402 | - pipeconf &= ~PIPE_ENABLE_DITHER; | ||
10403 | - else | ||
10404 | - lvds &= ~LVDS_ENABLE_DITHER; | ||
10405 | - } | ||
10406 | - } | ||
10407 | + | ||
10408 | I915_WRITE(lvds_reg, lvds); | ||
10409 | I915_READ(lvds_reg); | ||
10410 | } | ||
10411 | @@ -3775,6 +3665,125 @@ static void intel_gpu_idle_timer(unsigned long arg) | ||
10412 | queue_work(dev_priv->wq, &dev_priv->idle_work); | ||
10413 | } | ||
10414 | |||
10415 | +void intel_increase_renderclock(struct drm_device *dev, bool schedule) | ||
10416 | +{ | ||
10417 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
10418 | + | ||
10419 | + if (IS_IGDNG(dev)) | ||
10420 | + return; | ||
10421 | + | ||
10422 | + if (!dev_priv->render_reclock_avail) { | ||
10423 | + DRM_DEBUG("not reclocking render clock\n"); | ||
10424 | + return; | ||
10425 | + } | ||
10426 | + | ||
10427 | + /* Restore render clock frequency to original value */ | ||
10428 | + if (IS_G4X(dev) || IS_I9XX(dev)) | ||
10429 | + pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | ||
10430 | + else if (IS_I85X(dev)) | ||
10431 | + pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | ||
10432 | + DRM_DEBUG("increasing render clock frequency\n"); | ||
10433 | + | ||
10434 | + /* Schedule downclock */ | ||
10435 | + if (schedule) | ||
10436 | + mod_timer(&dev_priv->idle_timer, jiffies + | ||
10437 | + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
10438 | +} | ||
10439 | + | ||
10440 | +void intel_decrease_renderclock(struct drm_device *dev) | ||
10441 | +{ | ||
10442 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
10443 | + | ||
10444 | + if (IS_IGDNG(dev)) | ||
10445 | + return; | ||
10446 | + | ||
10447 | + if (!dev_priv->render_reclock_avail) { | ||
10448 | + DRM_DEBUG("not reclocking render clock\n"); | ||
10449 | + return; | ||
10450 | + } | ||
10451 | + | ||
10452 | + if (IS_G4X(dev)) { | ||
10453 | + u16 gcfgc; | ||
10454 | + | ||
10455 | + /* Adjust render clock... */ | ||
10456 | + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
10457 | + | ||
10458 | + /* Down to minimum... */ | ||
10459 | + gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; | ||
10460 | + gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; | ||
10461 | + | ||
10462 | + pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
10463 | + } else if (IS_I965G(dev)) { | ||
10464 | + u16 gcfgc; | ||
10465 | + | ||
10466 | + /* Adjust render clock... */ | ||
10467 | + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
10468 | + | ||
10469 | + /* Down to minimum... */ | ||
10470 | + gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; | ||
10471 | + gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; | ||
10472 | + | ||
10473 | + pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
10474 | + } else if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
10475 | + u16 gcfgc; | ||
10476 | + | ||
10477 | + /* Adjust render clock... */ | ||
10478 | + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
10479 | + | ||
10480 | + /* Down to minimum... */ | ||
10481 | + gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; | ||
10482 | + gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; | ||
10483 | + | ||
10484 | + pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
10485 | + } else if (IS_I915G(dev)) { | ||
10486 | + u16 gcfgc; | ||
10487 | + | ||
10488 | + /* Adjust render clock... */ | ||
10489 | + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
10490 | + | ||
10491 | + /* Down to minimum... */ | ||
10492 | + gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; | ||
10493 | + gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; | ||
10494 | + | ||
10495 | + pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
10496 | + } else if (IS_I85X(dev)) { | ||
10497 | + u16 hpllcc; | ||
10498 | + | ||
10499 | + /* Adjust render clock... */ | ||
10500 | + pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); | ||
10501 | + | ||
10502 | + /* Up to maximum... */ | ||
10503 | + hpllcc &= ~GC_CLOCK_CONTROL_MASK; | ||
10504 | + hpllcc |= GC_CLOCK_133_200; | ||
10505 | + | ||
10506 | + pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | ||
10507 | + } | ||
10508 | + DRM_DEBUG("decreasing render clock frequency\n"); | ||
10509 | +} | ||
10510 | + | ||
10511 | +/* Note that no increase function is needed for this - increase_renderclock() | ||
10512 | + * will also rewrite these bits | ||
10513 | + */ | ||
10514 | +void intel_decrease_displayclock(struct drm_device *dev) | ||
10515 | +{ | ||
10516 | + if (IS_IGDNG(dev)) | ||
10517 | + return; | ||
10518 | + | ||
10519 | + if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | ||
10520 | + IS_I915GM(dev)) { | ||
10521 | + u16 gcfgc; | ||
10522 | + | ||
10523 | + /* Adjust render clock... */ | ||
10524 | + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
10525 | + | ||
10526 | + /* Down to minimum... */ | ||
10527 | + gcfgc &= ~0xf0; | ||
10528 | + gcfgc |= 0x80; | ||
10529 | + | ||
10530 | + pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
10531 | + } | ||
10532 | +} | ||
10533 | + | ||
10534 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | ||
10535 | |||
10536 | static void intel_crtc_idle_timer(unsigned long arg) | ||
10537 | @@ -3888,6 +3897,12 @@ static void intel_idle_update(struct work_struct *work) | ||
10538 | |||
10539 | mutex_lock(&dev->struct_mutex); | ||
10540 | |||
10541 | + /* GPU isn't processing, downclock it. */ | ||
10542 | + if (!dev_priv->busy) { | ||
10543 | + intel_decrease_renderclock(dev); | ||
10544 | + intel_decrease_displayclock(dev); | ||
10545 | + } | ||
10546 | + | ||
10547 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
10548 | /* Skip inactive CRTCs */ | ||
10549 | if (!crtc->fb) | ||
10550 | @@ -3922,6 +3937,7 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | ||
10551 | return; | ||
10552 | |||
10553 | dev_priv->busy = true; | ||
10554 | + intel_increase_renderclock(dev, true); | ||
10555 | |||
10556 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
10557 | if (!crtc->fb) | ||
10558 | @@ -4102,51 +4118,37 @@ static void intel_setup_outputs(struct drm_device *dev) | ||
10559 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | ||
10560 | intel_dp_init(dev, PCH_DP_D); | ||
10561 | |||
10562 | - } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | ||
10563 | + } else if (IS_I9XX(dev)) { | ||
10564 | bool found = false; | ||
10565 | |||
10566 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | ||
10567 | - DRM_DEBUG_KMS("probing SDVOB\n"); | ||
10568 | found = intel_sdvo_init(dev, SDVOB); | ||
10569 | - if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { | ||
10570 | - DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | ||
10571 | + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | ||
10572 | intel_hdmi_init(dev, SDVOB); | ||
10573 | - } | ||
10574 | |||
10575 | - if (!found && SUPPORTS_INTEGRATED_DP(dev)) { | ||
10576 | - DRM_DEBUG_KMS("probing DP_B\n"); | ||
10577 | + if (!found && SUPPORTS_INTEGRATED_DP(dev)) | ||
10578 | intel_dp_init(dev, DP_B); | ||
10579 | - } | ||
10580 | } | ||
10581 | |||
10582 | /* Before G4X SDVOC doesn't have its own detect register */ | ||
10583 | |||
10584 | - if (I915_READ(SDVOB) & SDVO_DETECTED) { | ||
10585 | - DRM_DEBUG_KMS("probing SDVOC\n"); | ||
10586 | + if (I915_READ(SDVOB) & SDVO_DETECTED) | ||
10587 | found = intel_sdvo_init(dev, SDVOC); | ||
10588 | - } | ||
10589 | |||
10590 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | ||
10591 | |||
10592 | - if (SUPPORTS_INTEGRATED_HDMI(dev)) { | ||
10593 | - DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | ||
10594 | + if (SUPPORTS_INTEGRATED_HDMI(dev)) | ||
10595 | intel_hdmi_init(dev, SDVOC); | ||
10596 | - } | ||
10597 | - if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
10598 | - DRM_DEBUG_KMS("probing DP_C\n"); | ||
10599 | + if (SUPPORTS_INTEGRATED_DP(dev)) | ||
10600 | intel_dp_init(dev, DP_C); | ||
10601 | - } | ||
10602 | } | ||
10603 | |||
10604 | - if (SUPPORTS_INTEGRATED_DP(dev) && | ||
10605 | - (I915_READ(DP_D) & DP_DETECTED)) { | ||
10606 | - DRM_DEBUG_KMS("probing DP_D\n"); | ||
10607 | + if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | ||
10608 | intel_dp_init(dev, DP_D); | ||
10609 | - } | ||
10610 | - } else if (IS_I8XX(dev)) | ||
10611 | + } else | ||
10612 | intel_dvo_init(dev); | ||
10613 | |||
10614 | - if (SUPPORTS_TV(dev)) | ||
10615 | + if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) | ||
10616 | intel_tv_init(dev); | ||
10617 | |||
10618 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
10619 | @@ -4440,6 +4442,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | ||
10620 | del_timer_sync(&intel_crtc->idle_timer); | ||
10621 | } | ||
10622 | |||
10623 | + intel_increase_renderclock(dev, false); | ||
10624 | del_timer_sync(&dev_priv->idle_timer); | ||
10625 | |||
10626 | mutex_unlock(&dev->struct_mutex); | ||
10627 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c | ||
10628 | index d487771..d834475 100644 | ||
10629 | --- a/drivers/gpu/drm/i915/intel_dp.c | ||
10630 | +++ b/drivers/gpu/drm/i915/intel_dp.c | ||
10631 | @@ -1254,11 +1254,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) | ||
10632 | else | ||
10633 | intel_output->type = INTEL_OUTPUT_DISPLAYPORT; | ||
10634 | |||
10635 | - if (output_reg == DP_B || output_reg == PCH_DP_B) | ||
10636 | + if (output_reg == DP_B) | ||
10637 | intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); | ||
10638 | - else if (output_reg == DP_C || output_reg == PCH_DP_C) | ||
10639 | + else if (output_reg == DP_C) | ||
10640 | intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); | ||
10641 | - else if (output_reg == DP_D || output_reg == PCH_DP_D) | ||
10642 | + else if (output_reg == DP_D) | ||
10643 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | ||
10644 | |||
10645 | if (IS_eDP(intel_output)) { | ||
10646 | @@ -1290,20 +1290,14 @@ intel_dp_init(struct drm_device *dev, int output_reg) | ||
10647 | break; | ||
10648 | case DP_B: | ||
10649 | case PCH_DP_B: | ||
10650 | - dev_priv->hotplug_supported_mask |= | ||
10651 | - HDMIB_HOTPLUG_INT_STATUS; | ||
10652 | name = "DPDDC-B"; | ||
10653 | break; | ||
10654 | case DP_C: | ||
10655 | case PCH_DP_C: | ||
10656 | - dev_priv->hotplug_supported_mask |= | ||
10657 | - HDMIC_HOTPLUG_INT_STATUS; | ||
10658 | name = "DPDDC-C"; | ||
10659 | break; | ||
10660 | case DP_D: | ||
10661 | case PCH_DP_D: | ||
10662 | - dev_priv->hotplug_supported_mask |= | ||
10663 | - HDMID_HOTPLUG_INT_STATUS; | ||
10664 | name = "DPDDC-D"; | ||
10665 | break; | ||
10666 | } | ||
10667 | diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h | ||
10668 | index 6c7c19f..ef61fe9 100644 | ||
10669 | --- a/drivers/gpu/drm/i915/intel_drv.h | ||
10670 | +++ b/drivers/gpu/drm/i915/intel_drv.h | ||
10671 | @@ -134,8 +134,6 @@ void intel_i2c_destroy(struct i2c_adapter *adapter); | ||
10672 | int intel_ddc_get_modes(struct intel_output *intel_output); | ||
10673 | extern bool intel_ddc_probe(struct intel_output *intel_output); | ||
10674 | void intel_i2c_quirk_set(struct drm_device *dev, bool enable); | ||
10675 | -void intel_i2c_reset_gmbus(struct drm_device *dev); | ||
10676 | - | ||
10677 | extern void intel_crt_init(struct drm_device *dev); | ||
10678 | extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); | ||
10679 | extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | ||
10680 | diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c | ||
10681 | index 1318ac2..2b0fe54 100644 | ||
10682 | --- a/drivers/gpu/drm/i915/intel_fb.c | ||
10683 | +++ b/drivers/gpu/drm/i915/intel_fb.c | ||
10684 | @@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | ||
10685 | |||
10686 | mutex_lock(&dev->struct_mutex); | ||
10687 | |||
10688 | - ret = i915_gem_object_pin(fbo, 64*1024); | ||
10689 | + ret = i915_gem_object_pin(fbo, PAGE_SIZE); | ||
10690 | if (ret) { | ||
10691 | DRM_ERROR("failed to pin fb: %d\n", ret); | ||
10692 | goto out_unref; | ||
10693 | diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c | ||
10694 | index 85760bf..c33451a 100644 | ||
10695 | --- a/drivers/gpu/drm/i915/intel_hdmi.c | ||
10696 | +++ b/drivers/gpu/drm/i915/intel_hdmi.c | ||
10697 | @@ -254,26 +254,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | ||
10698 | if (sdvox_reg == SDVOB) { | ||
10699 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | ||
10700 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | ||
10701 | - dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
10702 | } else if (sdvox_reg == SDVOC) { | ||
10703 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | ||
10704 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | ||
10705 | - dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
10706 | } else if (sdvox_reg == HDMIB) { | ||
10707 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | ||
10708 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | ||
10709 | "HDMIB"); | ||
10710 | - dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
10711 | } else if (sdvox_reg == HDMIC) { | ||
10712 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | ||
10713 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | ||
10714 | "HDMIC"); | ||
10715 | - dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
10716 | } else if (sdvox_reg == HDMID) { | ||
10717 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | ||
10718 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | ||
10719 | "HDMID"); | ||
10720 | - dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
10721 | } | ||
10722 | if (!intel_output->ddc_bus) | ||
10723 | goto err_connector; | ||
10724 | diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c | ||
10725 | index b94acc4..c7eab72 100644 | ||
10726 | --- a/drivers/gpu/drm/i915/intel_i2c.c | ||
10727 | +++ b/drivers/gpu/drm/i915/intel_i2c.c | ||
10728 | @@ -118,23 +118,6 @@ static void set_data(void *data, int state_high) | ||
10729 | udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ | ||
10730 | } | ||
10731 | |||
10732 | -/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C | ||
10733 | - * engine, but if the BIOS leaves it enabled, then that can break our use | ||
10734 | - * of the bit-banging I2C interfaces. This is notably the case with the | ||
10735 | - * Mac Mini in EFI mode. | ||
10736 | - */ | ||
10737 | -void | ||
10738 | -intel_i2c_reset_gmbus(struct drm_device *dev) | ||
10739 | -{ | ||
10740 | - struct drm_i915_private *dev_priv = dev->dev_private; | ||
10741 | - | ||
10742 | - if (IS_IGDNG(dev)) { | ||
10743 | - I915_WRITE(PCH_GMBUS0, 0); | ||
10744 | - } else { | ||
10745 | - I915_WRITE(GMBUS0, 0); | ||
10746 | - } | ||
10747 | -} | ||
10748 | - | ||
10749 | /** | ||
10750 | * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg | ||
10751 | * @dev: DRM device | ||
10752 | @@ -185,8 +168,6 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, | ||
10753 | if(i2c_bit_add_bus(&chan->adapter)) | ||
10754 | goto out_free; | ||
10755 | |||
10756 | - intel_i2c_reset_gmbus(dev); | ||
10757 | - | ||
10758 | /* JJJ: raise SCL and SDA? */ | ||
10759 | intel_i2c_quirk_set(dev, true); | ||
10760 | set_data(chan, 1); | ||
10761 | diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c | ||
10762 | index 952bb4e..05598ae 100644 | ||
10763 | --- a/drivers/gpu/drm/i915/intel_lvds.c | ||
10764 | +++ b/drivers/gpu/drm/i915/intel_lvds.c | ||
10765 | @@ -602,33 +602,12 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | ||
10766 | /* Some lid devices report incorrect lid status, assume they're connected */ | ||
10767 | static const struct dmi_system_id bad_lid_status[] = { | ||
10768 | { | ||
10769 | - .ident = "Compaq nx9020", | ||
10770 | - .matches = { | ||
10771 | - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
10772 | - DMI_MATCH(DMI_BOARD_NAME, "3084"), | ||
10773 | - }, | ||
10774 | - }, | ||
10775 | - { | ||
10776 | - .ident = "Samsung SX20S", | ||
10777 | - .matches = { | ||
10778 | - DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), | ||
10779 | - DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | ||
10780 | - }, | ||
10781 | - }, | ||
10782 | - { | ||
10783 | .ident = "Aspire One", | ||
10784 | .matches = { | ||
10785 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
10786 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | ||
10787 | }, | ||
10788 | }, | ||
10789 | - { | ||
10790 | - .ident = "PC-81005", | ||
10791 | - .matches = { | ||
10792 | - DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
10793 | - DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
10794 | - }, | ||
10795 | - }, | ||
10796 | { } | ||
10797 | }; | ||
10798 | |||
10799 | @@ -700,14 +679,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | ||
10800 | struct drm_i915_private *dev_priv = | ||
10801 | container_of(nb, struct drm_i915_private, lid_notifier); | ||
10802 | struct drm_device *dev = dev_priv->dev; | ||
10803 | - struct drm_connector *connector = dev_priv->int_lvds_connector; | ||
10804 | |||
10805 | - /* | ||
10806 | - * check and update the status of LVDS connector after receiving | ||
10807 | - * the LID nofication event. | ||
10808 | - */ | ||
10809 | - if (connector) | ||
10810 | - connector->status = connector->funcs->detect(connector); | ||
10811 | if (!acpi_lid_open()) { | ||
10812 | dev_priv->modeset_on_lid = 1; | ||
10813 | return NOTIFY_OK; | ||
10814 | @@ -1113,8 +1085,6 @@ out: | ||
10815 | DRM_DEBUG("lid notifier registration failed\n"); | ||
10816 | dev_priv->lid_notifier.notifier_call = NULL; | ||
10817 | } | ||
10818 | - /* keep the LVDS connector */ | ||
10819 | - dev_priv->int_lvds_connector = connector; | ||
10820 | drm_sysfs_connector_add(connector); | ||
10821 | return; | ||
10822 | |||
10823 | diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c | ||
10824 | index 3f5aaf1..083bec2 100644 | ||
10825 | --- a/drivers/gpu/drm/i915/intel_sdvo.c | ||
10826 | +++ b/drivers/gpu/drm/i915/intel_sdvo.c | ||
10827 | @@ -472,63 +472,14 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | ||
10828 | } | ||
10829 | |||
10830 | /** | ||
10831 | - * Try to read the response after issuie the DDC switch command. But it | ||
10832 | - * is noted that we must do the action of reading response and issuing DDC | ||
10833 | - * switch command in one I2C transaction. Otherwise when we try to start | ||
10834 | - * another I2C transaction after issuing the DDC bus switch, it will be | ||
10835 | - * switched to the internal SDVO register. | ||
10836 | + * Don't check status code from this as it switches the bus back to the | ||
10837 | + * SDVO chips which defeats the purpose of doing a bus switch in the first | ||
10838 | + * place. | ||
10839 | */ | ||
10840 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | ||
10841 | u8 target) | ||
10842 | { | ||
10843 | - struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; | ||
10844 | - u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | ||
10845 | - struct i2c_msg msgs[] = { | ||
10846 | - { | ||
10847 | - .addr = sdvo_priv->slave_addr >> 1, | ||
10848 | - .flags = 0, | ||
10849 | - .len = 2, | ||
10850 | - .buf = out_buf, | ||
10851 | - }, | ||
10852 | - /* the following two are to read the response */ | ||
10853 | - { | ||
10854 | - .addr = sdvo_priv->slave_addr >> 1, | ||
10855 | - .flags = 0, | ||
10856 | - .len = 1, | ||
10857 | - .buf = cmd_buf, | ||
10858 | - }, | ||
10859 | - { | ||
10860 | - .addr = sdvo_priv->slave_addr >> 1, | ||
10861 | - .flags = I2C_M_RD, | ||
10862 | - .len = 1, | ||
10863 | - .buf = ret_value, | ||
10864 | - }, | ||
10865 | - }; | ||
10866 | - | ||
10867 | - intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | ||
10868 | - &target, 1); | ||
10869 | - /* write the DDC switch command argument */ | ||
10870 | - intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | ||
10871 | - | ||
10872 | - out_buf[0] = SDVO_I2C_OPCODE; | ||
10873 | - out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | ||
10874 | - cmd_buf[0] = SDVO_I2C_CMD_STATUS; | ||
10875 | - cmd_buf[1] = 0; | ||
10876 | - ret_value[0] = 0; | ||
10877 | - ret_value[1] = 0; | ||
10878 | - | ||
10879 | - ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | ||
10880 | - if (ret != 3) { | ||
10881 | - /* failure in I2C transfer */ | ||
10882 | - DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); | ||
10883 | - return; | ||
10884 | - } | ||
10885 | - if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { | ||
10886 | - DRM_DEBUG_KMS("DDC switch command returns response %d\n", | ||
10887 | - ret_value[0]); | ||
10888 | - return; | ||
10889 | - } | ||
10890 | - return; | ||
10891 | + intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); | ||
10892 | } | ||
10893 | |||
10894 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | ||
10895 | @@ -1638,32 +1589,6 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | ||
10896 | edid = drm_get_edid(&intel_output->base, | ||
10897 | intel_output->ddc_bus); | ||
10898 | |||
10899 | - /* This is only applied to SDVO cards with multiple outputs */ | ||
10900 | - if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | ||
10901 | - uint8_t saved_ddc, temp_ddc; | ||
10902 | - saved_ddc = sdvo_priv->ddc_bus; | ||
10903 | - temp_ddc = sdvo_priv->ddc_bus >> 1; | ||
10904 | - /* | ||
10905 | - * Don't use the 1 as the argument of DDC bus switch to get | ||
10906 | - * the EDID. It is used for SDVO SPD ROM. | ||
10907 | - */ | ||
10908 | - while(temp_ddc > 1) { | ||
10909 | - sdvo_priv->ddc_bus = temp_ddc; | ||
10910 | - edid = drm_get_edid(&intel_output->base, | ||
10911 | - intel_output->ddc_bus); | ||
10912 | - if (edid) { | ||
10913 | - /* | ||
10914 | - * When we can get the EDID, maybe it is the | ||
10915 | - * correct DDC bus. Update it. | ||
10916 | - */ | ||
10917 | - sdvo_priv->ddc_bus = temp_ddc; | ||
10918 | - break; | ||
10919 | - } | ||
10920 | - temp_ddc >>= 1; | ||
10921 | - } | ||
10922 | - if (edid == NULL) | ||
10923 | - sdvo_priv->ddc_bus = saved_ddc; | ||
10924 | - } | ||
10925 | /* when there is no edid and no monitor is connected with VGA | ||
10926 | * port, try to use the CRT ddc to read the EDID for DVI-connector | ||
10927 | */ | ||
10928 | @@ -2743,7 +2668,6 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | ||
10929 | |||
10930 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | ||
10931 | { | ||
10932 | - struct drm_i915_private *dev_priv = dev->dev_private; | ||
10933 | struct drm_connector *connector; | ||
10934 | struct intel_output *intel_output; | ||
10935 | struct intel_sdvo_priv *sdvo_priv; | ||
10936 | @@ -2790,12 +2714,10 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | ||
10937 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | ||
10938 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | ||
10939 | "SDVOB/VGA DDC BUS"); | ||
10940 | - dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||
10941 | } else { | ||
10942 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | ||
10943 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | ||
10944 | "SDVOC/VGA DDC BUS"); | ||
10945 | - dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||
10946 | } | ||
10947 | |||
10948 | if (intel_output->ddc_bus == NULL) | ||
10949 | diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c | ||
10950 | index ce026f0..9ca9179 100644 | ||
10951 | --- a/drivers/gpu/drm/i915/intel_tv.c | ||
10952 | +++ b/drivers/gpu/drm/i915/intel_tv.c | ||
10953 | @@ -1213,17 +1213,20 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | ||
10954 | tv_ctl |= TV_TRILEVEL_SYNC; | ||
10955 | if (tv_mode->pal_burst) | ||
10956 | tv_ctl |= TV_PAL_BURST; | ||
10957 | - | ||
10958 | scctl1 = 0; | ||
10959 | - if (tv_mode->dda1_inc) | ||
10960 | + /* dda1 implies valid video levels */ | ||
10961 | + if (tv_mode->dda1_inc) { | ||
10962 | scctl1 |= TV_SC_DDA1_EN; | ||
10963 | + } | ||
10964 | + | ||
10965 | if (tv_mode->dda2_inc) | ||
10966 | scctl1 |= TV_SC_DDA2_EN; | ||
10967 | + | ||
10968 | if (tv_mode->dda3_inc) | ||
10969 | scctl1 |= TV_SC_DDA3_EN; | ||
10970 | + | ||
10971 | scctl1 |= tv_mode->sc_reset; | ||
10972 | - if (video_levels) | ||
10973 | - scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
10974 | + scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; | ||
10975 | scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; | ||
10976 | |||
10977 | scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | | ||
10978 | @@ -1801,8 +1804,6 @@ intel_tv_init(struct drm_device *dev) | ||
10979 | drm_connector_attach_property(connector, | ||
10980 | dev->mode_config.tv_bottom_margin_property, | ||
10981 | tv_priv->margin[TV_MARGIN_BOTTOM]); | ||
10982 | - | ||
10983 | - dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; | ||
10984 | out: | ||
10985 | drm_sysfs_connector_add(connector); | ||
10986 | } | ||
10987 | diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c | ||
10988 | index fed2291..d67c425 100644 | ||
10989 | --- a/drivers/gpu/drm/radeon/atom.c | ||
10990 | +++ b/drivers/gpu/drm/radeon/atom.c | ||
10991 | @@ -607,7 +607,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) | ||
10992 | uint8_t count = U8((*ptr)++); | ||
10993 | SDEBUG(" count: %d\n", count); | ||
10994 | if (arg == ATOM_UNIT_MICROSEC) | ||
10995 | - udelay(count); | ||
10996 | + schedule_timeout_uninterruptible(usecs_to_jiffies(count)); | ||
10997 | else | ||
10998 | schedule_timeout_uninterruptible(msecs_to_jiffies(count)); | ||
10999 | } | ||
11000 | diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c | ||
11001 | index 19f93f2..c15287a 100644 | ||
11002 | --- a/drivers/gpu/drm/radeon/atombios_crtc.c | ||
11003 | +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | ||
11004 | @@ -241,7 +241,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
11005 | { | ||
11006 | struct drm_device *dev = crtc->dev; | ||
11007 | struct radeon_device *rdev = dev->dev_private; | ||
11008 | - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
11009 | |||
11010 | switch (mode) { | ||
11011 | case DRM_MODE_DPMS_ON: | ||
11012 | @@ -249,21 +248,20 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
11013 | if (ASIC_IS_DCE3(rdev)) | ||
11014 | atombios_enable_crtc_memreq(crtc, 1); | ||
11015 | atombios_blank_crtc(crtc, 0); | ||
11016 | - if (rdev->family < CHIP_R600) | ||
11017 | - drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); | ||
11018 | - radeon_crtc_load_lut(crtc); | ||
11019 | break; | ||
11020 | case DRM_MODE_DPMS_STANDBY: | ||
11021 | case DRM_MODE_DPMS_SUSPEND: | ||
11022 | case DRM_MODE_DPMS_OFF: | ||
11023 | - if (rdev->family < CHIP_R600) | ||
11024 | - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | ||
11025 | atombios_blank_crtc(crtc, 1); | ||
11026 | if (ASIC_IS_DCE3(rdev)) | ||
11027 | atombios_enable_crtc_memreq(crtc, 0); | ||
11028 | atombios_enable_crtc(crtc, 0); | ||
11029 | break; | ||
11030 | } | ||
11031 | + | ||
11032 | + if (mode != DRM_MODE_DPMS_OFF) { | ||
11033 | + radeon_crtc_load_lut(crtc); | ||
11034 | + } | ||
11035 | } | ||
11036 | |||
11037 | static void | ||
11038 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c | ||
11039 | index 969502a..2ed88a8 100644 | ||
11040 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c | ||
11041 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | ||
11042 | @@ -135,14 +135,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | ||
11043 | } | ||
11044 | } | ||
11045 | |||
11046 | - /* HIS X1300 is DVI+VGA, not DVI+DVI */ | ||
11047 | - if ((dev->pdev->device == 0x7146) && | ||
11048 | - (dev->pdev->subsystem_vendor == 0x17af) && | ||
11049 | - (dev->pdev->subsystem_device == 0x2058)) { | ||
11050 | - if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) | ||
11051 | - return false; | ||
11052 | - } | ||
11053 | - | ||
11054 | /* Funky macbooks */ | ||
11055 | if ((dev->pdev->device == 0x71C5) && | ||
11056 | (dev->pdev->subsystem_vendor == 0x106b) && | ||
11057 | diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | ||
11058 | index 22ce4d6..8d0b7aa 100644 | ||
11059 | --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | ||
11060 | +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | ||
11061 | @@ -292,7 +292,8 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
11062 | uint32_t mask; | ||
11063 | |||
11064 | if (radeon_crtc->crtc_id) | ||
11065 | - mask = (RADEON_CRTC2_DISP_DIS | | ||
11066 | + mask = (RADEON_CRTC2_EN | | ||
11067 | + RADEON_CRTC2_DISP_DIS | | ||
11068 | RADEON_CRTC2_VSYNC_DIS | | ||
11069 | RADEON_CRTC2_HSYNC_DIS | | ||
11070 | RADEON_CRTC2_DISP_REQ_EN_B); | ||
11071 | @@ -304,7 +305,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
11072 | switch (mode) { | ||
11073 | case DRM_MODE_DPMS_ON: | ||
11074 | if (radeon_crtc->crtc_id) | ||
11075 | - WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); | ||
11076 | + WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); | ||
11077 | else { | ||
11078 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | | ||
11079 | RADEON_CRTC_DISP_REQ_EN_B)); | ||
11080 | @@ -318,7 +319,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
11081 | case DRM_MODE_DPMS_OFF: | ||
11082 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | ||
11083 | if (radeon_crtc->crtc_id) | ||
11084 | - WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); | ||
11085 | + WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); | ||
11086 | else { | ||
11087 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | | ||
11088 | RADEON_CRTC_DISP_REQ_EN_B)); | ||
11089 | diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c | ||
11090 | index c8942ca..f8a465d 100644 | ||
11091 | --- a/drivers/gpu/drm/radeon/radeon_test.c | ||
11092 | +++ b/drivers/gpu/drm/radeon/radeon_test.c | ||
11093 | @@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev) | ||
11094 | /* Number of tests = | ||
11095 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | ||
11096 | */ | ||
11097 | - n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | ||
11098 | - rdev->cp.ring_size)) / size; | ||
11099 | + n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | ||
11100 | + rdev->cp.ring_size) / size; | ||
11101 | |||
11102 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | ||
11103 | if (!gtt_obj) { | ||
11104 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c | ||
11105 | index 4444f48..5f117cd 100644 | ||
11106 | --- a/drivers/gpu/drm/radeon/rs600.c | ||
11107 | +++ b/drivers/gpu/drm/radeon/rs600.c | ||
11108 | @@ -301,7 +301,9 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) | ||
11109 | |||
11110 | void rs600_gpu_init(struct radeon_device *rdev) | ||
11111 | { | ||
11112 | + /* FIXME: HDP same place on rs600 ? */ | ||
11113 | r100_hdp_reset(rdev); | ||
11114 | + /* FIXME: is this correct ? */ | ||
11115 | r420_pipes_init(rdev); | ||
11116 | /* Wait for mc idle */ | ||
11117 | if (rs600_mc_wait_for_idle(rdev)) | ||
11118 | @@ -310,20 +312,9 @@ void rs600_gpu_init(struct radeon_device *rdev) | ||
11119 | |||
11120 | void rs600_vram_info(struct radeon_device *rdev) | ||
11121 | { | ||
11122 | +	/* FIXME: to do, or are these values sane? */ | ||
11123 | rdev->mc.vram_is_ddr = true; | ||
11124 | rdev->mc.vram_width = 128; | ||
11125 | - | ||
11126 | - rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
11127 | - rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
11128 | - | ||
11129 | - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
11130 | - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
11131 | - | ||
11132 | - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
11133 | - rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
11134 | - | ||
11135 | - if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
11136 | - rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
11137 | } | ||
11138 | |||
11139 | void rs600_bandwidth_update(struct radeon_device *rdev) | ||
11140 | diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c | ||
11141 | index b12ff76..2754717 100644 | ||
11142 | --- a/drivers/gpu/drm/radeon/rs690.c | ||
11143 | +++ b/drivers/gpu/drm/radeon/rs690.c | ||
11144 | @@ -131,25 +131,24 @@ void rs690_pm_info(struct radeon_device *rdev) | ||
11145 | |||
11146 | void rs690_vram_info(struct radeon_device *rdev) | ||
11147 | { | ||
11148 | + uint32_t tmp; | ||
11149 | fixed20_12 a; | ||
11150 | |||
11151 | rs400_gart_adjust_size(rdev); | ||
11152 | - | ||
11153 | +	/* DDR for all cards after R300 & IGP */ | ||
11154 | rdev->mc.vram_is_ddr = true; | ||
11155 | - rdev->mc.vram_width = 128; | ||
11156 | - | ||
11157 | + /* FIXME: is this correct for RS690/RS740 ? */ | ||
11158 | + tmp = RREG32(RADEON_MEM_CNTL); | ||
11159 | + if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | ||
11160 | + rdev->mc.vram_width = 128; | ||
11161 | + } else { | ||
11162 | + rdev->mc.vram_width = 64; | ||
11163 | + } | ||
11164 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
11165 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
11166 | |||
11167 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
11168 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
11169 | - | ||
11170 | - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
11171 | - rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
11172 | - | ||
11173 | - if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
11174 | - rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
11175 | - | ||
11176 | rs690_pm_info(rdev); | ||
11177 | /* FIXME: we should enforce default clock in case GPU is not in | ||
11178 | * default setup | ||
11179 | diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c | ||
11180 | index 5b4d66d..4b96e7a 100644 | ||
11181 | --- a/drivers/hid/hid-apple.c | ||
11182 | +++ b/drivers/hid/hid-apple.c | ||
11183 | @@ -431,13 +431,6 @@ static const struct hid_device_id apple_devices[] = { | ||
11184 | .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, | ||
11185 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), | ||
11186 | .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, | ||
11187 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), | ||
11188 | - .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | ||
11189 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), | ||
11190 | - .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | | ||
11191 | - APPLE_ISO_KEYBOARD }, | ||
11192 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS), | ||
11193 | - .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | ||
11194 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY), | ||
11195 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, | ||
11196 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY), | ||
11197 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c | ||
11198 | index 9678354..7d05c4b 100644 | ||
11199 | --- a/drivers/hid/hid-core.c | ||
11200 | +++ b/drivers/hid/hid-core.c | ||
11201 | @@ -1287,9 +1287,6 @@ static const struct hid_device_id hid_blacklist[] = { | ||
11202 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) }, | ||
11203 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) }, | ||
11204 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) }, | ||
11205 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, | ||
11206 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, | ||
11207 | - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, | ||
11208 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, | ||
11209 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, | ||
11210 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | ||
11211 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h | ||
11212 | index e380e7b..adbef5d 100644 | ||
11213 | --- a/drivers/hid/hid-ids.h | ||
11214 | +++ b/drivers/hid/hid-ids.h | ||
11215 | @@ -88,9 +88,6 @@ | ||
11216 | #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 | ||
11217 | #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 | ||
11218 | #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 | ||
11219 | -#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 | ||
11220 | -#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a | ||
11221 | -#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b | ||
11222 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a | ||
11223 | #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b | ||
11224 | #define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 | ||
11225 | diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c | ||
11226 | index 5d901f6..03bd703 100644 | ||
11227 | --- a/drivers/hid/usbhid/hid-core.c | ||
11228 | +++ b/drivers/hid/usbhid/hid-core.c | ||
11229 | @@ -998,8 +998,7 @@ static int usbhid_start(struct hid_device *hid) | ||
11230 | usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma; | ||
11231 | usbhid->urbctrl->transfer_flags |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP); | ||
11232 | |||
11233 | - if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS)) | ||
11234 | - usbhid_init_reports(hid); | ||
11235 | + usbhid_init_reports(hid); | ||
11236 | |||
11237 | set_bit(HID_STARTED, &usbhid->iofl); | ||
11238 | |||
11239 | diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c | ||
11240 | index 5713b93..0d9045a 100644 | ||
11241 | --- a/drivers/hid/usbhid/hid-quirks.c | ||
11242 | +++ b/drivers/hid/usbhid/hid-quirks.c | ||
11243 | @@ -280,7 +280,7 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct) | ||
11244 | if (idVendor == USB_VENDOR_ID_NCR && | ||
11245 | idProduct >= USB_DEVICE_ID_NCR_FIRST && | ||
11246 | idProduct <= USB_DEVICE_ID_NCR_LAST) | ||
11247 | - return HID_QUIRK_NO_INIT_REPORTS; | ||
11248 | + return HID_QUIRK_NOGET; | ||
11249 | |||
11250 | down_read(&dquirks_rwsem); | ||
11251 | bl_entry = usbhid_exists_dquirk(idVendor, idProduct); | ||
11252 | diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig | ||
11253 | index c1f7ea0..700e93a 100644 | ||
11254 | --- a/drivers/hwmon/Kconfig | ||
11255 | +++ b/drivers/hwmon/Kconfig | ||
11256 | @@ -374,7 +374,7 @@ config SENSORS_GL520SM | ||
11257 | |||
11258 | config SENSORS_CORETEMP | ||
11259 | tristate "Intel Core/Core2/Atom temperature sensor" | ||
11260 | - depends on X86 && PCI && EXPERIMENTAL | ||
11261 | + depends on X86 && EXPERIMENTAL | ||
11262 | help | ||
11263 | If you say yes here you get support for the temperature | ||
11264 | sensor inside your CPU. Most of the family 6 CPUs | ||
11265 | diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c | ||
11266 | index 14f910d..1852f27 100644 | ||
11267 | --- a/drivers/hwmon/adt7462.c | ||
11268 | +++ b/drivers/hwmon/adt7462.c | ||
11269 | @@ -97,7 +97,7 @@ I2C_CLIENT_INSMOD_1(adt7462); | ||
11270 | #define ADT7462_PIN24_SHIFT 6 | ||
11271 | #define ADT7462_PIN26_VOLT_INPUT 0x08 | ||
11272 | #define ADT7462_PIN25_VOLT_INPUT 0x20 | ||
11273 | -#define ADT7462_PIN28_SHIFT 4 /* cfg3 */ | ||
11274 | +#define ADT7462_PIN28_SHIFT 6 /* cfg3 */ | ||
11275 | #define ADT7462_PIN28_VOLT 0x5 | ||
11276 | |||
11277 | #define ADT7462_REG_ALARM1 0xB8 | ||
11278 | @@ -182,7 +182,7 @@ I2C_CLIENT_INSMOD_1(adt7462); | ||
11279 | * | ||
11280 | * Some, but not all, of these voltages have low/high limits. | ||
11281 | */ | ||
11282 | -#define ADT7462_VOLT_COUNT 13 | ||
11283 | +#define ADT7462_VOLT_COUNT 12 | ||
11284 | |||
11285 | #define ADT7462_VENDOR 0x41 | ||
11286 | #define ADT7462_DEVICE 0x62 | ||
11287 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c | ||
11288 | index 2d7bcee..caef39c 100644 | ||
11289 | --- a/drivers/hwmon/coretemp.c | ||
11290 | +++ b/drivers/hwmon/coretemp.c | ||
11291 | @@ -33,7 +33,6 @@ | ||
11292 | #include <linux/list.h> | ||
11293 | #include <linux/platform_device.h> | ||
11294 | #include <linux/cpu.h> | ||
11295 | -#include <linux/pci.h> | ||
11296 | #include <asm/msr.h> | ||
11297 | #include <asm/processor.h> | ||
11298 | |||
11299 | @@ -162,7 +161,6 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * | ||
11300 | int usemsr_ee = 1; | ||
11301 | int err; | ||
11302 | u32 eax, edx; | ||
11303 | - struct pci_dev *host_bridge; | ||
11304 | |||
11305 | /* Early chips have no MSR for TjMax */ | ||
11306 | |||
11307 | @@ -170,21 +168,11 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * | ||
11308 | usemsr_ee = 0; | ||
11309 | } | ||
11310 | |||
11311 | - /* Atom CPUs */ | ||
11312 | +	/* Atoms seem to have TjMax at 90C */ | ||
11313 | |||
11314 | if (c->x86_model == 0x1c) { | ||
11315 | usemsr_ee = 0; | ||
11316 | - | ||
11317 | - host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); | ||
11318 | - | ||
11319 | - if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL | ||
11320 | - && (host_bridge->device == 0xa000 /* NM10 based nettop */ | ||
11321 | - || host_bridge->device == 0xa010)) /* NM10 based netbook */ | ||
11322 | - tjmax = 100000; | ||
11323 | - else | ||
11324 | - tjmax = 90000; | ||
11325 | - | ||
11326 | - pci_dev_put(host_bridge); | ||
11327 | + tjmax = 90000; | ||
11328 | } | ||
11329 | |||
11330 | if ((c->x86_model > 0xe) && (usemsr_ee)) { | ||
11331 | diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c | ||
11332 | index f600813..da1b1f9 100644 | ||
11333 | --- a/drivers/hwmon/fschmd.c | ||
11334 | +++ b/drivers/hwmon/fschmd.c | ||
11335 | @@ -767,7 +767,6 @@ leave: | ||
11336 | static int watchdog_open(struct inode *inode, struct file *filp) | ||
11337 | { | ||
11338 | struct fschmd_data *pos, *data = NULL; | ||
11339 | - int watchdog_is_open; | ||
11340 | |||
11341 | /* We get called from drivers/char/misc.c with misc_mtx hold, and we | ||
11342 | call misc_register() from fschmd_probe() with watchdog_data_mutex | ||
11343 | @@ -782,12 +781,10 @@ static int watchdog_open(struct inode *inode, struct file *filp) | ||
11344 | } | ||
11345 | } | ||
11346 | /* Note we can never not have found data, so we don't check for this */ | ||
11347 | - watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open); | ||
11348 | - if (!watchdog_is_open) | ||
11349 | - kref_get(&data->kref); | ||
11350 | + kref_get(&data->kref); | ||
11351 | mutex_unlock(&watchdog_data_mutex); | ||
11352 | |||
11353 | - if (watchdog_is_open) | ||
11354 | + if (test_and_set_bit(0, &data->watchdog_is_open)) | ||
11355 | return -EBUSY; | ||
11356 | |||
11357 | /* Start the watchdog */ | ||
11358 | diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c | ||
11359 | index 1508e0a..f7e7016 100644 | ||
11360 | --- a/drivers/hwmon/lm78.c | ||
11361 | +++ b/drivers/hwmon/lm78.c | ||
11362 | @@ -870,16 +870,17 @@ static struct lm78_data *lm78_update_device(struct device *dev) | ||
11363 | static int __init lm78_isa_found(unsigned short address) | ||
11364 | { | ||
11365 | int val, save, found = 0; | ||
11366 | - int port; | ||
11367 | - | ||
11368 | - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 | ||
11369 | - * to base+7 and some base+5 to base+6. So we better request each port | ||
11370 | - * individually for the probing phase. */ | ||
11371 | - for (port = address; port < address + LM78_EXTENT; port++) { | ||
11372 | - if (!request_region(port, 1, "lm78")) { | ||
11373 | - pr_debug("lm78: Failed to request port 0x%x\n", port); | ||
11374 | - goto release; | ||
11375 | - } | ||
11376 | + | ||
11377 | + /* We have to request the region in two parts because some | ||
11378 | + boards declare base+4 to base+7 as a PNP device */ | ||
11379 | + if (!request_region(address, 4, "lm78")) { | ||
11380 | + pr_debug("lm78: Failed to request low part of region\n"); | ||
11381 | + return 0; | ||
11382 | + } | ||
11383 | + if (!request_region(address + 4, 4, "lm78")) { | ||
11384 | + pr_debug("lm78: Failed to request high part of region\n"); | ||
11385 | + release_region(address, 4); | ||
11386 | + return 0; | ||
11387 | } | ||
11388 | |||
11389 | #define REALLY_SLOW_IO | ||
11390 | @@ -943,8 +944,8 @@ static int __init lm78_isa_found(unsigned short address) | ||
11391 | val & 0x80 ? "LM79" : "LM78", (int)address); | ||
11392 | |||
11393 | release: | ||
11394 | - for (port--; port >= address; port--) | ||
11395 | - release_region(port, 1); | ||
11396 | + release_region(address + 4, 4); | ||
11397 | + release_region(address, 4); | ||
11398 | return found; | ||
11399 | } | ||
11400 | |||
11401 | diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c | ||
11402 | index 864a371..ebe38b6 100644 | ||
11403 | --- a/drivers/hwmon/sht15.c | ||
11404 | +++ b/drivers/hwmon/sht15.c | ||
11405 | @@ -305,7 +305,7 @@ static inline int sht15_calc_temp(struct sht15_data *data) | ||
11406 | int d1 = 0; | ||
11407 | int i; | ||
11408 | |||
11409 | - for (i = 1; i < ARRAY_SIZE(temppoints); i++) | ||
11410 | + for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++) | ||
11411 | /* Find pointer to interpolate */ | ||
11412 | if (data->supply_uV > temppoints[i - 1].vdd) { | ||
11413 | d1 = (data->supply_uV/1000 - temppoints[i - 1].vdd) | ||
11414 | @@ -332,12 +332,12 @@ static inline int sht15_calc_humid(struct sht15_data *data) | ||
11415 | |||
11416 | const int c1 = -4; | ||
11417 | const int c2 = 40500; /* x 10 ^ -6 */ | ||
11418 | - const int c3 = -2800; /* x10 ^ -9 */ | ||
11419 | + const int c3 = 2800; /* x10 ^ -9 */ | ||
11420 | |||
11421 | RHlinear = c1*1000 | ||
11422 | + c2 * data->val_humid/1000 | ||
11423 | + (data->val_humid * data->val_humid * c3)/1000000; | ||
11424 | - return (temp - 25000) * (10000 + 80 * data->val_humid) | ||
11425 | + return (temp - 25000) * (10000 + 800 * data->val_humid) | ||
11426 | / 1000000 + RHlinear; | ||
11427 | } | ||
11428 | |||
11429 | diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c | ||
11430 | index f0b6883..d27ed1b 100644 | ||
11431 | --- a/drivers/hwmon/w83781d.c | ||
11432 | +++ b/drivers/hwmon/w83781d.c | ||
11433 | @@ -1818,17 +1818,17 @@ static int __init | ||
11434 | w83781d_isa_found(unsigned short address) | ||
11435 | { | ||
11436 | int val, save, found = 0; | ||
11437 | - int port; | ||
11438 | - | ||
11439 | - /* Some boards declare base+0 to base+7 as a PNP device, some base+4 | ||
11440 | - * to base+7 and some base+5 to base+6. So we better request each port | ||
11441 | - * individually for the probing phase. */ | ||
11442 | - for (port = address; port < address + W83781D_EXTENT; port++) { | ||
11443 | - if (!request_region(port, 1, "w83781d")) { | ||
11444 | - pr_debug("w83781d: Failed to request port 0x%x\n", | ||
11445 | - port); | ||
11446 | - goto release; | ||
11447 | - } | ||
11448 | + | ||
11449 | + /* We have to request the region in two parts because some | ||
11450 | + boards declare base+4 to base+7 as a PNP device */ | ||
11451 | + if (!request_region(address, 4, "w83781d")) { | ||
11452 | + pr_debug("w83781d: Failed to request low part of region\n"); | ||
11453 | + return 0; | ||
11454 | + } | ||
11455 | + if (!request_region(address + 4, 4, "w83781d")) { | ||
11456 | + pr_debug("w83781d: Failed to request high part of region\n"); | ||
11457 | + release_region(address, 4); | ||
11458 | + return 0; | ||
11459 | } | ||
11460 | |||
11461 | #define REALLY_SLOW_IO | ||
11462 | @@ -1902,8 +1902,8 @@ w83781d_isa_found(unsigned short address) | ||
11463 | val == 0x30 ? "W83782D" : "W83781D", (int)address); | ||
11464 | |||
11465 | release: | ||
11466 | - for (port--; port >= address; port--) | ||
11467 | - release_region(port, 1); | ||
11468 | + release_region(address + 4, 4); | ||
11469 | + release_region(address, 4); | ||
11470 | return found; | ||
11471 | } | ||
11472 | |||
11473 | diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c | ||
11474 | index f7346a9..0ed68e2 100644 | ||
11475 | --- a/drivers/i2c/busses/i2c-pca-isa.c | ||
11476 | +++ b/drivers/i2c/busses/i2c-pca-isa.c | ||
11477 | @@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd) | ||
11478 | unsigned long timeout; | ||
11479 | |||
11480 | if (irq > -1) { | ||
11481 | - ret = wait_event_timeout(pca_wait, | ||
11482 | + ret = wait_event_interruptible_timeout(pca_wait, | ||
11483 | pca_isa_readbyte(pd, I2C_PCA_CON) | ||
11484 | & I2C_PCA_CON_SI, pca_isa_ops.timeout); | ||
11485 | } else { | ||
11486 | @@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd) | ||
11487 | } | ||
11488 | |||
11489 | static irqreturn_t pca_handler(int this_irq, void *dev_id) { | ||
11490 | - wake_up(&pca_wait); | ||
11491 | + wake_up_interruptible(&pca_wait); | ||
11492 | return IRQ_HANDLED; | ||
11493 | } | ||
11494 | |||
11495 | diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c | ||
11496 | index 5b2213d..c4df9d4 100644 | ||
11497 | --- a/drivers/i2c/busses/i2c-pca-platform.c | ||
11498 | +++ b/drivers/i2c/busses/i2c-pca-platform.c | ||
11499 | @@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd) | ||
11500 | unsigned long timeout; | ||
11501 | |||
11502 | if (i2c->irq) { | ||
11503 | - ret = wait_event_timeout(i2c->wait, | ||
11504 | + ret = wait_event_interruptible_timeout(i2c->wait, | ||
11505 | i2c->algo_data.read_byte(i2c, I2C_PCA_CON) | ||
11506 | & I2C_PCA_CON_SI, i2c->adap.timeout); | ||
11507 | } else { | ||
11508 | @@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) | ||
11509 | if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0) | ||
11510 | return IRQ_NONE; | ||
11511 | |||
11512 | - wake_up(&i2c->wait); | ||
11513 | + wake_up_interruptible(&i2c->wait); | ||
11514 | |||
11515 | return IRQ_HANDLED; | ||
11516 | } | ||
11517 | diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c | ||
11518 | index e29b6d5..b1c050f 100644 | ||
11519 | --- a/drivers/i2c/busses/i2c-tiny-usb.c | ||
11520 | +++ b/drivers/i2c/busses/i2c-tiny-usb.c | ||
11521 | @@ -13,7 +13,6 @@ | ||
11522 | #include <linux/kernel.h> | ||
11523 | #include <linux/errno.h> | ||
11524 | #include <linux/module.h> | ||
11525 | -#include <linux/types.h> | ||
11526 | |||
11527 | /* include interfaces to usb layer */ | ||
11528 | #include <linux/usb.h> | ||
11529 | @@ -32,8 +31,8 @@ | ||
11530 | #define CMD_I2C_IO_END (1<<1) | ||
11531 | |||
11532 | /* i2c bit delay, default is 10us -> 100kHz */ | ||
11533 | -static unsigned short delay = 10; | ||
11534 | -module_param(delay, ushort, 0); | ||
11535 | +static int delay = 10; | ||
11536 | +module_param(delay, int, 0); | ||
11537 | MODULE_PARM_DESC(delay, "bit delay in microseconds, " | ||
11538 | "e.g. 10 for 100kHz (default is 100kHz)"); | ||
11539 | |||
11540 | @@ -110,7 +109,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) | ||
11541 | |||
11542 | static u32 usb_func(struct i2c_adapter *adapter) | ||
11543 | { | ||
11544 | - __le32 func; | ||
11545 | + u32 func; | ||
11546 | |||
11547 | /* get functionality from adapter */ | ||
11548 | if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != | ||
11549 | @@ -119,7 +118,7 @@ static u32 usb_func(struct i2c_adapter *adapter) | ||
11550 | return 0; | ||
11551 | } | ||
11552 | |||
11553 | - return le32_to_cpu(func); | ||
11554 | + return func; | ||
11555 | } | ||
11556 | |||
11557 | /* This is the actual algorithm we define */ | ||
11558 | @@ -217,7 +216,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, | ||
11559 | "i2c-tiny-usb at bus %03d device %03d", | ||
11560 | dev->usb_dev->bus->busnum, dev->usb_dev->devnum); | ||
11561 | |||
11562 | - if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) { | ||
11563 | + if (usb_write(&dev->adapter, CMD_SET_DELAY, | ||
11564 | + cpu_to_le16(delay), 0, NULL, 0) != 0) { | ||
11565 | dev_err(&dev->adapter.dev, | ||
11566 | "failure setting delay to %dus\n", delay); | ||
11567 | retval = -EIO; | ||
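The i2c-tiny-usb hunk above changes the module parameter's type and adds or drops the little-endian conversions around the USB transfers. A minimal sketch of the two patterns involved, assuming hypothetical demo_* names rather than the driver's own symbols:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* bit delay in microseconds, 10us -> 100kHz, overridable at load time */
static int demo_delay = 10;
module_param(demo_delay, int, 0);
MODULE_PARM_DESC(demo_delay, "bit delay in microseconds (default 10)");

/* values sent to the device must be little-endian on any host */
static __le16 demo_wire_delay(void)
{
	return cpu_to_le16(demo_delay);
}

MODULE_LICENSE("GPL");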
11568 | diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c | ||
11569 | index 3bf7b0a..2965043 100644 | ||
11570 | --- a/drivers/i2c/i2c-core.c | ||
11571 | +++ b/drivers/i2c/i2c-core.c | ||
11572 | @@ -801,9 +801,6 @@ int i2c_del_adapter(struct i2c_adapter *adap) | ||
11573 | adap->dev.parent); | ||
11574 | #endif | ||
11575 | |||
11576 | - /* device name is gone after device_unregister */ | ||
11577 | - dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); | ||
11578 | - | ||
11579 | /* clean up the sysfs representation */ | ||
11580 | init_completion(&adap->dev_released); | ||
11581 | device_unregister(&adap->dev); | ||
11582 | @@ -816,6 +813,8 @@ int i2c_del_adapter(struct i2c_adapter *adap) | ||
11583 | idr_remove(&i2c_adapter_idr, adap->nr); | ||
11584 | mutex_unlock(&core_lock); | ||
11585 | |||
11586 | + dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); | ||
11587 | + | ||
11588 | /* Clear the device structure in case this adapter is ever going to be | ||
11589 | added again */ | ||
11590 | memset(&adap->dev, 0, sizeof(adap->dev)); | ||
11591 | diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c | ||
11592 | index 1ccfb40..9aec78d 100644 | ||
11593 | --- a/drivers/ide/slc90e66.c | ||
11594 | +++ b/drivers/ide/slc90e66.c | ||
11595 | @@ -91,7 +91,8 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) | ||
11596 | |||
11597 | if (!(reg48 & u_flag)) | ||
11598 | pci_write_config_word(dev, 0x48, reg48|u_flag); | ||
11599 | - if ((reg4a & a_speed) != u_speed) { | ||
11600 | + /* FIXME: (reg4a & a_speed) ? */ | ||
11601 | + if ((reg4a & u_speed) != u_speed) { | ||
11602 | pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); | ||
11603 | pci_read_config_word(dev, 0x4a, ®4a); | ||
11604 | pci_write_config_word(dev, 0x4a, reg4a|u_speed); | ||
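The slc90e66 hunk adjusts which mask is tested before a read-modify-write of a PCI configuration word. As a reference for the access pattern only, not the driver's register semantics, a generic sketch:

#include <linux/pci.h>

/* clear the bits in 'clear', then set the bits in 'set' in config word 'reg' */
static void demo_update_config_word(struct pci_dev *dev, int reg,
				    u16 clear, u16 set)
{
	u16 val;

	pci_read_config_word(dev, reg, &val);
	val &= ~clear;
	val |= set;
	pci_write_config_word(dev, reg, val);
}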
11605 | diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c | ||
11606 | index 100da85..b368406 100644 | ||
11607 | --- a/drivers/infiniband/hw/ipath/ipath_fs.c | ||
11608 | +++ b/drivers/infiniband/hw/ipath/ipath_fs.c | ||
11609 | @@ -346,8 +346,10 @@ static int ipathfs_fill_super(struct super_block *sb, void *data, | ||
11610 | list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { | ||
11611 | spin_unlock_irqrestore(&ipath_devs_lock, flags); | ||
11612 | ret = create_device_files(sb, dd); | ||
11613 | - if (ret) | ||
11614 | + if (ret) { | ||
11615 | + deactivate_locked_super(sb); | ||
11616 | goto bail; | ||
11617 | + } | ||
11618 | spin_lock_irqsave(&ipath_devs_lock, flags); | ||
11619 | } | ||
11620 | |||
11621 | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c | ||
11622 | index df3eb8c..2bf5116 100644 | ||
11623 | --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c | ||
11624 | +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | ||
11625 | @@ -884,7 +884,6 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour, | ||
11626 | |||
11627 | neigh->neighbour = neighbour; | ||
11628 | neigh->dev = dev; | ||
11629 | - memset(&neigh->dgid.raw, 0, sizeof (union ib_gid)); | ||
11630 | *to_ipoib_neigh(neighbour) = neigh; | ||
11631 | skb_queue_head_init(&neigh->queue); | ||
11632 | ipoib_cm_set(neigh, NULL); | ||
11633 | diff --git a/drivers/input/misc/winbond-cir.c b/drivers/input/misc/winbond-cir.c | ||
11634 | index c8f5a9a..33309fe 100644 | ||
11635 | --- a/drivers/input/misc/winbond-cir.c | ||
11636 | +++ b/drivers/input/misc/winbond-cir.c | ||
11637 | @@ -768,7 +768,7 @@ wbcir_parse_rc6(struct device *dev, struct wbcir_data *data) | ||
11638 | return; | ||
11639 | } | ||
11640 | |||
11641 | - dev_dbg(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X " | ||
11642 | + dev_info(dev, "IR-RC6 ad 0x%02X cm 0x%02X cu 0x%04X " | ||
11643 | "toggle %u mode %u scan 0x%08X\n", | ||
11644 | address, | ||
11645 | command, | ||
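The winbond-cir hunk only switches the decoded-scancode message between dev_dbg() and dev_info(). For context, a hedged sketch of the difference, again with illustrative demo_* names:

#include <linux/device.h>
#include <linux/types.h>

static void demo_report(struct device *dev, u8 address, u8 command)
{
	/* compiled out unless DEBUG or dynamic debug is enabled */
	dev_dbg(dev, "decoded ad 0x%02X cm 0x%02X\n", address, command);
	/* always printed at KERN_INFO */
	dev_info(dev, "decoded ad 0x%02X cm 0x%02X\n", address, command);
}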
11646 | diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c | ||
11647 | index fc8823b..f361106 100644 | ||
11648 | --- a/drivers/input/mouse/alps.c | ||
11649 | +++ b/drivers/input/mouse/alps.c | ||
11650 | @@ -5,7 +5,6 @@ | ||
11651 | * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com> | ||
11652 | * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru> | ||
11653 | * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> | ||
11654 | - * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net> | ||
11655 | * | ||
11656 | * ALPS detection, tap switching and status querying info is taken from | ||
11657 | * tpconfig utility (by C. Scott Ananian and Bruce Kall). | ||
11658 | @@ -36,8 +35,6 @@ | ||
11659 | #define ALPS_OLDPROTO 0x10 | ||
11660 | #define ALPS_PASS 0x20 | ||
11661 | #define ALPS_FW_BK_2 0x40 | ||
11662 | -#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with | ||
11663 | - 6-byte ALPS packet */ | ||
11664 | |||
11665 | static const struct alps_model_info alps_model_data[] = { | ||
11666 | { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ | ||
11667 | @@ -58,9 +55,7 @@ static const struct alps_model_info alps_model_data[] = { | ||
11668 | { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ | ||
11669 | { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, | ||
11670 | { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ | ||
11671 | - /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ | ||
11672 | - { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, | ||
11673 | - ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, | ||
11674 | + { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude E6500 */ | ||
11675 | { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FW_BK_1 }, /* Dell Vostro 1400 */ | ||
11676 | }; | ||
11677 | |||
11678 | @@ -71,88 +66,20 @@ static const struct alps_model_info alps_model_data[] = { | ||
11679 | */ | ||
11680 | |||
11681 | /* | ||
11682 | - * PS/2 packet format | ||
11683 | - * | ||
11684 | - * byte 0: 0 0 YSGN XSGN 1 M R L | ||
11685 | - * byte 1: X7 X6 X5 X4 X3 X2 X1 X0 | ||
11686 | - * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 | ||
11687 | - * | ||
11688 | - * Note that the device never signals overflow condition. | ||
11689 | - * | ||
11690 | - * ALPS absolute Mode - new format | ||
11691 | + * ALPS abolute Mode - new format | ||
11692 | * | ||
11693 | * byte 0: 1 ? ? ? 1 ? ? ? | ||
11694 | * byte 1: 0 x6 x5 x4 x3 x2 x1 x0 | ||
11695 | - * byte 2: 0 x10 x9 x8 x7 ? fin ges | ||
11696 | + * byte 2: 0 x10 x9 x8 x7 ? fin ges | ||
11697 | * byte 3: 0 y9 y8 y7 1 M R L | ||
11698 | * byte 4: 0 y6 y5 y4 y3 y2 y1 y0 | ||
11699 | * byte 5: 0 z6 z5 z4 z3 z2 z1 z0 | ||
11700 | * | ||
11701 | - * Dualpoint device -- interleaved packet format | ||
11702 | - * | ||
11703 | - * byte 0: 1 1 0 0 1 1 1 1 | ||
11704 | - * byte 1: 0 x6 x5 x4 x3 x2 x1 x0 | ||
11705 | - * byte 2: 0 x10 x9 x8 x7 0 fin ges | ||
11706 | - * byte 3: 0 0 YSGN XSGN 1 1 1 1 | ||
11707 | - * byte 4: X7 X6 X5 X4 X3 X2 X1 X0 | ||
11708 | - * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 | ||
11709 | - * byte 6: 0 y9 y8 y7 1 m r l | ||
11710 | - * byte 7: 0 y6 y5 y4 y3 y2 y1 y0 | ||
11711 | - * byte 8: 0 z6 z5 z4 z3 z2 z1 z0 | ||
11712 | - * | ||
11713 | - * CAPITALS = stick, miniscules = touchpad | ||
11714 | - * | ||
11715 | * ?'s can have different meanings on different models, | ||
11716 | * such as wheel rotation, extra buttons, stick buttons | ||
11717 | * on a dualpoint, etc. | ||
11718 | */ | ||
11719 | |||
11720 | -static bool alps_is_valid_first_byte(const struct alps_model_info *model, | ||
11721 | - unsigned char data) | ||
11722 | -{ | ||
11723 | - return (data & model->mask0) == model->byte0; | ||
11724 | -} | ||
11725 | - | ||
11726 | -static void alps_report_buttons(struct psmouse *psmouse, | ||
11727 | - struct input_dev *dev1, struct input_dev *dev2, | ||
11728 | - int left, int right, int middle) | ||
11729 | -{ | ||
11730 | - struct alps_data *priv = psmouse->private; | ||
11731 | - const struct alps_model_info *model = priv->i; | ||
11732 | - | ||
11733 | - if (model->flags & ALPS_PS2_INTERLEAVED) { | ||
11734 | - struct input_dev *dev; | ||
11735 | - | ||
11736 | - /* | ||
11737 | - * If shared button has already been reported on the | ||
11738 | - * other device (dev2) then this event should be also | ||
11739 | - * sent through that device. | ||
11740 | - */ | ||
11741 | - dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; | ||
11742 | - input_report_key(dev, BTN_LEFT, left); | ||
11743 | - | ||
11744 | - dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1; | ||
11745 | - input_report_key(dev, BTN_RIGHT, right); | ||
11746 | - | ||
11747 | - dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; | ||
11748 | - input_report_key(dev, BTN_MIDDLE, middle); | ||
11749 | - | ||
11750 | - /* | ||
11751 | - * Sync the _other_ device now, we'll do the first | ||
11752 | - * device later once we report the rest of the events. | ||
11753 | - */ | ||
11754 | - input_sync(dev2); | ||
11755 | - } else { | ||
11756 | - /* | ||
11757 | - * For devices with non-interleaved packets we know what | ||
11758 | - * device buttons belong to so we can simply report them. | ||
11759 | - */ | ||
11760 | - input_report_key(dev1, BTN_LEFT, left); | ||
11761 | - input_report_key(dev1, BTN_RIGHT, right); | ||
11762 | - input_report_key(dev1, BTN_MIDDLE, middle); | ||
11763 | - } | ||
11764 | -} | ||
11765 | - | ||
11766 | static void alps_process_packet(struct psmouse *psmouse) | ||
11767 | { | ||
11768 | struct alps_data *priv = psmouse->private; | ||
11769 | @@ -162,6 +89,18 @@ static void alps_process_packet(struct psmouse *psmouse) | ||
11770 | int x, y, z, ges, fin, left, right, middle; | ||
11771 | int back = 0, forward = 0; | ||
11772 | |||
11773 | + if ((packet[0] & 0xc8) == 0x08) { /* 3-byte PS/2 packet */ | ||
11774 | + input_report_key(dev2, BTN_LEFT, packet[0] & 1); | ||
11775 | + input_report_key(dev2, BTN_RIGHT, packet[0] & 2); | ||
11776 | + input_report_key(dev2, BTN_MIDDLE, packet[0] & 4); | ||
11777 | + input_report_rel(dev2, REL_X, | ||
11778 | + packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); | ||
11779 | + input_report_rel(dev2, REL_Y, | ||
11780 | + packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); | ||
11781 | + input_sync(dev2); | ||
11782 | + return; | ||
11783 | + } | ||
11784 | + | ||
11785 | if (priv->i->flags & ALPS_OLDPROTO) { | ||
11786 | left = packet[2] & 0x10; | ||
11787 | right = packet[2] & 0x08; | ||
11788 | @@ -197,13 +136,18 @@ static void alps_process_packet(struct psmouse *psmouse) | ||
11789 | input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); | ||
11790 | input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); | ||
11791 | |||
11792 | - alps_report_buttons(psmouse, dev2, dev, left, right, middle); | ||
11793 | + input_report_key(dev2, BTN_LEFT, left); | ||
11794 | + input_report_key(dev2, BTN_RIGHT, right); | ||
11795 | + input_report_key(dev2, BTN_MIDDLE, middle); | ||
11796 | |||
11797 | + input_sync(dev); | ||
11798 | input_sync(dev2); | ||
11799 | return; | ||
11800 | } | ||
11801 | |||
11802 | - alps_report_buttons(psmouse, dev, dev2, left, right, middle); | ||
11803 | + input_report_key(dev, BTN_LEFT, left); | ||
11804 | + input_report_key(dev, BTN_RIGHT, right); | ||
11805 | + input_report_key(dev, BTN_MIDDLE, middle); | ||
11806 | |||
11807 | /* Convert hardware tap to a reasonable Z value */ | ||
11808 | if (ges && !fin) z = 40; | ||
11809 | @@ -244,168 +188,25 @@ static void alps_process_packet(struct psmouse *psmouse) | ||
11810 | input_sync(dev); | ||
11811 | } | ||
11812 | |||
11813 | -static void alps_report_bare_ps2_packet(struct psmouse *psmouse, | ||
11814 | - unsigned char packet[], | ||
11815 | - bool report_buttons) | ||
11816 | -{ | ||
11817 | - struct alps_data *priv = psmouse->private; | ||
11818 | - struct input_dev *dev2 = priv->dev2; | ||
11819 | - | ||
11820 | - if (report_buttons) | ||
11821 | - alps_report_buttons(psmouse, dev2, psmouse->dev, | ||
11822 | - packet[0] & 1, packet[0] & 2, packet[0] & 4); | ||
11823 | - | ||
11824 | - input_report_rel(dev2, REL_X, | ||
11825 | - packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); | ||
11826 | - input_report_rel(dev2, REL_Y, | ||
11827 | - packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); | ||
11828 | - | ||
11829 | - input_sync(dev2); | ||
11830 | -} | ||
11831 | - | ||
11832 | -static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) | ||
11833 | -{ | ||
11834 | - struct alps_data *priv = psmouse->private; | ||
11835 | - | ||
11836 | - if (psmouse->pktcnt < 6) | ||
11837 | - return PSMOUSE_GOOD_DATA; | ||
11838 | - | ||
11839 | - if (psmouse->pktcnt == 6) { | ||
11840 | - /* | ||
11841 | - * Start a timer to flush the packet if it ends up last | ||
11842 | - * 6-byte packet in the stream. Timer needs to fire | ||
11843 | - * psmouse core times out itself. 20 ms should be enough | ||
11844 | - * to decide if we are getting more data or not. | ||
11845 | - */ | ||
11846 | - mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20)); | ||
11847 | - return PSMOUSE_GOOD_DATA; | ||
11848 | - } | ||
11849 | - | ||
11850 | - del_timer(&priv->timer); | ||
11851 | - | ||
11852 | - if (psmouse->packet[6] & 0x80) { | ||
11853 | - | ||
11854 | - /* | ||
11855 | - * Highest bit is set - that means we either had | ||
11856 | - * complete ALPS packet and this is start of the | ||
11857 | - * next packet or we got garbage. | ||
11858 | - */ | ||
11859 | - | ||
11860 | - if (((psmouse->packet[3] | | ||
11861 | - psmouse->packet[4] | | ||
11862 | - psmouse->packet[5]) & 0x80) || | ||
11863 | - (!alps_is_valid_first_byte(priv->i, psmouse->packet[6]))) { | ||
11864 | - dbg("refusing packet %x %x %x %x " | ||
11865 | - "(suspected interleaved ps/2)\n", | ||
11866 | - psmouse->packet[3], psmouse->packet[4], | ||
11867 | - psmouse->packet[5], psmouse->packet[6]); | ||
11868 | - return PSMOUSE_BAD_DATA; | ||
11869 | - } | ||
11870 | - | ||
11871 | - alps_process_packet(psmouse); | ||
11872 | - | ||
11873 | - /* Continue with the next packet */ | ||
11874 | - psmouse->packet[0] = psmouse->packet[6]; | ||
11875 | - psmouse->pktcnt = 1; | ||
11876 | - | ||
11877 | - } else { | ||
11878 | - | ||
11879 | - /* | ||
11880 | - * High bit is 0 - that means that we indeed got a PS/2 | ||
11881 | - * packet in the middle of ALPS packet. | ||
11882 | - * | ||
11883 | - * There is also possibility that we got 6-byte ALPS | ||
11884 | - * packet followed by 3-byte packet from trackpoint. We | ||
11885 | - * can not distinguish between these 2 scenarios but | ||
11886 | - * becase the latter is unlikely to happen in course of | ||
11887 | - * normal operation (user would need to press all | ||
11888 | - * buttons on the pad and start moving trackpoint | ||
11889 | - * without touching the pad surface) we assume former. | ||
11890 | - * Even if we are wrong the wost thing that would happen | ||
11891 | - * the cursor would jump but we should not get protocol | ||
11892 | - * desynchronization. | ||
11893 | - */ | ||
11894 | - | ||
11895 | - alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], | ||
11896 | - false); | ||
11897 | - | ||
11898 | - /* | ||
11899 | - * Continue with the standard ALPS protocol handling, | ||
11900 | - * but make sure we won't process it as an interleaved | ||
11901 | - * packet again, which may happen if all buttons are | ||
11902 | - * pressed. To avoid this let's reset the 4th bit which | ||
11903 | - * is normally 1. | ||
11904 | - */ | ||
11905 | - psmouse->packet[3] = psmouse->packet[6] & 0xf7; | ||
11906 | - psmouse->pktcnt = 4; | ||
11907 | - } | ||
11908 | - | ||
11909 | - return PSMOUSE_GOOD_DATA; | ||
11910 | -} | ||
11911 | - | ||
11912 | -static void alps_flush_packet(unsigned long data) | ||
11913 | -{ | ||
11914 | - struct psmouse *psmouse = (struct psmouse *)data; | ||
11915 | - | ||
11916 | - serio_pause_rx(psmouse->ps2dev.serio); | ||
11917 | - | ||
11918 | - if (psmouse->pktcnt == 6) { | ||
11919 | - | ||
11920 | - /* | ||
11921 | - * We did not any more data in reasonable amount of time. | ||
11922 | - * Validate the last 3 bytes and process as a standard | ||
11923 | - * ALPS packet. | ||
11924 | - */ | ||
11925 | - if ((psmouse->packet[3] | | ||
11926 | - psmouse->packet[4] | | ||
11927 | - psmouse->packet[5]) & 0x80) { | ||
11928 | - dbg("refusing packet %x %x %x " | ||
11929 | - "(suspected interleaved ps/2)\n", | ||
11930 | - psmouse->packet[3], psmouse->packet[4], | ||
11931 | - psmouse->packet[5]); | ||
11932 | - } else { | ||
11933 | - alps_process_packet(psmouse); | ||
11934 | - } | ||
11935 | - psmouse->pktcnt = 0; | ||
11936 | - } | ||
11937 | - | ||
11938 | - serio_continue_rx(psmouse->ps2dev.serio); | ||
11939 | -} | ||
11940 | - | ||
11941 | static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) | ||
11942 | { | ||
11943 | struct alps_data *priv = psmouse->private; | ||
11944 | - const struct alps_model_info *model = priv->i; | ||
11945 | |||
11946 | if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ | ||
11947 | if (psmouse->pktcnt == 3) { | ||
11948 | - alps_report_bare_ps2_packet(psmouse, psmouse->packet, | ||
11949 | - true); | ||
11950 | + alps_process_packet(psmouse); | ||
11951 | return PSMOUSE_FULL_PACKET; | ||
11952 | } | ||
11953 | return PSMOUSE_GOOD_DATA; | ||
11954 | } | ||
11955 | |||
11956 | - /* Check for PS/2 packet stuffed in the middle of ALPS packet. */ | ||
11957 | - | ||
11958 | - if ((model->flags & ALPS_PS2_INTERLEAVED) && | ||
11959 | - psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) { | ||
11960 | - return alps_handle_interleaved_ps2(psmouse); | ||
11961 | - } | ||
11962 | - | ||
11963 | - if (!alps_is_valid_first_byte(model, psmouse->packet[0])) { | ||
11964 | - dbg("refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n", | ||
11965 | - psmouse->packet[0], model->mask0, model->byte0); | ||
11966 | + if ((psmouse->packet[0] & priv->i->mask0) != priv->i->byte0) | ||
11967 | return PSMOUSE_BAD_DATA; | ||
11968 | - } | ||
11969 | |||
11970 | /* Bytes 2 - 6 should have 0 in the highest bit */ | ||
11971 | if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 && | ||
11972 | - (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { | ||
11973 | - dbg("refusing packet[%i] = %x\n", | ||
11974 | - psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); | ||
11975 | + (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) | ||
11976 | return PSMOUSE_BAD_DATA; | ||
11977 | - } | ||
11978 | |||
11979 | if (psmouse->pktcnt == 6) { | ||
11980 | alps_process_packet(psmouse); | ||
11981 | @@ -644,7 +445,6 @@ static void alps_disconnect(struct psmouse *psmouse) | ||
11982 | struct alps_data *priv = psmouse->private; | ||
11983 | |||
11984 | psmouse_reset(psmouse); | ||
11985 | - del_timer_sync(&priv->timer); | ||
11986 | input_unregister_device(priv->dev2); | ||
11987 | kfree(priv); | ||
11988 | } | ||
11989 | @@ -661,8 +461,6 @@ int alps_init(struct psmouse *psmouse) | ||
11990 | goto init_fail; | ||
11991 | |||
11992 | priv->dev2 = dev2; | ||
11993 | - setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); | ||
11994 | - | ||
11995 | psmouse->private = priv; | ||
11996 | |||
11997 | if (alps_hw_init(psmouse, &version)) | ||
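Much of the alps.c churn above moves the decoding of bare 3-byte PS/2 packets (from the pass-through stick) between helpers; the sign handling itself is unchanged. The small, self-contained C sketch below mirrors the expressions visible in the hunk, where byte 0 carries the button bits and the X/Y sign bits. The packet value in main() is made up for illustration, and dy matches what the hunk feeds to REL_Y (screen orientation).

#include <stdio.h>

/* byte 0: 0 0 YSGN XSGN 1 M R L; bytes 1/2 are the X/Y magnitudes */
static void demo_decode_ps2(const unsigned char packet[3],
			    int *dx, int *dy, int *buttons)
{
	*buttons = packet[0] & 0x07;	/* left, right, middle */
	/* fold the sign bits from byte 0 into 9-bit signed deltas */
	*dx = packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0;
	*dy = packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0;
}

int main(void)
{
	unsigned char pkt[3] = { 0x19, 0xf0, 0x05 }; /* left button, X negative */
	int dx, dy, buttons;

	demo_decode_ps2(pkt, &dx, &dy, &buttons);
	printf("dx=%d dy=%d buttons=%#x\n", dx, dy, buttons);
	return 0;
}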
11998 | diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h | ||
11999 | index 904ed8b..bc87936 100644 | ||
12000 | --- a/drivers/input/mouse/alps.h | ||
12001 | +++ b/drivers/input/mouse/alps.h | ||
12002 | @@ -23,7 +23,6 @@ struct alps_data { | ||
12003 | char phys[32]; /* Phys */ | ||
12004 | const struct alps_model_info *i;/* Info */ | ||
12005 | int prev_fin; /* Finger bit from previous packet */ | ||
12006 | - struct timer_list timer; | ||
12007 | }; | ||
12008 | |||
12009 | #ifdef CONFIG_MOUSE_PS2_ALPS | ||
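The alps.h hunk drops the timer_list member that the removed alps_flush_packet()/interleaved-packet handling used. For reference, a sketch of the 2.6.32-era timer API that code relied on (setup_timer/mod_timer/del_timer_sync); the demo_* structure and callback are placeholders:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_priv {
	struct timer_list timer;
};

/* callback signature required by the 2.6.32 timer API */
static void demo_flush(unsigned long data)
{
	struct demo_priv *priv = (struct demo_priv *)data;

	/* ... flush whatever was buffered for priv ... */
	(void)priv;
}

static void demo_arm(struct demo_priv *priv)
{
	setup_timer(&priv->timer, demo_flush, (unsigned long)priv);
	/* fire in 20 ms unless re-armed or deleted first */
	mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
}

static void demo_teardown(struct demo_priv *priv)
{
	del_timer_sync(&priv->timer);	/* also waits for a running callback */
}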
12010 | diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c | ||
12011 | index 0876d82..07c5379 100644 | ||
12012 | --- a/drivers/input/mouse/psmouse-base.c | ||
12013 | +++ b/drivers/input/mouse/psmouse-base.c | ||
12014 | @@ -667,6 +667,19 @@ static int psmouse_extensions(struct psmouse *psmouse, | ||
12015 | max_proto = PSMOUSE_IMEX; | ||
12016 | } | ||
12017 | |||
12018 | +/* | ||
12019 | + * Try Finger Sensing Pad | ||
12020 | + */ | ||
12021 | + if (max_proto > PSMOUSE_IMEX) { | ||
12022 | + if (fsp_detect(psmouse, set_properties) == 0) { | ||
12023 | + if (!set_properties || fsp_init(psmouse) == 0) | ||
12024 | + return PSMOUSE_FSP; | ||
12025 | +/* | ||
12026 | + * Init failed, try basic relative protocols | ||
12027 | + */ | ||
12028 | + max_proto = PSMOUSE_IMEX; | ||
12029 | + } | ||
12030 | + } | ||
12031 | |||
12032 | if (max_proto > PSMOUSE_IMEX) { | ||
12033 | if (genius_detect(psmouse, set_properties) == 0) | ||
12034 | @@ -683,21 +696,6 @@ static int psmouse_extensions(struct psmouse *psmouse, | ||
12035 | } | ||
12036 | |||
12037 | /* | ||
12038 | - * Try Finger Sensing Pad. We do it here because its probe upsets | ||
12039 | - * Trackpoint devices (causing TP_READ_ID command to time out). | ||
12040 | - */ | ||
12041 | - if (max_proto > PSMOUSE_IMEX) { | ||
12042 | - if (fsp_detect(psmouse, set_properties) == 0) { | ||
12043 | - if (!set_properties || fsp_init(psmouse) == 0) | ||
12044 | - return PSMOUSE_FSP; | ||
12045 | -/* | ||
12046 | - * Init failed, try basic relative protocols | ||
12047 | - */ | ||
12048 | - max_proto = PSMOUSE_IMEX; | ||
12049 | - } | ||
12050 | - } | ||
12051 | - | ||
12052 | -/* | ||
12053 | * Reset to defaults in case the device got confused by extended | ||
12054 | * protocol probes. Note that we follow up with full reset because | ||
12055 | * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS. | ||
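The psmouse-base hunks only move the Finger Sensing Pad probe within the detection cascade (upstream pushed it late because its probe upsets Trackpoint devices). The plain-C sketch below shows the general shape of such a cascade under assumed demo_* names; it does not model the hardware interaction that motivated the reordering.

#include <stdbool.h>
#include <stdio.h>

enum demo_proto { DEMO_PS2 = 0, DEMO_IMPS, DEMO_IMEX, DEMO_FANCY, DEMO_MAX };

struct demo_detector {
	enum demo_proto proto;	/* protocol this detector would enable */
	bool (*detect)(void);
	bool (*init)(void);
};

static bool fancy_detect(void) { return true; }
static bool fancy_init(void)   { return false; }	/* pretend init fails */
static bool imps_detect(void)  { return true; }
static bool imps_init(void)    { return true; }

static enum demo_proto demo_probe(const struct demo_detector *d, int n,
				  enum demo_proto max_proto)
{
	int i;

	for (i = 0; i < n; i++) {
		if (d[i].proto >= max_proto)
			continue;		/* not allowed to go that fancy */
		if (!d[i].detect())
			continue;
		if (d[i].init())
			return d[i].proto;	/* detector wins outright */
		max_proto = DEMO_IMEX;		/* init failed: fall back */
	}
	return DEMO_PS2;
}

int main(void)
{
	const struct demo_detector detectors[] = {
		{ DEMO_FANCY, fancy_detect, fancy_init },
		{ DEMO_IMPS,  imps_detect,  imps_init  },
	};

	printf("chosen protocol: %d\n", demo_probe(detectors, 2, DEMO_MAX));
	return 0;
}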
12056 | diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h | ||
12057 | index 2a5982e..2bcf1ac 100644 | ||
12058 | --- a/drivers/input/serio/i8042-x86ia64io.h | ||
12059 | +++ b/drivers/input/serio/i8042-x86ia64io.h | ||
12060 | @@ -67,12 +67,10 @@ static inline void i8042_write_command(int val) | ||
12061 | |||
12062 | #include <linux/dmi.h> | ||
12063 | |||
12064 | -static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12065 | +static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { | ||
12066 | { | ||
12067 | - /* | ||
12068 | - * Arima-Rioworks HDAMB - | ||
12069 | - * AUX LOOP command does not raise AUX IRQ | ||
12070 | - */ | ||
12071 | + /* AUX LOOP command does not raise AUX IRQ */ | ||
12072 | + .ident = "Arima-Rioworks HDAMB", | ||
12073 | .matches = { | ||
12074 | DMI_MATCH(DMI_BOARD_VENDOR, "RIOWORKS"), | ||
12075 | DMI_MATCH(DMI_BOARD_NAME, "HDAMB"), | ||
12076 | @@ -80,7 +78,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12077 | }, | ||
12078 | }, | ||
12079 | { | ||
12080 | - /* ASUS G1S */ | ||
12081 | + .ident = "ASUS G1S", | ||
12082 | .matches = { | ||
12083 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc."), | ||
12084 | DMI_MATCH(DMI_BOARD_NAME, "G1S"), | ||
12085 | @@ -88,7 +86,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12086 | }, | ||
12087 | }, | ||
12088 | { | ||
12089 | - /* ASUS P65UP5 - AUX LOOP command does not raise AUX IRQ */ | ||
12090 | + /* AUX LOOP command does not raise AUX IRQ */ | ||
12091 | + .ident = "ASUS P65UP5", | ||
12092 | .matches = { | ||
12093 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
12094 | DMI_MATCH(DMI_BOARD_NAME, "P/I-P65UP5"), | ||
12095 | @@ -96,6 +95,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12096 | }, | ||
12097 | }, | ||
12098 | { | ||
12099 | + .ident = "Compaq Proliant 8500", | ||
12100 | .matches = { | ||
12101 | DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), | ||
12102 | DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), | ||
12103 | @@ -103,6 +103,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12104 | }, | ||
12105 | }, | ||
12106 | { | ||
12107 | + .ident = "Compaq Proliant DL760", | ||
12108 | .matches = { | ||
12109 | DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), | ||
12110 | DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), | ||
12111 | @@ -110,7 +111,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12112 | }, | ||
12113 | }, | ||
12114 | { | ||
12115 | - /* OQO Model 01 */ | ||
12116 | + .ident = "OQO Model 01", | ||
12117 | .matches = { | ||
12118 | DMI_MATCH(DMI_SYS_VENDOR, "OQO"), | ||
12119 | DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), | ||
12120 | @@ -118,7 +119,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12121 | }, | ||
12122 | }, | ||
12123 | { | ||
12124 | - /* ULI EV4873 - AUX LOOP does not work properly */ | ||
12125 | + /* AUX LOOP does not work properly */ | ||
12126 | + .ident = "ULI EV4873", | ||
12127 | .matches = { | ||
12128 | DMI_MATCH(DMI_SYS_VENDOR, "ULI"), | ||
12129 | DMI_MATCH(DMI_PRODUCT_NAME, "EV4873"), | ||
12130 | @@ -126,7 +128,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12131 | }, | ||
12132 | }, | ||
12133 | { | ||
12134 | - /* Microsoft Virtual Machine */ | ||
12135 | + .ident = "Microsoft Virtual Machine", | ||
12136 | .matches = { | ||
12137 | DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), | ||
12138 | DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"), | ||
12139 | @@ -134,7 +136,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12140 | }, | ||
12141 | }, | ||
12142 | { | ||
12143 | - /* Medion MAM 2070 */ | ||
12144 | + .ident = "Medion MAM 2070", | ||
12145 | .matches = { | ||
12146 | DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), | ||
12147 | DMI_MATCH(DMI_PRODUCT_NAME, "MAM 2070"), | ||
12148 | @@ -142,7 +144,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12149 | }, | ||
12150 | }, | ||
12151 | { | ||
12152 | - /* Blue FB5601 */ | ||
12153 | + .ident = "Blue FB5601", | ||
12154 | .matches = { | ||
12155 | DMI_MATCH(DMI_SYS_VENDOR, "blue"), | ||
12156 | DMI_MATCH(DMI_PRODUCT_NAME, "FB5601"), | ||
12157 | @@ -150,7 +152,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12158 | }, | ||
12159 | }, | ||
12160 | { | ||
12161 | - /* Gigabyte M912 */ | ||
12162 | + .ident = "Gigabyte M912", | ||
12163 | .matches = { | ||
12164 | DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), | ||
12165 | DMI_MATCH(DMI_PRODUCT_NAME, "M912"), | ||
12166 | @@ -158,14 +160,7 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12167 | }, | ||
12168 | }, | ||
12169 | { | ||
12170 | - /* Gigabyte M1022M netbook */ | ||
12171 | - .matches = { | ||
12172 | - DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co.,Ltd."), | ||
12173 | - DMI_MATCH(DMI_BOARD_NAME, "M1022E"), | ||
12174 | - DMI_MATCH(DMI_BOARD_VERSION, "1.02"), | ||
12175 | - }, | ||
12176 | - }, | ||
12177 | - { | ||
12178 | + .ident = "HP DV9700", | ||
12179 | .matches = { | ||
12180 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
12181 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), | ||
12182 | @@ -182,72 +177,72 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | ||
12183 | * ... apparently some Toshibas don't like MUX mode either and | ||
12184 | * die horrible death on reboot. | ||
12185 | */ | ||
12186 | -static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12187 | +static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { | ||
12188 | { | ||
12189 | - /* Fujitsu Lifebook P7010/P7010D */ | ||
12190 | + .ident = "Fujitsu Lifebook P7010/P7010D", | ||
12191 | .matches = { | ||
12192 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
12193 | DMI_MATCH(DMI_PRODUCT_NAME, "P7010"), | ||
12194 | }, | ||
12195 | }, | ||
12196 | { | ||
12197 | - /* Fujitsu Lifebook P7010 */ | ||
12198 | + .ident = "Fujitsu Lifebook P7010", | ||
12199 | .matches = { | ||
12200 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
12201 | DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"), | ||
12202 | }, | ||
12203 | }, | ||
12204 | { | ||
12205 | - /* Fujitsu Lifebook P5020D */ | ||
12206 | + .ident = "Fujitsu Lifebook P5020D", | ||
12207 | .matches = { | ||
12208 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
12209 | DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"), | ||
12210 | }, | ||
12211 | }, | ||
12212 | { | ||
12213 | - /* Fujitsu Lifebook S2000 */ | ||
12214 | + .ident = "Fujitsu Lifebook S2000", | ||
12215 | .matches = { | ||
12216 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
12217 | DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"), | ||
12218 | }, | ||
12219 | }, | ||
12220 | { | ||
12221 | - /* Fujitsu Lifebook S6230 */ | ||
12222 | + .ident = "Fujitsu Lifebook S6230", | ||
12223 | .matches = { | ||
12224 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
12225 | DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"), | ||
12226 | }, | ||
12227 | }, | ||
12228 | { | ||
12229 | - /* Fujitsu T70H */ | ||
12230 | + .ident = "Fujitsu T70H", | ||
12231 | .matches = { | ||
12232 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
12233 | DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"), | ||
12234 | }, | ||
12235 | }, | ||
12236 | { | ||
12237 | - /* Fujitsu-Siemens Lifebook T3010 */ | ||
12238 | + .ident = "Fujitsu-Siemens Lifebook T3010", | ||
12239 | .matches = { | ||
12240 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
12241 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"), | ||
12242 | }, | ||
12243 | }, | ||
12244 | { | ||
12245 | - /* Fujitsu-Siemens Lifebook E4010 */ | ||
12246 | + .ident = "Fujitsu-Siemens Lifebook E4010", | ||
12247 | .matches = { | ||
12248 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
12249 | DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"), | ||
12250 | }, | ||
12251 | }, | ||
12252 | { | ||
12253 | - /* Fujitsu-Siemens Amilo Pro 2010 */ | ||
12254 | + .ident = "Fujitsu-Siemens Amilo Pro 2010", | ||
12255 | .matches = { | ||
12256 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
12257 | DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"), | ||
12258 | }, | ||
12259 | }, | ||
12260 | { | ||
12261 | - /* Fujitsu-Siemens Amilo Pro 2030 */ | ||
12262 | + .ident = "Fujitsu-Siemens Amilo Pro 2030", | ||
12263 | .matches = { | ||
12264 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
12265 | DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"), | ||
12266 | @@ -258,7 +253,7 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12267 | * No data is coming from the touchscreen unless KBC | ||
12268 | * is in legacy mode. | ||
12269 | */ | ||
12270 | - /* Panasonic CF-29 */ | ||
12271 | + .ident = "Panasonic CF-29", | ||
12272 | .matches = { | ||
12273 | DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), | ||
12274 | DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"), | ||
12275 | @@ -266,10 +261,10 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12276 | }, | ||
12277 | { | ||
12278 | /* | ||
12279 | - * HP Pavilion DV4017EA - | ||
12280 | - * errors on MUX ports are reported without raising AUXDATA | ||
12281 | + * Errors on MUX ports are reported without raising AUXDATA | ||
12282 | * causing "spurious NAK" messages. | ||
12283 | */ | ||
12284 | + .ident = "HP Pavilion DV4017EA", | ||
12285 | .matches = { | ||
12286 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
12287 | DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"), | ||
12288 | @@ -277,9 +272,9 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12289 | }, | ||
12290 | { | ||
12291 | /* | ||
12292 | - * HP Pavilion ZT1000 - | ||
12293 | - * like DV4017EA does not raise AUXERR for errors on MUX ports. | ||
12294 | + * Like DV4017EA does not raise AUXERR for errors on MUX ports. | ||
12295 | */ | ||
12296 | + .ident = "HP Pavilion ZT1000", | ||
12297 | .matches = { | ||
12298 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
12299 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"), | ||
12300 | @@ -288,41 +283,44 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12301 | }, | ||
12302 | { | ||
12303 | /* | ||
12304 | - * HP Pavilion DV4270ca - | ||
12305 | - * like DV4017EA does not raise AUXERR for errors on MUX ports. | ||
12306 | + * Like DV4017EA does not raise AUXERR for errors on MUX ports. | ||
12307 | */ | ||
12308 | + .ident = "HP Pavilion DV4270ca", | ||
12309 | .matches = { | ||
12310 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
12311 | DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"), | ||
12312 | }, | ||
12313 | }, | ||
12314 | { | ||
12315 | + .ident = "Toshiba P10", | ||
12316 | .matches = { | ||
12317 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
12318 | DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"), | ||
12319 | }, | ||
12320 | }, | ||
12321 | { | ||
12322 | + .ident = "Toshiba Equium A110", | ||
12323 | .matches = { | ||
12324 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | ||
12325 | DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), | ||
12326 | }, | ||
12327 | }, | ||
12328 | { | ||
12329 | + .ident = "Alienware Sentia", | ||
12330 | .matches = { | ||
12331 | DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), | ||
12332 | DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"), | ||
12333 | }, | ||
12334 | }, | ||
12335 | { | ||
12336 | - /* Sharp Actius MM20 */ | ||
12337 | + .ident = "Sharp Actius MM20", | ||
12338 | .matches = { | ||
12339 | DMI_MATCH(DMI_SYS_VENDOR, "SHARP"), | ||
12340 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"), | ||
12341 | }, | ||
12342 | }, | ||
12343 | { | ||
12344 | - /* Sony Vaio FS-115b */ | ||
12345 | + .ident = "Sony Vaio FS-115b", | ||
12346 | .matches = { | ||
12347 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
12348 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"), | ||
12349 | @@ -330,72 +328,73 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12350 | }, | ||
12351 | { | ||
12352 | /* | ||
12353 | - * Sony Vaio FZ-240E - | ||
12354 | - * reset and GET ID commands issued via KBD port are | ||
12355 | + * Reset and GET ID commands issued via KBD port are | ||
12356 | * sometimes being delivered to AUX3. | ||
12357 | */ | ||
12358 | + .ident = "Sony Vaio FZ-240E", | ||
12359 | .matches = { | ||
12360 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
12361 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), | ||
12362 | }, | ||
12363 | }, | ||
12364 | { | ||
12365 | - /* Amoi M636/A737 */ | ||
12366 | + .ident = "Amoi M636/A737", | ||
12367 | .matches = { | ||
12368 | DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), | ||
12369 | DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"), | ||
12370 | }, | ||
12371 | }, | ||
12372 | { | ||
12373 | - /* Lenovo 3000 n100 */ | ||
12374 | + .ident = "Lenovo 3000 n100", | ||
12375 | .matches = { | ||
12376 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
12377 | DMI_MATCH(DMI_PRODUCT_NAME, "076804U"), | ||
12378 | }, | ||
12379 | }, | ||
12380 | { | ||
12381 | + .ident = "Acer Aspire 1360", | ||
12382 | .matches = { | ||
12383 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12384 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), | ||
12385 | }, | ||
12386 | }, | ||
12387 | { | ||
12388 | - /* Gericom Bellagio */ | ||
12389 | + .ident = "Gericom Bellagio", | ||
12390 | .matches = { | ||
12391 | DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), | ||
12392 | DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"), | ||
12393 | }, | ||
12394 | }, | ||
12395 | { | ||
12396 | - /* IBM 2656 */ | ||
12397 | + .ident = "IBM 2656", | ||
12398 | .matches = { | ||
12399 | DMI_MATCH(DMI_SYS_VENDOR, "IBM"), | ||
12400 | DMI_MATCH(DMI_PRODUCT_NAME, "2656"), | ||
12401 | }, | ||
12402 | }, | ||
12403 | { | ||
12404 | - /* Dell XPS M1530 */ | ||
12405 | + .ident = "Dell XPS M1530", | ||
12406 | .matches = { | ||
12407 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
12408 | DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"), | ||
12409 | }, | ||
12410 | }, | ||
12411 | { | ||
12412 | - /* Compal HEL80I */ | ||
12413 | + .ident = "Compal HEL80I", | ||
12414 | .matches = { | ||
12415 | DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"), | ||
12416 | DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"), | ||
12417 | }, | ||
12418 | }, | ||
12419 | { | ||
12420 | - /* Dell Vostro 1510 */ | ||
12421 | + .ident = "Dell Vostro 1510", | ||
12422 | .matches = { | ||
12423 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
12424 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), | ||
12425 | }, | ||
12426 | }, | ||
12427 | { | ||
12428 | - /* Acer Aspire 5536 */ | ||
12429 | + .ident = "Acer Aspire 5536", | ||
12430 | .matches = { | ||
12431 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12432 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"), | ||
12433 | @@ -405,65 +404,65 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | ||
12434 | { } | ||
12435 | }; | ||
12436 | |||
12437 | -static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { | ||
12438 | +static struct dmi_system_id __initdata i8042_dmi_reset_table[] = { | ||
12439 | { | ||
12440 | - /* MSI Wind U-100 */ | ||
12441 | + .ident = "MSI Wind U-100", | ||
12442 | .matches = { | ||
12443 | DMI_MATCH(DMI_BOARD_NAME, "U-100"), | ||
12444 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
12445 | }, | ||
12446 | }, | ||
12447 | { | ||
12448 | - /* LG Electronics X110 */ | ||
12449 | + .ident = "LG Electronics X110", | ||
12450 | .matches = { | ||
12451 | DMI_MATCH(DMI_BOARD_NAME, "X110"), | ||
12452 | DMI_MATCH(DMI_BOARD_VENDOR, "LG Electronics Inc."), | ||
12453 | }, | ||
12454 | }, | ||
12455 | { | ||
12456 | - /* Acer Aspire One 150 */ | ||
12457 | + .ident = "Acer Aspire One 150", | ||
12458 | .matches = { | ||
12459 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12460 | DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), | ||
12461 | }, | ||
12462 | }, | ||
12463 | { | ||
12464 | - /* Advent 4211 */ | ||
12465 | + .ident = "Advent 4211", | ||
12466 | .matches = { | ||
12467 | DMI_MATCH(DMI_SYS_VENDOR, "DIXONSXP"), | ||
12468 | DMI_MATCH(DMI_PRODUCT_NAME, "Advent 4211"), | ||
12469 | }, | ||
12470 | }, | ||
12471 | { | ||
12472 | - /* Medion Akoya Mini E1210 */ | ||
12473 | + .ident = "Medion Akoya Mini E1210", | ||
12474 | .matches = { | ||
12475 | DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), | ||
12476 | DMI_MATCH(DMI_PRODUCT_NAME, "E1210"), | ||
12477 | }, | ||
12478 | }, | ||
12479 | { | ||
12480 | - /* Mivvy M310 */ | ||
12481 | + .ident = "Mivvy M310", | ||
12482 | .matches = { | ||
12483 | DMI_MATCH(DMI_SYS_VENDOR, "VIOOO"), | ||
12484 | DMI_MATCH(DMI_PRODUCT_NAME, "N10"), | ||
12485 | }, | ||
12486 | }, | ||
12487 | { | ||
12488 | - /* Dell Vostro 1320 */ | ||
12489 | + .ident = "Dell Vostro 1320", | ||
12490 | .matches = { | ||
12491 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
12492 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"), | ||
12493 | }, | ||
12494 | }, | ||
12495 | { | ||
12496 | - /* Dell Vostro 1520 */ | ||
12497 | + .ident = "Dell Vostro 1520", | ||
12498 | .matches = { | ||
12499 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
12500 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"), | ||
12501 | }, | ||
12502 | }, | ||
12503 | { | ||
12504 | - /* Dell Vostro 1720 */ | ||
12505 | + .ident = "Dell Vostro 1720", | ||
12506 | .matches = { | ||
12507 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
12508 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), | ||
12509 | @@ -473,16 +472,16 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { | ||
12510 | }; | ||
12511 | |||
12512 | #ifdef CONFIG_PNP | ||
12513 | -static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { | ||
12514 | +static struct dmi_system_id __initdata i8042_dmi_nopnp_table[] = { | ||
12515 | { | ||
12516 | - /* Intel MBO Desktop D845PESV */ | ||
12517 | + .ident = "Intel MBO Desktop D845PESV", | ||
12518 | .matches = { | ||
12519 | DMI_MATCH(DMI_BOARD_NAME, "D845PESV"), | ||
12520 | DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), | ||
12521 | }, | ||
12522 | }, | ||
12523 | { | ||
12524 | - /* MSI Wind U-100 */ | ||
12525 | + .ident = "MSI Wind U-100", | ||
12526 | .matches = { | ||
12527 | DMI_MATCH(DMI_BOARD_NAME, "U-100"), | ||
12528 | DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), | ||
12529 | @@ -491,23 +490,27 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { | ||
12530 | { } | ||
12531 | }; | ||
12532 | |||
12533 | -static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { | ||
12534 | +static struct dmi_system_id __initdata i8042_dmi_laptop_table[] = { | ||
12535 | { | ||
12536 | + .ident = "Portable", | ||
12537 | .matches = { | ||
12538 | DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ | ||
12539 | }, | ||
12540 | }, | ||
12541 | { | ||
12542 | + .ident = "Laptop", | ||
12543 | .matches = { | ||
12544 | DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /* Laptop */ | ||
12545 | }, | ||
12546 | }, | ||
12547 | { | ||
12548 | + .ident = "Notebook", | ||
12549 | .matches = { | ||
12550 | DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */ | ||
12551 | }, | ||
12552 | }, | ||
12553 | { | ||
12554 | + .ident = "Sub-Notebook", | ||
12555 | .matches = { | ||
12556 | DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ | ||
12557 | }, | ||
12558 | @@ -522,65 +525,58 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { | ||
12559 | * Originally, this was just confined to older laptops, but a few Acer laptops | ||
12560 | * have turned up in 2007 that also need this again. | ||
12561 | */ | ||
12562 | -static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { | ||
12563 | - { | ||
12564 | - /* Acer Aspire 5610 */ | ||
12565 | - .matches = { | ||
12566 | - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12567 | - DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), | ||
12568 | - }, | ||
12569 | - }, | ||
12570 | +static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = { | ||
12571 | { | ||
12572 | - /* Acer Aspire 5630 */ | ||
12573 | + .ident = "Acer Aspire 5630", | ||
12574 | .matches = { | ||
12575 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12576 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), | ||
12577 | }, | ||
12578 | }, | ||
12579 | { | ||
12580 | - /* Acer Aspire 5650 */ | ||
12581 | + .ident = "Acer Aspire 5650", | ||
12582 | .matches = { | ||
12583 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12584 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), | ||
12585 | }, | ||
12586 | }, | ||
12587 | { | ||
12588 | - /* Acer Aspire 5680 */ | ||
12589 | + .ident = "Acer Aspire 5680", | ||
12590 | .matches = { | ||
12591 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12592 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), | ||
12593 | }, | ||
12594 | }, | ||
12595 | { | ||
12596 | - /* Acer Aspire 5720 */ | ||
12597 | + .ident = "Acer Aspire 5720", | ||
12598 | .matches = { | ||
12599 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12600 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5720"), | ||
12601 | }, | ||
12602 | }, | ||
12603 | { | ||
12604 | - /* Acer Aspire 9110 */ | ||
12605 | + .ident = "Acer Aspire 9110", | ||
12606 | .matches = { | ||
12607 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), | ||
12609 | }, | ||
12610 | }, | ||
12611 | { | ||
12612 | - /* Acer TravelMate 660 */ | ||
12613 | + .ident = "Acer TravelMate 660", | ||
12614 | .matches = { | ||
12615 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12616 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 660"), | ||
12617 | }, | ||
12618 | }, | ||
12619 | { | ||
12620 | - /* Acer TravelMate 2490 */ | ||
12621 | + .ident = "Acer TravelMate 2490", | ||
12622 | .matches = { | ||
12623 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12624 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), | ||
12625 | }, | ||
12626 | }, | ||
12627 | { | ||
12628 | - /* Acer TravelMate 4280 */ | ||
12629 | + .ident = "Acer TravelMate 4280", | ||
12630 | .matches = { | ||
12631 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
12632 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), | ||
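The long i8042 hunk above switches the quirk tables between const/__initconst and __initdata and adds or removes .ident strings, but the lookup mechanism is the same either way. A minimal sketch of declaring and consulting such a table, with placeholder board names:

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/types.h>

static const struct dmi_system_id __initconst demo_quirk_table[] = {
	{
		.ident = "Example Vendor Example Board",	/* placeholder */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }	/* terminating empty entry */
};

static bool demo_needs_quirk;

static int __init demo_check_quirks(void)
{
	/* dmi_check_system() returns the number of matching entries */
	demo_needs_quirk = dmi_check_system(demo_quirk_table) > 0;
	return 0;
}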
12633 | diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c | ||
12634 | index ede4658..951c57b 100644 | ||
12635 | --- a/drivers/lguest/segments.c | ||
12636 | +++ b/drivers/lguest/segments.c | ||
12637 | @@ -179,10 +179,8 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) | ||
12638 | * We assume the Guest has the same number of GDT entries as the | ||
12639 | * Host, otherwise we'd have to dynamically allocate the Guest GDT. | ||
12640 | */ | ||
12641 | - if (num >= ARRAY_SIZE(cpu->arch.gdt)) { | ||
12642 | + if (num >= ARRAY_SIZE(cpu->arch.gdt)) | ||
12643 | kill_guest(cpu, "too many gdt entries %i", num); | ||
12644 | - return; | ||
12645 | - } | ||
12646 | |||
12647 | /* Set it up, then fix it. */ | ||
12648 | cpu->arch.gdt[num].a = lo; | ||
12649 | diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c | ||
12650 | index 386a797..556f0fe 100644 | ||
12651 | --- a/drivers/macintosh/therm_adt746x.c | ||
12652 | +++ b/drivers/macintosh/therm_adt746x.c | ||
12653 | @@ -79,7 +79,6 @@ struct thermostat { | ||
12654 | u8 limits[3]; | ||
12655 | int last_speed[2]; | ||
12656 | int last_var[2]; | ||
12657 | - int pwm_inv[2]; | ||
12658 | }; | ||
12659 | |||
12660 | static enum {ADT7460, ADT7467} therm_type; | ||
12661 | @@ -230,23 +229,19 @@ static void write_fan_speed(struct thermostat *th, int speed, int fan) | ||
12662 | |||
12663 | if (speed >= 0) { | ||
12664 | manual = read_reg(th, MANUAL_MODE[fan]); | ||
12665 | - manual &= ~INVERT_MASK; | ||
12666 | write_reg(th, MANUAL_MODE[fan], | ||
12667 | - manual | MANUAL_MASK | th->pwm_inv[fan]); | ||
12668 | + (manual|MANUAL_MASK) & (~INVERT_MASK)); | ||
12669 | write_reg(th, FAN_SPD_SET[fan], speed); | ||
12670 | } else { | ||
12671 | /* back to automatic */ | ||
12672 | if(therm_type == ADT7460) { | ||
12673 | manual = read_reg(th, | ||
12674 | MANUAL_MODE[fan]) & (~MANUAL_MASK); | ||
12675 | - manual &= ~INVERT_MASK; | ||
12676 | - manual |= th->pwm_inv[fan]; | ||
12677 | + | ||
12678 | write_reg(th, | ||
12679 | MANUAL_MODE[fan], manual|REM_CONTROL[fan]); | ||
12680 | } else { | ||
12681 | manual = read_reg(th, MANUAL_MODE[fan]); | ||
12682 | - manual &= ~INVERT_MASK; | ||
12683 | - manual |= th->pwm_inv[fan]; | ||
12684 | write_reg(th, MANUAL_MODE[fan], manual&(~AUTO_MASK)); | ||
12685 | } | ||
12686 | } | ||
12687 | @@ -423,10 +418,6 @@ static int probe_thermostat(struct i2c_client *client, | ||
12688 | |||
12689 | thermostat = th; | ||
12690 | |||
12691 | - /* record invert bit status because fw can corrupt it after suspend */ | ||
12692 | - th->pwm_inv[0] = read_reg(th, MANUAL_MODE[0]) & INVERT_MASK; | ||
12693 | - th->pwm_inv[1] = read_reg(th, MANUAL_MODE[1]) & INVERT_MASK; | ||
12694 | - | ||
12695 | /* be sure to really write fan speed the first time */ | ||
12696 | th->last_speed[0] = -2; | ||
12697 | th->last_speed[1] = -2; | ||
12698 | diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c | ||
12699 | index 6c68b9e..961fa0e 100644 | ||
12700 | --- a/drivers/macintosh/windfarm_smu_controls.c | ||
12701 | +++ b/drivers/macintosh/windfarm_smu_controls.c | ||
12702 | @@ -202,8 +202,6 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, | ||
12703 | fct->ctrl.name = "cpu-front-fan-1"; | ||
12704 | else if (!strcmp(l, "CPU A PUMP")) | ||
12705 | fct->ctrl.name = "cpu-pump-0"; | ||
12706 | - else if (!strcmp(l, "CPU B PUMP")) | ||
12707 | - fct->ctrl.name = "cpu-pump-1"; | ||
12708 | else if (!strcmp(l, "Slots Fan") || !strcmp(l, "Slots fan") || | ||
12709 | !strcmp(l, "EXPANSION SLOTS INTAKE")) | ||
12710 | fct->ctrl.name = "slots-fan"; | ||
12711 | diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c | ||
12712 | index a5e5f2f..60e2b32 100644 | ||
12713 | --- a/drivers/md/bitmap.c | ||
12714 | +++ b/drivers/md/bitmap.c | ||
12715 | @@ -1078,31 +1078,23 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | ||
12716 | * out to disk | ||
12717 | */ | ||
12718 | |||
12719 | -void bitmap_daemon_work(mddev_t *mddev) | ||
12720 | +void bitmap_daemon_work(struct bitmap *bitmap) | ||
12721 | { | ||
12722 | - struct bitmap *bitmap; | ||
12723 | unsigned long j; | ||
12724 | unsigned long flags; | ||
12725 | struct page *page = NULL, *lastpage = NULL; | ||
12726 | int blocks; | ||
12727 | void *paddr; | ||
12728 | |||
12729 | - /* Use a mutex to guard daemon_work against | ||
12730 | - * bitmap_destroy. | ||
12731 | - */ | ||
12732 | - mutex_lock(&mddev->bitmap_mutex); | ||
12733 | - bitmap = mddev->bitmap; | ||
12734 | - if (bitmap == NULL) { | ||
12735 | - mutex_unlock(&mddev->bitmap_mutex); | ||
12736 | + if (bitmap == NULL) | ||
12737 | return; | ||
12738 | - } | ||
12739 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) | ||
12740 | goto done; | ||
12741 | |||
12742 | bitmap->daemon_lastrun = jiffies; | ||
12743 | if (bitmap->allclean) { | ||
12744 | bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
12745 | - goto done; | ||
12746 | + return; | ||
12747 | } | ||
12748 | bitmap->allclean = 1; | ||
12749 | |||
12750 | @@ -1211,7 +1203,6 @@ void bitmap_daemon_work(mddev_t *mddev) | ||
12751 | done: | ||
12752 | if (bitmap->allclean == 0) | ||
12753 | bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; | ||
12754 | - mutex_unlock(&mddev->bitmap_mutex); | ||
12755 | } | ||
12756 | |||
12757 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | ||
12758 | @@ -1550,9 +1541,9 @@ void bitmap_flush(mddev_t *mddev) | ||
12759 | */ | ||
12760 | sleep = bitmap->daemon_sleep; | ||
12761 | bitmap->daemon_sleep = 0; | ||
12762 | - bitmap_daemon_work(mddev); | ||
12763 | - bitmap_daemon_work(mddev); | ||
12764 | - bitmap_daemon_work(mddev); | ||
12765 | + bitmap_daemon_work(bitmap); | ||
12766 | + bitmap_daemon_work(bitmap); | ||
12767 | + bitmap_daemon_work(bitmap); | ||
12768 | bitmap->daemon_sleep = sleep; | ||
12769 | bitmap_update_sb(bitmap); | ||
12770 | } | ||
12771 | @@ -1583,7 +1574,6 @@ static void bitmap_free(struct bitmap *bitmap) | ||
12772 | kfree(bp); | ||
12773 | kfree(bitmap); | ||
12774 | } | ||
12775 | - | ||
12776 | void bitmap_destroy(mddev_t *mddev) | ||
12777 | { | ||
12778 | struct bitmap *bitmap = mddev->bitmap; | ||
12779 | @@ -1591,9 +1581,7 @@ void bitmap_destroy(mddev_t *mddev) | ||
12780 | if (!bitmap) /* there was no bitmap */ | ||
12781 | return; | ||
12782 | |||
12783 | - mutex_lock(&mddev->bitmap_mutex); | ||
12784 | mddev->bitmap = NULL; /* disconnect from the md device */ | ||
12785 | - mutex_unlock(&mddev->bitmap_mutex); | ||
12786 | if (mddev->thread) | ||
12787 | mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
12788 | |||
12789 | diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h | ||
12790 | index 7e38d13..e989006 100644 | ||
12791 | --- a/drivers/md/bitmap.h | ||
12792 | +++ b/drivers/md/bitmap.h | ||
12793 | @@ -282,7 +282,7 @@ void bitmap_close_sync(struct bitmap *bitmap); | ||
12794 | void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); | ||
12795 | |||
12796 | void bitmap_unplug(struct bitmap *bitmap); | ||
12797 | -void bitmap_daemon_work(mddev_t *mddev); | ||
12798 | +void bitmap_daemon_work(struct bitmap *bitmap); | ||
12799 | #endif | ||
12800 | |||
12801 | #endif | ||
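The bitmap.c/bitmap.h hunks change bitmap_daemon_work() to take the bitmap (or the owning mddev) directly and add or remove a mutex that guards the worker against a concurrent bitmap_destroy(). A generic sketch of that guard pattern, with illustrative types rather than md's own (the real code uses mddev->bitmap_mutex and mddev->bitmap):

#include <linux/mutex.h>

struct demo_obj {
	int dummy;
};

struct demo_owner {
	struct mutex obj_mutex;		/* initialized elsewhere with mutex_init() */
	struct demo_obj *obj;
};

static void demo_daemon_work(struct demo_owner *owner)
{
	struct demo_obj *obj;

	mutex_lock(&owner->obj_mutex);
	obj = owner->obj;
	if (!obj) {			/* destroyed under us */
		mutex_unlock(&owner->obj_mutex);
		return;
	}
	/* ... periodic work on obj ... */
	mutex_unlock(&owner->obj_mutex);
}

static void demo_destroy(struct demo_owner *owner)
{
	mutex_lock(&owner->obj_mutex);
	owner->obj = NULL;		/* disconnect before freeing */
	mutex_unlock(&owner->obj_mutex);
	/* ... free the object ... */
}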
12802 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c | ||
12803 | index 959d6d1..ed10381 100644 | ||
12804 | --- a/drivers/md/dm-crypt.c | ||
12805 | +++ b/drivers/md/dm-crypt.c | ||
12806 | @@ -1,7 +1,7 @@ | ||
12807 | /* | ||
12808 | * Copyright (C) 2003 Christophe Saout <christophe@saout.de> | ||
12809 | * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> | ||
12810 | - * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. | ||
12811 | + * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved. | ||
12812 | * | ||
12813 | * This file is released under the GPL. | ||
12814 | */ | ||
12815 | @@ -71,21 +71,10 @@ struct crypt_iv_operations { | ||
12816 | int (*ctr)(struct crypt_config *cc, struct dm_target *ti, | ||
12817 | const char *opts); | ||
12818 | void (*dtr)(struct crypt_config *cc); | ||
12819 | - int (*init)(struct crypt_config *cc); | ||
12820 | - int (*wipe)(struct crypt_config *cc); | ||
12821 | + const char *(*status)(struct crypt_config *cc); | ||
12822 | int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector); | ||
12823 | }; | ||
12824 | |||
12825 | -struct iv_essiv_private { | ||
12826 | - struct crypto_cipher *tfm; | ||
12827 | - struct crypto_hash *hash_tfm; | ||
12828 | - u8 *salt; | ||
12829 | -}; | ||
12830 | - | ||
12831 | -struct iv_benbi_private { | ||
12832 | - int shift; | ||
12833 | -}; | ||
12834 | - | ||
12835 | /* | ||
12836 | * Crypt: maps a linear range of a block device | ||
12837 | * and encrypts / decrypts at the same time. | ||
12838 | @@ -113,8 +102,8 @@ struct crypt_config { | ||
12839 | struct crypt_iv_operations *iv_gen_ops; | ||
12840 | char *iv_mode; | ||
12841 | union { | ||
12842 | - struct iv_essiv_private essiv; | ||
12843 | - struct iv_benbi_private benbi; | ||
12844 | + struct crypto_cipher *essiv_tfm; | ||
12845 | + int benbi_shift; | ||
12846 | } iv_gen_private; | ||
12847 | sector_t iv_offset; | ||
12848 | unsigned int iv_size; | ||
12849 | @@ -180,114 +169,88 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | ||
12850 | return 0; | ||
12851 | } | ||
12852 | |||
12853 | -/* Initialise ESSIV - compute salt but no local memory allocations */ | ||
12854 | -static int crypt_iv_essiv_init(struct crypt_config *cc) | ||
12855 | -{ | ||
12856 | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; | ||
12857 | - struct hash_desc desc; | ||
12858 | - struct scatterlist sg; | ||
12859 | - int err; | ||
12860 | - | ||
12861 | - sg_init_one(&sg, cc->key, cc->key_size); | ||
12862 | - desc.tfm = essiv->hash_tfm; | ||
12863 | - desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
12864 | - | ||
12865 | - err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt); | ||
12866 | - if (err) | ||
12867 | - return err; | ||
12868 | - | ||
12869 | - return crypto_cipher_setkey(essiv->tfm, essiv->salt, | ||
12870 | - crypto_hash_digestsize(essiv->hash_tfm)); | ||
12871 | -} | ||
12872 | - | ||
12873 | -/* Wipe salt and reset key derived from volume key */ | ||
12874 | -static int crypt_iv_essiv_wipe(struct crypt_config *cc) | ||
12875 | -{ | ||
12876 | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; | ||
12877 | - unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); | ||
12878 | - | ||
12879 | - memset(essiv->salt, 0, salt_size); | ||
12880 | - | ||
12881 | - return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size); | ||
12882 | -} | ||
12883 | - | ||
12884 | -static void crypt_iv_essiv_dtr(struct crypt_config *cc) | ||
12885 | -{ | ||
12886 | - struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; | ||
12887 | - | ||
12888 | - crypto_free_cipher(essiv->tfm); | ||
12889 | - essiv->tfm = NULL; | ||
12890 | - | ||
12891 | - crypto_free_hash(essiv->hash_tfm); | ||
12892 | - essiv->hash_tfm = NULL; | ||
12893 | - | ||
12894 | - kzfree(essiv->salt); | ||
12895 | - essiv->salt = NULL; | ||
12896 | -} | ||
12897 | - | ||
12898 | static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, | ||
12899 | const char *opts) | ||
12900 | { | ||
12901 | - struct crypto_cipher *essiv_tfm = NULL; | ||
12902 | - struct crypto_hash *hash_tfm = NULL; | ||
12903 | - u8 *salt = NULL; | ||
12904 | + struct crypto_cipher *essiv_tfm; | ||
12905 | + struct crypto_hash *hash_tfm; | ||
12906 | + struct hash_desc desc; | ||
12907 | + struct scatterlist sg; | ||
12908 | + unsigned int saltsize; | ||
12909 | + u8 *salt; | ||
12910 | int err; | ||
12911 | |||
12912 | - if (!opts) { | ||
12913 | + if (opts == NULL) { | ||
12914 | ti->error = "Digest algorithm missing for ESSIV mode"; | ||
12915 | return -EINVAL; | ||
12916 | } | ||
12917 | |||
12918 | - /* Allocate hash algorithm */ | ||
12919 | + /* Hash the cipher key with the given hash algorithm */ | ||
12920 | hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); | ||
12921 | if (IS_ERR(hash_tfm)) { | ||
12922 | ti->error = "Error initializing ESSIV hash"; | ||
12923 | - err = PTR_ERR(hash_tfm); | ||
12924 | - goto bad; | ||
12925 | + return PTR_ERR(hash_tfm); | ||
12926 | } | ||
12927 | |||
12928 | - salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL); | ||
12929 | - if (!salt) { | ||
12930 | + saltsize = crypto_hash_digestsize(hash_tfm); | ||
12931 | + salt = kmalloc(saltsize, GFP_KERNEL); | ||
12932 | + if (salt == NULL) { | ||
12933 | ti->error = "Error kmallocing salt storage in ESSIV"; | ||
12934 | - err = -ENOMEM; | ||
12935 | - goto bad; | ||
12936 | + crypto_free_hash(hash_tfm); | ||
12937 | + return -ENOMEM; | ||
12938 | } | ||
12939 | |||
12940 | - /* Allocate essiv_tfm */ | ||
12941 | + sg_init_one(&sg, cc->key, cc->key_size); | ||
12942 | + desc.tfm = hash_tfm; | ||
12943 | + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | ||
12944 | + err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); | ||
12945 | + crypto_free_hash(hash_tfm); | ||
12946 | + | ||
12947 | + if (err) { | ||
12948 | + ti->error = "Error calculating hash in ESSIV"; | ||
12949 | + kfree(salt); | ||
12950 | + return err; | ||
12951 | + } | ||
12952 | + | ||
12953 | + /* Setup the essiv_tfm with the given salt */ | ||
12954 | essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); | ||
12955 | if (IS_ERR(essiv_tfm)) { | ||
12956 | ti->error = "Error allocating crypto tfm for ESSIV"; | ||
12957 | - err = PTR_ERR(essiv_tfm); | ||
12958 | - goto bad; | ||
12959 | + kfree(salt); | ||
12960 | + return PTR_ERR(essiv_tfm); | ||
12961 | } | ||
12962 | if (crypto_cipher_blocksize(essiv_tfm) != | ||
12963 | crypto_ablkcipher_ivsize(cc->tfm)) { | ||
12964 | ti->error = "Block size of ESSIV cipher does " | ||
12965 | "not match IV size of block cipher"; | ||
12966 | - err = -EINVAL; | ||
12967 | - goto bad; | ||
12968 | + crypto_free_cipher(essiv_tfm); | ||
12969 | + kfree(salt); | ||
12970 | + return -EINVAL; | ||
12971 | } | ||
12972 | + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); | ||
12973 | + if (err) { | ||
12974 | + ti->error = "Failed to set key for ESSIV cipher"; | ||
12975 | + crypto_free_cipher(essiv_tfm); | ||
12976 | + kfree(salt); | ||
12977 | + return err; | ||
12978 | + } | ||
12979 | + kfree(salt); | ||
12980 | |||
12981 | - cc->iv_gen_private.essiv.salt = salt; | ||
12982 | - cc->iv_gen_private.essiv.tfm = essiv_tfm; | ||
12983 | - cc->iv_gen_private.essiv.hash_tfm = hash_tfm; | ||
12984 | - | ||
12985 | + cc->iv_gen_private.essiv_tfm = essiv_tfm; | ||
12986 | return 0; | ||
12987 | +} | ||
12988 | |||
12989 | -bad: | ||
12990 | - if (essiv_tfm && !IS_ERR(essiv_tfm)) | ||
12991 | - crypto_free_cipher(essiv_tfm); | ||
12992 | - if (hash_tfm && !IS_ERR(hash_tfm)) | ||
12993 | - crypto_free_hash(hash_tfm); | ||
12994 | - kfree(salt); | ||
12995 | - return err; | ||
12996 | +static void crypt_iv_essiv_dtr(struct crypt_config *cc) | ||
12997 | +{ | ||
12998 | + crypto_free_cipher(cc->iv_gen_private.essiv_tfm); | ||
12999 | + cc->iv_gen_private.essiv_tfm = NULL; | ||
13000 | } | ||
13001 | |||
13002 | static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | ||
13003 | { | ||
13004 | memset(iv, 0, cc->iv_size); | ||
13005 | *(u64 *)iv = cpu_to_le64(sector); | ||
13006 | - crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv); | ||
13007 | + crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv); | ||
13008 | return 0; | ||
13009 | } | ||
13010 | |||
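The ESSIV hunks above fold salt derivation back into the constructor: the volume key is hashed once, the digest keys a dedicated cipher transform, and each sector's IV is that cipher applied to the sector number. A minimal user-space sketch of the same scheme, assuming OpenSSL's SHA256/AES helpers and a little-endian host (an illustration only, not the dm-crypt code itself):

    /* essiv_iv - sketch of ESSIV: IV = E_{H(key)}(sector) */
    #include <openssl/aes.h>
    #include <openssl/sha.h>
    #include <stdint.h>
    #include <string.h>

    /* iv must point to AES_BLOCK_SIZE (16) bytes */
    static void essiv_iv(const uint8_t *key, size_t key_len,
                         uint64_t sector, uint8_t *iv)
    {
        uint8_t salt[SHA256_DIGEST_LENGTH];   /* salt = H(volume key) */
        AES_KEY essiv_key;
        uint64_t le_sector = sector;          /* kernel uses cpu_to_le64(); assume LE host */

        SHA256(key, key_len, salt);
        AES_set_encrypt_key(salt, 8 * SHA256_DIGEST_LENGTH, &essiv_key);

        memset(iv, 0, AES_BLOCK_SIZE);
        memcpy(iv, &le_sector, sizeof(le_sector));
        AES_encrypt(iv, iv, &essiv_key);      /* encrypt the sector number in place */
    }

With the salt baked into essiv_tfm at construction time, per-request IV generation reduces to the single block encryption seen in crypt_iv_essiv_gen above.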
13011 | @@ -310,7 +273,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, | ||
13012 | return -EINVAL; | ||
13013 | } | ||
13014 | |||
13015 | - cc->iv_gen_private.benbi.shift = 9 - log; | ||
13016 | + cc->iv_gen_private.benbi_shift = 9 - log; | ||
13017 | |||
13018 | return 0; | ||
13019 | } | ||
13020 | @@ -325,7 +288,7 @@ static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector) | ||
13021 | |||
13022 | memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */ | ||
13023 | |||
13024 | - val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1); | ||
13025 | + val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1); | ||
13026 | put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64))); | ||
13027 | |||
13028 | return 0; | ||
13029 | @@ -345,8 +308,6 @@ static struct crypt_iv_operations crypt_iv_plain_ops = { | ||
13030 | static struct crypt_iv_operations crypt_iv_essiv_ops = { | ||
13031 | .ctr = crypt_iv_essiv_ctr, | ||
13032 | .dtr = crypt_iv_essiv_dtr, | ||
13033 | - .init = crypt_iv_essiv_init, | ||
13034 | - .wipe = crypt_iv_essiv_wipe, | ||
13035 | .generator = crypt_iv_essiv_gen | ||
13036 | }; | ||
13037 | |||
13038 | @@ -1078,12 +1039,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | ||
13039 | cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) | ||
13040 | goto bad_ivmode; | ||
13041 | |||
13042 | - if (cc->iv_gen_ops && cc->iv_gen_ops->init && | ||
13043 | - cc->iv_gen_ops->init(cc) < 0) { | ||
13044 | - ti->error = "Error initialising IV"; | ||
13045 | - goto bad_slab_pool; | ||
13046 | - } | ||
13047 | - | ||
13048 | cc->iv_size = crypto_ablkcipher_ivsize(tfm); | ||
13049 | if (cc->iv_size) | ||
13050 | /* at least a 64 bit sector number should fit in our buffer */ | ||
13051 | @@ -1323,7 +1278,6 @@ static void crypt_resume(struct dm_target *ti) | ||
13052 | static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | ||
13053 | { | ||
13054 | struct crypt_config *cc = ti->private; | ||
13055 | - int ret = -EINVAL; | ||
13056 | |||
13057 | if (argc < 2) | ||
13058 | goto error; | ||
13059 | @@ -1333,22 +1287,10 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) | ||
13060 | DMWARN("not suspended during key manipulation."); | ||
13061 | return -EINVAL; | ||
13062 | } | ||
13063 | - if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { | ||
13064 | - ret = crypt_set_key(cc, argv[2]); | ||
13065 | - if (ret) | ||
13066 | - return ret; | ||
13067 | - if (cc->iv_gen_ops && cc->iv_gen_ops->init) | ||
13068 | - ret = cc->iv_gen_ops->init(cc); | ||
13069 | - return ret; | ||
13070 | - } | ||
13071 | - if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { | ||
13072 | - if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { | ||
13073 | - ret = cc->iv_gen_ops->wipe(cc); | ||
13074 | - if (ret) | ||
13075 | - return ret; | ||
13076 | - } | ||
13077 | + if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) | ||
13078 | + return crypt_set_key(cc, argv[2]); | ||
13079 | + if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) | ||
13080 | return crypt_wipe_key(cc); | ||
13081 | - } | ||
13082 | } | ||
13083 | |||
13084 | error: | ||
13085 | diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c | ||
13086 | index 2052159..7dbe652 100644 | ||
13087 | --- a/drivers/md/dm-exception-store.c | ||
13088 | +++ b/drivers/md/dm-exception-store.c | ||
13089 | @@ -216,8 +216,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, | ||
13090 | type = get_type("N"); | ||
13091 | else { | ||
13092 | ti->error = "Persistent flag is not P or N"; | ||
13093 | - r = -EINVAL; | ||
13094 | - goto bad_type; | ||
13095 | + return -EINVAL; | ||
13096 | } | ||
13097 | |||
13098 | if (!type) { | ||
13099 | diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c | ||
13100 | index d19854c..a679429 100644 | ||
13101 | --- a/drivers/md/dm-ioctl.c | ||
13102 | +++ b/drivers/md/dm-ioctl.c | ||
13103 | @@ -56,11 +56,6 @@ static void dm_hash_remove_all(int keep_open_devices); | ||
13104 | */ | ||
13105 | static DECLARE_RWSEM(_hash_lock); | ||
13106 | |||
13107 | -/* | ||
13108 | - * Protects use of mdptr to obtain hash cell name and uuid from mapped device. | ||
13109 | - */ | ||
13110 | -static DEFINE_MUTEX(dm_hash_cells_mutex); | ||
13111 | - | ||
13112 | static void init_buckets(struct list_head *buckets) | ||
13113 | { | ||
13114 | unsigned int i; | ||
13115 | @@ -211,9 +206,7 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi | ||
13116 | list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); | ||
13117 | } | ||
13118 | dm_get(md); | ||
13119 | - mutex_lock(&dm_hash_cells_mutex); | ||
13120 | dm_set_mdptr(md, cell); | ||
13121 | - mutex_unlock(&dm_hash_cells_mutex); | ||
13122 | up_write(&_hash_lock); | ||
13123 | |||
13124 | return 0; | ||
13125 | @@ -231,9 +224,7 @@ static void __hash_remove(struct hash_cell *hc) | ||
13126 | /* remove from the dev hash */ | ||
13127 | list_del(&hc->uuid_list); | ||
13128 | list_del(&hc->name_list); | ||
13129 | - mutex_lock(&dm_hash_cells_mutex); | ||
13130 | dm_set_mdptr(hc->md, NULL); | ||
13131 | - mutex_unlock(&dm_hash_cells_mutex); | ||
13132 | |||
13133 | table = dm_get_table(hc->md); | ||
13134 | if (table) { | ||
13135 | @@ -330,9 +321,7 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new) | ||
13136 | */ | ||
13137 | list_del(&hc->name_list); | ||
13138 | old_name = hc->name; | ||
13139 | - mutex_lock(&dm_hash_cells_mutex); | ||
13140 | hc->name = new_name; | ||
13141 | - mutex_unlock(&dm_hash_cells_mutex); | ||
13142 | list_add(&hc->name_list, _name_buckets + hash_str(new_name)); | ||
13143 | |||
13144 | /* | ||
13145 | @@ -1593,7 +1582,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) | ||
13146 | if (!md) | ||
13147 | return -ENXIO; | ||
13148 | |||
13149 | - mutex_lock(&dm_hash_cells_mutex); | ||
13150 | + dm_get(md); | ||
13151 | + down_read(&_hash_lock); | ||
13152 | hc = dm_get_mdptr(md); | ||
13153 | if (!hc || hc->md != md) { | ||
13154 | r = -ENXIO; | ||
13155 | @@ -1606,7 +1596,8 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) | ||
13156 | strcpy(uuid, hc->uuid ? : ""); | ||
13157 | |||
13158 | out: | ||
13159 | - mutex_unlock(&dm_hash_cells_mutex); | ||
13160 | + up_read(&_hash_lock); | ||
13161 | + dm_put(md); | ||
13162 | |||
13163 | return r; | ||
13164 | } | ||
13165 | diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c | ||
13166 | index f1c8cae..54abf9e 100644 | ||
13167 | --- a/drivers/md/dm-log-userspace-transfer.c | ||
13168 | +++ b/drivers/md/dm-log-userspace-transfer.c | ||
13169 | @@ -172,15 +172,11 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type, | ||
13170 | { | ||
13171 | int r = 0; | ||
13172 | size_t dummy = 0; | ||
13173 | - int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg); | ||
13174 | + int overhead_size = | ||
13175 | + sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg); | ||
13176 | struct dm_ulog_request *tfr = prealloced_ulog_tfr; | ||
13177 | struct receiving_pkg pkg; | ||
13178 | |||
13179 | - /* | ||
13180 | - * Given the space needed to hold the 'struct cn_msg' and | ||
13181 | - * 'struct dm_ulog_request' - do we have enough payload | ||
13182 | - * space remaining? | ||
13183 | - */ | ||
13184 | if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) { | ||
13185 | DMINFO("Size of tfr exceeds preallocated size"); | ||
13186 | return -EINVAL; | ||
13187 | @@ -195,7 +191,7 @@ resend: | ||
13188 | */ | ||
13189 | mutex_lock(&dm_ulog_lock); | ||
13190 | |||
13191 | - memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg)); | ||
13192 | + memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); | ||
13193 | memcpy(tfr->uuid, uuid, DM_UUID_LEN); | ||
13194 | tfr->luid = luid; | ||
13195 | tfr->seq = dm_ulog_seq++; | ||
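The overhead_size computed on the two sides of the hunk above differs only in sizeof(struct dm_ulog_request) versus sizeof(struct dm_ulog_request *): the size of the whole request structure versus the size of a pointer to it, which changes how much of the preallocated transfer buffer is reserved as header. A stand-alone illustration with hypothetical stand-in structures (the real kernel structures are larger):

    #include <stdio.h>

    struct cn_msg_demo      { char hdr[20]; };  /* stand-in for struct cn_msg */
    struct dm_ulog_req_demo { char body[64]; }; /* stand-in for struct dm_ulog_request */

    int main(void)
    {
        /* sizeof(struct T *) is the size of a pointer; sizeof(struct T) is the structure. */
        printf("pointer form: %zu bytes of overhead\n",
               sizeof(struct dm_ulog_req_demo *) + sizeof(struct cn_msg_demo));
        printf("struct form:  %zu bytes of overhead\n",
               sizeof(struct dm_ulog_req_demo) + sizeof(struct cn_msg_demo));
        return 0;
    }

On a 64-bit build the pointer form always contributes 8 bytes for the request, no matter how large struct dm_ulog_request actually is.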
13196 | diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c | ||
13197 | index 8a4a9c8..3a3ba46 100644 | ||
13198 | --- a/drivers/md/dm-snap.c | ||
13199 | +++ b/drivers/md/dm-snap.c | ||
13200 | @@ -553,8 +553,6 @@ static int init_hash_tables(struct dm_snapshot *s) | ||
13201 | hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; | ||
13202 | hash_size = min(hash_size, max_buckets); | ||
13203 | |||
13204 | - if (hash_size < 64) | ||
13205 | - hash_size = 64; | ||
13206 | hash_size = rounddown_pow_of_two(hash_size); | ||
13207 | if (init_exception_table(&s->complete, hash_size, | ||
13208 | DM_CHUNK_CONSECUTIVE_BITS)) | ||
13209 | @@ -1154,11 +1152,10 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, | ||
13210 | unsigned sz = 0; | ||
13211 | struct dm_snapshot *snap = ti->private; | ||
13212 | |||
13213 | + down_write(&snap->lock); | ||
13214 | + | ||
13215 | switch (type) { | ||
13216 | case STATUSTYPE_INFO: | ||
13217 | - | ||
13218 | - down_write(&snap->lock); | ||
13219 | - | ||
13220 | if (!snap->valid) | ||
13221 | DMEMIT("Invalid"); | ||
13222 | else { | ||
13223 | @@ -1174,9 +1171,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, | ||
13224 | else | ||
13225 | DMEMIT("Unknown"); | ||
13226 | } | ||
13227 | - | ||
13228 | - up_write(&snap->lock); | ||
13229 | - | ||
13230 | break; | ||
13231 | |||
13232 | case STATUSTYPE_TABLE: | ||
13233 | @@ -1191,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, | ||
13234 | break; | ||
13235 | } | ||
13236 | |||
13237 | + up_write(&snap->lock); | ||
13238 | + | ||
13239 | return 0; | ||
13240 | } | ||
13241 | |||
13242 | diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c | ||
13243 | index bd58703..e0efc1a 100644 | ||
13244 | --- a/drivers/md/dm-stripe.c | ||
13245 | +++ b/drivers/md/dm-stripe.c | ||
13246 | @@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) | ||
13247 | } | ||
13248 | |||
13249 | stripes = simple_strtoul(argv[0], &end, 10); | ||
13250 | - if (!stripes || *end) { | ||
13251 | + if (*end) { | ||
13252 | ti->error = "Invalid stripe count"; | ||
13253 | return -EINVAL; | ||
13254 | } | ||
13255 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c | ||
13256 | index e869128..1a6cb3c 100644 | ||
13257 | --- a/drivers/md/dm-table.c | ||
13258 | +++ b/drivers/md/dm-table.c | ||
13259 | @@ -499,15 +499,16 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, | ||
13260 | return 0; | ||
13261 | } | ||
13262 | |||
13263 | - if (bdev_stack_limits(limits, bdev, start) < 0) | ||
13264 | - DMWARN("%s: adding target device %s caused an alignment inconsistency: " | ||
13265 | + if (blk_stack_limits(limits, &q->limits, start << 9) < 0) | ||
13266 | + DMWARN("%s: target device %s is misaligned: " | ||
13267 | "physical_block_size=%u, logical_block_size=%u, " | ||
13268 | "alignment_offset=%u, start=%llu", | ||
13269 | dm_device_name(ti->table->md), bdevname(bdev, b), | ||
13270 | q->limits.physical_block_size, | ||
13271 | q->limits.logical_block_size, | ||
13272 | q->limits.alignment_offset, | ||
13273 | - (unsigned long long) start << SECTOR_SHIFT); | ||
13274 | + (unsigned long long) start << 9); | ||
13275 | + | ||
13276 | |||
13277 | /* | ||
13278 | * Check if merge fn is supported. | ||
13279 | @@ -1024,9 +1025,9 @@ combine_limits: | ||
13280 | * for the table. | ||
13281 | */ | ||
13282 | if (blk_stack_limits(limits, &ti_limits, 0) < 0) | ||
13283 | - DMWARN("%s: adding target device " | ||
13284 | + DMWARN("%s: target device " | ||
13285 | "(start sect %llu len %llu) " | ||
13286 | - "caused an alignment inconsistency", | ||
13287 | + "is misaligned", | ||
13288 | dm_device_name(table->md), | ||
13289 | (unsigned long long) ti->begin, | ||
13290 | (unsigned long long) ti->len); | ||
13291 | @@ -1078,6 +1079,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | ||
13292 | struct queue_limits *limits) | ||
13293 | { | ||
13294 | /* | ||
13295 | + * Each target device in the table has a data area that should normally | ||
13296 | + * be aligned such that the DM device's alignment_offset is 0. | ||
13297 | + * FIXME: Propagate alignment_offsets up the stack and warn of | ||
13298 | + * sub-optimal or inconsistent settings. | ||
13299 | + */ | ||
13300 | + limits->alignment_offset = 0; | ||
13301 | + limits->misaligned = 0; | ||
13302 | + | ||
13303 | + /* | ||
13304 | * Copy table's limits to the DM device's request_queue | ||
13305 | */ | ||
13306 | q->limits = *limits; | ||
13307 | diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c | ||
13308 | index c7c555a..6f65883 100644 | ||
13309 | --- a/drivers/md/dm-uevent.c | ||
13310 | +++ b/drivers/md/dm-uevent.c | ||
13311 | @@ -139,13 +139,14 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj) | ||
13312 | list_del_init(&event->elist); | ||
13313 | |||
13314 | /* | ||
13315 | - * When a device is being removed this copy fails and we | ||
13316 | - * discard these unsent events. | ||
13317 | + * Need to call dm_copy_name_and_uuid from here for now. | ||
13318 | + * Context of previous var adds and locking used for | ||
13319 | + * hash_cell not compatible. | ||
13320 | */ | ||
13321 | if (dm_copy_name_and_uuid(event->md, event->name, | ||
13322 | event->uuid)) { | ||
13323 | - DMINFO("%s: skipping sending uevent for lost device", | ||
13324 | - __func__); | ||
13325 | + DMERR("%s: dm_copy_name_and_uuid() failed", | ||
13326 | + __func__); | ||
13327 | goto uevent_free; | ||
13328 | } | ||
13329 | |||
13330 | diff --git a/drivers/md/md.c b/drivers/md/md.c | ||
13331 | index 08f7471..b182f86 100644 | ||
13332 | --- a/drivers/md/md.c | ||
13333 | +++ b/drivers/md/md.c | ||
13334 | @@ -282,9 +282,7 @@ static void mddev_put(mddev_t *mddev) | ||
13335 | if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) | ||
13336 | return; | ||
13337 | if (!mddev->raid_disks && list_empty(&mddev->disks) && | ||
13338 | - mddev->ctime == 0 && !mddev->hold_active) { | ||
13339 | - /* Array is not configured at all, and not held active, | ||
13340 | - * so destroy it */ | ||
13341 | + !mddev->hold_active) { | ||
13342 | list_del(&mddev->all_mddevs); | ||
13343 | if (mddev->gendisk) { | ||
13344 | /* we did a probe so need to clean up. | ||
13345 | @@ -369,7 +367,6 @@ static mddev_t * mddev_find(dev_t unit) | ||
13346 | |||
13347 | mutex_init(&new->open_mutex); | ||
13348 | mutex_init(&new->reconfig_mutex); | ||
13349 | - mutex_init(&new->bitmap_mutex); | ||
13350 | INIT_LIST_HEAD(&new->disks); | ||
13351 | INIT_LIST_HEAD(&new->all_mddevs); | ||
13352 | init_timer(&new->safemode_timer); | ||
13353 | @@ -4173,7 +4170,7 @@ static int do_md_run(mddev_t * mddev) | ||
13354 | mddev->barriers_work = 1; | ||
13355 | mddev->ok_start_degraded = start_dirty_degraded; | ||
13356 | |||
13357 | - if (start_readonly && mddev->ro == 0) | ||
13358 | + if (start_readonly) | ||
13359 | mddev->ro = 2; /* read-only, but switch on first write */ | ||
13360 | |||
13361 | err = mddev->pers->run(mddev); | ||
13362 | @@ -5073,10 +5070,6 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) | ||
13363 | mddev->minor_version = info->minor_version; | ||
13364 | mddev->patch_version = info->patch_version; | ||
13365 | mddev->persistent = !info->not_persistent; | ||
13366 | - /* ensure mddev_put doesn't delete this now that there | ||
13367 | - * is some minimal configuration. | ||
13368 | - */ | ||
13369 | - mddev->ctime = get_seconds(); | ||
13370 | return 0; | ||
13371 | } | ||
13372 | mddev->major_version = MD_MAJOR_VERSION; | ||
13373 | @@ -6636,7 +6629,7 @@ void md_check_recovery(mddev_t *mddev) | ||
13374 | |||
13375 | |||
13376 | if (mddev->bitmap) | ||
13377 | - bitmap_daemon_work(mddev); | ||
13378 | + bitmap_daemon_work(mddev->bitmap); | ||
13379 | |||
13380 | if (mddev->ro) | ||
13381 | return; | ||
13382 | diff --git a/drivers/md/md.h b/drivers/md/md.h | ||
13383 | index 87430fe..f184b69 100644 | ||
13384 | --- a/drivers/md/md.h | ||
13385 | +++ b/drivers/md/md.h | ||
13386 | @@ -289,7 +289,6 @@ struct mddev_s | ||
13387 | * hot-adding a bitmap. It should | ||
13388 | * eventually be settable by sysfs. | ||
13389 | */ | ||
13390 | - struct mutex bitmap_mutex; | ||
13391 | |||
13392 | struct list_head all_mddevs; | ||
13393 | }; | ||
13394 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c | ||
13395 | index 431b9b2..d29215d 100644 | ||
13396 | --- a/drivers/md/raid5.c | ||
13397 | +++ b/drivers/md/raid5.c | ||
13398 | @@ -5432,11 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev) | ||
13399 | !test_bit(Faulty, &rdev->flags)) { | ||
13400 | if (raid5_add_disk(mddev, rdev) == 0) { | ||
13401 | char nm[20]; | ||
13402 | - if (rdev->raid_disk >= conf->previous_raid_disks) { | ||
13403 | + if (rdev->raid_disk >= conf->previous_raid_disks) | ||
13404 | set_bit(In_sync, &rdev->flags); | ||
13405 | - added_devices++; | ||
13406 | - } else | ||
13407 | + else | ||
13408 | rdev->recovery_offset = 0; | ||
13409 | + added_devices++; | ||
13410 | sprintf(nm, "rd%d", rdev->raid_disk); | ||
13411 | if (sysfs_create_link(&mddev->kobj, | ||
13412 | &rdev->kobj, nm)) | ||
13413 | @@ -5448,12 +5448,9 @@ static int raid5_start_reshape(mddev_t *mddev) | ||
13414 | break; | ||
13415 | } | ||
13416 | |||
13417 | - /* When a reshape changes the number of devices, ->degraded | ||
13418 | - * is measured against the large of the pre and post number of | ||
13419 | - * devices.*/ | ||
13420 | if (mddev->delta_disks > 0) { | ||
13421 | spin_lock_irqsave(&conf->device_lock, flags); | ||
13422 | - mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) | ||
13423 | + mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) | ||
13424 | - added_devices; | ||
13425 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
13426 | } | ||
13427 | diff --git a/drivers/media/common/tuners/mxl5007t.c b/drivers/media/common/tuners/mxl5007t.c | ||
13428 | index 7eb1bf7..2d02698 100644 | ||
13429 | --- a/drivers/media/common/tuners/mxl5007t.c | ||
13430 | +++ b/drivers/media/common/tuners/mxl5007t.c | ||
13431 | @@ -196,7 +196,7 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1, | ||
13432 | i = j = 0; | ||
13433 | |||
13434 | while (reg_pair1[i].reg || reg_pair1[i].val) { | ||
13435 | - while (reg_pair2[j].reg || reg_pair2[j].val) { | ||
13436 | + while (reg_pair2[j].reg || reg_pair2[j].reg) { | ||
13437 | if (reg_pair1[i].reg != reg_pair2[j].reg) { | ||
13438 | j++; | ||
13439 | continue; | ||
13440 | diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c | ||
13441 | index 9ddc579..c37790a 100644 | ||
13442 | --- a/drivers/media/dvb/dvb-core/dmxdev.c | ||
13443 | +++ b/drivers/media/dvb/dvb-core/dmxdev.c | ||
13444 | @@ -761,6 +761,7 @@ static int dvb_demux_open(struct inode *inode, struct file *file) | ||
13445 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); | ||
13446 | dmxdevfilter->type = DMXDEV_TYPE_NONE; | ||
13447 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); | ||
13448 | + INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
13449 | init_timer(&dmxdevfilter->timer); | ||
13450 | |||
13451 | dvbdev->users++; | ||
13452 | @@ -886,7 +887,6 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, | ||
13453 | dmxdevfilter->type = DMXDEV_TYPE_PES; | ||
13454 | memcpy(&dmxdevfilter->params, params, | ||
13455 | sizeof(struct dmx_pes_filter_params)); | ||
13456 | - INIT_LIST_HEAD(&dmxdevfilter->feed.ts); | ||
13457 | |||
13458 | dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); | ||
13459 | |||
13460 | diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c | ||
13461 | index 6b03dbf..8f88a58 100644 | ||
13462 | --- a/drivers/media/dvb/siano/smsusb.c | ||
13463 | +++ b/drivers/media/dvb/siano/smsusb.c | ||
13464 | @@ -533,18 +533,8 @@ struct usb_device_id smsusb_id_table[] = { | ||
13465 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13466 | { USB_DEVICE(0x2040, 0xb910), | ||
13467 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13468 | - { USB_DEVICE(0x2040, 0xb980), | ||
13469 | - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13470 | - { USB_DEVICE(0x2040, 0xb990), | ||
13471 | - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13472 | { USB_DEVICE(0x2040, 0xc000), | ||
13473 | .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13474 | - { USB_DEVICE(0x2040, 0xc010), | ||
13475 | - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13476 | - { USB_DEVICE(0x2040, 0xc080), | ||
13477 | - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13478 | - { USB_DEVICE(0x2040, 0xc090), | ||
13479 | - .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM }, | ||
13480 | { } /* Terminating entry */ | ||
13481 | }; | ||
13482 | |||
13483 | diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c | ||
13484 | index e165578..a5c190e 100644 | ||
13485 | --- a/drivers/media/video/gspca/ov519.c | ||
13486 | +++ b/drivers/media/video/gspca/ov519.c | ||
13487 | @@ -3364,7 +3364,6 @@ static const __devinitdata struct usb_device_id device_table[] = { | ||
13488 | {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, | ||
13489 | {USB_DEVICE(0x041e, 0x4064), | ||
13490 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
13491 | - {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, | ||
13492 | {USB_DEVICE(0x041e, 0x4068), | ||
13493 | .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, | ||
13494 | {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 }, | ||
13495 | diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c | ||
13496 | index e0a3b75..cdad3db 100644 | ||
13497 | --- a/drivers/media/video/gspca/sn9c20x.c | ||
13498 | +++ b/drivers/media/video/gspca/sn9c20x.c | ||
13499 | @@ -2319,7 +2319,7 @@ static void do_autogain(struct gspca_dev *gspca_dev, u16 avg_lum) | ||
13500 | } | ||
13501 | } | ||
13502 | if (avg_lum > MAX_AVG_LUM) { | ||
13503 | - if (sd->gain >= 1) { | ||
13504 | + if (sd->gain - 1 >= 0) { | ||
13505 | sd->gain--; | ||
13506 | set_gain(gspca_dev); | ||
13507 | } | ||
13508 | diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c | ||
13509 | index 28b4625..aa8f995 100644 | ||
13510 | --- a/drivers/media/video/gspca/sunplus.c | ||
13511 | +++ b/drivers/media/video/gspca/sunplus.c | ||
13512 | @@ -705,7 +705,7 @@ static void spca504B_SetSizeType(struct gspca_dev *gspca_dev) | ||
13513 | rc = spca504B_PollingDataReady(gspca_dev); | ||
13514 | |||
13515 | /* Init the cam width height with some values get on init ? */ | ||
13516 | - reg_w_riv(dev, 0x31, 0x04, 0); | ||
13517 | + reg_w_riv(dev, 0x31, 0, 0x04); | ||
13518 | spca504B_WaitCmdStatus(gspca_dev); | ||
13519 | rc = spca504B_PollingDataReady(gspca_dev); | ||
13520 | break; | ||
13521 | @@ -807,14 +807,14 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev) | ||
13522 | default: | ||
13523 | /* case BRIDGE_SPCA533: */ | ||
13524 | /* case BRIDGE_SPCA504B: */ | ||
13525 | - reg_w_riv(dev, 0, 0x21ad, 0x00); /* hue */ | ||
13526 | - reg_w_riv(dev, 0, 0x21ac, 0x01); /* sat/hue */ | ||
13527 | - reg_w_riv(dev, 0, 0x21a3, 0x00); /* gamma */ | ||
13528 | + reg_w_riv(dev, 0, 0x00, 0x21ad); /* hue */ | ||
13529 | + reg_w_riv(dev, 0, 0x01, 0x21ac); /* sat/hue */ | ||
13530 | + reg_w_riv(dev, 0, 0x00, 0x21a3); /* gamma */ | ||
13531 | break; | ||
13532 | case BRIDGE_SPCA536: | ||
13533 | - reg_w_riv(dev, 0, 0x20f5, 0x40); | ||
13534 | - reg_w_riv(dev, 0, 0x20f4, 0x01); | ||
13535 | - reg_w_riv(dev, 0, 0x2089, 0x00); | ||
13536 | + reg_w_riv(dev, 0, 0x40, 0x20f5); | ||
13537 | + reg_w_riv(dev, 0, 0x01, 0x20f4); | ||
13538 | + reg_w_riv(dev, 0, 0x00, 0x2089); | ||
13539 | break; | ||
13540 | } | ||
13541 | if (pollreg) | ||
13542 | @@ -888,11 +888,11 @@ static int sd_init(struct gspca_dev *gspca_dev) | ||
13543 | switch (sd->bridge) { | ||
13544 | case BRIDGE_SPCA504B: | ||
13545 | reg_w_riv(dev, 0x1d, 0x00, 0); | ||
13546 | - reg_w_riv(dev, 0, 0x2306, 0x01); | ||
13547 | - reg_w_riv(dev, 0, 0x0d04, 0x00); | ||
13548 | - reg_w_riv(dev, 0, 0x2000, 0x00); | ||
13549 | - reg_w_riv(dev, 0, 0x2301, 0x13); | ||
13550 | - reg_w_riv(dev, 0, 0x2306, 0x00); | ||
13551 | + reg_w_riv(dev, 0, 0x01, 0x2306); | ||
13552 | + reg_w_riv(dev, 0, 0x00, 0x0d04); | ||
13553 | + reg_w_riv(dev, 0, 0x00, 0x2000); | ||
13554 | + reg_w_riv(dev, 0, 0x13, 0x2301); | ||
13555 | + reg_w_riv(dev, 0, 0x00, 0x2306); | ||
13556 | /* fall thru */ | ||
13557 | case BRIDGE_SPCA533: | ||
13558 | spca504B_PollingDataReady(gspca_dev); | ||
13559 | @@ -1011,7 +1011,7 @@ static int sd_start(struct gspca_dev *gspca_dev) | ||
13560 | spca504B_WaitCmdStatus(gspca_dev); | ||
13561 | break; | ||
13562 | default: | ||
13563 | - reg_w_riv(dev, 0x31, 0x04, 0); | ||
13564 | + reg_w_riv(dev, 0x31, 0, 0x04); | ||
13565 | spca504B_WaitCmdStatus(gspca_dev); | ||
13566 | spca504B_PollingDataReady(gspca_dev); | ||
13567 | break; | ||
13568 | diff --git a/drivers/media/video/ov511.c b/drivers/media/video/ov511.c | ||
13569 | index 2bed9e2..0bc2cf5 100644 | ||
13570 | --- a/drivers/media/video/ov511.c | ||
13571 | +++ b/drivers/media/video/ov511.c | ||
13572 | @@ -5878,7 +5878,7 @@ ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id) | ||
13573 | goto error; | ||
13574 | } | ||
13575 | |||
13576 | - mutex_unlock(&ov->lock); | ||
13577 | + mutex_lock(&ov->lock); | ||
13578 | |||
13579 | return 0; | ||
13580 | |||
13581 | diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c | ||
13582 | index 6781a07..0901322 100644 | ||
13583 | --- a/drivers/media/video/saa7134/saa7134-cards.c | ||
13584 | +++ b/drivers/media/video/saa7134/saa7134-cards.c | ||
13585 | @@ -5279,30 +5279,6 @@ struct saa7134_board saa7134_boards[] = { | ||
13586 | .amux = TV, | ||
13587 | }, | ||
13588 | }, | ||
13589 | - [SAA7134_BOARD_ASUS_EUROPA_HYBRID] = { | ||
13590 | - .name = "Asus Europa Hybrid OEM", | ||
13591 | - .audio_clock = 0x00187de7, | ||
13592 | - .tuner_type = TUNER_PHILIPS_TD1316, | ||
13593 | - .radio_type = UNSET, | ||
13594 | - .tuner_addr = 0x61, | ||
13595 | - .radio_addr = ADDR_UNSET, | ||
13596 | - .tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE, | ||
13597 | - .mpeg = SAA7134_MPEG_DVB, | ||
13598 | - .inputs = { { | ||
13599 | - .name = name_tv, | ||
13600 | - .vmux = 3, | ||
13601 | - .amux = TV, | ||
13602 | - .tv = 1, | ||
13603 | - }, { | ||
13604 | - .name = name_comp1, | ||
13605 | - .vmux = 4, | ||
13606 | - .amux = LINE2, | ||
13607 | - }, { | ||
13608 | - .name = name_svideo, | ||
13609 | - .vmux = 8, | ||
13610 | - .amux = LINE2, | ||
13611 | - } }, | ||
13612 | - }, | ||
13613 | |||
13614 | }; | ||
13615 | |||
13616 | @@ -6442,12 +6418,6 @@ struct pci_device_id saa7134_pci_tbl[] = { | ||
13617 | .subdevice = 0x2004, | ||
13618 | .driver_data = SAA7134_BOARD_ZOLID_HYBRID_PCI, | ||
13619 | }, { | ||
13620 | - .vendor = PCI_VENDOR_ID_PHILIPS, | ||
13621 | - .device = PCI_DEVICE_ID_PHILIPS_SAA7134, | ||
13622 | - .subvendor = 0x1043, | ||
13623 | - .subdevice = 0x4847, | ||
13624 | - .driver_data = SAA7134_BOARD_ASUS_EUROPA_HYBRID, | ||
13625 | - }, { | ||
13626 | /* --- boards without eeprom + subsystem ID --- */ | ||
13627 | .vendor = PCI_VENDOR_ID_PHILIPS, | ||
13628 | .device = PCI_DEVICE_ID_PHILIPS_SAA7134, | ||
13629 | @@ -7109,7 +7079,6 @@ int saa7134_board_init2(struct saa7134_dev *dev) | ||
13630 | /* break intentionally omitted */ | ||
13631 | case SAA7134_BOARD_VIDEOMATE_DVBT_300: | ||
13632 | case SAA7134_BOARD_ASUS_EUROPA2_HYBRID: | ||
13633 | - case SAA7134_BOARD_ASUS_EUROPA_HYBRID: | ||
13634 | { | ||
13635 | |||
13636 | /* The Philips EUROPA based hybrid boards have the tuner | ||
13637 | diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c | ||
13638 | index b8a805c..a26e997 100644 | ||
13639 | --- a/drivers/media/video/saa7134/saa7134-dvb.c | ||
13640 | +++ b/drivers/media/video/saa7134/saa7134-dvb.c | ||
13641 | @@ -1116,7 +1116,6 @@ static int dvb_init(struct saa7134_dev *dev) | ||
13642 | break; | ||
13643 | case SAA7134_BOARD_PHILIPS_EUROPA: | ||
13644 | case SAA7134_BOARD_VIDEOMATE_DVBT_300: | ||
13645 | - case SAA7134_BOARD_ASUS_EUROPA_HYBRID: | ||
13646 | fe0->dvb.frontend = dvb_attach(tda10046_attach, | ||
13647 | &philips_europa_config, | ||
13648 | &dev->i2c_adap); | ||
13649 | diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h | ||
13650 | index 94e1a3b..f8697d4 100644 | ||
13651 | --- a/drivers/media/video/saa7134/saa7134.h | ||
13652 | +++ b/drivers/media/video/saa7134/saa7134.h | ||
13653 | @@ -297,7 +297,6 @@ struct saa7134_format { | ||
13654 | #define SAA7134_BOARD_BEHOLD_X7 171 | ||
13655 | #define SAA7134_BOARD_ROVERMEDIA_LINK_PRO_FM 172 | ||
13656 | #define SAA7134_BOARD_ZOLID_HYBRID_PCI 173 | ||
13657 | -#define SAA7134_BOARD_ASUS_EUROPA_HYBRID 174 | ||
13658 | |||
13659 | #define SAA7134_MAXBOARDS 32 | ||
13660 | #define SAA7134_INPUT_MAX 8 | ||
13661 | diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c | ||
13662 | index 4a293b4..1b89735 100644 | ||
13663 | --- a/drivers/media/video/uvc/uvc_ctrl.c | ||
13664 | +++ b/drivers/media/video/uvc/uvc_ctrl.c | ||
13665 | @@ -1405,7 +1405,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity) | ||
13666 | size = entity->processing.bControlSize; | ||
13667 | |||
13668 | for (i = 0; i < ARRAY_SIZE(blacklist); ++i) { | ||
13669 | - if (!usb_match_one_id(dev->intf, &blacklist[i].id)) | ||
13670 | + if (!usb_match_id(dev->intf, &blacklist[i].id)) | ||
13671 | continue; | ||
13672 | |||
13673 | if (blacklist[i].index >= 8 * size || | ||
13674 | diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c | ||
13675 | index b6992b7..610e914 100644 | ||
13676 | --- a/drivers/message/fusion/mptbase.c | ||
13677 | +++ b/drivers/message/fusion/mptbase.c | ||
13678 | @@ -4330,8 +4330,6 @@ initChainBuffers(MPT_ADAPTER *ioc) | ||
13679 | |||
13680 | if (ioc->bus_type == SPI) | ||
13681 | num_chain *= MPT_SCSI_CAN_QUEUE; | ||
13682 | - else if (ioc->bus_type == SAS) | ||
13683 | - num_chain *= MPT_SAS_CAN_QUEUE; | ||
13684 | else | ||
13685 | num_chain *= MPT_FC_CAN_QUEUE; | ||
13686 | |||
13687 | diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c | ||
13688 | index 6cea718..c295786 100644 | ||
13689 | --- a/drivers/message/fusion/mptscsih.c | ||
13690 | +++ b/drivers/message/fusion/mptscsih.c | ||
13691 | @@ -1720,7 +1720,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) | ||
13692 | dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: " | ||
13693 | "Command not in the active list! (sc=%p)\n", ioc->name, | ||
13694 | SCpnt)); | ||
13695 | - retval = SUCCESS; | ||
13696 | + retval = 0; | ||
13697 | goto out; | ||
13698 | } | ||
13699 | |||
13700 | diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c | ||
13701 | index ca6b098..ba27c9d 100644 | ||
13702 | --- a/drivers/mfd/wm8350-core.c | ||
13703 | +++ b/drivers/mfd/wm8350-core.c | ||
13704 | @@ -134,7 +134,8 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg) | ||
13705 | wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY) | ||
13706 | return 0; | ||
13707 | |||
13708 | - if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 && | ||
13709 | + if ((reg == WM8350_GPIO_CONFIGURATION_I_O) || | ||
13710 | + (reg >= WM8350_GPIO_FUNCTION_SELECT_1 && | ||
13711 | reg <= WM8350_GPIO_FUNCTION_SELECT_4) || | ||
13712 | (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 && | ||
13713 | reg <= WM8350_BATTERY_CHARGER_CONTROL_3)) | ||
13714 | diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c | ||
13715 | index 1eac626..e9eae4a 100644 | ||
13716 | --- a/drivers/misc/enclosure.c | ||
13717 | +++ b/drivers/misc/enclosure.c | ||
13718 | @@ -391,7 +391,6 @@ static const char *const enclosure_status [] = { | ||
13719 | [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed", | ||
13720 | [ENCLOSURE_STATUS_UNKNOWN] = "unknown", | ||
13721 | [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable", | ||
13722 | - [ENCLOSURE_STATUS_MAX] = NULL, | ||
13723 | }; | ||
13724 | |||
13725 | static const char *const enclosure_type [] = { | ||
13726 | diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c | ||
13727 | index 1f552c6..85f0e8c 100644 | ||
13728 | --- a/drivers/mmc/card/block.c | ||
13729 | +++ b/drivers/mmc/card/block.c | ||
13730 | @@ -85,14 +85,7 @@ static void mmc_blk_put(struct mmc_blk_data *md) | ||
13731 | mutex_lock(&open_lock); | ||
13732 | md->usage--; | ||
13733 | if (md->usage == 0) { | ||
13734 | - int devmaj = MAJOR(disk_devt(md->disk)); | ||
13735 | int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT; | ||
13736 | - | ||
13737 | - if (!devmaj) | ||
13738 | - devidx = md->disk->first_minor >> MMC_SHIFT; | ||
13739 | - | ||
13740 | - blk_cleanup_queue(md->queue.queue); | ||
13741 | - | ||
13742 | __clear_bit(devidx, dev_use); | ||
13743 | |||
13744 | put_disk(md->disk); | ||
13745 | @@ -620,7 +613,6 @@ static int mmc_blk_probe(struct mmc_card *card) | ||
13746 | return 0; | ||
13747 | |||
13748 | out: | ||
13749 | - mmc_cleanup_queue(&md->queue); | ||
13750 | mmc_blk_put(md); | ||
13751 | |||
13752 | return err; | ||
13753 | diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c | ||
13754 | index c5a7a85..49e5823 100644 | ||
13755 | --- a/drivers/mmc/card/queue.c | ||
13756 | +++ b/drivers/mmc/card/queue.c | ||
13757 | @@ -90,10 +90,9 @@ static void mmc_request(struct request_queue *q) | ||
13758 | struct request *req; | ||
13759 | |||
13760 | if (!mq) { | ||
13761 | - while ((req = blk_fetch_request(q)) != NULL) { | ||
13762 | - req->cmd_flags |= REQ_QUIET; | ||
13763 | + printk(KERN_ERR "MMC: killing requests for dead queue\n"); | ||
13764 | + while ((req = blk_fetch_request(q)) != NULL) | ||
13765 | __blk_end_request_all(req, -EIO); | ||
13766 | - } | ||
13767 | return; | ||
13768 | } | ||
13769 | |||
13770 | @@ -224,18 +223,17 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | ||
13771 | struct request_queue *q = mq->queue; | ||
13772 | unsigned long flags; | ||
13773 | |||
13774 | + /* Mark that we should start throwing out stragglers */ | ||
13775 | + spin_lock_irqsave(q->queue_lock, flags); | ||
13776 | + q->queuedata = NULL; | ||
13777 | + spin_unlock_irqrestore(q->queue_lock, flags); | ||
13778 | + | ||
13779 | /* Make sure the queue isn't suspended, as that will deadlock */ | ||
13780 | mmc_queue_resume(mq); | ||
13781 | |||
13782 | /* Then terminate our worker thread */ | ||
13783 | kthread_stop(mq->thread); | ||
13784 | |||
13785 | - /* Empty the queue */ | ||
13786 | - spin_lock_irqsave(q->queue_lock, flags); | ||
13787 | - q->queuedata = NULL; | ||
13788 | - blk_start_queue(q); | ||
13789 | - spin_unlock_irqrestore(q->queue_lock, flags); | ||
13790 | - | ||
13791 | if (mq->bounce_sg) | ||
13792 | kfree(mq->bounce_sg); | ||
13793 | mq->bounce_sg = NULL; | ||
13794 | @@ -247,6 +245,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq) | ||
13795 | kfree(mq->bounce_buf); | ||
13796 | mq->bounce_buf = NULL; | ||
13797 | |||
13798 | + blk_cleanup_queue(mq->queue); | ||
13799 | + | ||
13800 | mq->card = NULL; | ||
13801 | } | ||
13802 | EXPORT_SYMBOL(mmc_cleanup_queue); | ||
13803 | diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c | ||
13804 | index 111ea41..f237ddb 100644 | ||
13805 | --- a/drivers/mtd/ubi/cdev.c | ||
13806 | +++ b/drivers/mtd/ubi/cdev.c | ||
13807 | @@ -853,6 +853,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | ||
13808 | break; | ||
13809 | } | ||
13810 | |||
13811 | + req.name[req.name_len] = '\0'; | ||
13812 | err = verify_mkvol_req(ubi, &req); | ||
13813 | if (err) | ||
13814 | break; | ||
13815 | diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c | ||
13816 | index 425bf5a..74fdc40 100644 | ||
13817 | --- a/drivers/mtd/ubi/upd.c | ||
13818 | +++ b/drivers/mtd/ubi/upd.c | ||
13819 | @@ -147,15 +147,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, | ||
13820 | } | ||
13821 | |||
13822 | if (bytes == 0) { | ||
13823 | - err = ubi_wl_flush(ubi); | ||
13824 | - if (err) | ||
13825 | - return err; | ||
13826 | - | ||
13827 | err = clear_update_marker(ubi, vol, 0); | ||
13828 | if (err) | ||
13829 | return err; | ||
13830 | - vol->updating = 0; | ||
13831 | - return 0; | ||
13832 | + err = ubi_wl_flush(ubi); | ||
13833 | + if (!err) | ||
13834 | + vol->updating = 0; | ||
13835 | } | ||
13836 | |||
13837 | vol->upd_buf = vmalloc(ubi->leb_size); | ||
13838 | @@ -365,16 +362,16 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, | ||
13839 | |||
13840 | ubi_assert(vol->upd_received <= vol->upd_bytes); | ||
13841 | if (vol->upd_received == vol->upd_bytes) { | ||
13842 | - err = ubi_wl_flush(ubi); | ||
13843 | - if (err) | ||
13844 | - return err; | ||
13845 | /* The update is finished, clear the update marker */ | ||
13846 | err = clear_update_marker(ubi, vol, vol->upd_bytes); | ||
13847 | if (err) | ||
13848 | return err; | ||
13849 | - vol->updating = 0; | ||
13850 | - err = to_write; | ||
13851 | - vfree(vol->upd_buf); | ||
13852 | + err = ubi_wl_flush(ubi); | ||
13853 | + if (err == 0) { | ||
13854 | + vol->updating = 0; | ||
13855 | + err = to_write; | ||
13856 | + vfree(vol->upd_buf); | ||
13857 | + } | ||
13858 | } | ||
13859 | |||
13860 | return err; | ||
13861 | diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c | ||
13862 | index 4004402..1afc61e 100644 | ||
13863 | --- a/drivers/mtd/ubi/vtbl.c | ||
13864 | +++ b/drivers/mtd/ubi/vtbl.c | ||
13865 | @@ -566,7 +566,6 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | ||
13866 | vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); | ||
13867 | vol->alignment = be32_to_cpu(vtbl[i].alignment); | ||
13868 | vol->data_pad = be32_to_cpu(vtbl[i].data_pad); | ||
13869 | - vol->upd_marker = vtbl[i].upd_marker; | ||
13870 | vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? | ||
13871 | UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; | ||
13872 | vol->name_len = be16_to_cpu(vtbl[i].name_len); | ||
13873 | diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h | ||
13874 | index 790e55b..2a1120a 100644 | ||
13875 | --- a/drivers/net/atl1c/atl1c.h | ||
13876 | +++ b/drivers/net/atl1c/atl1c.h | ||
13877 | @@ -534,9 +534,6 @@ struct atl1c_adapter { | ||
13878 | #define __AT_TESTING 0x0001 | ||
13879 | #define __AT_RESETTING 0x0002 | ||
13880 | #define __AT_DOWN 0x0003 | ||
13881 | - u8 work_event; | ||
13882 | -#define ATL1C_WORK_EVENT_RESET 0x01 | ||
13883 | -#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02 | ||
13884 | u32 msg_enable; | ||
13885 | |||
13886 | bool have_msi; | ||
13887 | @@ -548,7 +545,8 @@ struct atl1c_adapter { | ||
13888 | spinlock_t tx_lock; | ||
13889 | atomic_t irq_sem; | ||
13890 | |||
13891 | - struct work_struct common_task; | ||
13892 | + struct work_struct reset_task; | ||
13893 | + struct work_struct link_chg_task; | ||
13894 | struct timer_list watchdog_timer; | ||
13895 | struct timer_list phy_config_timer; | ||
13896 | |||
13897 | diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c | ||
13898 | index be00ee9..1372e9a 100644 | ||
13899 | --- a/drivers/net/atl1c/atl1c_main.c | ||
13900 | +++ b/drivers/net/atl1c/atl1c_main.c | ||
13901 | @@ -198,12 +198,27 @@ static void atl1c_phy_config(unsigned long data) | ||
13902 | |||
13903 | void atl1c_reinit_locked(struct atl1c_adapter *adapter) | ||
13904 | { | ||
13905 | + | ||
13906 | WARN_ON(in_interrupt()); | ||
13907 | atl1c_down(adapter); | ||
13908 | atl1c_up(adapter); | ||
13909 | clear_bit(__AT_RESETTING, &adapter->flags); | ||
13910 | } | ||
13911 | |||
13912 | +static void atl1c_reset_task(struct work_struct *work) | ||
13913 | +{ | ||
13914 | + struct atl1c_adapter *adapter; | ||
13915 | + struct net_device *netdev; | ||
13916 | + | ||
13917 | + adapter = container_of(work, struct atl1c_adapter, reset_task); | ||
13918 | + netdev = adapter->netdev; | ||
13919 | + | ||
13920 | + netif_device_detach(netdev); | ||
13921 | + atl1c_down(adapter); | ||
13922 | + atl1c_up(adapter); | ||
13923 | + netif_device_attach(netdev); | ||
13924 | +} | ||
13925 | + | ||
13926 | static void atl1c_check_link_status(struct atl1c_adapter *adapter) | ||
13927 | { | ||
13928 | struct atl1c_hw *hw = &adapter->hw; | ||
13929 | @@ -260,6 +275,18 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter) | ||
13930 | } | ||
13931 | } | ||
13932 | |||
13933 | +/* | ||
13934 | + * atl1c_link_chg_task - deal with link change event out of interrupt context | ||
13935 | + * @netdev: network interface device structure | ||
13936 | + */ | ||
13937 | +static void atl1c_link_chg_task(struct work_struct *work) | ||
13938 | +{ | ||
13939 | + struct atl1c_adapter *adapter; | ||
13940 | + | ||
13941 | + adapter = container_of(work, struct atl1c_adapter, link_chg_task); | ||
13942 | + atl1c_check_link_status(adapter); | ||
13943 | +} | ||
13944 | + | ||
13945 | static void atl1c_link_chg_event(struct atl1c_adapter *adapter) | ||
13946 | { | ||
13947 | struct net_device *netdev = adapter->netdev; | ||
13948 | @@ -284,39 +311,19 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter) | ||
13949 | adapter->link_speed = SPEED_0; | ||
13950 | } | ||
13951 | } | ||
13952 | - | ||
13953 | - adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE; | ||
13954 | - schedule_work(&adapter->common_task); | ||
13955 | -} | ||
13956 | - | ||
13957 | -static void atl1c_common_task(struct work_struct *work) | ||
13958 | -{ | ||
13959 | - struct atl1c_adapter *adapter; | ||
13960 | - struct net_device *netdev; | ||
13961 | - | ||
13962 | - adapter = container_of(work, struct atl1c_adapter, common_task); | ||
13963 | - netdev = adapter->netdev; | ||
13964 | - | ||
13965 | - if (adapter->work_event & ATL1C_WORK_EVENT_RESET) { | ||
13966 | - netif_device_detach(netdev); | ||
13967 | - atl1c_down(adapter); | ||
13968 | - atl1c_up(adapter); | ||
13969 | - netif_device_attach(netdev); | ||
13970 | - return; | ||
13971 | - } | ||
13972 | - | ||
13973 | - if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) | ||
13974 | - atl1c_check_link_status(adapter); | ||
13975 | - | ||
13976 | - return; | ||
13977 | + schedule_work(&adapter->link_chg_task); | ||
13978 | } | ||
13979 | |||
13980 | - | ||
13981 | static void atl1c_del_timer(struct atl1c_adapter *adapter) | ||
13982 | { | ||
13983 | del_timer_sync(&adapter->phy_config_timer); | ||
13984 | } | ||
13985 | |||
13986 | +static void atl1c_cancel_work(struct atl1c_adapter *adapter) | ||
13987 | +{ | ||
13988 | + cancel_work_sync(&adapter->reset_task); | ||
13989 | + cancel_work_sync(&adapter->link_chg_task); | ||
13990 | +} | ||
13991 | |||
13992 | /* | ||
13993 | * atl1c_tx_timeout - Respond to a Tx Hang | ||
13994 | @@ -327,8 +334,7 @@ static void atl1c_tx_timeout(struct net_device *netdev) | ||
13995 | struct atl1c_adapter *adapter = netdev_priv(netdev); | ||
13996 | |||
13997 | /* Do the reset outside of interrupt context */ | ||
13998 | - adapter->work_event |= ATL1C_WORK_EVENT_RESET; | ||
13999 | - schedule_work(&adapter->common_task); | ||
14000 | + schedule_work(&adapter->reset_task); | ||
14001 | } | ||
14002 | |||
14003 | /* | ||
14004 | @@ -1530,8 +1536,7 @@ static irqreturn_t atl1c_intr(int irq, void *data) | ||
14005 | /* reset MAC */ | ||
14006 | hw->intr_mask &= ~ISR_ERROR; | ||
14007 | AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); | ||
14008 | - adapter->work_event |= ATL1C_WORK_EVENT_RESET; | ||
14009 | - schedule_work(&adapter->common_task); | ||
14010 | + schedule_work(&adapter->reset_task); | ||
14011 | break; | ||
14012 | } | ||
14013 | |||
14014 | @@ -2195,7 +2200,8 @@ void atl1c_down(struct atl1c_adapter *adapter) | ||
14015 | struct net_device *netdev = adapter->netdev; | ||
14016 | |||
14017 | atl1c_del_timer(adapter); | ||
14018 | - adapter->work_event = 0; /* clear all event */ | ||
14019 | + atl1c_cancel_work(adapter); | ||
14020 | + | ||
14021 | /* signal that we're down so the interrupt handler does not | ||
14022 | * reschedule our watchdog timer */ | ||
14023 | set_bit(__AT_DOWN, &adapter->flags); | ||
14024 | @@ -2595,8 +2601,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, | ||
14025 | adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]); | ||
14026 | |||
14027 | atl1c_hw_set_mac_addr(&adapter->hw); | ||
14028 | - INIT_WORK(&adapter->common_task, atl1c_common_task); | ||
14029 | - adapter->work_event = 0; | ||
14030 | + INIT_WORK(&adapter->reset_task, atl1c_reset_task); | ||
14031 | + INIT_WORK(&adapter->link_chg_task, atl1c_link_chg_task); | ||
14032 | err = register_netdev(netdev); | ||
14033 | if (err) { | ||
14034 | dev_err(&pdev->dev, "register netdevice failed\n"); | ||
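The atl1c hunks above trade a single common_task driven by a work_event bitmask for two dedicated work items (reset_task and link_chg_task). The bitmask variant lets one deferred handler service whichever events were flagged before it ran; a user-space sketch of that dispatch pattern, with hypothetical names and none of the kernel's workqueue machinery or locking:

    #include <stdio.h>

    #define EVENT_RESET        0x01
    #define EVENT_LINK_CHANGE  0x02

    static unsigned int work_event;      /* set where the driver calls schedule_work() */

    static void common_task(void)        /* runs later, in process context */
    {
        if (work_event & EVENT_RESET) {
            printf("reset the device\n");
            work_event = 0;              /* a reset supersedes any pending link event */
            return;
        }
        if (work_event & EVENT_LINK_CHANGE)
            printf("re-check link status\n");
        work_event = 0;
    }

    int main(void)
    {
        work_event |= EVENT_LINK_CHANGE; /* what the interrupt path would flag */
        common_task();
        return 0;
    }

Either way the driver has to neutralize pending work in atl1c_down(), which is what the work_event = 0 and atl1c_cancel_work() lines in the hunks above take care of.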
14035 | diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c | ||
14036 | index 1b5facf..955da73 100644 | ||
14037 | --- a/drivers/net/atl1e/atl1e_main.c | ||
14038 | +++ b/drivers/net/atl1e/atl1e_main.c | ||
14039 | @@ -1666,6 +1666,41 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter, | ||
14040 | } | ||
14041 | return 0; | ||
14042 | } | ||
14043 | + | ||
14044 | + if (offload_type & SKB_GSO_TCPV6) { | ||
14045 | + real_len = (((unsigned char *)ipv6_hdr(skb) - skb->data) | ||
14046 | + + ntohs(ipv6_hdr(skb)->payload_len)); | ||
14047 | + if (real_len < skb->len) | ||
14048 | + pskb_trim(skb, real_len); | ||
14049 | + | ||
14050 | + /* check payload == 0 byte ? */ | ||
14051 | + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); | ||
14052 | + if (unlikely(skb->len == hdr_len)) { | ||
14053 | + /* only xsum need */ | ||
14054 | + dev_warn(&pdev->dev, | ||
14055 | + "IPV6 tso with zero data??\n"); | ||
14056 | + goto check_sum; | ||
14057 | + } else { | ||
14058 | + tcp_hdr(skb)->check = ~csum_ipv6_magic( | ||
14059 | + &ipv6_hdr(skb)->saddr, | ||
14060 | + &ipv6_hdr(skb)->daddr, | ||
14061 | + 0, IPPROTO_TCP, 0); | ||
14062 | + tpd->word3 |= 1 << TPD_IP_VERSION_SHIFT; | ||
14063 | + hdr_len >>= 1; | ||
14064 | + tpd->word3 |= (hdr_len & TPD_V6_IPHLLO_MASK) << | ||
14065 | + TPD_V6_IPHLLO_SHIFT; | ||
14066 | + tpd->word3 |= ((hdr_len >> 3) & | ||
14067 | + TPD_V6_IPHLHI_MASK) << | ||
14068 | + TPD_V6_IPHLHI_SHIFT; | ||
14069 | + tpd->word3 |= (tcp_hdrlen(skb) >> 2 & | ||
14070 | + TPD_TCPHDRLEN_MASK) << | ||
14071 | + TPD_TCPHDRLEN_SHIFT; | ||
14072 | + tpd->word3 |= ((skb_shinfo(skb)->gso_size) & | ||
14073 | + TPD_MSS_MASK) << TPD_MSS_SHIFT; | ||
14074 | + tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; | ||
14075 | + } | ||
14076 | + } | ||
14077 | + return 0; | ||
14078 | } | ||
14079 | |||
14080 | check_sum: | ||
14081 | @@ -2254,6 +2289,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) | ||
14082 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; | ||
14083 | netdev->features |= NETIF_F_LLTX; | ||
14084 | netdev->features |= NETIF_F_TSO; | ||
14085 | + netdev->features |= NETIF_F_TSO6; | ||
14086 | |||
14087 | return 0; | ||
14088 | } | ||
14089 | diff --git a/drivers/net/b44.c b/drivers/net/b44.c | ||
14090 | index 4869adb..2a91323 100644 | ||
14091 | --- a/drivers/net/b44.c | ||
14092 | +++ b/drivers/net/b44.c | ||
14093 | @@ -1505,7 +1505,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) | ||
14094 | for (k = 0; k< ethaddr_bytes; k++) { | ||
14095 | ppattern[offset + magicsync + | ||
14096 | (j * ETH_ALEN) + k] = macaddr[k]; | ||
14097 | - set_bit(len++, (unsigned long *) pmask); | ||
14098 | + len++; | ||
14099 | + set_bit(len, (unsigned long *) pmask); | ||
14100 | } | ||
14101 | } | ||
14102 | return len - 1; | ||
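The b44_magic_pattern hunk above differs only in when len is incremented relative to set_bit(): the post-increment form passes the old value of len, while incrementing first passes the new one, so every mask bit lands one position higher in the second form. A minimal stand-alone demonstration (plain C, not the driver code):

    #include <stdio.h>

    int main(void)
    {
        int len, bit_post, bit_pre;

        len = 3;
        bit_post = len++;   /* set_bit(len++, ...): bit 3 is set, len becomes 4 */

        len = 3;
        len++;
        bit_pre = len;      /* len++; set_bit(len, ...): bit 4 is set */

        printf("post-increment sets bit %d, increment-first sets bit %d\n",
               bit_post, bit_pre);
        return 0;
    }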
14103 | diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c | ||
14104 | index d110c1b..ba29dc3 100644 | ||
14105 | --- a/drivers/net/bcm63xx_enet.c | ||
14106 | +++ b/drivers/net/bcm63xx_enet.c | ||
14107 | @@ -1248,15 +1248,9 @@ static void bcm_enet_get_drvinfo(struct net_device *netdev, | ||
14108 | drvinfo->n_stats = BCM_ENET_STATS_LEN; | ||
14109 | } | ||
14110 | |||
14111 | -static int bcm_enet_get_sset_count(struct net_device *netdev, | ||
14112 | - int string_set) | ||
14113 | +static int bcm_enet_get_stats_count(struct net_device *netdev) | ||
14114 | { | ||
14115 | - switch (string_set) { | ||
14116 | - case ETH_SS_STATS: | ||
14117 | - return BCM_ENET_STATS_LEN; | ||
14118 | - default: | ||
14119 | - return -EINVAL; | ||
14120 | - } | ||
14121 | + return BCM_ENET_STATS_LEN; | ||
14122 | } | ||
14123 | |||
14124 | static void bcm_enet_get_strings(struct net_device *netdev, | ||
14125 | @@ -1482,7 +1476,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev, | ||
14126 | |||
14127 | static struct ethtool_ops bcm_enet_ethtool_ops = { | ||
14128 | .get_strings = bcm_enet_get_strings, | ||
14129 | - .get_sset_count = bcm_enet_get_sset_count, | ||
14130 | + .get_stats_count = bcm_enet_get_stats_count, | ||
14131 | .get_ethtool_stats = bcm_enet_get_ethtool_stats, | ||
14132 | .get_settings = bcm_enet_get_settings, | ||
14133 | .set_settings = bcm_enet_set_settings, | ||
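The bcm63xx_enet hunk above switches between the older get_stats_count() ethtool hook, which unconditionally returns the number of statistics strings, and get_sset_count(), which receives a string-set identifier and must reject sets it does not implement. A small sketch of that dispatch, using hypothetical stand-in constants rather than the real ethtool ABI values:

    #include <stdio.h>

    #define SS_STATS  1     /* stand-in for ETH_SS_STATS */
    #define SS_TEST   2     /* some other string set */
    #define STATS_LEN 42    /* stand-in for BCM_ENET_STATS_LEN */

    static int get_sset_count(int string_set)
    {
        switch (string_set) {
        case SS_STATS:
            return STATS_LEN;   /* only the statistics set is supported */
        default:
            return -1;          /* the kernel returns -EINVAL here */
        }
    }

    int main(void)
    {
        printf("stats: %d, test: %d\n",
               get_sset_count(SS_STATS), get_sset_count(SS_TEST));
        return 0;
    }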
14134 | diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h | ||
14135 | index 511b922..3b79a22 100644 | ||
14136 | --- a/drivers/net/benet/be.h | ||
14137 | +++ b/drivers/net/benet/be.h | ||
14138 | @@ -35,31 +35,20 @@ | ||
14139 | #define DRV_VER "2.101.205" | ||
14140 | #define DRV_NAME "be2net" | ||
14141 | #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" | ||
14142 | -#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" | ||
14143 | #define OC_NAME "Emulex OneConnect 10Gbps NIC" | ||
14144 | -#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" | ||
14145 | #define DRV_DESC BE_NAME "Driver" | ||
14146 | |||
14147 | #define BE_VENDOR_ID 0x19a2 | ||
14148 | #define BE_DEVICE_ID1 0x211 | ||
14149 | -#define BE_DEVICE_ID2 0x221 | ||
14150 | #define OC_DEVICE_ID1 0x700 | ||
14151 | #define OC_DEVICE_ID2 0x701 | ||
14152 | -#define OC_DEVICE_ID3 0x710 | ||
14153 | |||
14154 | static inline char *nic_name(struct pci_dev *pdev) | ||
14155 | { | ||
14156 | - switch (pdev->device) { | ||
14157 | - case OC_DEVICE_ID1: | ||
14158 | - case OC_DEVICE_ID2: | ||
14159 | + if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) | ||
14160 | return OC_NAME; | ||
14161 | - case OC_DEVICE_ID3: | ||
14162 | - return OC_NAME1; | ||
14163 | - case BE_DEVICE_ID2: | ||
14164 | - return BE3_NAME; | ||
14165 | - default: | ||
14166 | + else | ||
14167 | return BE_NAME; | ||
14168 | - } | ||
14169 | } | ||
14170 | |||
14171 | /* Number of bytes of an RX frame that are copied to skb->data */ | ||
14172 | @@ -272,13 +261,8 @@ struct be_adapter { | ||
14173 | u32 cap; | ||
14174 | u32 rx_fc; /* Rx flow control */ | ||
14175 | u32 tx_fc; /* Tx flow control */ | ||
14176 | - u8 generation; /* BladeEngine ASIC generation */ | ||
14177 | }; | ||
14178 | |||
14179 | -/* BladeEngine Generation numbers */ | ||
14180 | -#define BE_GEN2 2 | ||
14181 | -#define BE_GEN3 3 | ||
14182 | - | ||
14183 | extern const struct ethtool_ops be_ethtool_ops; | ||
14184 | |||
14185 | #define drvr_stats(adapter) (&adapter->stats.drvr_stats) | ||
14186 | diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h | ||
14187 | index ad33d55..e5f9676 100644 | ||
14188 | --- a/drivers/net/benet/be_cmds.h | ||
14189 | +++ b/drivers/net/benet/be_cmds.h | ||
14190 | @@ -154,8 +154,7 @@ struct be_cmd_req_hdr { | ||
14191 | u8 domain; /* dword 0 */ | ||
14192 | u32 timeout; /* dword 1 */ | ||
14193 | u32 request_length; /* dword 2 */ | ||
14194 | - u8 version; /* dword 3 */ | ||
14195 | - u8 rsvd[3]; /* dword 3 */ | ||
14196 | + u32 rsvd; /* dword 3 */ | ||
14197 | }; | ||
14198 | |||
14199 | #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ | ||
14200 | diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c | ||
14201 | index ec983cb..876b357 100644 | ||
14202 | --- a/drivers/net/benet/be_main.c | ||
14203 | +++ b/drivers/net/benet/be_main.c | ||
14204 | @@ -31,10 +31,8 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); | ||
14205 | |||
14206 | static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { | ||
14207 | { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, | ||
14208 | - { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, | ||
14209 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, | ||
14210 | { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, | ||
14211 | - { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, | ||
14212 | { 0 } | ||
14213 | }; | ||
14214 | MODULE_DEVICE_TABLE(pci, be_dev_ids); | ||
14215 | @@ -1944,7 +1942,6 @@ static void be_unmap_pci_bars(struct be_adapter *adapter) | ||
14216 | static int be_map_pci_bars(struct be_adapter *adapter) | ||
14217 | { | ||
14218 | u8 __iomem *addr; | ||
14219 | - int pcicfg_reg; | ||
14220 | |||
14221 | addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), | ||
14222 | pci_resource_len(adapter->pdev, 2)); | ||
14223 | @@ -1958,13 +1955,8 @@ static int be_map_pci_bars(struct be_adapter *adapter) | ||
14224 | goto pci_map_err; | ||
14225 | adapter->db = addr; | ||
14226 | |||
14227 | - if (adapter->generation == BE_GEN2) | ||
14228 | - pcicfg_reg = 1; | ||
14229 | - else | ||
14230 | - pcicfg_reg = 0; | ||
14231 | - | ||
14232 | - addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), | ||
14233 | - pci_resource_len(adapter->pdev, pcicfg_reg)); | ||
14234 | + addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), | ||
14235 | + pci_resource_len(adapter->pdev, 1)); | ||
14236 | if (addr == NULL) | ||
14237 | goto pci_map_err; | ||
14238 | adapter->pcicfg = addr; | ||
14239 | @@ -2034,7 +2026,6 @@ static int be_stats_init(struct be_adapter *adapter) | ||
14240 | cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); | ||
14241 | if (cmd->va == NULL) | ||
14242 | return -1; | ||
14243 | - memset(cmd->va, 0, cmd->size); | ||
14244 | return 0; | ||
14245 | } | ||
14246 | |||
14247 | @@ -2108,20 +2099,6 @@ static int __devinit be_probe(struct pci_dev *pdev, | ||
14248 | goto rel_reg; | ||
14249 | } | ||
14250 | adapter = netdev_priv(netdev); | ||
14251 | - | ||
14252 | - switch (pdev->device) { | ||
14253 | - case BE_DEVICE_ID1: | ||
14254 | - case OC_DEVICE_ID1: | ||
14255 | - adapter->generation = BE_GEN2; | ||
14256 | - break; | ||
14257 | - case BE_DEVICE_ID2: | ||
14258 | - case OC_DEVICE_ID2: | ||
14259 | - adapter->generation = BE_GEN3; | ||
14260 | - break; | ||
14261 | - default: | ||
14262 | - adapter->generation = 0; | ||
14263 | - } | ||
14264 | - | ||
14265 | adapter->pdev = pdev; | ||
14266 | pci_set_drvdata(pdev, adapter); | ||
14267 | adapter->netdev = netdev; | ||
14268 | diff --git a/drivers/net/e100.c b/drivers/net/e100.c | ||
14269 | index 0c53c92..d269a68 100644 | ||
14270 | --- a/drivers/net/e100.c | ||
14271 | +++ b/drivers/net/e100.c | ||
14272 | @@ -1817,7 +1817,6 @@ static int e100_alloc_cbs(struct nic *nic) | ||
14273 | &nic->cbs_dma_addr); | ||
14274 | if (!nic->cbs) | ||
14275 | return -ENOMEM; | ||
14276 | - memset(nic->cbs, 0, count * sizeof(struct cb)); | ||
14277 | |||
14278 | for (cb = nic->cbs, i = 0; i < count; cb++, i++) { | ||
14279 | cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; | ||
14280 | @@ -1826,6 +1825,7 @@ static int e100_alloc_cbs(struct nic *nic) | ||
14281 | cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); | ||
14282 | cb->link = cpu_to_le32(nic->cbs_dma_addr + | ||
14283 | ((i+1) % count) * sizeof(struct cb)); | ||
14284 | + cb->skb = NULL; | ||
14285 | } | ||
14286 | |||
14287 | nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; | ||
14288 | diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h | ||
14289 | index 4a2ee85..42e2b7e 100644 | ||
14290 | --- a/drivers/net/e1000/e1000.h | ||
14291 | +++ b/drivers/net/e1000/e1000.h | ||
14292 | @@ -326,8 +326,6 @@ struct e1000_adapter { | ||
14293 | /* for ioport free */ | ||
14294 | int bars; | ||
14295 | int need_ioport; | ||
14296 | - | ||
14297 | - bool discarding; | ||
14298 | }; | ||
14299 | |||
14300 | enum e1000_state_t { | ||
14301 | diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c | ||
14302 | index 1a23f16..bcd192c 100644 | ||
14303 | --- a/drivers/net/e1000/e1000_main.c | ||
14304 | +++ b/drivers/net/e1000/e1000_main.c | ||
14305 | @@ -1698,6 +1698,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | ||
14306 | rctl &= ~E1000_RCTL_SZ_4096; | ||
14307 | rctl |= E1000_RCTL_BSEX; | ||
14308 | switch (adapter->rx_buffer_len) { | ||
14309 | + case E1000_RXBUFFER_256: | ||
14310 | + rctl |= E1000_RCTL_SZ_256; | ||
14311 | + rctl &= ~E1000_RCTL_BSEX; | ||
14312 | + break; | ||
14313 | + case E1000_RXBUFFER_512: | ||
14314 | + rctl |= E1000_RCTL_SZ_512; | ||
14315 | + rctl &= ~E1000_RCTL_BSEX; | ||
14316 | + break; | ||
14317 | + case E1000_RXBUFFER_1024: | ||
14318 | + rctl |= E1000_RCTL_SZ_1024; | ||
14319 | + rctl &= ~E1000_RCTL_BSEX; | ||
14320 | + break; | ||
14321 | case E1000_RXBUFFER_2048: | ||
14322 | default: | ||
14323 | rctl |= E1000_RCTL_SZ_2048; | ||
14324 | @@ -3142,7 +3154,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | ||
14325 | * however with the new *_jumbo_rx* routines, jumbo receives will use | ||
14326 | * fragmented skbs */ | ||
14327 | |||
14328 | - if (max_frame <= E1000_RXBUFFER_2048) | ||
14329 | + if (max_frame <= E1000_RXBUFFER_256) | ||
14330 | + adapter->rx_buffer_len = E1000_RXBUFFER_256; | ||
14331 | + else if (max_frame <= E1000_RXBUFFER_512) | ||
14332 | + adapter->rx_buffer_len = E1000_RXBUFFER_512; | ||
14333 | + else if (max_frame <= E1000_RXBUFFER_1024) | ||
14334 | + adapter->rx_buffer_len = E1000_RXBUFFER_1024; | ||
14335 | + else if (max_frame <= E1000_RXBUFFER_2048) | ||
14336 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | ||
14337 | else | ||
14338 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) | ||
14339 | @@ -3809,22 +3827,13 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
14340 | |||
14341 | length = le16_to_cpu(rx_desc->length); | ||
14342 | /* !EOP means multiple descriptors were used to store a single | ||
14343 | - * packet, if thats the case we need to toss it. In fact, we | ||
14344 | - * to toss every packet with the EOP bit clear and the next | ||
14345 | - * frame that _does_ have the EOP bit set, as it is by | ||
14346 | - * definition only a frame fragment | ||
14347 | - */ | ||
14348 | - if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
14349 | - adapter->discarding = true; | ||
14350 | - | ||
14351 | - if (adapter->discarding) { | ||
14352 | + * packet, also make sure the frame isn't just CRC only */ | ||
14353 | + if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) { | ||
14354 | /* All receives must fit into a single buffer */ | ||
14355 | E1000_DBG("%s: Receive packet consumed multiple" | ||
14356 | " buffers\n", netdev->name); | ||
14357 | /* recycle */ | ||
14358 | buffer_info->skb = skb; | ||
14359 | - if (status & E1000_RXD_STAT_EOP) | ||
14360 | - adapter->discarding = false; | ||
14361 | goto next_desc; | ||
14362 | } | ||
14363 | |||
14364 | diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h | ||
14365 | index 47db9bd..3e187b0 100644 | ||
14366 | --- a/drivers/net/e1000e/e1000.h | ||
14367 | +++ b/drivers/net/e1000e/e1000.h | ||
14368 | @@ -417,7 +417,6 @@ struct e1000_info { | ||
14369 | /* CRC Stripping defines */ | ||
14370 | #define FLAG2_CRC_STRIPPING (1 << 0) | ||
14371 | #define FLAG2_HAS_PHY_WAKEUP (1 << 1) | ||
14372 | -#define FLAG2_IS_DISCARDING (1 << 2) | ||
14373 | |||
14374 | #define E1000_RX_DESC_PS(R, i) \ | ||
14375 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | ||
14376 | diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c | ||
14377 | index 2154530..fad8f9e 100644 | ||
14378 | --- a/drivers/net/e1000e/netdev.c | ||
14379 | +++ b/drivers/net/e1000e/netdev.c | ||
14380 | @@ -482,24 +482,14 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | ||
14381 | |||
14382 | length = le16_to_cpu(rx_desc->length); | ||
14383 | |||
14384 | - /* | ||
14385 | - * !EOP means multiple descriptors were used to store a single | ||
14386 | - * packet, if that's the case we need to toss it. In fact, we | ||
14387 | - * need to toss every packet with the EOP bit clear and the | ||
14388 | - * next frame that _does_ have the EOP bit set, as it is by | ||
14389 | - * definition only a frame fragment | ||
14390 | - */ | ||
14391 | - if (unlikely(!(status & E1000_RXD_STAT_EOP))) | ||
14392 | - adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
14393 | - | ||
14394 | - if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
14395 | + /* !EOP means multiple descriptors were used to store a single | ||
14396 | + * packet, also make sure the frame isn't just CRC only */ | ||
14397 | + if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { | ||
14398 | /* All receives must fit into a single buffer */ | ||
14399 | e_dbg("%s: Receive packet consumed multiple buffers\n", | ||
14400 | netdev->name); | ||
14401 | /* recycle */ | ||
14402 | buffer_info->skb = skb; | ||
14403 | - if (status & E1000_RXD_STAT_EOP) | ||
14404 | - adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
14405 | goto next_desc; | ||
14406 | } | ||
14407 | |||
14408 | @@ -757,16 +747,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, | ||
14409 | PCI_DMA_FROMDEVICE); | ||
14410 | buffer_info->dma = 0; | ||
14411 | |||
14412 | - /* see !EOP comment in other rx routine */ | ||
14413 | - if (!(staterr & E1000_RXD_STAT_EOP)) | ||
14414 | - adapter->flags2 |= FLAG2_IS_DISCARDING; | ||
14415 | - | ||
14416 | - if (adapter->flags2 & FLAG2_IS_DISCARDING) { | ||
14417 | + if (!(staterr & E1000_RXD_STAT_EOP)) { | ||
14418 | e_dbg("%s: Packet Split buffers didn't pick up the " | ||
14419 | "full packet\n", netdev->name); | ||
14420 | dev_kfree_skb_irq(skb); | ||
14421 | - if (staterr & E1000_RXD_STAT_EOP) | ||
14422 | - adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
14423 | goto next_desc; | ||
14424 | } | ||
14425 | |||
14426 | @@ -1136,7 +1120,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | ||
14427 | |||
14428 | rx_ring->next_to_clean = 0; | ||
14429 | rx_ring->next_to_use = 0; | ||
14430 | - adapter->flags2 &= ~FLAG2_IS_DISCARDING; | ||
14431 | |||
14432 | writel(0, adapter->hw.hw_addr + rx_ring->head); | ||
14433 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | ||
14434 | @@ -2347,6 +2330,18 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) | ||
14435 | rctl &= ~E1000_RCTL_SZ_4096; | ||
14436 | rctl |= E1000_RCTL_BSEX; | ||
14437 | switch (adapter->rx_buffer_len) { | ||
14438 | + case 256: | ||
14439 | + rctl |= E1000_RCTL_SZ_256; | ||
14440 | + rctl &= ~E1000_RCTL_BSEX; | ||
14441 | + break; | ||
14442 | + case 512: | ||
14443 | + rctl |= E1000_RCTL_SZ_512; | ||
14444 | + rctl &= ~E1000_RCTL_BSEX; | ||
14445 | + break; | ||
14446 | + case 1024: | ||
14447 | + rctl |= E1000_RCTL_SZ_1024; | ||
14448 | + rctl &= ~E1000_RCTL_BSEX; | ||
14449 | + break; | ||
14450 | case 2048: | ||
14451 | default: | ||
14452 | rctl |= E1000_RCTL_SZ_2048; | ||
14453 | @@ -4326,7 +4321,13 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | ||
14454 | * fragmented skbs | ||
14455 | */ | ||
14456 | |||
14457 | - if (max_frame <= 2048) | ||
14458 | + if (max_frame <= 256) | ||
14459 | + adapter->rx_buffer_len = 256; | ||
14460 | + else if (max_frame <= 512) | ||
14461 | + adapter->rx_buffer_len = 512; | ||
14462 | + else if (max_frame <= 1024) | ||
14463 | + adapter->rx_buffer_len = 1024; | ||
14464 | + else if (max_frame <= 2048) | ||
14465 | adapter->rx_buffer_len = 2048; | ||
14466 | else | ||
14467 | adapter->rx_buffer_len = 4096; | ||
14468 | diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c | ||
14469 | index 35d896b..a2fc70a 100644 | ||
14470 | --- a/drivers/net/qlge/qlge_main.c | ||
14471 | +++ b/drivers/net/qlge/qlge_main.c | ||
14472 | @@ -3310,8 +3310,10 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) | ||
14473 | |||
14474 | /* Initialize the port and set the max framesize. */ | ||
14475 | status = qdev->nic_ops->port_initialize(qdev); | ||
14476 | - if (status) | ||
14477 | - QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); | ||
14478 | + if (status) { | ||
14479 | + QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); | ||
14480 | + return status; | ||
14481 | + } | ||
14482 | |||
14483 | /* Set up the MAC address and frame routing filter. */ | ||
14484 | status = ql_cam_route_initialize(qdev); | ||
14485 | @@ -3712,6 +3714,9 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) | ||
14486 | struct sockaddr *addr = p; | ||
14487 | int status; | ||
14488 | |||
14489 | + if (netif_running(ndev)) | ||
14490 | + return -EBUSY; | ||
14491 | + | ||
14492 | if (!is_valid_ether_addr(addr->sa_data)) | ||
14493 | return -EADDRNOTAVAIL; | ||
14494 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | ||
14495 | @@ -3863,7 +3868,8 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | ||
14496 | struct net_device *ndev, int cards_found) | ||
14497 | { | ||
14498 | struct ql_adapter *qdev = netdev_priv(ndev); | ||
14499 | - int err = 0; | ||
14500 | + int pos, err = 0; | ||
14501 | + u16 val16; | ||
14502 | |||
14503 | memset((void *)qdev, 0, sizeof(*qdev)); | ||
14504 | err = pci_enable_device(pdev); | ||
14505 | @@ -3875,12 +3881,18 @@ static int __devinit ql_init_device(struct pci_dev *pdev, | ||
14506 | qdev->ndev = ndev; | ||
14507 | qdev->pdev = pdev; | ||
14508 | pci_set_drvdata(pdev, ndev); | ||
14509 | - | ||
14510 | - /* Set PCIe read request size */ | ||
14511 | - err = pcie_set_readrq(pdev, 4096); | ||
14512 | - if (err) { | ||
14513 | - dev_err(&pdev->dev, "Set readrq failed.\n"); | ||
14514 | - goto err_out; | ||
14515 | + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | ||
14516 | + if (pos <= 0) { | ||
14517 | + dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " | ||
14518 | + "aborting.\n"); | ||
14519 | + return pos; | ||
14520 | + } else { | ||
14521 | + pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16); | ||
14522 | + val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; | ||
14523 | + val16 |= (PCI_EXP_DEVCTL_CERE | | ||
14524 | + PCI_EXP_DEVCTL_NFERE | | ||
14525 | + PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE); | ||
14526 | + pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); | ||
14527 | } | ||
14528 | |||
14529 | err = pci_request_regions(pdev, DRV_NAME); | ||
14530 | diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c | ||
14531 | index 32b1e1f..aec05f2 100644 | ||
14532 | --- a/drivers/net/qlge/qlge_mpi.c | ||
14533 | +++ b/drivers/net/qlge/qlge_mpi.c | ||
14534 | @@ -446,9 +446,6 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) | ||
14535 | ql_aen_lost(qdev, mbcp); | ||
14536 | break; | ||
14537 | |||
14538 | - case AEN_DCBX_CHG: | ||
14539 | - /* Need to support AEN 8110 */ | ||
14540 | - break; | ||
14541 | default: | ||
14542 | QPRINTK(qdev, DRV, ERR, | ||
14543 | "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); | ||
14544 | diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c | ||
14545 | index d443ad7..489c4de 100644 | ||
14546 | --- a/drivers/net/sfc/tx.c | ||
14547 | +++ b/drivers/net/sfc/tx.c | ||
14548 | @@ -821,6 +821,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | ||
14549 | tx_queue->efx->type->txd_ring_mask]; | ||
14550 | efx_tsoh_free(tx_queue, buffer); | ||
14551 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
14552 | + buffer->len = 0; | ||
14553 | + buffer->continuation = true; | ||
14554 | if (buffer->unmap_len) { | ||
14555 | unmap_addr = (buffer->dma_addr + buffer->len - | ||
14556 | buffer->unmap_len); | ||
14557 | @@ -834,8 +836,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) | ||
14558 | PCI_DMA_TODEVICE); | ||
14559 | buffer->unmap_len = 0; | ||
14560 | } | ||
14561 | - buffer->len = 0; | ||
14562 | - buffer->continuation = true; | ||
14563 | } | ||
14564 | } | ||
14565 | |||
14566 | diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c | ||
14567 | index f3600b3..6a10d7b 100644 | ||
14568 | --- a/drivers/net/sky2.c | ||
14569 | +++ b/drivers/net/sky2.c | ||
14570 | @@ -1806,8 +1806,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | ||
14571 | sky2->tx_cons = idx; | ||
14572 | smp_mb(); | ||
14573 | |||
14574 | - /* Wake unless it's detached, and called e.g. from sky2_down() */ | ||
14575 | - if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev)) | ||
14576 | + if (tx_avail(sky2) > MAX_SKB_TX_LE + 4) | ||
14577 | netif_wake_queue(dev); | ||
14578 | } | ||
14579 | |||
14580 | diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c | ||
14581 | index e65ee4d..a36e2b5 100644 | ||
14582 | --- a/drivers/net/starfire.c | ||
14583 | +++ b/drivers/net/starfire.c | ||
14584 | @@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev) | ||
14585 | if (retval) { | ||
14586 | printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", | ||
14587 | FIRMWARE_RX); | ||
14588 | - goto out_init; | ||
14589 | + return retval; | ||
14590 | } | ||
14591 | if (fw_rx->size % 4) { | ||
14592 | printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", | ||
14593 | @@ -1108,9 +1108,6 @@ out_tx: | ||
14594 | release_firmware(fw_tx); | ||
14595 | out_rx: | ||
14596 | release_firmware(fw_rx); | ||
14597 | -out_init: | ||
14598 | - if (retval) | ||
14599 | - netdev_close(dev); | ||
14600 | return retval; | ||
14601 | } | ||
14602 | |||
14603 | diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c | ||
14604 | index f14d225..b091e20 100644 | ||
14605 | --- a/drivers/net/usb/rtl8150.c | ||
14606 | +++ b/drivers/net/usb/rtl8150.c | ||
14607 | @@ -324,7 +324,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p) | ||
14608 | dbg("%02X:", netdev->dev_addr[i]); | ||
14609 | dbg("%02X\n", netdev->dev_addr[i]); | ||
14610 | /* Set the IDR registers. */ | ||
14611 | - set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); | ||
14612 | + set_registers(dev, IDR, sizeof(netdev->dev_addr), netdev->dev_addr); | ||
14613 | #ifdef EEPROM_WRITE | ||
14614 | { | ||
14615 | u8 cr; | ||
14616 | diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c | ||
14617 | index f141a4f..e974e58 100644 | ||
14618 | --- a/drivers/net/wireless/ath/ar9170/usb.c | ||
14619 | +++ b/drivers/net/wireless/ath/ar9170/usb.c | ||
14620 | @@ -68,10 +68,8 @@ static struct usb_device_id ar9170_usb_ids[] = { | ||
14621 | { USB_DEVICE(0x0cf3, 0x1002) }, | ||
14622 | /* Cace Airpcap NX */ | ||
14623 | { USB_DEVICE(0xcace, 0x0300) }, | ||
14624 | - /* D-Link DWA 160 A1 */ | ||
14625 | + /* D-Link DWA 160A */ | ||
14626 | { USB_DEVICE(0x07d1, 0x3c10) }, | ||
14627 | - /* D-Link DWA 160 A2 */ | ||
14628 | - { USB_DEVICE(0x07d1, 0x3a09) }, | ||
14629 | /* Netgear WNDA3100 */ | ||
14630 | { USB_DEVICE(0x0846, 0x9010) }, | ||
14631 | /* Netgear WN111 v2 */ | ||
14632 | diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c | ||
14633 | index 8a82c75..95a8e23 100644 | ||
14634 | --- a/drivers/net/wireless/ath/ath5k/base.c | ||
14635 | +++ b/drivers/net/wireless/ath/ath5k/base.c | ||
14636 | @@ -2349,9 +2349,6 @@ ath5k_init(struct ath5k_softc *sc) | ||
14637 | */ | ||
14638 | ath5k_stop_locked(sc); | ||
14639 | |||
14640 | - /* Set PHY calibration interval */ | ||
14641 | - ah->ah_cal_intval = ath5k_calinterval; | ||
14642 | - | ||
14643 | /* | ||
14644 | * The basic interface to setting the hardware in a good | ||
14645 | * state is ``reset''. On return the hardware is known to | ||
14646 | @@ -2379,6 +2376,10 @@ ath5k_init(struct ath5k_softc *sc) | ||
14647 | |||
14648 | /* Set ack to be sent at low bit-rates */ | ||
14649 | ath5k_hw_set_ack_bitrate_high(ah, false); | ||
14650 | + | ||
14651 | + /* Set PHY calibration inteval */ | ||
14652 | + ah->ah_cal_intval = ath5k_calinterval; | ||
14653 | + | ||
14654 | ret = 0; | ||
14655 | done: | ||
14656 | mmiowb(); | ||
14657 | diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c | ||
14658 | index 9a96550..644962a 100644 | ||
14659 | --- a/drivers/net/wireless/ath/ath5k/eeprom.c | ||
14660 | +++ b/drivers/net/wireless/ath/ath5k/eeprom.c | ||
14661 | @@ -97,7 +97,6 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah) | ||
14662 | struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; | ||
14663 | int ret; | ||
14664 | u16 val; | ||
14665 | - u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; | ||
14666 | |||
14667 | /* | ||
14668 | * Read values from EEPROM and store them in the capability structure | ||
14669 | @@ -112,44 +111,20 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah) | ||
14670 | if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0) | ||
14671 | return 0; | ||
14672 | |||
14673 | +#ifdef notyet | ||
14674 | /* | ||
14675 | * Validate the checksum of the EEPROM date. There are some | ||
14676 | * devices with invalid EEPROMs. | ||
14677 | */ | ||
14678 | - AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val); | ||
14679 | - if (val) { | ||
14680 | - eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << | ||
14681 | - AR5K_EEPROM_SIZE_ENDLOC_SHIFT; | ||
14682 | - AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val); | ||
14683 | - eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE; | ||
14684 | - | ||
14685 | - /* | ||
14686 | - * Fail safe check to prevent stupid loops due | ||
14687 | - * to busted EEPROMs. XXX: This value is likely too | ||
14688 | - * big still, waiting on a better value. | ||
14689 | - */ | ||
14690 | - if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) { | ||
14691 | - ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: " | ||
14692 | - "%d (0x%04x) max expected: %d (0x%04x)\n", | ||
14693 | - eep_max, eep_max, | ||
14694 | - 3 * AR5K_EEPROM_INFO_MAX, | ||
14695 | - 3 * AR5K_EEPROM_INFO_MAX); | ||
14696 | - return -EIO; | ||
14697 | - } | ||
14698 | - } | ||
14699 | - | ||
14700 | - for (cksum = 0, offset = 0; offset < eep_max; offset++) { | ||
14701 | + for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) { | ||
14702 | AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); | ||
14703 | cksum ^= val; | ||
14704 | } | ||
14705 | if (cksum != AR5K_EEPROM_INFO_CKSUM) { | ||
14706 | - ATH5K_ERR(ah->ah_sc, "Invalid EEPROM " | ||
14707 | - "checksum: 0x%04x eep_max: 0x%04x (%s)\n", | ||
14708 | - cksum, eep_max, | ||
14709 | - eep_max == AR5K_EEPROM_INFO_MAX ? | ||
14710 | - "default size" : "custom size"); | ||
14711 | + ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum); | ||
14712 | return -EIO; | ||
14713 | } | ||
14714 | +#endif | ||
14715 | |||
14716 | AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version), | ||
14717 | ee_ant_gain); | ||
14718 | diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h | ||
14719 | index 473a483..0123f35 100644 | ||
14720 | --- a/drivers/net/wireless/ath/ath5k/eeprom.h | ||
14721 | +++ b/drivers/net/wireless/ath/ath5k/eeprom.h | ||
14722 | @@ -37,14 +37,6 @@ | ||
14723 | #define AR5K_EEPROM_RFKILL_POLARITY_S 1 | ||
14724 | |||
14725 | #define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ | ||
14726 | - | ||
14727 | -/* FLASH(EEPROM) Defines for AR531X chips */ | ||
14728 | -#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */ | ||
14729 | -#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */ | ||
14730 | -#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0 | ||
14731 | -#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4 | ||
14732 | -#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12 | ||
14733 | - | ||
14734 | #define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */ | ||
14735 | #define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ | ||
14736 | #define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE) | ||
14737 | diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c | ||
14738 | index 9d67647..1a039f2 100644 | ||
14739 | --- a/drivers/net/wireless/ath/ath5k/phy.c | ||
14740 | +++ b/drivers/net/wireless/ath/ath5k/phy.c | ||
14741 | @@ -2954,6 +2954,8 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, | ||
14742 | ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); | ||
14743 | return -EINVAL; | ||
14744 | } | ||
14745 | + if (txpower == 0) | ||
14746 | + txpower = AR5K_TUNE_DEFAULT_TXPOWER; | ||
14747 | |||
14748 | /* Reset TX power values */ | ||
14749 | memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); | ||
14750 | diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h | ||
14751 | index cdb90c5..1d59f10 100644 | ||
14752 | --- a/drivers/net/wireless/ath/ath9k/ath9k.h | ||
14753 | +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | ||
14754 | @@ -139,7 +139,6 @@ struct ath_buf { | ||
14755 | dma_addr_t bf_daddr; /* physical addr of desc */ | ||
14756 | dma_addr_t bf_buf_addr; /* physical addr of data buffer */ | ||
14757 | bool bf_stale; | ||
14758 | - bool bf_isnullfunc; | ||
14759 | u16 bf_flags; | ||
14760 | struct ath_buf_state bf_state; | ||
14761 | dma_addr_t bf_dmacontext; | ||
14762 | @@ -525,8 +524,6 @@ struct ath_led { | ||
14763 | #define SC_OP_BEACON_SYNC BIT(19) | ||
14764 | #define SC_OP_BTCOEX_ENABLED BIT(20) | ||
14765 | #define SC_OP_BT_PRIORITY_DETECTED BIT(21) | ||
14766 | -#define SC_OP_NULLFUNC_COMPLETED BIT(22) | ||
14767 | -#define SC_OP_PS_ENABLED BIT(23) | ||
14768 | |||
14769 | struct ath_bus_ops { | ||
14770 | void (*read_cachesize)(struct ath_softc *sc, int *csz); | ||
14771 | diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c | ||
14772 | index 0905b38..ca7694c 100644 | ||
14773 | --- a/drivers/net/wireless/ath/ath9k/hw.c | ||
14774 | +++ b/drivers/net/wireless/ath/ath9k/hw.c | ||
14775 | @@ -880,11 +880,12 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah) | ||
14776 | } | ||
14777 | } | ||
14778 | |||
14779 | -static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) | ||
14780 | +static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah) | ||
14781 | { | ||
14782 | u32 i, j; | ||
14783 | |||
14784 | - if (ah->hw_version.devid == AR9280_DEVID_PCI) { | ||
14785 | + if ((ah->hw_version.devid == AR9280_DEVID_PCI) && | ||
14786 | + test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) { | ||
14787 | |||
14788 | /* EEPROM Fixup */ | ||
14789 | for (i = 0; i < ah->iniModes.ia_rows; i++) { | ||
14790 | @@ -936,11 +937,6 @@ int ath9k_hw_init(struct ath_hw *ah) | ||
14791 | DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n", | ||
14792 | ah->config.serialize_regmode); | ||
14793 | |||
14794 | - if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) | ||
14795 | - ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1; | ||
14796 | - else | ||
14797 | - ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; | ||
14798 | - | ||
14799 | if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { | ||
14800 | DPRINTF(ah->ah_sc, ATH_DBG_FATAL, | ||
14801 | "Mac Chip Rev 0x%02x.%x is not supported by " | ||
14802 | @@ -979,7 +975,7 @@ int ath9k_hw_init(struct ath_hw *ah) | ||
14803 | |||
14804 | ath9k_hw_init_mode_gain_regs(ah); | ||
14805 | ath9k_hw_fill_cap_info(ah); | ||
14806 | - ath9k_hw_init_eeprom_fix(ah); | ||
14807 | + ath9k_hw_init_11a_eeprom_fix(ah); | ||
14808 | |||
14809 | r = ath9k_hw_init_macaddr(ah); | ||
14810 | if (r) { | ||
14811 | @@ -3674,11 +3670,7 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah) | ||
14812 | pCap->keycache_size = AR_KEYTABLE_SIZE; | ||
14813 | |||
14814 | pCap->hw_caps |= ATH9K_HW_CAP_FASTCC; | ||
14815 | - | ||
14816 | - if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) | ||
14817 | - pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD >> 1; | ||
14818 | - else | ||
14819 | - pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; | ||
14820 | + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; | ||
14821 | |||
14822 | if (AR_SREV_9285_10_OR_LATER(ah)) | ||
14823 | pCap->num_gpio_pins = AR9285_NUM_GPIO; | ||
14824 | diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h | ||
14825 | index ff4383b..b892345 100644 | ||
14826 | --- a/drivers/net/wireless/ath/ath9k/hw.h | ||
14827 | +++ b/drivers/net/wireless/ath/ath9k/hw.h | ||
14828 | @@ -218,7 +218,6 @@ struct ath9k_ops_config { | ||
14829 | #define AR_SPUR_FEEQ_BOUND_HT20 10 | ||
14830 | int spurmode; | ||
14831 | u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; | ||
14832 | - u8 max_txtrig_level; | ||
14833 | }; | ||
14834 | |||
14835 | enum ath9k_int { | ||
14836 | @@ -408,7 +407,7 @@ struct ath9k_hw_version { | ||
14837 | * Using de Bruijin sequence to to look up 1's index in a 32 bit number | ||
14838 | * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 | ||
14839 | */ | ||
14840 | -#define debruijn32 0x077CB531U | ||
14841 | +#define debruijn32 0x077CB531UL | ||
14842 | |||
14843 | struct ath_gen_timer_configuration { | ||
14844 | u32 next_addr; | ||
14845 | diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c | ||
14846 | index 110c16d..800bfab 100644 | ||
14847 | --- a/drivers/net/wireless/ath/ath9k/mac.c | ||
14848 | +++ b/drivers/net/wireless/ath/ath9k/mac.c | ||
14849 | @@ -70,7 +70,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) | ||
14850 | u32 txcfg, curLevel, newLevel; | ||
14851 | enum ath9k_int omask; | ||
14852 | |||
14853 | - if (ah->tx_trig_level >= ah->config.max_txtrig_level) | ||
14854 | + if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD) | ||
14855 | return false; | ||
14856 | |||
14857 | omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL); | ||
14858 | @@ -79,7 +79,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) | ||
14859 | curLevel = MS(txcfg, AR_FTRIG); | ||
14860 | newLevel = curLevel; | ||
14861 | if (bIncTrigLevel) { | ||
14862 | - if (curLevel < ah->config.max_txtrig_level) | ||
14863 | + if (curLevel < MAX_TX_FIFO_THRESHOLD) | ||
14864 | newLevel++; | ||
14865 | } else if (curLevel > MIN_TX_FIFO_THRESHOLD) | ||
14866 | newLevel--; | ||
14867 | @@ -155,7 +155,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) | ||
14868 | wait = wait_time; | ||
14869 | while (ath9k_hw_numtxpending(ah, q)) { | ||
14870 | if ((--wait) == 0) { | ||
14871 | - DPRINTF(ah->ah_sc, ATH_DBG_FATAL, | ||
14872 | + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, | ||
14873 | "Failed to stop TX DMA in 100 " | ||
14874 | "msec after killing last frame\n"); | ||
14875 | break; | ||
14876 | @@ -222,8 +222,6 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) | ||
14877 | ds->ds_txstat.ts_status = 0; | ||
14878 | ds->ds_txstat.ts_flags = 0; | ||
14879 | |||
14880 | - if (ads->ds_txstatus1 & AR_FrmXmitOK) | ||
14881 | - ds->ds_txstat.ts_status |= ATH9K_TX_ACKED; | ||
14882 | if (ads->ds_txstatus1 & AR_ExcessiveRetries) | ||
14883 | ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY; | ||
14884 | if (ads->ds_txstatus1 & AR_Filtered) | ||
14885 | diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h | ||
14886 | index 9720c4d..f56e77d 100644 | ||
14887 | --- a/drivers/net/wireless/ath/ath9k/mac.h | ||
14888 | +++ b/drivers/net/wireless/ath/ath9k/mac.h | ||
14889 | @@ -76,10 +76,6 @@ | ||
14890 | #define ATH9K_TXERR_FIFO 0x04 | ||
14891 | #define ATH9K_TXERR_XTXOP 0x08 | ||
14892 | #define ATH9K_TXERR_TIMER_EXPIRED 0x10 | ||
14893 | -#define ATH9K_TX_ACKED 0x20 | ||
14894 | -#define ATH9K_TXERR_MASK \ | ||
14895 | - (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO | \ | ||
14896 | - ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED) | ||
14897 | |||
14898 | #define ATH9K_TX_BA 0x01 | ||
14899 | #define ATH9K_TX_PWRMGMT 0x02 | ||
14900 | diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c | ||
14901 | index 5864eaa..43d2be9 100644 | ||
14902 | --- a/drivers/net/wireless/ath/ath9k/main.c | ||
14903 | +++ b/drivers/net/wireless/ath/ath9k/main.c | ||
14904 | @@ -2147,9 +2147,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | ||
14905 | return; /* another wiphy still in use */ | ||
14906 | } | ||
14907 | |||
14908 | - /* Ensure HW is awake when we try to shut it down. */ | ||
14909 | - ath9k_ps_wakeup(sc); | ||
14910 | - | ||
14911 | if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) { | ||
14912 | ath9k_hw_btcoex_disable(sc->sc_ah); | ||
14913 | if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) | ||
14914 | @@ -2170,9 +2167,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | ||
14915 | /* disable HAL and put h/w to sleep */ | ||
14916 | ath9k_hw_disable(sc->sc_ah); | ||
14917 | ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1); | ||
14918 | - ath9k_ps_restore(sc); | ||
14919 | - | ||
14920 | - /* Finally, put the chip in FULL SLEEP mode */ | ||
14921 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); | ||
14922 | |||
14923 | sc->sc_flags |= SC_OP_INVALID; | ||
14924 | @@ -2283,12 +2277,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, | ||
14925 | if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || | ||
14926 | (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || | ||
14927 | (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { | ||
14928 | - ath9k_ps_wakeup(sc); | ||
14929 | ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); | ||
14930 | - ath9k_ps_restore(sc); | ||
14931 | + ath_beacon_return(sc, avp); | ||
14932 | } | ||
14933 | |||
14934 | - ath_beacon_return(sc, avp); | ||
14935 | sc->sc_flags &= ~SC_OP_BEACONS; | ||
14936 | |||
14937 | for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { | ||
14938 | @@ -2335,7 +2327,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | ||
14939 | |||
14940 | if (changed & IEEE80211_CONF_CHANGE_PS) { | ||
14941 | if (conf->flags & IEEE80211_CONF_PS) { | ||
14942 | - sc->sc_flags |= SC_OP_PS_ENABLED; | ||
14943 | if (!(ah->caps.hw_caps & | ||
14944 | ATH9K_HW_CAP_AUTOSLEEP)) { | ||
14945 | if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { | ||
14946 | @@ -2343,17 +2334,11 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) | ||
14947 | ath9k_hw_set_interrupts(sc->sc_ah, | ||
14948 | sc->imask); | ||
14949 | } | ||
14950 | - } | ||
14951 | - sc->ps_enabled = true; | ||
14952 | - if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) { | ||
14953 | - sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; | ||
14954 | - sc->ps_enabled = true; | ||
14955 | ath9k_hw_setrxabort(sc->sc_ah, 1); | ||
14956 | } | ||
14957 | + sc->ps_enabled = true; | ||
14958 | } else { | ||
14959 | sc->ps_enabled = false; | ||
14960 | - sc->sc_flags &= ~(SC_OP_PS_ENABLED | | ||
14961 | - SC_OP_NULLFUNC_COMPLETED); | ||
14962 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); | ||
14963 | if (!(ah->caps.hw_caps & | ||
14964 | ATH9K_HW_CAP_AUTOSLEEP)) { | ||
14965 | @@ -2732,21 +2717,15 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, | ||
14966 | case IEEE80211_AMPDU_RX_STOP: | ||
14967 | break; | ||
14968 | case IEEE80211_AMPDU_TX_START: | ||
14969 | - ath9k_ps_wakeup(sc); | ||
14970 | ath_tx_aggr_start(sc, sta, tid, ssn); | ||
14971 | ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); | ||
14972 | - ath9k_ps_restore(sc); | ||
14973 | break; | ||
14974 | case IEEE80211_AMPDU_TX_STOP: | ||
14975 | - ath9k_ps_wakeup(sc); | ||
14976 | ath_tx_aggr_stop(sc, sta, tid); | ||
14977 | ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); | ||
14978 | - ath9k_ps_restore(sc); | ||
14979 | break; | ||
14980 | case IEEE80211_AMPDU_TX_OPERATIONAL: | ||
14981 | - ath9k_ps_wakeup(sc); | ||
14982 | ath_tx_aggr_resume(sc, sta, tid); | ||
14983 | - ath9k_ps_restore(sc); | ||
14984 | break; | ||
14985 | default: | ||
14986 | DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n"); | ||
14987 | diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h | ||
14988 | index c0d7e65..d83b77f 100644 | ||
14989 | --- a/drivers/net/wireless/ath/ath9k/reg.h | ||
14990 | +++ b/drivers/net/wireless/ath/ath9k/reg.h | ||
14991 | @@ -969,10 +969,10 @@ enum { | ||
14992 | #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4 | ||
14993 | #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 | ||
14994 | #define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 | ||
14995 | -#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400 | ||
14996 | -#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10 | ||
14997 | #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000 | ||
14998 | #define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12 | ||
14999 | +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000 | ||
15000 | +#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1 | ||
15001 | #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 | ||
15002 | #define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 | ||
15003 | #define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 | ||
15004 | diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c | ||
15005 | index 9009bac..42551a4 100644 | ||
15006 | --- a/drivers/net/wireless/ath/ath9k/xmit.c | ||
15007 | +++ b/drivers/net/wireless/ath/ath9k/xmit.c | ||
15008 | @@ -1076,10 +1076,10 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) | ||
15009 | if (npend) { | ||
15010 | int r; | ||
15011 | |||
15012 | - DPRINTF(sc, ATH_DBG_FATAL, "Unable to stop TxDMA. Reset HAL!\n"); | ||
15013 | + DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); | ||
15014 | |||
15015 | spin_lock_bh(&sc->sc_resetlock); | ||
15016 | - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); | ||
15017 | + r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); | ||
15018 | if (r) | ||
15019 | DPRINTF(sc, ATH_DBG_FATAL, | ||
15020 | "Unable to reset hardware; reset status %d\n", | ||
15021 | @@ -1563,7 +1563,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, | ||
15022 | |||
15023 | bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); | ||
15024 | |||
15025 | - if (conf_is_ht(&sc->hw->conf)) | ||
15026 | + if (conf_is_ht(&sc->hw->conf) && !is_pae(skb)) | ||
15027 | bf->bf_state.bf_type |= BUF_HT; | ||
15028 | |||
15029 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | ||
15030 | @@ -1592,13 +1592,6 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, | ||
15031 | } | ||
15032 | |||
15033 | bf->bf_buf_addr = bf->bf_dmacontext; | ||
15034 | - | ||
15035 | - if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) { | ||
15036 | - bf->bf_isnullfunc = true; | ||
15037 | - sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED; | ||
15038 | - } else | ||
15039 | - bf->bf_isnullfunc = false; | ||
15040 | - | ||
15041 | return 0; | ||
15042 | } | ||
15043 | |||
15044 | @@ -1648,7 +1641,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | ||
15045 | goto tx_done; | ||
15046 | } | ||
15047 | |||
15048 | - if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) { | ||
15049 | + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { | ||
15050 | /* | ||
15051 | * Try aggregation if it's a unicast data frame | ||
15052 | * and the destination is HT capable. | ||
15053 | @@ -1996,15 +1989,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | ||
15054 | if (ds == txq->axq_gatingds) | ||
15055 | txq->axq_gatingds = NULL; | ||
15056 | |||
15057 | - if (bf->bf_isnullfunc && | ||
15058 | - (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { | ||
15059 | - if ((sc->sc_flags & SC_OP_PS_ENABLED)) { | ||
15060 | - sc->ps_enabled = true; | ||
15061 | - ath9k_hw_setrxabort(sc->sc_ah, 1); | ||
15062 | - } else | ||
15063 | - sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED; | ||
15064 | - } | ||
15065 | - | ||
15066 | /* | ||
15067 | * Remove ath_buf's of the same transmit unit from txq, | ||
15068 | * however leave the last descriptor back as the holding | ||
15069 | @@ -2020,7 +2004,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | ||
15070 | if (bf_isaggr(bf)) | ||
15071 | txq->axq_aggr_depth--; | ||
15072 | |||
15073 | - txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); | ||
15074 | + txok = (ds->ds_txstat.ts_status == 0); | ||
15075 | txq->axq_tx_inprogress = false; | ||
15076 | spin_unlock_bh(&txq->axq_lock); | ||
15077 | |||
15078 | @@ -2081,9 +2065,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work) | ||
15079 | |||
15080 | if (needreset) { | ||
15081 | DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n"); | ||
15082 | - ath9k_ps_wakeup(sc); | ||
15083 | ath_reset(sc, false); | ||
15084 | - ath9k_ps_restore(sc); | ||
15085 | } | ||
15086 | |||
15087 | ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, | ||
15088 | diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h | ||
15089 | index 0e6b154..6607162 100644 | ||
15090 | --- a/drivers/net/wireless/b43/b43.h | ||
15091 | +++ b/drivers/net/wireless/b43/b43.h | ||
15092 | @@ -117,7 +117,6 @@ | ||
15093 | #define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ | ||
15094 | #define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ | ||
15095 | #define B43_MMIO_RNG 0x65A | ||
15096 | -#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */ | ||
15097 | #define B43_MMIO_IFSCTL 0x688 /* Interframe space control */ | ||
15098 | #define B43_MMIO_IFSCTL_USE_EDCF 0x0004 | ||
15099 | #define B43_MMIO_POWERUP_DELAY 0x6A8 | ||
15100 | diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c | ||
15101 | index 9ca253e..098dda1 100644 | ||
15102 | --- a/drivers/net/wireless/b43/main.c | ||
15103 | +++ b/drivers/net/wireless/b43/main.c | ||
15104 | @@ -628,17 +628,10 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev) | ||
15105 | static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) | ||
15106 | { | ||
15107 | /* slot_time is in usec. */ | ||
15108 | - /* This test used to exit for all but a G PHY. */ | ||
15109 | - if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) | ||
15110 | + if (dev->phy.type != B43_PHYTYPE_G) | ||
15111 | return; | ||
15112 | - b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); | ||
15113 | - /* Shared memory location 0x0010 is the slot time and should be | ||
15114 | - * set to slot_time; however, this register is initially 0 and changing | ||
15115 | - * the value adversely affects the transmit rate for BCM4311 | ||
15116 | - * devices. Until this behavior is unterstood, delete this step | ||
15117 | - * | ||
15118 | - * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | ||
15119 | - */ | ||
15120 | + b43_write16(dev, 0x684, 510 + slot_time); | ||
15121 | + b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time); | ||
15122 | } | ||
15123 | |||
15124 | static void b43_short_slot_timing_enable(struct b43_wldev *dev) | ||
15125 | diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c | ||
15126 | index 78016ae..ffdce6f 100644 | ||
15127 | --- a/drivers/net/wireless/b43/rfkill.c | ||
15128 | +++ b/drivers/net/wireless/b43/rfkill.c | ||
15129 | @@ -33,14 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev) | ||
15130 | & B43_MMIO_RADIO_HWENABLED_HI_MASK)) | ||
15131 | return 1; | ||
15132 | } else { | ||
15133 | - /* To prevent CPU fault on PPC, do not read a register | ||
15134 | - * unless the interface is started; however, on resume | ||
15135 | - * for hibernation, this routine is entered early. When | ||
15136 | - * that happens, unconditionally return TRUE. | ||
15137 | - */ | ||
15138 | - if (b43_status(dev) < B43_STAT_STARTED) | ||
15139 | - return 1; | ||
15140 | - if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) | ||
15141 | + if (b43_status(dev) >= B43_STAT_STARTED && | ||
15142 | + b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) | ||
15143 | & B43_MMIO_RADIO_HWENABLED_LO_MASK) | ||
15144 | return 1; | ||
15145 | } | ||
15146 | diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c | ||
15147 | index d579df7..8783022 100644 | ||
15148 | --- a/drivers/net/wireless/b43legacy/rfkill.c | ||
15149 | +++ b/drivers/net/wireless/b43legacy/rfkill.c | ||
15150 | @@ -34,13 +34,6 @@ bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) | ||
15151 | & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) | ||
15152 | return 1; | ||
15153 | } else { | ||
15154 | - /* To prevent CPU fault on PPC, do not read a register | ||
15155 | - * unless the interface is started; however, on resume | ||
15156 | - * for hibernation, this routine is entered early. When | ||
15157 | - * that happens, unconditionally return TRUE. | ||
15158 | - */ | ||
15159 | - if (b43legacy_status(dev) < B43legacy_STAT_STARTED) | ||
15160 | - return 1; | ||
15161 | if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO) | ||
15162 | & B43legacy_MMIO_RADIO_HWENABLED_LO_MASK) | ||
15163 | return 1; | ||
15164 | diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c | ||
15165 | index 43102bf..6e2fc0c 100644 | ||
15166 | --- a/drivers/net/wireless/ipw2x00/ipw2100.c | ||
15167 | +++ b/drivers/net/wireless/ipw2x00/ipw2100.c | ||
15168 | @@ -6487,16 +6487,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev) | ||
15169 | } | ||
15170 | #endif | ||
15171 | |||
15172 | -static void ipw2100_shutdown(struct pci_dev *pci_dev) | ||
15173 | -{ | ||
15174 | - struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); | ||
15175 | - | ||
15176 | - /* Take down the device; powers it off, etc. */ | ||
15177 | - ipw2100_down(priv); | ||
15178 | - | ||
15179 | - pci_disable_device(pci_dev); | ||
15180 | -} | ||
15181 | - | ||
15182 | #define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x } | ||
15183 | |||
15184 | static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = { | ||
15185 | @@ -6560,7 +6550,6 @@ static struct pci_driver ipw2100_pci_driver = { | ||
15186 | .suspend = ipw2100_suspend, | ||
15187 | .resume = ipw2100_resume, | ||
15188 | #endif | ||
15189 | - .shutdown = ipw2100_shutdown, | ||
15190 | }; | ||
15191 | |||
15192 | /** | ||
15193 | diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c | ||
15194 | index 9d60f6c..f059b49 100644 | ||
15195 | --- a/drivers/net/wireless/iwlwifi/iwl-3945.c | ||
15196 | +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | ||
15197 | @@ -2895,7 +2895,6 @@ static struct iwl_cfg iwl3945_bg_cfg = { | ||
15198 | .mod_params = &iwl3945_mod_params, | ||
15199 | .use_isr_legacy = true, | ||
15200 | .ht_greenfield_support = false, | ||
15201 | - .broken_powersave = true, | ||
15202 | }; | ||
15203 | |||
15204 | static struct iwl_cfg iwl3945_abg_cfg = { | ||
15205 | @@ -2910,7 +2909,6 @@ static struct iwl_cfg iwl3945_abg_cfg = { | ||
15206 | .mod_params = &iwl3945_mod_params, | ||
15207 | .use_isr_legacy = true, | ||
15208 | .ht_greenfield_support = false, | ||
15209 | - .broken_powersave = true, | ||
15210 | }; | ||
15211 | |||
15212 | struct pci_device_id iwl3945_hw_card_ids[] = { | ||
15213 | diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c | ||
15214 | index 99331ed..6f703a0 100644 | ||
15215 | --- a/drivers/net/wireless/iwlwifi/iwl-4965.c | ||
15216 | +++ b/drivers/net/wireless/iwlwifi/iwl-4965.c | ||
15217 | @@ -1337,7 +1337,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, | ||
15218 | iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info); | ||
15219 | |||
15220 | /* calculate tx gain adjustment based on power supply voltage */ | ||
15221 | - voltage = le16_to_cpu(priv->calib_info->voltage); | ||
15222 | + voltage = priv->calib_info->voltage; | ||
15223 | init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage); | ||
15224 | voltage_compensation = | ||
15225 | iwl4965_get_voltage_compensation(voltage, init_voltage); | ||
15226 | @@ -2087,7 +2087,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | ||
15227 | struct ieee80211_tx_info *info; | ||
15228 | struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
15229 | u32 status = le32_to_cpu(tx_resp->u.status); | ||
15230 | - int tid = MAX_TID_COUNT - 1; | ||
15231 | + int tid = MAX_TID_COUNT; | ||
15232 | int sta_id; | ||
15233 | int freed; | ||
15234 | u8 *qc = NULL; | ||
15235 | diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h | ||
15236 | index bc056e9..4ef6804 100644 | ||
15237 | --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h | ||
15238 | +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h | ||
15239 | @@ -92,15 +92,11 @@ | ||
15240 | |||
15241 | static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv) | ||
15242 | { | ||
15243 | - u16 temperature, voltage; | ||
15244 | - __le16 *temp_calib = | ||
15245 | - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE); | ||
15246 | - | ||
15247 | - temperature = le16_to_cpu(temp_calib[0]); | ||
15248 | - voltage = le16_to_cpu(temp_calib[1]); | ||
15249 | - | ||
15250 | - /* offset = temp - volt / coeff */ | ||
15251 | - return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); | ||
15252 | + u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv, | ||
15253 | + EEPROM_5000_TEMPERATURE); | ||
15254 | + /* offset = temperature - voltage / coef */ | ||
15255 | + s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); | ||
15256 | + return offset; | ||
15257 | } | ||
15258 | |||
15259 | /* Fixed (non-configurable) rx data from phy */ | ||
15260 | diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c | ||
15261 | index 133df70..6e6f516 100644 | ||
15262 | --- a/drivers/net/wireless/iwlwifi/iwl-5000.c | ||
15263 | +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | ||
15264 | @@ -460,15 +460,14 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv) | ||
15265 | static int iwl5000_set_Xtal_calib(struct iwl_priv *priv) | ||
15266 | { | ||
15267 | struct iwl_calib_xtal_freq_cmd cmd; | ||
15268 | - __le16 *xtal_calib = | ||
15269 | - (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
15270 | + u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); | ||
15271 | |||
15272 | cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD; | ||
15273 | cmd.hdr.first_group = 0; | ||
15274 | cmd.hdr.groups_num = 1; | ||
15275 | cmd.hdr.data_valid = 1; | ||
15276 | - cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); | ||
15277 | - cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]); | ||
15278 | + cmd.cap_pin1 = (u8)xtal_calib[0]; | ||
15279 | + cmd.cap_pin2 = (u8)xtal_calib[1]; | ||
15280 | return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL], | ||
15281 | (u8 *)&cmd, sizeof(cmd)); | ||
15282 | } | ||
15283 | @@ -1666,7 +1665,6 @@ struct iwl_cfg iwl5300_agn_cfg = { | ||
15284 | .valid_rx_ant = ANT_ABC, | ||
15285 | .need_pll_cfg = true, | ||
15286 | .ht_greenfield_support = true, | ||
15287 | - .use_rts_for_ht = true, /* use rts/cts protection */ | ||
15288 | }; | ||
15289 | |||
15290 | struct iwl_cfg iwl5100_bg_cfg = { | ||
15291 | @@ -1718,7 +1716,6 @@ struct iwl_cfg iwl5100_agn_cfg = { | ||
15292 | .valid_rx_ant = ANT_AB, | ||
15293 | .need_pll_cfg = true, | ||
15294 | .ht_greenfield_support = true, | ||
15295 | - .use_rts_for_ht = true, /* use rts/cts protection */ | ||
15296 | }; | ||
15297 | |||
15298 | struct iwl_cfg iwl5350_agn_cfg = { | ||
15299 | @@ -1736,7 +1733,6 @@ struct iwl_cfg iwl5350_agn_cfg = { | ||
15300 | .valid_rx_ant = ANT_ABC, | ||
15301 | .need_pll_cfg = true, | ||
15302 | .ht_greenfield_support = true, | ||
15303 | - .use_rts_for_ht = true, /* use rts/cts protection */ | ||
15304 | }; | ||
15305 | |||
15306 | struct iwl_cfg iwl5150_agn_cfg = { | ||
15307 | @@ -1754,7 +1750,6 @@ struct iwl_cfg iwl5150_agn_cfg = { | ||
15308 | .valid_rx_ant = ANT_AB, | ||
15309 | .need_pll_cfg = true, | ||
15310 | .ht_greenfield_support = true, | ||
15311 | - .use_rts_for_ht = true, /* use rts/cts protection */ | ||
15312 | }; | ||
15313 | |||
15314 | MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); | ||
15315 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | ||
15316 | index 0eb2591..81726ee 100644 | ||
15317 | --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | ||
15318 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c | ||
15319 | @@ -2808,7 +2808,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, | ||
15320 | repeat_rate--; | ||
15321 | } | ||
15322 | |||
15323 | - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; | ||
15324 | + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX; | ||
15325 | lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; | ||
15326 | lq_cmd->agg_params.agg_time_limit = | ||
15327 | cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); | ||
15328 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c | ||
15329 | index 0cd4ec4..2dc9287 100644 | ||
15330 | --- a/drivers/net/wireless/iwlwifi/iwl-core.c | ||
15331 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | ||
15332 | @@ -2645,7 +2645,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) | ||
15333 | if ((le16_to_cpu(priv->staging_rxon.channel) != ch)) | ||
15334 | priv->staging_rxon.flags = 0; | ||
15335 | |||
15336 | - iwl_set_rxon_ht(priv, ht_conf); | ||
15337 | iwl_set_rxon_channel(priv, conf->channel); | ||
15338 | |||
15339 | iwl_set_flags_for_band(priv, conf->channel->band); | ||
15340 | diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h | ||
15341 | index cea2ee2..028d505 100644 | ||
15342 | --- a/drivers/net/wireless/iwlwifi/iwl-dev.h | ||
15343 | +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h | ||
15344 | @@ -703,7 +703,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv); | ||
15345 | extern int iwl_queue_space(const struct iwl_queue *q); | ||
15346 | static inline int iwl_queue_used(const struct iwl_queue *q, int i) | ||
15347 | { | ||
15348 | - return q->write_ptr >= q->read_ptr ? | ||
15349 | + return q->write_ptr > q->read_ptr ? | ||
15350 | (i >= q->read_ptr && i < q->write_ptr) : | ||
15351 | !(i < q->read_ptr && i >= q->write_ptr); | ||
15352 | } | ||
15353 | @@ -1149,7 +1149,7 @@ struct iwl_priv { | ||
15354 | u32 last_beacon_time; | ||
15355 | u64 last_tsf; | ||
15356 | |||
15357 | - /* eeprom -- this is in the card's little endian byte order */ | ||
15358 | + /* eeprom */ | ||
15359 | u8 *eeprom; | ||
15360 | int nvm_device_type; | ||
15361 | struct iwl_eeprom_calib_info *calib_info; | ||
15362 | diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | ||
15363 | index 18dc3a4..e14c995 100644 | ||
15364 | --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c | ||
15365 | +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c | ||
15366 | @@ -337,7 +337,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv) | ||
15367 | return ret; | ||
15368 | } | ||
15369 | |||
15370 | -static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data) | ||
15371 | +static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data) | ||
15372 | { | ||
15373 | int ret = 0; | ||
15374 | u32 r; | ||
15375 | @@ -370,7 +370,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat | ||
15376 | CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); | ||
15377 | IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); | ||
15378 | } | ||
15379 | - *eeprom_data = cpu_to_le16(r >> 16); | ||
15380 | + *eeprom_data = le16_to_cpu((__force __le16)(r >> 16)); | ||
15381 | return 0; | ||
15382 | } | ||
15383 | |||
15384 | @@ -379,8 +379,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat | ||
15385 | */ | ||
15386 | static bool iwl_is_otp_empty(struct iwl_priv *priv) | ||
15387 | { | ||
15388 | - u16 next_link_addr = 0; | ||
15389 | - __le16 link_value; | ||
15390 | + u16 next_link_addr = 0, link_value; | ||
15391 | bool is_empty = false; | ||
15392 | |||
15393 | /* locate the beginning of OTP link list */ | ||
15394 | @@ -410,8 +409,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv) | ||
15395 | static int iwl_find_otp_image(struct iwl_priv *priv, | ||
15396 | u16 *validblockaddr) | ||
15397 | { | ||
15398 | - u16 next_link_addr = 0, valid_addr; | ||
15399 | - __le16 link_value = 0; | ||
15400 | + u16 next_link_addr = 0, link_value = 0, valid_addr; | ||
15401 | int usedblocks = 0; | ||
15402 | |||
15403 | /* set addressing mode to absolute to traverse the link list */ | ||
15404 | @@ -431,7 +429,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | ||
15405 | * check for more block on the link list | ||
15406 | */ | ||
15407 | valid_addr = next_link_addr; | ||
15408 | - next_link_addr = le16_to_cpu(link_value) * sizeof(u16); | ||
15409 | + next_link_addr = link_value * sizeof(u16); | ||
15410 | IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", | ||
15411 | usedblocks, next_link_addr); | ||
15412 | if (iwl_read_otp_word(priv, next_link_addr, &link_value)) | ||
15413 | @@ -465,7 +463,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv, | ||
15414 | */ | ||
15415 | int iwl_eeprom_init(struct iwl_priv *priv) | ||
15416 | { | ||
15417 | - __le16 *e; | ||
15418 | + u16 *e; | ||
15419 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); | ||
15420 | int sz; | ||
15421 | int ret; | ||
15422 | @@ -484,7 +482,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | ||
15423 | ret = -ENOMEM; | ||
15424 | goto alloc_err; | ||
15425 | } | ||
15426 | - e = (__le16 *)priv->eeprom; | ||
15427 | + e = (u16 *)priv->eeprom; | ||
15428 | |||
15429 | ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); | ||
15430 | if (ret < 0) { | ||
15431 | @@ -523,7 +521,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | ||
15432 | } | ||
15433 | for (addr = validblockaddr; addr < validblockaddr + sz; | ||
15434 | addr += sizeof(u16)) { | ||
15435 | - __le16 eeprom_data; | ||
15436 | + u16 eeprom_data; | ||
15437 | |||
15438 | ret = iwl_read_otp_word(priv, addr, &eeprom_data); | ||
15439 | if (ret) | ||
15440 | @@ -547,7 +545,7 @@ int iwl_eeprom_init(struct iwl_priv *priv) | ||
15441 | goto done; | ||
15442 | } | ||
15443 | r = _iwl_read_direct32(priv, CSR_EEPROM_REG); | ||
15444 | - e[addr / 2] = cpu_to_le16(r >> 16); | ||
15445 | + e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); | ||
15446 | } | ||
15447 | } | ||
15448 | ret = 0; | ||
15449 | @@ -711,8 +709,7 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv, | ||
15450 | ch_info->ht40_min_power = 0; | ||
15451 | ch_info->ht40_scan_power = eeprom_ch->max_power_avg; | ||
15452 | ch_info->ht40_flags = eeprom_ch->flags; | ||
15453 | - if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) | ||
15454 | - ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; | ||
15455 | + ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; | ||
15456 | |||
15457 | return 0; | ||
15458 | } | ||
15459 | diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | ||
15460 | index fc93f12..80b9e45 100644 | ||
15461 | --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h | ||
15462 | +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | ||
15463 | @@ -133,7 +133,7 @@ struct iwl_eeprom_channel { | ||
15464 | * | ||
15465 | */ | ||
15466 | struct iwl_eeprom_enhanced_txpwr { | ||
15467 | - __le16 common; | ||
15468 | + u16 reserved; | ||
15469 | s8 chain_a_max; | ||
15470 | s8 chain_b_max; | ||
15471 | s8 chain_c_max; | ||
15472 | @@ -347,7 +347,7 @@ struct iwl_eeprom_calib_subband_info { | ||
15473 | struct iwl_eeprom_calib_info { | ||
15474 | u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ | ||
15475 | u8 saturation_power52; /* half-dBm */ | ||
15476 | - __le16 voltage; /* signed */ | ||
15477 | + s16 voltage; /* signed */ | ||
15478 | struct iwl_eeprom_calib_subband_info | ||
15479 | band_info[EEPROM_TX_POWER_BANDS]; | ||
15480 | } __attribute__ ((packed)); | ||
15481 | diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c | ||
15482 | index 5f26c93..d00a803 100644 | ||
15483 | --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c | ||
15484 | +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | ||
15485 | @@ -562,9 +562,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
15486 | txq = &priv->txq[txq_id]; | ||
15487 | q = &txq->q; | ||
15488 | |||
15489 | - if ((iwl_queue_space(q) < q->high_mark)) | ||
15490 | - goto drop; | ||
15491 | - | ||
15492 | spin_lock_irqsave(&priv->lock, flags); | ||
15493 | |||
15494 | idx = get_cmd_index(q, q->write_ptr, 0); | ||
15495 | @@ -3857,11 +3854,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) | ||
15496 | /* Tell mac80211 our characteristics */ | ||
15497 | hw->flags = IEEE80211_HW_SIGNAL_DBM | | ||
15498 | IEEE80211_HW_NOISE_DBM | | ||
15499 | - IEEE80211_HW_SPECTRUM_MGMT; | ||
15500 | - | ||
15501 | - if (!priv->cfg->broken_powersave) | ||
15502 | - hw->flags |= IEEE80211_HW_SUPPORTS_PS | | ||
15503 | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
15504 | + IEEE80211_HW_SPECTRUM_MGMT | | ||
15505 | + IEEE80211_HW_SUPPORTS_PS | | ||
15506 | + IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
15507 | |||
15508 | hw->wiphy->interface_modes = | ||
15509 | BIT(NL80211_IFTYPE_STATION) | | ||
15510 | diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h | ||
15511 | index 93c8989..1b02a4e 100644 | ||
15512 | --- a/drivers/net/wireless/iwmc3200wifi/iwm.h | ||
15513 | +++ b/drivers/net/wireless/iwmc3200wifi/iwm.h | ||
15514 | @@ -258,7 +258,7 @@ struct iwm_priv { | ||
15515 | |||
15516 | struct sk_buff_head rx_list; | ||
15517 | struct list_head rx_tickets; | ||
15518 | - struct list_head rx_packets[IWM_RX_ID_HASH + 1]; | ||
15519 | + struct list_head rx_packets[IWM_RX_ID_HASH]; | ||
15520 | struct workqueue_struct *rx_wq; | ||
15521 | struct work_struct rx_worker; | ||
15522 | |||
15523 | diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c | ||
15524 | index 06d66a1..6c95af3 100644 | ||
15525 | --- a/drivers/net/wireless/libertas/scan.c | ||
15526 | +++ b/drivers/net/wireless/libertas/scan.c | ||
15527 | @@ -399,8 +399,11 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan) | ||
15528 | chan_count = lbs_scan_create_channel_list(priv, chan_list); | ||
15529 | |||
15530 | netif_stop_queue(priv->dev); | ||
15531 | - if (priv->mesh_dev) | ||
15532 | + netif_carrier_off(priv->dev); | ||
15533 | + if (priv->mesh_dev) { | ||
15534 | netif_stop_queue(priv->mesh_dev); | ||
15535 | + netif_carrier_off(priv->mesh_dev); | ||
15536 | + } | ||
15537 | |||
15538 | /* Prepare to continue an interrupted scan */ | ||
15539 | lbs_deb_scan("chan_count %d, scan_channel %d\n", | ||
15540 | @@ -464,13 +467,16 @@ out2: | ||
15541 | priv->scan_channel = 0; | ||
15542 | |||
15543 | out: | ||
15544 | - if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len) | ||
15545 | - netif_wake_queue(priv->dev); | ||
15546 | - | ||
15547 | - if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) && | ||
15548 | - !priv->tx_pending_len) | ||
15549 | - netif_wake_queue(priv->mesh_dev); | ||
15550 | - | ||
15551 | + if (priv->connect_status == LBS_CONNECTED) { | ||
15552 | + netif_carrier_on(priv->dev); | ||
15553 | + if (!priv->tx_pending_len) | ||
15554 | + netif_wake_queue(priv->dev); | ||
15555 | + } | ||
15556 | + if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) { | ||
15557 | + netif_carrier_on(priv->mesh_dev); | ||
15558 | + if (!priv->tx_pending_len) | ||
15559 | + netif_wake_queue(priv->mesh_dev); | ||
15560 | + } | ||
15561 | kfree(chan_list); | ||
15562 | |||
15563 | lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); | ||
15564 | diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c | ||
15565 | index 01c738b..be837a0 100644 | ||
15566 | --- a/drivers/net/wireless/libertas/wext.c | ||
15567 | +++ b/drivers/net/wireless/libertas/wext.c | ||
15568 | @@ -1953,8 +1953,10 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, | ||
15569 | if (priv->connect_status == LBS_CONNECTED) { | ||
15570 | memcpy(extra, priv->curbssparams.ssid, | ||
15571 | priv->curbssparams.ssid_len); | ||
15572 | + extra[priv->curbssparams.ssid_len] = '\0'; | ||
15573 | } else { | ||
15574 | memset(extra, 0, 32); | ||
15575 | + extra[priv->curbssparams.ssid_len] = '\0'; | ||
15576 | } | ||
15577 | /* | ||
15578 | * If none, we may want to get the one that was set | ||
15579 | diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c | ||
15580 | index 31ca241..7698fdd 100644 | ||
15581 | --- a/drivers/net/wireless/orinoco/wext.c | ||
15582 | +++ b/drivers/net/wireless/orinoco/wext.c | ||
15583 | @@ -23,7 +23,7 @@ | ||
15584 | #define MAX_RID_LEN 1024 | ||
15585 | |||
15586 | /* Helper routine to record keys | ||
15587 | - * It is called under orinoco_lock so it may not sleep */ | ||
15588 | + * Do not call from interrupt context */ | ||
15589 | static int orinoco_set_key(struct orinoco_private *priv, int index, | ||
15590 | enum orinoco_alg alg, const u8 *key, int key_len, | ||
15591 | const u8 *seq, int seq_len) | ||
15592 | @@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index, | ||
15593 | kzfree(priv->keys[index].seq); | ||
15594 | |||
15595 | if (key_len) { | ||
15596 | - priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); | ||
15597 | + priv->keys[index].key = kzalloc(key_len, GFP_KERNEL); | ||
15598 | if (!priv->keys[index].key) | ||
15599 | goto nomem; | ||
15600 | } else | ||
15601 | priv->keys[index].key = NULL; | ||
15602 | |||
15603 | if (seq_len) { | ||
15604 | - priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC); | ||
15605 | + priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL); | ||
15606 | if (!priv->keys[index].seq) | ||
15607 | goto free_key; | ||
15608 | } else | ||
15609 | diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c | ||
15610 | index 9a6ceb4..b20e3ea 100644 | ||
15611 | --- a/drivers/net/wireless/rt2x00/rt61pci.c | ||
15612 | +++ b/drivers/net/wireless/rt2x00/rt61pci.c | ||
15613 | @@ -2538,11 +2538,6 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) | ||
15614 | unsigned int i; | ||
15615 | |||
15616 | /* | ||
15617 | - * Disable powersaving as default. | ||
15618 | - */ | ||
15619 | - rt2x00dev->hw->wiphy->ps_default = false; | ||
15620 | - | ||
15621 | - /* | ||
15622 | * Initialize all hw fields. | ||
15623 | */ | ||
15624 | rt2x00dev->hw->flags = | ||
15625 | diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h | ||
15626 | index 99406bf..bf9175a 100644 | ||
15627 | --- a/drivers/net/wireless/rtl818x/rtl8187.h | ||
15628 | +++ b/drivers/net/wireless/rtl818x/rtl8187.h | ||
15629 | @@ -23,7 +23,6 @@ | ||
15630 | #define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */ | ||
15631 | #define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */ | ||
15632 | #define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */ | ||
15633 | -#define RTL8187_EEPROM_SELECT_GPIO 0x3B | ||
15634 | |||
15635 | #define RTL8187_REQT_READ 0xC0 | ||
15636 | #define RTL8187_REQT_WRITE 0x40 | ||
15637 | @@ -32,9 +31,6 @@ | ||
15638 | |||
15639 | #define RTL8187_MAX_RX 0x9C4 | ||
15640 | |||
15641 | -#define RFKILL_MASK_8187_89_97 0x2 | ||
15642 | -#define RFKILL_MASK_8198 0x4 | ||
15643 | - | ||
15644 | struct rtl8187_rx_info { | ||
15645 | struct urb *urb; | ||
15646 | struct ieee80211_hw *dev; | ||
15647 | @@ -127,7 +123,6 @@ struct rtl8187_priv { | ||
15648 | u8 noise; | ||
15649 | u8 slot_time; | ||
15650 | u8 aifsn[4]; | ||
15651 | - u8 rfkill_mask; | ||
15652 | struct { | ||
15653 | __le64 buf; | ||
15654 | struct sk_buff_head queue; | ||
15655 | diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c | ||
15656 | index 9921147..2017ccc 100644 | ||
15657 | --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c | ||
15658 | +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c | ||
15659 | @@ -65,7 +65,6 @@ static struct usb_device_id rtl8187_table[] __devinitdata = { | ||
15660 | /* Sitecom */ | ||
15661 | {USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187}, | ||
15662 | {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B}, | ||
15663 | - {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B}, | ||
15664 | /* Sphairon Access Systems GmbH */ | ||
15665 | {USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187}, | ||
15666 | /* Dick Smith Electronics */ | ||
15667 | @@ -1330,7 +1329,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, | ||
15668 | struct ieee80211_channel *channel; | ||
15669 | const char *chip_name; | ||
15670 | u16 txpwr, reg; | ||
15671 | - u16 product_id = le16_to_cpu(udev->descriptor.idProduct); | ||
15672 | int err, i; | ||
15673 | |||
15674 | dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); | ||
15675 | @@ -1490,13 +1488,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, | ||
15676 | (*channel++).hw_value = txpwr & 0xFF; | ||
15677 | (*channel++).hw_value = txpwr >> 8; | ||
15678 | } | ||
15679 | - /* Handle the differing rfkill GPIO bit in different models */ | ||
15680 | - priv->rfkill_mask = RFKILL_MASK_8187_89_97; | ||
15681 | - if (product_id == 0x8197 || product_id == 0x8198) { | ||
15682 | - eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_SELECT_GPIO, ®); | ||
15683 | - if (reg & 0xFF00) | ||
15684 | - priv->rfkill_mask = RFKILL_MASK_8198; | ||
15685 | - } | ||
15686 | |||
15687 | /* | ||
15688 | * XXX: Once this driver supports anything that requires | ||
15689 | @@ -1525,9 +1516,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, | ||
15690 | mutex_init(&priv->conf_mutex); | ||
15691 | skb_queue_head_init(&priv->b_tx_status.queue); | ||
15692 | |||
15693 | - printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", | ||
15694 | + printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s\n", | ||
15695 | wiphy_name(dev->wiphy), dev->wiphy->perm_addr, | ||
15696 | - chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); | ||
15697 | + chip_name, priv->asic_rev, priv->rf->name); | ||
15698 | |||
15699 | #ifdef CONFIG_RTL8187_LEDS | ||
15700 | eeprom_93cx6_read(&eeprom, 0x3F, ®); | ||
15701 | diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c | ||
15702 | index 03555e1..cad8037 100644 | ||
15703 | --- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c | ||
15704 | +++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c | ||
15705 | @@ -25,10 +25,10 @@ static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) | ||
15706 | u8 gpio; | ||
15707 | |||
15708 | gpio = rtl818x_ioread8(priv, &priv->map->GPIO0); | ||
15709 | - rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask); | ||
15710 | + rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~0x02); | ||
15711 | gpio = rtl818x_ioread8(priv, &priv->map->GPIO1); | ||
15712 | |||
15713 | - return gpio & priv->rfkill_mask; | ||
15714 | + return gpio & 0x02; | ||
15715 | } | ||
15716 | |||
15717 | void rtl8187_rfkill_init(struct ieee80211_hw *hw) | ||
15718 | diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c | ||
15719 | index 5753036..b952ebc 100644 | ||
15720 | --- a/drivers/pci/dmar.c | ||
15721 | +++ b/drivers/pci/dmar.c | ||
15722 | @@ -582,8 +582,6 @@ int __init dmar_table_init(void) | ||
15723 | return 0; | ||
15724 | } | ||
15725 | |||
15726 | -static int bios_warned; | ||
15727 | - | ||
15728 | int __init check_zero_address(void) | ||
15729 | { | ||
15730 | struct acpi_table_dmar *dmar; | ||
15731 | @@ -603,9 +601,6 @@ int __init check_zero_address(void) | ||
15732 | } | ||
15733 | |||
15734 | if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { | ||
15735 | - void __iomem *addr; | ||
15736 | - u64 cap, ecap; | ||
15737 | - | ||
15738 | drhd = (void *)entry_header; | ||
15739 | if (!drhd->address) { | ||
15740 | /* Promote an attitude of violence to a BIOS engineer today */ | ||
15741 | @@ -614,40 +609,17 @@ int __init check_zero_address(void) | ||
15742 | dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15743 | dmi_get_system_info(DMI_BIOS_VERSION), | ||
15744 | dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15745 | - bios_warned = 1; | ||
15746 | - goto failed; | ||
15747 | - } | ||
15748 | - | ||
15749 | - addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); | ||
15750 | - if (!addr ) { | ||
15751 | - printk("IOMMU: can't validate: %llx\n", drhd->address); | ||
15752 | - goto failed; | ||
15753 | - } | ||
15754 | - cap = dmar_readq(addr + DMAR_CAP_REG); | ||
15755 | - ecap = dmar_readq(addr + DMAR_ECAP_REG); | ||
15756 | - early_iounmap(addr, VTD_PAGE_SIZE); | ||
15757 | - if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { | ||
15758 | - /* Promote an attitude of violence to a BIOS engineer today */ | ||
15759 | - WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | ||
15760 | - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15761 | - drhd->address, | ||
15762 | - dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15763 | - dmi_get_system_info(DMI_BIOS_VERSION), | ||
15764 | - dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15765 | - bios_warned = 1; | ||
15766 | - goto failed; | ||
15767 | +#ifdef CONFIG_DMAR | ||
15768 | + dmar_disabled = 1; | ||
15769 | +#endif | ||
15770 | + return 0; | ||
15771 | } | ||
15772 | + break; | ||
15773 | } | ||
15774 | |||
15775 | entry_header = ((void *)entry_header + entry_header->length); | ||
15776 | } | ||
15777 | return 1; | ||
15778 | - | ||
15779 | -failed: | ||
15780 | -#ifdef CONFIG_DMAR | ||
15781 | - dmar_disabled = 1; | ||
15782 | -#endif | ||
15783 | - return 0; | ||
15784 | } | ||
15785 | |||
15786 | void __init detect_intel_iommu(void) | ||
15787 | @@ -692,18 +664,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | ||
15788 | int agaw = 0; | ||
15789 | int msagaw = 0; | ||
15790 | |||
15791 | - if (!drhd->reg_base_addr) { | ||
15792 | - if (!bios_warned) { | ||
15793 | - WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" | ||
15794 | - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15795 | - dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15796 | - dmi_get_system_info(DMI_BIOS_VERSION), | ||
15797 | - dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15798 | - bios_warned = 1; | ||
15799 | - } | ||
15800 | - return -EINVAL; | ||
15801 | - } | ||
15802 | - | ||
15803 | iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); | ||
15804 | if (!iommu) | ||
15805 | return -ENOMEM; | ||
15806 | @@ -720,16 +680,13 @@ int alloc_iommu(struct dmar_drhd_unit *drhd) | ||
15807 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | ||
15808 | |||
15809 | if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { | ||
15810 | - if (!bios_warned) { | ||
15811 | - /* Promote an attitude of violence to a BIOS engineer today */ | ||
15812 | - WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | ||
15813 | - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15814 | - drhd->reg_base_addr, | ||
15815 | - dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15816 | - dmi_get_system_info(DMI_BIOS_VERSION), | ||
15817 | - dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15818 | - bios_warned = 1; | ||
15819 | - } | ||
15820 | + /* Promote an attitude of violence to a BIOS engineer today */ | ||
15821 | + WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n" | ||
15822 | + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15823 | + drhd->reg_base_addr, | ||
15824 | + dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15825 | + dmi_get_system_info(DMI_BIOS_VERSION), | ||
15826 | + dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15827 | goto err_unmap; | ||
15828 | } | ||
15829 | |||
15830 | diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c | ||
15831 | index 2498602..1840a05 100644 | ||
15832 | --- a/drivers/pci/intel-iommu.c | ||
15833 | +++ b/drivers/pci/intel-iommu.c | ||
15834 | @@ -1523,15 +1523,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, | ||
15835 | |||
15836 | /* Skip top levels of page tables for | ||
15837 | * iommu which has less agaw than default. | ||
15838 | - * Unnecessary for PT mode. | ||
15839 | */ | ||
15840 | - if (translation != CONTEXT_TT_PASS_THROUGH) { | ||
15841 | - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { | ||
15842 | - pgd = phys_to_virt(dma_pte_addr(pgd)); | ||
15843 | - if (!dma_pte_present(pgd)) { | ||
15844 | - spin_unlock_irqrestore(&iommu->lock, flags); | ||
15845 | - return -ENOMEM; | ||
15846 | - } | ||
15847 | + for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { | ||
15848 | + pgd = phys_to_virt(dma_pte_addr(pgd)); | ||
15849 | + if (!dma_pte_present(pgd)) { | ||
15850 | + spin_unlock_irqrestore(&iommu->lock, flags); | ||
15851 | + return -ENOMEM; | ||
15852 | } | ||
15853 | } | ||
15854 | } | ||
15855 | @@ -1994,16 +1991,6 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, | ||
15856 | "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", | ||
15857 | pci_name(pdev), start, end); | ||
15858 | |||
15859 | - if (end < start) { | ||
15860 | - WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" | ||
15861 | - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15862 | - dmi_get_system_info(DMI_BIOS_VENDOR), | ||
15863 | - dmi_get_system_info(DMI_BIOS_VERSION), | ||
15864 | - dmi_get_system_info(DMI_PRODUCT_VERSION)); | ||
15865 | - ret = -EIO; | ||
15866 | - goto error; | ||
15867 | - } | ||
15868 | - | ||
15869 | if (end >> agaw_to_width(domain->agaw)) { | ||
15870 | WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" | ||
15871 | "BIOS vendor: %s; Ver: %s; Product Version: %s\n", | ||
15872 | @@ -3241,9 +3228,6 @@ static int device_notifier(struct notifier_block *nb, | ||
15873 | struct pci_dev *pdev = to_pci_dev(dev); | ||
15874 | struct dmar_domain *domain; | ||
15875 | |||
15876 | - if (iommu_no_mapping(dev)) | ||
15877 | - return 0; | ||
15878 | - | ||
15879 | domain = find_domain(pdev); | ||
15880 | if (!domain) | ||
15881 | return 0; | ||
15882 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c | ||
15883 | index 6477722..4e4c295 100644 | ||
15884 | --- a/drivers/pci/pci.c | ||
15885 | +++ b/drivers/pci/pci.c | ||
15886 | @@ -2723,11 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev) | ||
15887 | return 1; | ||
15888 | } | ||
15889 | |||
15890 | -void __weak pci_fixup_cardbus(struct pci_bus *bus) | ||
15891 | -{ | ||
15892 | -} | ||
15893 | -EXPORT_SYMBOL(pci_fixup_cardbus); | ||
15894 | - | ||
15895 | static int __init pci_setup(char *str) | ||
15896 | { | ||
15897 | while (str) { | ||
15898 | diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c | ||
15899 | index 0d91a8a..62d15f6 100644 | ||
15900 | --- a/drivers/pci/pcie/aer/aer_inject.c | ||
15901 | +++ b/drivers/pci/pcie/aer/aer_inject.c | ||
15902 | @@ -392,14 +392,8 @@ static int aer_inject(struct aer_error_inj *einj) | ||
15903 | if (ret) | ||
15904 | goto out_put; | ||
15905 | |||
15906 | - if (find_aer_device(rpdev, &edev)) { | ||
15907 | - if (!get_service_data(edev)) { | ||
15908 | - printk(KERN_WARNING "AER service is not initialized\n"); | ||
15909 | - ret = -EINVAL; | ||
15910 | - goto out_put; | ||
15911 | - } | ||
15912 | + if (find_aer_device(rpdev, &edev)) | ||
15913 | aer_irq(-1, edev); | ||
15914 | - } | ||
15915 | else | ||
15916 | ret = -EINVAL; | ||
15917 | out_put: | ||
15918 | diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c | ||
15919 | index dd58c6a..cb1a027 100644 | ||
15920 | --- a/drivers/pci/setup-bus.c | ||
15921 | +++ b/drivers/pci/setup-bus.c | ||
15922 | @@ -142,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | ||
15923 | struct pci_dev *bridge = bus->self; | ||
15924 | struct pci_bus_region region; | ||
15925 | u32 l, bu, lu, io_upper16; | ||
15926 | + int pref_mem64; | ||
15927 | |||
15928 | if (pci_is_enabled(bridge)) | ||
15929 | return; | ||
15930 | @@ -197,6 +198,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | ||
15931 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); | ||
15932 | |||
15933 | /* Set up PREF base/limit. */ | ||
15934 | + pref_mem64 = 0; | ||
15935 | bu = lu = 0; | ||
15936 | pcibios_resource_to_bus(bridge, ®ion, bus->resource[2]); | ||
15937 | if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { | ||
15938 | @@ -204,6 +206,7 @@ static void pci_setup_bridge(struct pci_bus *bus) | ||
15939 | l = (region.start >> 16) & 0xfff0; | ||
15940 | l |= region.end & 0xfff00000; | ||
15941 | if (bus->resource[2]->flags & IORESOURCE_MEM_64) { | ||
15942 | + pref_mem64 = 1; | ||
15943 | bu = upper_32_bits(region.start); | ||
15944 | lu = upper_32_bits(region.end); | ||
15945 | width = 16; | ||
15946 | @@ -218,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus) | ||
15947 | } | ||
15948 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); | ||
15949 | |||
15950 | - /* Set the upper 32 bits of PREF base & limit. */ | ||
15951 | - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | ||
15952 | - pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | ||
15953 | + if (pref_mem64) { | ||
15954 | + /* Set the upper 32 bits of PREF base & limit. */ | ||
15955 | + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | ||
15956 | + pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | ||
15957 | + } | ||
15958 | |||
15959 | pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); | ||
15960 | } | ||
15961 | diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c | ||
15962 | index 5c26793..db77e1f 100644 | ||
15963 | --- a/drivers/pcmcia/cardbus.c | ||
15964 | +++ b/drivers/pcmcia/cardbus.c | ||
15965 | @@ -214,7 +214,7 @@ int __ref cb_alloc(struct pcmcia_socket * s) | ||
15966 | unsigned int max, pass; | ||
15967 | |||
15968 | s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); | ||
15969 | - pci_fixup_cardbus(bus); | ||
15970 | +// pcibios_fixup_bus(bus); | ||
15971 | |||
15972 | max = bus->secondary; | ||
15973 | for (pass = 0; pass < 2; pass++) | ||
15974 | diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c | ||
15975 | index 4d922e4..ab64522 100644 | ||
15976 | --- a/drivers/platform/x86/acerhdf.c | ||
15977 | +++ b/drivers/platform/x86/acerhdf.c | ||
15978 | @@ -52,7 +52,7 @@ | ||
15979 | */ | ||
15980 | #undef START_IN_KERNEL_MODE | ||
15981 | |||
15982 | -#define DRV_VER "0.5.20" | ||
15983 | +#define DRV_VER "0.5.18" | ||
15984 | |||
15985 | /* | ||
15986 | * According to the Atom N270 datasheet, | ||
15987 | @@ -112,14 +112,12 @@ module_param_string(force_product, force_product, 16, 0); | ||
15988 | MODULE_PARM_DESC(force_product, "Force BIOS product and omit BIOS check"); | ||
15989 | |||
15990 | /* | ||
15991 | - * cmd_off: to switch the fan completely off | ||
15992 | - * chk_off: to check if the fan is off | ||
15993 | + * cmd_off: to switch the fan completely off / to check if the fan is off | ||
15994 | * cmd_auto: to set the BIOS in control of the fan. The BIOS regulates then | ||
15995 | * the fan speed depending on the temperature | ||
15996 | */ | ||
15997 | struct fancmd { | ||
15998 | u8 cmd_off; | ||
15999 | - u8 chk_off; | ||
16000 | u8 cmd_auto; | ||
16001 | }; | ||
16002 | |||
16003 | @@ -136,41 +134,32 @@ struct bios_settings_t { | ||
16004 | /* Register addresses and values for different BIOS versions */ | ||
16005 | static const struct bios_settings_t bios_tbl[] = { | ||
16006 | /* AOA110 */ | ||
16007 | - {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x1f, 0x00} }, | ||
16008 | - {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x1f, 0x00} }, | ||
16009 | - {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, | ||
16010 | - {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, | ||
16011 | - {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, | ||
16012 | - {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0xaf, 0x00} }, | ||
16013 | - {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16014 | - {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16015 | - {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16016 | + {"Acer", "AOA110", "v0.3109", 0x55, 0x58, {0x1f, 0x00} }, | ||
16017 | + {"Acer", "AOA110", "v0.3114", 0x55, 0x58, {0x1f, 0x00} }, | ||
16018 | + {"Acer", "AOA110", "v0.3301", 0x55, 0x58, {0xaf, 0x00} }, | ||
16019 | + {"Acer", "AOA110", "v0.3304", 0x55, 0x58, {0xaf, 0x00} }, | ||
16020 | + {"Acer", "AOA110", "v0.3305", 0x55, 0x58, {0xaf, 0x00} }, | ||
16021 | + {"Acer", "AOA110", "v0.3307", 0x55, 0x58, {0xaf, 0x00} }, | ||
16022 | + {"Acer", "AOA110", "v0.3308", 0x55, 0x58, {0x21, 0x00} }, | ||
16023 | + {"Acer", "AOA110", "v0.3309", 0x55, 0x58, {0x21, 0x00} }, | ||
16024 | + {"Acer", "AOA110", "v0.3310", 0x55, 0x58, {0x21, 0x00} }, | ||
16025 | /* AOA150 */ | ||
16026 | - {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16027 | - {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16028 | - {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16029 | - {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16030 | - {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16031 | - {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16032 | - {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16033 | - {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16034 | - /* Acer 1410 */ | ||
16035 | - {"Acer", "Aspire 1410", "v0.3120", 0x55, 0x58, {0x9e, 0x9e, 0x00} }, | ||
16036 | + {"Acer", "AOA150", "v0.3114", 0x55, 0x58, {0x20, 0x00} }, | ||
16037 | + {"Acer", "AOA150", "v0.3301", 0x55, 0x58, {0x20, 0x00} }, | ||
16038 | + {"Acer", "AOA150", "v0.3304", 0x55, 0x58, {0x20, 0x00} }, | ||
16039 | + {"Acer", "AOA150", "v0.3305", 0x55, 0x58, {0x20, 0x00} }, | ||
16040 | + {"Acer", "AOA150", "v0.3307", 0x55, 0x58, {0x20, 0x00} }, | ||
16041 | + {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, | ||
16042 | + {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, | ||
16043 | + {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, | ||
16044 | /* special BIOS / other */ | ||
16045 | - {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16046 | - {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16047 | - {"Gateway ", "LT31 ", "v1.3103 ", 0x55, 0x58, | ||
16048 | - {0x10, 0x0f, 0x00} }, | ||
16049 | - {"Gateway ", "LT31 ", "v1.3201 ", 0x55, 0x58, | ||
16050 | - {0x10, 0x0f, 0x00} }, | ||
16051 | - {"Gateway ", "LT31 ", "v1.3302 ", 0x55, 0x58, | ||
16052 | - {0x10, 0x0f, 0x00} }, | ||
16053 | - {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16054 | - {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16055 | - {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x21, 0x00} }, | ||
16056 | - {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x20, 0x00} }, | ||
16057 | + {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, | ||
16058 | + {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, | ||
16059 | + {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, | ||
16060 | + {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, | ||
16061 | + {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, | ||
16062 | /* pewpew-terminator */ | ||
16063 | - {"", "", "", 0, 0, {0, 0, 0} } | ||
16064 | + {"", "", "", 0, 0, {0, 0} } | ||
16065 | }; | ||
16066 | |||
16067 | static const struct bios_settings_t *bios_cfg __read_mostly; | ||
16068 | @@ -194,7 +183,7 @@ static int acerhdf_get_fanstate(int *state) | ||
16069 | if (ec_read(bios_cfg->fanreg, &fan)) | ||
16070 | return -EINVAL; | ||
16071 | |||
16072 | - if (fan != bios_cfg->cmd.chk_off) | ||
16073 | + if (fan != bios_cfg->cmd.cmd_off) | ||
16074 | *state = ACERHDF_FAN_AUTO; | ||
16075 | else | ||
16076 | *state = ACERHDF_FAN_OFF; | ||
16077 | @@ -640,10 +629,9 @@ static void __exit acerhdf_exit(void) | ||
16078 | MODULE_LICENSE("GPL"); | ||
16079 | MODULE_AUTHOR("Peter Feuerer"); | ||
16080 | MODULE_DESCRIPTION("Aspire One temperature and fan driver"); | ||
16081 | -MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); | ||
16082 | -MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); | ||
16083 | -MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:"); | ||
16084 | -MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:"); | ||
16085 | +MODULE_ALIAS("dmi:*:*Acer*:*:"); | ||
16086 | +MODULE_ALIAS("dmi:*:*Gateway*:*:"); | ||
16087 | +MODULE_ALIAS("dmi:*:*Packard Bell*:*:"); | ||
16088 | |||
16089 | module_init(acerhdf_init); | ||
16090 | module_exit(acerhdf_exit); | ||
16091 | diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c | ||
16092 | index 767cb61..b39d2bb 100644 | ||
16093 | --- a/drivers/platform/x86/asus-laptop.c | ||
16094 | +++ b/drivers/platform/x86/asus-laptop.c | ||
16095 | @@ -221,7 +221,6 @@ static struct asus_hotk *hotk; | ||
16096 | */ | ||
16097 | static const struct acpi_device_id asus_device_ids[] = { | ||
16098 | {"ATK0100", 0}, | ||
16099 | - {"ATK0101", 0}, | ||
16100 | {"", 0}, | ||
16101 | }; | ||
16102 | MODULE_DEVICE_TABLE(acpi, asus_device_ids); | ||
16103 | @@ -294,11 +293,6 @@ struct key_entry { | ||
16104 | enum { KE_KEY, KE_END }; | ||
16105 | |||
16106 | static struct key_entry asus_keymap[] = { | ||
16107 | - {KE_KEY, 0x02, KEY_SCREENLOCK}, | ||
16108 | - {KE_KEY, 0x05, KEY_WLAN}, | ||
16109 | - {KE_KEY, 0x08, BTN_TOUCH}, | ||
16110 | - {KE_KEY, 0x17, KEY_ZOOM}, | ||
16111 | - {KE_KEY, 0x1f, KEY_BATTERY}, | ||
16112 | {KE_KEY, 0x30, KEY_VOLUMEUP}, | ||
16113 | {KE_KEY, 0x31, KEY_VOLUMEDOWN}, | ||
16114 | {KE_KEY, 0x32, KEY_MUTE}, | ||
16115 | @@ -318,8 +312,6 @@ static struct key_entry asus_keymap[] = { | ||
16116 | {KE_KEY, 0x5F, KEY_WLAN}, | ||
16117 | {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE}, | ||
16118 | {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE}, | ||
16119 | - {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE}, | ||
16120 | - {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE}, | ||
16121 | {KE_KEY, 0x6B, BTN_TOUCH}, /* Lock Mouse */ | ||
16122 | {KE_KEY, 0x82, KEY_CAMERA}, | ||
16123 | {KE_KEY, 0x8A, KEY_PROG1}, | ||
16124 | @@ -1291,8 +1283,8 @@ static int asus_hotk_add(struct acpi_device *device) | ||
16125 | hotk->ledd_status = 0xFFF; | ||
16126 | |||
16127 | /* Set initial values of light sensor and level */ | ||
16128 | - hotk->light_switch = 0; /* Default to light sensor disabled */ | ||
16129 | - hotk->light_level = 5; /* level 5 for sensor sensitivity */ | ||
16130 | + hotk->light_switch = 1; /* Default to light sensor disabled */ | ||
16131 | + hotk->light_level = 0; /* level 5 for sensor sensitivity */ | ||
16132 | |||
16133 | if (ls_switch_handle) | ||
16134 | set_light_sens_switch(hotk->light_switch); | ||
16135 | diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c | ||
16136 | index 6dec7cc..0f900cc 100644 | ||
16137 | --- a/drivers/platform/x86/dell-wmi.c | ||
16138 | +++ b/drivers/platform/x86/dell-wmi.c | ||
16139 | @@ -158,13 +158,8 @@ static void dell_wmi_notify(u32 value, void *context) | ||
16140 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
16141 | static struct key_entry *key; | ||
16142 | union acpi_object *obj; | ||
16143 | - acpi_status status; | ||
16144 | |||
16145 | - status = wmi_get_event_data(value, &response); | ||
16146 | - if (status != AE_OK) { | ||
16147 | - printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status); | ||
16148 | - return; | ||
16149 | - } | ||
16150 | + wmi_get_event_data(value, &response); | ||
16151 | |||
16152 | obj = (union acpi_object *)response.pointer; | ||
16153 | |||
16154 | @@ -185,7 +180,6 @@ static void dell_wmi_notify(u32 value, void *context) | ||
16155 | printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n", | ||
16156 | buffer[1] & 0xFFFF); | ||
16157 | } | ||
16158 | - kfree(obj); | ||
16159 | } | ||
16160 | |||
16161 | static int __init dell_wmi_input_setup(void) | ||
16162 | diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c | ||
16163 | index deb53b5..c284217 100644 | ||
16164 | --- a/drivers/platform/x86/hp-wmi.c | ||
16165 | +++ b/drivers/platform/x86/hp-wmi.c | ||
16166 | @@ -334,13 +334,8 @@ static void hp_wmi_notify(u32 value, void *context) | ||
16167 | struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
16168 | static struct key_entry *key; | ||
16169 | union acpi_object *obj; | ||
16170 | - acpi_status status; | ||
16171 | |||
16172 | - status = wmi_get_event_data(value, &response); | ||
16173 | - if (status != AE_OK) { | ||
16174 | - printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status); | ||
16175 | - return; | ||
16176 | - } | ||
16177 | + wmi_get_event_data(value, &response); | ||
16178 | |||
16179 | obj = (union acpi_object *)response.pointer; | ||
16180 | |||
16181 | @@ -382,8 +377,6 @@ static void hp_wmi_notify(u32 value, void *context) | ||
16182 | eventcode); | ||
16183 | } else | ||
16184 | printk(KERN_INFO "HP WMI: Unknown response received\n"); | ||
16185 | - | ||
16186 | - kfree(obj); | ||
16187 | } | ||
16188 | |||
16189 | static int __init hp_wmi_input_setup(void) | ||
16190 | diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c | ||
16191 | index 1ee734c..a848c7e 100644 | ||
16192 | --- a/drivers/platform/x86/thinkpad_acpi.c | ||
16193 | +++ b/drivers/platform/x86/thinkpad_acpi.c | ||
16194 | @@ -3866,6 +3866,15 @@ enum { | ||
16195 | |||
16196 | #define TPACPI_RFK_BLUETOOTH_SW_NAME "tpacpi_bluetooth_sw" | ||
16197 | |||
16198 | +static void bluetooth_suspend(pm_message_t state) | ||
16199 | +{ | ||
16200 | + /* Try to make sure radio will resume powered off */ | ||
16201 | + if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd", | ||
16202 | + TP_ACPI_BLTH_PWR_OFF_ON_RESUME)) | ||
16203 | + vdbg_printk(TPACPI_DBG_RFKILL, | ||
16204 | + "bluetooth power down on resume request failed\n"); | ||
16205 | +} | ||
16206 | + | ||
16207 | static int bluetooth_get_status(void) | ||
16208 | { | ||
16209 | int status; | ||
16210 | @@ -3899,9 +3908,10 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state) | ||
16211 | #endif | ||
16212 | |||
16213 | /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */ | ||
16214 | - status = TP_ACPI_BLUETOOTH_RESUMECTRL; | ||
16215 | if (state == TPACPI_RFK_RADIO_ON) | ||
16216 | - status |= TP_ACPI_BLUETOOTH_RADIOSSW; | ||
16217 | + status = TP_ACPI_BLUETOOTH_RADIOSSW; | ||
16218 | + else | ||
16219 | + status = 0; | ||
16220 | |||
16221 | if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) | ||
16222 | return -EIO; | ||
16223 | @@ -4040,6 +4050,7 @@ static struct ibm_struct bluetooth_driver_data = { | ||
16224 | .read = bluetooth_read, | ||
16225 | .write = bluetooth_write, | ||
16226 | .exit = bluetooth_exit, | ||
16227 | + .suspend = bluetooth_suspend, | ||
16228 | .shutdown = bluetooth_shutdown, | ||
16229 | }; | ||
16230 | |||
16231 | @@ -4057,6 +4068,15 @@ enum { | ||
16232 | |||
16233 | #define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw" | ||
16234 | |||
16235 | +static void wan_suspend(pm_message_t state) | ||
16236 | +{ | ||
16237 | + /* Try to make sure radio will resume powered off */ | ||
16238 | + if (!acpi_evalf(NULL, NULL, "\\WGSV", "qvd", | ||
16239 | + TP_ACPI_WGSV_PWR_OFF_ON_RESUME)) | ||
16240 | + vdbg_printk(TPACPI_DBG_RFKILL, | ||
16241 | + "WWAN power down on resume request failed\n"); | ||
16242 | +} | ||
16243 | + | ||
16244 | static int wan_get_status(void) | ||
16245 | { | ||
16246 | int status; | ||
16247 | @@ -4089,10 +4109,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state) | ||
16248 | } | ||
16249 | #endif | ||
16250 | |||
16251 | - /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */ | ||
16252 | - status = TP_ACPI_WANCARD_RESUMECTRL; | ||
16253 | + /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */ | ||
16254 | if (state == TPACPI_RFK_RADIO_ON) | ||
16255 | - status |= TP_ACPI_WANCARD_RADIOSSW; | ||
16256 | + status = TP_ACPI_WANCARD_RADIOSSW; | ||
16257 | + else | ||
16258 | + status = 0; | ||
16259 | |||
16260 | if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) | ||
16261 | return -EIO; | ||
16262 | @@ -4230,6 +4251,7 @@ static struct ibm_struct wan_driver_data = { | ||
16263 | .read = wan_read, | ||
16264 | .write = wan_write, | ||
16265 | .exit = wan_exit, | ||
16266 | + .suspend = wan_suspend, | ||
16267 | .shutdown = wan_shutdown, | ||
16268 | }; | ||
16269 | |||
16270 | @@ -6101,8 +6123,8 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { | ||
16271 | |||
16272 | /* Models with Intel Extreme Graphics 2 */ | ||
16273 | TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), | ||
16274 | - TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
16275 | - TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), | ||
16276 | + TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), | ||
16277 | + TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_NOEC), | ||
16278 | |||
16279 | /* Models with Intel GMA900 */ | ||
16280 | TPACPI_Q_IBM('7', '0', TPACPI_BRGHT_Q_NOEC), /* T43, R52 */ | ||
16281 | diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c | ||
16282 | index 87f4c97..177f8d7 100644 | ||
16283 | --- a/drivers/platform/x86/wmi.c | ||
16284 | +++ b/drivers/platform/x86/wmi.c | ||
16285 | @@ -510,8 +510,8 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler); | ||
16286 | /** | ||
16287 | * wmi_get_event_data - Get WMI data associated with an event | ||
16288 | * | ||
16289 | - * @event: Event to find | ||
16290 | - * @out: Buffer to hold event data. out->pointer should be freed with kfree() | ||
16291 | + * @event - Event to find | ||
16292 | + * &out - Buffer to hold event data | ||
16293 | * | ||
16294 | * Returns extra data associated with an event in WMI. | ||
16295 | */ | ||
16296 | diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c | ||
16297 | index 1836053..efe568d 100644 | ||
16298 | --- a/drivers/regulator/core.c | ||
16299 | +++ b/drivers/regulator/core.c | ||
16300 | @@ -640,7 +640,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state) | ||
16301 | static void print_constraints(struct regulator_dev *rdev) | ||
16302 | { | ||
16303 | struct regulation_constraints *constraints = rdev->constraints; | ||
16304 | - char buf[80] = ""; | ||
16305 | + char buf[80]; | ||
16306 | int count; | ||
16307 | |||
16308 | if (rdev->desc->type == REGULATOR_VOLTAGE) { | ||
16309 | diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c | ||
16310 | index 43ed81e..768bd0e 100644 | ||
16311 | --- a/drivers/regulator/wm8350-regulator.c | ||
16312 | +++ b/drivers/regulator/wm8350-regulator.c | ||
16313 | @@ -1504,8 +1504,7 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | ||
16314 | led->isink_init.consumer_supplies = &led->isink_consumer; | ||
16315 | led->isink_init.constraints.min_uA = 0; | ||
16316 | led->isink_init.constraints.max_uA = pdata->max_uA; | ||
16317 | - led->isink_init.constraints.valid_ops_mask | ||
16318 | - = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS; | ||
16319 | + led->isink_init.constraints.valid_ops_mask = REGULATOR_CHANGE_CURRENT; | ||
16320 | led->isink_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | ||
16321 | ret = wm8350_register_regulator(wm8350, isink, &led->isink_init); | ||
16322 | if (ret != 0) { | ||
16323 | @@ -1518,7 +1517,6 @@ int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink, | ||
16324 | led->dcdc_init.num_consumer_supplies = 1; | ||
16325 | led->dcdc_init.consumer_supplies = &led->dcdc_consumer; | ||
16326 | led->dcdc_init.constraints.valid_modes_mask = REGULATOR_MODE_NORMAL; | ||
16327 | - led->dcdc_init.constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; | ||
16328 | ret = wm8350_register_regulator(wm8350, dcdc, &led->dcdc_init); | ||
16329 | if (ret != 0) { | ||
16330 | platform_device_put(pdev); | ||
16331 | diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c | ||
16332 | index 473e5f2..f7a4701 100644 | ||
16333 | --- a/drivers/rtc/rtc-cmos.c | ||
16334 | +++ b/drivers/rtc/rtc-cmos.c | ||
16335 | @@ -1099,9 +1099,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp) | ||
16336 | #define cmos_pnp_resume NULL | ||
16337 | #endif | ||
16338 | |||
16339 | -static void cmos_pnp_shutdown(struct pnp_dev *pnp) | ||
16340 | +static void cmos_pnp_shutdown(struct device *pdev) | ||
16341 | { | ||
16342 | - if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev)) | ||
16343 | + if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev)) | ||
16344 | return; | ||
16345 | |||
16346 | cmos_do_shutdown(); | ||
16347 | @@ -1120,12 +1120,15 @@ static struct pnp_driver cmos_pnp_driver = { | ||
16348 | .id_table = rtc_ids, | ||
16349 | .probe = cmos_pnp_probe, | ||
16350 | .remove = __exit_p(cmos_pnp_remove), | ||
16351 | - .shutdown = cmos_pnp_shutdown, | ||
16352 | |||
16353 | /* flag ensures resume() gets called, and stops syslog spam */ | ||
16354 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, | ||
16355 | .suspend = cmos_pnp_suspend, | ||
16356 | .resume = cmos_pnp_resume, | ||
16357 | + .driver = { | ||
16358 | + .name = (char *)driver_name, | ||
16359 | + .shutdown = cmos_pnp_shutdown, | ||
16360 | + } | ||
16361 | }; | ||
16362 | |||
16363 | #endif /* CONFIG_PNP */ | ||
16364 | diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c | ||
16365 | index 812c667..3a7be11 100644 | ||
16366 | --- a/drivers/rtc/rtc-fm3130.c | ||
16367 | +++ b/drivers/rtc/rtc-fm3130.c | ||
16368 | @@ -376,22 +376,20 @@ static int __devinit fm3130_probe(struct i2c_client *client, | ||
16369 | } | ||
16370 | |||
16371 | /* Disabling calibration mode */ | ||
16372 | - if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) { | ||
16373 | + if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) | ||
16374 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, | ||
16375 | fm3130->regs[FM3130_RTC_CONTROL] & | ||
16376 | ~(FM3130_RTC_CONTROL_BIT_CAL)); | ||
16377 | dev_warn(&client->dev, "Disabling calibration mode!\n"); | ||
16378 | - } | ||
16379 | |||
16380 | /* Disabling read and write modes */ | ||
16381 | if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || | ||
16382 | - fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) { | ||
16383 | + fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) | ||
16384 | i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, | ||
16385 | fm3130->regs[FM3130_RTC_CONTROL] & | ||
16386 | ~(FM3130_RTC_CONTROL_BIT_READ | | ||
16387 | FM3130_RTC_CONTROL_BIT_WRITE)); | ||
16388 | dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); | ||
16389 | - } | ||
16390 | |||
16391 | /* oscillator off? turn it on, so clock can tick. */ | ||
16392 | if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) | ||
16393 | diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c | ||
16394 | index d0ef15a..aaccc8e 100644 | ||
16395 | --- a/drivers/s390/block/dasd.c | ||
16396 | +++ b/drivers/s390/block/dasd.c | ||
16397 | @@ -994,9 +994,10 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, | ||
16398 | return; | ||
16399 | cqr = (struct dasd_ccw_req *) intparm; | ||
16400 | if (cqr->status != DASD_CQR_IN_IO) { | ||
16401 | - DBF_EVENT_DEVID(DBF_DEBUG, cdev, | ||
16402 | - "invalid status in handle_killed_request: " | ||
16403 | - "%02x", cqr->status); | ||
16404 | + DBF_EVENT(DBF_DEBUG, | ||
16405 | + "invalid status in handle_killed_request: " | ||
16406 | + "bus_id %s, status %02x", | ||
16407 | + dev_name(&cdev->dev), cqr->status); | ||
16408 | return; | ||
16409 | } | ||
16410 | |||
16411 | @@ -1004,8 +1005,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev, | ||
16412 | if (device == NULL || | ||
16413 | device != dasd_device_from_cdev_locked(cdev) || | ||
16414 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | ||
16415 | - DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", | ||
16416 | - "invalid device in request"); | ||
16417 | + DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | ||
16418 | + "bus_id %s", dev_name(&cdev->dev)); | ||
16419 | return; | ||
16420 | } | ||
16421 | |||
16422 | @@ -1044,13 +1045,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | ||
16423 | case -EIO: | ||
16424 | break; | ||
16425 | case -ETIMEDOUT: | ||
16426 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " | ||
16427 | - "request timed out\n", __func__); | ||
16428 | + DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", | ||
16429 | + __func__, dev_name(&cdev->dev)); | ||
16430 | break; | ||
16431 | default: | ||
16432 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " | ||
16433 | - "unknown error %ld\n", __func__, | ||
16434 | - PTR_ERR(irb)); | ||
16435 | + DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", | ||
16436 | + __func__, dev_name(&cdev->dev), PTR_ERR(irb)); | ||
16437 | } | ||
16438 | dasd_handle_killed_request(cdev, intparm); | ||
16439 | return; | ||
16440 | @@ -1078,8 +1078,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | ||
16441 | device = (struct dasd_device *) cqr->startdev; | ||
16442 | if (!device || | ||
16443 | strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { | ||
16444 | - DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", | ||
16445 | - "invalid device in request"); | ||
16446 | + DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " | ||
16447 | + "bus_id %s", dev_name(&cdev->dev)); | ||
16448 | return; | ||
16449 | } | ||
16450 | |||
16451 | @@ -2217,9 +2217,9 @@ int dasd_generic_probe(struct ccw_device *cdev, | ||
16452 | } | ||
16453 | ret = dasd_add_sysfs_files(cdev); | ||
16454 | if (ret) { | ||
16455 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", | ||
16456 | - "dasd_generic_probe: could not add " | ||
16457 | - "sysfs entries"); | ||
16458 | + DBF_EVENT(DBF_WARNING, | ||
16459 | + "dasd_generic_probe: could not add sysfs entries " | ||
16460 | + "for %s\n", dev_name(&cdev->dev)); | ||
16461 | return ret; | ||
16462 | } | ||
16463 | cdev->handler = &dasd_int_handler; | ||
16464 | diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c | ||
16465 | index 8174ec9..4e49b4a 100644 | ||
16466 | --- a/drivers/s390/block/dasd_diag.c | ||
16467 | +++ b/drivers/s390/block/dasd_diag.c | ||
16468 | @@ -145,15 +145,6 @@ dasd_diag_erp(struct dasd_device *device) | ||
16469 | |||
16470 | mdsk_term_io(device); | ||
16471 | rc = mdsk_init_io(device, device->block->bp_block, 0, NULL); | ||
16472 | - if (rc == 4) { | ||
16473 | - if (!(device->features & DASD_FEATURE_READONLY)) { | ||
16474 | - dev_warn(&device->cdev->dev, | ||
16475 | - "The access mode of a DIAG device changed" | ||
16476 | - " to read-only"); | ||
16477 | - device->features |= DASD_FEATURE_READONLY; | ||
16478 | - } | ||
16479 | - rc = 0; | ||
16480 | - } | ||
16481 | if (rc) | ||
16482 | dev_warn(&device->cdev->dev, "DIAG ERP failed with " | ||
16483 | "rc=%d\n", rc); | ||
16484 | @@ -442,20 +433,16 @@ dasd_diag_check_device(struct dasd_device *device) | ||
16485 | for (sb = 512; sb < bsize; sb = sb << 1) | ||
16486 | block->s2b_shift++; | ||
16487 | rc = mdsk_init_io(device, block->bp_block, 0, NULL); | ||
16488 | - if (rc && (rc != 4)) { | ||
16489 | + if (rc) { | ||
16490 | dev_warn(&device->cdev->dev, "DIAG initialization " | ||
16491 | "failed with rc=%d\n", rc); | ||
16492 | rc = -EIO; | ||
16493 | } else { | ||
16494 | - if (rc == 4) | ||
16495 | - device->features |= DASD_FEATURE_READONLY; | ||
16496 | dev_info(&device->cdev->dev, | ||
16497 | - "New DASD with %ld byte/block, total size %ld KB%s\n", | ||
16498 | + "New DASD with %ld byte/block, total size %ld KB\n", | ||
16499 | (unsigned long) block->bp_block, | ||
16500 | (unsigned long) (block->blocks << | ||
16501 | - block->s2b_shift) >> 1, | ||
16502 | - (rc == 4) ? ", read-only device" : ""); | ||
16503 | - rc = 0; | ||
16504 | + block->s2b_shift) >> 1); | ||
16505 | } | ||
16506 | out_label: | ||
16507 | free_page((long) label); | ||
16508 | diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c | ||
16509 | index 678bb94..417b97c 100644 | ||
16510 | --- a/drivers/s390/block/dasd_eckd.c | ||
16511 | +++ b/drivers/s390/block/dasd_eckd.c | ||
16512 | @@ -88,9 +88,9 @@ dasd_eckd_probe (struct ccw_device *cdev) | ||
16513 | /* set ECKD specific ccw-device options */ | ||
16514 | ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE); | ||
16515 | if (ret) { | ||
16516 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", | ||
16517 | - "dasd_eckd_probe: could not set " | ||
16518 | - "ccw-device options"); | ||
16519 | + DBF_EVENT(DBF_WARNING, | ||
16520 | + "dasd_eckd_probe: could not set ccw-device options " | ||
16521 | + "for %s\n", dev_name(&cdev->dev)); | ||
16522 | return ret; | ||
16523 | } | ||
16524 | ret = dasd_generic_probe(cdev, &dasd_eckd_discipline); | ||
16525 | @@ -885,15 +885,16 @@ static int dasd_eckd_read_conf(struct dasd_device *device) | ||
16526 | rc = dasd_eckd_read_conf_lpm(device, &conf_data, | ||
16527 | &conf_len, lpm); | ||
16528 | if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */ | ||
16529 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | ||
16530 | + DBF_EVENT(DBF_WARNING, | ||
16531 | "Read configuration data returned " | ||
16532 | - "error %d", rc); | ||
16533 | + "error %d for device: %s", rc, | ||
16534 | + dev_name(&device->cdev->dev)); | ||
16535 | return rc; | ||
16536 | } | ||
16537 | if (conf_data == NULL) { | ||
16538 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | ||
16539 | - "No configuration data " | ||
16540 | - "retrieved"); | ||
16541 | + DBF_EVENT(DBF_WARNING, "No configuration " | ||
16542 | + "data retrieved for device: %s", | ||
16543 | + dev_name(&device->cdev->dev)); | ||
16544 | continue; /* no error */ | ||
16545 | } | ||
16546 | /* save first valid configuration data */ | ||
16547 | @@ -940,8 +941,9 @@ static int dasd_eckd_read_features(struct dasd_device *device) | ||
16548 | sizeof(struct dasd_rssd_features)), | ||
16549 | device); | ||
16550 | if (IS_ERR(cqr)) { | ||
16551 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not " | ||
16552 | - "allocate initialization request"); | ||
16553 | + DBF_EVENT(DBF_WARNING, "Could not allocate initialization " | ||
16554 | + "request for device: %s", | ||
16555 | + dev_name(&device->cdev->dev)); | ||
16556 | return PTR_ERR(cqr); | ||
16557 | } | ||
16558 | cqr->startdev = device; | ||
16559 | @@ -1069,8 +1071,10 @@ static int dasd_eckd_validate_server(struct dasd_device *device) | ||
16560 | /* may be requested feature is not available on server, | ||
16561 | * therefore just report error and go ahead */ | ||
16562 | private = (struct dasd_eckd_private *) device->private; | ||
16563 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " | ||
16564 | - "returned rc=%d", private->uid.ssid, rc); | ||
16565 | + DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x " | ||
16566 | + "returned rc=%d for device: %s", | ||
16567 | + private->uid.vendor, private->uid.serial, | ||
16568 | + private->uid.ssid, rc, dev_name(&device->cdev->dev)); | ||
16569 | /* RE-Read Configuration Data */ | ||
16570 | return dasd_eckd_read_conf(device); | ||
16571 | } | ||
16572 | @@ -1119,9 +1123,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | ||
16573 | if (private->uid.type == UA_BASE_DEVICE) { | ||
16574 | block = dasd_alloc_block(); | ||
16575 | if (IS_ERR(block)) { | ||
16576 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", | ||
16577 | - "could not allocate dasd " | ||
16578 | - "block structure"); | ||
16579 | + DBF_EVENT(DBF_WARNING, "could not allocate dasd " | ||
16580 | + "block structure for device: %s", | ||
16581 | + dev_name(&device->cdev->dev)); | ||
16582 | rc = PTR_ERR(block); | ||
16583 | goto out_err1; | ||
16584 | } | ||
16585 | @@ -1149,8 +1153,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | ||
16586 | rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, | ||
16587 | &private->rdc_data, 64); | ||
16588 | if (rc) { | ||
16589 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | ||
16590 | - "Read device characteristic failed, rc=%d", rc); | ||
16591 | + DBF_EVENT(DBF_WARNING, | ||
16592 | + "Read device characteristics failed, rc=%d for " | ||
16593 | + "device: %s", rc, dev_name(&device->cdev->dev)); | ||
16594 | goto out_err3; | ||
16595 | } | ||
16596 | /* find the vaild cylinder size */ | ||
16597 | @@ -2975,7 +2980,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, | ||
16598 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | ||
16599 | " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n", | ||
16600 | req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), | ||
16601 | - scsw_cc(&irb->scsw), req ? req->intrc : 0); | ||
16602 | + scsw_cc(&irb->scsw), req->intrc); | ||
16603 | len += sprintf(page + len, KERN_ERR PRINTK_HEADER | ||
16604 | " device %s: Failing CCW: %p\n", | ||
16605 | dev_name(&device->cdev->dev), | ||
16606 | @@ -3248,8 +3253,9 @@ int dasd_eckd_restore_device(struct dasd_device *device) | ||
16607 | rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, | ||
16608 | &temp_rdc_data, 64); | ||
16609 | if (rc) { | ||
16610 | - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | ||
16611 | - "Read device characteristic failed, rc=%d", rc); | ||
16612 | + DBF_EVENT(DBF_WARNING, | ||
16613 | + "Read device characteristics failed, rc=%d for " | ||
16614 | + "device: %s", rc, dev_name(&device->cdev->dev)); | ||
16615 | goto out_err; | ||
16616 | } | ||
16617 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
16618 | diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c | ||
16619 | index 227b4e9..f245377 100644 | ||
16620 | --- a/drivers/s390/block/dasd_fba.c | ||
16621 | +++ b/drivers/s390/block/dasd_fba.c | ||
16622 | @@ -141,8 +141,9 @@ dasd_fba_check_characteristics(struct dasd_device *device) | ||
16623 | } | ||
16624 | block = dasd_alloc_block(); | ||
16625 | if (IS_ERR(block)) { | ||
16626 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate " | ||
16627 | - "dasd block structure"); | ||
16628 | + DBF_EVENT(DBF_WARNING, "could not allocate dasd block " | ||
16629 | + "structure for device: %s", | ||
16630 | + dev_name(&device->cdev->dev)); | ||
16631 | device->private = NULL; | ||
16632 | kfree(private); | ||
16633 | return PTR_ERR(block); | ||
16634 | @@ -154,8 +155,9 @@ dasd_fba_check_characteristics(struct dasd_device *device) | ||
16635 | rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, | ||
16636 | &private->rdc_data, 32); | ||
16637 | if (rc) { | ||
16638 | - DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device " | ||
16639 | - "characteristics returned error %d", rc); | ||
16640 | + DBF_EVENT(DBF_WARNING, "Read device characteristics returned " | ||
16641 | + "error %d for device: %s", | ||
16642 | + rc, dev_name(&device->cdev->dev)); | ||
16643 | device->block = NULL; | ||
16644 | dasd_free_block(block); | ||
16645 | device->private = NULL; | ||
16646 | diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h | ||
16647 | index b19f309..8afd9fa 100644 | ||
16648 | --- a/drivers/s390/block/dasd_int.h | ||
16649 | +++ b/drivers/s390/block/dasd_int.h | ||
16650 | @@ -108,16 +108,6 @@ do { \ | ||
16651 | d_data); \ | ||
16652 | } while(0) | ||
16653 | |||
16654 | -#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \ | ||
16655 | -do { \ | ||
16656 | - struct ccw_dev_id __dev_id; \ | ||
16657 | - ccw_device_get_id(d_cdev, &__dev_id); \ | ||
16658 | - debug_sprintf_event(dasd_debug_area, \ | ||
16659 | - d_level, \ | ||
16660 | - "0.%x.%04x " d_str "\n", \ | ||
16661 | - __dev_id.ssid, __dev_id.devno, d_data); \ | ||
16662 | -} while (0) | ||
16663 | - | ||
16664 | #define DBF_EXC(d_level, d_str, d_data...)\ | ||
16665 | do { \ | ||
16666 | debug_sprintf_exception(dasd_debug_area, \ | ||
16667 | diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c | ||
16668 | index a5354b8..f756a1b 100644 | ||
16669 | --- a/drivers/s390/block/dasd_ioctl.c | ||
16670 | +++ b/drivers/s390/block/dasd_ioctl.c | ||
16671 | @@ -260,7 +260,7 @@ static int dasd_ioctl_information(struct dasd_block *block, | ||
16672 | struct ccw_dev_id dev_id; | ||
16673 | |||
16674 | base = block->base; | ||
16675 | - if (!base->discipline || !base->discipline->fill_info) | ||
16676 | + if (!base->discipline->fill_info) | ||
16677 | return -EINVAL; | ||
16678 | |||
16679 | dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); | ||
16680 | @@ -303,7 +303,10 @@ static int dasd_ioctl_information(struct dasd_block *block, | ||
16681 | dasd_info->features |= | ||
16682 | ((base->features & DASD_FEATURE_READONLY) != 0); | ||
16683 | |||
16684 | - memcpy(dasd_info->type, base->discipline->name, 4); | ||
16685 | + if (base->discipline) | ||
16686 | + memcpy(dasd_info->type, base->discipline->name, 4); | ||
16687 | + else | ||
16688 | + memcpy(dasd_info->type, "none", 4); | ||
16689 | |||
16690 | if (block->request_queue->request_fn) { | ||
16691 | struct list_head *l; | ||
16692 | diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c | ||
16693 | index f9d7d38..654daa3 100644 | ||
16694 | --- a/drivers/s390/block/dasd_proc.c | ||
16695 | +++ b/drivers/s390/block/dasd_proc.c | ||
16696 | @@ -71,7 +71,7 @@ dasd_devices_show(struct seq_file *m, void *v) | ||
16697 | /* Print device number. */ | ||
16698 | seq_printf(m, "%s", dev_name(&device->cdev->dev)); | ||
16699 | /* Print discipline string. */ | ||
16700 | - if (device->discipline != NULL) | ||
16701 | + if (device != NULL && device->discipline != NULL) | ||
16702 | seq_printf(m, "(%s)", device->discipline->name); | ||
16703 | else | ||
16704 | seq_printf(m, "(none)"); | ||
16705 | @@ -91,7 +91,10 @@ dasd_devices_show(struct seq_file *m, void *v) | ||
16706 | substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " "; | ||
16707 | seq_printf(m, "%4s: ", substr); | ||
16708 | /* Print device status information. */ | ||
16709 | - switch (device->state) { | ||
16710 | + switch ((device != NULL) ? device->state : -1) { | ||
16711 | + case -1: | ||
16712 | + seq_printf(m, "unknown"); | ||
16713 | + break; | ||
16714 | case DASD_STATE_NEW: | ||
16715 | seq_printf(m, "new"); | ||
16716 | break; | ||
16717 | diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c | ||
16718 | index 55f9973..2490b74 100644 | ||
16719 | --- a/drivers/s390/cio/device.c | ||
16720 | +++ b/drivers/s390/cio/device.c | ||
16721 | @@ -1292,7 +1292,7 @@ static int io_subchannel_probe(struct subchannel *sch) | ||
16722 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | ||
16723 | GFP_KERNEL | GFP_DMA); | ||
16724 | if (!sch->private) | ||
16725 | - goto out_schedule; | ||
16726 | + goto out_err; | ||
16727 | /* | ||
16728 | * First check if a fitting device may be found amongst the | ||
16729 | * disconnected devices or in the orphanage. | ||
16730 | @@ -1317,7 +1317,7 @@ static int io_subchannel_probe(struct subchannel *sch) | ||
16731 | } | ||
16732 | cdev = io_subchannel_create_ccwdev(sch); | ||
16733 | if (IS_ERR(cdev)) | ||
16734 | - goto out_schedule; | ||
16735 | + goto out_err; | ||
16736 | rc = io_subchannel_recog(cdev, sch); | ||
16737 | if (rc) { | ||
16738 | spin_lock_irqsave(sch->lock, flags); | ||
16739 | @@ -1325,7 +1325,9 @@ static int io_subchannel_probe(struct subchannel *sch) | ||
16740 | spin_unlock_irqrestore(sch->lock, flags); | ||
16741 | } | ||
16742 | return 0; | ||
16743 | - | ||
16744 | +out_err: | ||
16745 | + kfree(sch->private); | ||
16746 | + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
16747 | out_schedule: | ||
16748 | io_subchannel_schedule_removal(sch); | ||
16749 | return 0; | ||
16750 | @@ -1339,14 +1341,13 @@ io_subchannel_remove (struct subchannel *sch) | ||
16751 | |||
16752 | cdev = sch_get_cdev(sch); | ||
16753 | if (!cdev) | ||
16754 | - goto out_free; | ||
16755 | + return 0; | ||
16756 | /* Set ccw device to not operational and drop reference. */ | ||
16757 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
16758 | sch_set_cdev(sch, NULL); | ||
16759 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
16760 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
16761 | ccw_device_unregister(cdev); | ||
16762 | -out_free: | ||
16763 | kfree(sch->private); | ||
16764 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | ||
16765 | return 0; | ||
16766 | diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c | ||
16767 | index 13b703a..b9613d7 100644 | ||
16768 | --- a/drivers/s390/cio/device_fsm.c | ||
16769 | +++ b/drivers/s390/cio/device_fsm.c | ||
16770 | @@ -1080,14 +1080,14 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) | ||
16771 | ccw_device_start_id(cdev, 0); | ||
16772 | } | ||
16773 | |||
16774 | -static void ccw_device_disabled_irq(struct ccw_device *cdev, | ||
16775 | - enum dev_event dev_event) | ||
16776 | +static void | ||
16777 | +ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) | ||
16778 | { | ||
16779 | struct subchannel *sch; | ||
16780 | |||
16781 | sch = to_subchannel(cdev->dev.parent); | ||
16782 | /* | ||
16783 | - * An interrupt in a disabled state means a previous disable was not | ||
16784 | + * An interrupt in state offline means a previous disable was not | ||
16785 | * successful - should not happen, but we try to disable again. | ||
16786 | */ | ||
16787 | cio_disable_subchannel(sch); | ||
16788 | @@ -1150,12 +1150,25 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event) | ||
16789 | } | ||
16790 | |||
16791 | /* | ||
16792 | + * Bug operation action. | ||
16793 | + */ | ||
16794 | +static void | ||
16795 | +ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event) | ||
16796 | +{ | ||
16797 | + CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device " | ||
16798 | + "0.%x.%04x\n", cdev->private->state, dev_event, | ||
16799 | + cdev->private->dev_id.ssid, | ||
16800 | + cdev->private->dev_id.devno); | ||
16801 | + BUG(); | ||
16802 | +} | ||
16803 | + | ||
16804 | +/* | ||
16805 | * device statemachine | ||
16806 | */ | ||
16807 | fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { | ||
16808 | [DEV_STATE_NOT_OPER] = { | ||
16809 | [DEV_EVENT_NOTOPER] = ccw_device_nop, | ||
16810 | - [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, | ||
16811 | + [DEV_EVENT_INTERRUPT] = ccw_device_bug, | ||
16812 | [DEV_EVENT_TIMEOUT] = ccw_device_nop, | ||
16813 | [DEV_EVENT_VERIFY] = ccw_device_nop, | ||
16814 | }, | ||
16815 | @@ -1173,7 +1186,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { | ||
16816 | }, | ||
16817 | [DEV_STATE_OFFLINE] = { | ||
16818 | [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, | ||
16819 | - [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq, | ||
16820 | + [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, | ||
16821 | [DEV_EVENT_TIMEOUT] = ccw_device_nop, | ||
16822 | [DEV_EVENT_VERIFY] = ccw_device_offline_verify, | ||
16823 | }, | ||
16824 | @@ -1230,7 +1243,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { | ||
16825 | [DEV_STATE_DISCONNECTED] = { | ||
16826 | [DEV_EVENT_NOTOPER] = ccw_device_nop, | ||
16827 | [DEV_EVENT_INTERRUPT] = ccw_device_start_id, | ||
16828 | - [DEV_EVENT_TIMEOUT] = ccw_device_nop, | ||
16829 | + [DEV_EVENT_TIMEOUT] = ccw_device_bug, | ||
16830 | [DEV_EVENT_VERIFY] = ccw_device_start_id, | ||
16831 | }, | ||
16832 | [DEV_STATE_DISCONNECTED_SENSE_ID] = { | ||
16833 | diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c | ||
16834 | index 7f1e3ba..f4b0c47 100644 | ||
16835 | --- a/drivers/s390/crypto/zcrypt_pcicc.c | ||
16836 | +++ b/drivers/s390/crypto/zcrypt_pcicc.c | ||
16837 | @@ -373,8 +373,6 @@ static int convert_type86(struct zcrypt_device *zdev, | ||
16838 | zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; | ||
16839 | return -EAGAIN; | ||
16840 | } | ||
16841 | - if (service_rc == 8 && service_rs == 72) | ||
16842 | - return -EINVAL; | ||
16843 | zdev->online = 0; | ||
16844 | return -EAGAIN; /* repeat the request on a different device. */ | ||
16845 | } | ||
16846 | diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c | ||
16847 | index 1f9e923..5677b40 100644 | ||
16848 | --- a/drivers/s390/crypto/zcrypt_pcixcc.c | ||
16849 | +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | ||
16850 | @@ -462,8 +462,6 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | ||
16851 | } | ||
16852 | if (service_rc == 12 && service_rs == 769) | ||
16853 | return -EINVAL; | ||
16854 | - if (service_rc == 8 && service_rs == 72) | ||
16855 | - return -EINVAL; | ||
16856 | zdev->online = 0; | ||
16857 | return -EAGAIN; /* repeat the request on a different device. */ | ||
16858 | } | ||
16859 | diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c | ||
16860 | index 395c04c..c84eadd 100644 | ||
16861 | --- a/drivers/s390/net/netiucv.c | ||
16862 | +++ b/drivers/s390/net/netiucv.c | ||
16863 | @@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg) | ||
16864 | if (single_flag) { | ||
16865 | if ((skb = skb_dequeue(&conn->commit_queue))) { | ||
16866 | atomic_dec(&skb->users); | ||
16867 | + dev_kfree_skb_any(skb); | ||
16868 | if (privptr) { | ||
16869 | privptr->stats.tx_packets++; | ||
16870 | privptr->stats.tx_bytes += | ||
16871 | (skb->len - NETIUCV_HDRLEN | ||
16872 | - - NETIUCV_HDRLEN); | ||
16873 | + - NETIUCV_HDRLEN); | ||
16874 | } | ||
16875 | - dev_kfree_skb_any(skb); | ||
16876 | } | ||
16877 | } | ||
16878 | conn->tx_buff->data = conn->tx_buff->head; | ||
16879 | diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c | ||
16880 | index bfec4fa..3ee1cbc 100644 | ||
16881 | --- a/drivers/scsi/device_handler/scsi_dh.c | ||
16882 | +++ b/drivers/scsi/device_handler/scsi_dh.c | ||
16883 | @@ -304,15 +304,18 @@ static int scsi_dh_notifier(struct notifier_block *nb, | ||
16884 | sdev = to_scsi_device(dev); | ||
16885 | |||
16886 | if (action == BUS_NOTIFY_ADD_DEVICE) { | ||
16887 | - err = device_create_file(dev, &scsi_dh_state_attr); | ||
16888 | - /* don't care about err */ | ||
16889 | devinfo = device_handler_match(NULL, sdev); | ||
16890 | - if (devinfo) | ||
16891 | - err = scsi_dh_handler_attach(sdev, devinfo); | ||
16892 | + if (!devinfo) | ||
16893 | + goto out; | ||
16894 | + | ||
16895 | + err = scsi_dh_handler_attach(sdev, devinfo); | ||
16896 | + if (!err) | ||
16897 | + err = device_create_file(dev, &scsi_dh_state_attr); | ||
16898 | } else if (action == BUS_NOTIFY_DEL_DEVICE) { | ||
16899 | device_remove_file(dev, &scsi_dh_state_attr); | ||
16900 | scsi_dh_handler_detach(sdev, NULL); | ||
16901 | } | ||
16902 | +out: | ||
16903 | return err; | ||
16904 | } | ||
16905 | |||
16906 | diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c | ||
16907 | index 70ab5d0..704b8e0 100644 | ||
16908 | --- a/drivers/scsi/fcoe/fcoe.c | ||
16909 | +++ b/drivers/scsi/fcoe/fcoe.c | ||
16910 | @@ -137,7 +137,7 @@ static struct scsi_host_template fcoe_shost_template = { | ||
16911 | .change_queue_depth = fc_change_queue_depth, | ||
16912 | .change_queue_type = fc_change_queue_type, | ||
16913 | .this_id = -1, | ||
16914 | - .cmd_per_lun = 3, | ||
16915 | + .cmd_per_lun = 32, | ||
16916 | .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, | ||
16917 | .use_clustering = ENABLE_CLUSTERING, | ||
16918 | .sg_tablesize = SG_ALL, | ||
16919 | @@ -160,7 +160,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, | ||
16920 | { | ||
16921 | struct fcoe_ctlr *fip = &fcoe->ctlr; | ||
16922 | struct netdev_hw_addr *ha; | ||
16923 | - struct net_device *real_dev; | ||
16924 | u8 flogi_maddr[ETH_ALEN]; | ||
16925 | |||
16926 | fcoe->netdev = netdev; | ||
16927 | @@ -174,12 +173,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, | ||
16928 | |||
16929 | /* look for SAN MAC address, if multiple SAN MACs exist, only | ||
16930 | * use the first one for SPMA */ | ||
16931 | - real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? | ||
16932 | - vlan_dev_real_dev(netdev) : netdev; | ||
16933 | rcu_read_lock(); | ||
16934 | - for_each_dev_addr(real_dev, ha) { | ||
16935 | + for_each_dev_addr(netdev, ha) { | ||
16936 | if ((ha->type == NETDEV_HW_ADDR_T_SAN) && | ||
16937 | - (is_valid_ether_addr(ha->addr))) { | ||
16938 | + (is_valid_ether_addr(fip->ctl_src_addr))) { | ||
16939 | memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN); | ||
16940 | fip->spma = 1; | ||
16941 | break; | ||
16942 | @@ -667,7 +664,7 @@ static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid, | ||
16943 | { | ||
16944 | struct net_device *n = fcoe_netdev(lp); | ||
16945 | |||
16946 | - if (n->netdev_ops->ndo_fcoe_ddp_setup) | ||
16947 | + if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup) | ||
16948 | return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc); | ||
16949 | |||
16950 | return 0; | ||
16951 | @@ -684,7 +681,7 @@ static int fcoe_ddp_done(struct fc_lport *lp, u16 xid) | ||
16952 | { | ||
16953 | struct net_device *n = fcoe_netdev(lp); | ||
16954 | |||
16955 | - if (n->netdev_ops->ndo_fcoe_ddp_done) | ||
16956 | + if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done) | ||
16957 | return n->netdev_ops->ndo_fcoe_ddp_done(n, xid); | ||
16958 | return 0; | ||
16959 | } | ||
16960 | @@ -1634,7 +1631,7 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp) | ||
16961 | { | ||
16962 | struct fcoe_interface *fcoe; | ||
16963 | struct net_device *netdev; | ||
16964 | - int rc = 0; | ||
16965 | + int rc; | ||
16966 | |||
16967 | mutex_lock(&fcoe_config_mutex); | ||
16968 | #ifdef CONFIG_FCOE_MODULE | ||
16969 | diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c | ||
16970 | index 554626e..c968cc3 100644 | ||
16971 | --- a/drivers/scsi/hosts.c | ||
16972 | +++ b/drivers/scsi/hosts.c | ||
16973 | @@ -180,20 +180,14 @@ void scsi_remove_host(struct Scsi_Host *shost) | ||
16974 | EXPORT_SYMBOL(scsi_remove_host); | ||
16975 | |||
16976 | /** | ||
16977 | - * scsi_add_host_with_dma - add a scsi host with dma device | ||
16978 | + * scsi_add_host - add a scsi host | ||
16979 | * @shost: scsi host pointer to add | ||
16980 | * @dev: a struct device of type scsi class | ||
16981 | - * @dma_dev: dma device for the host | ||
16982 | - * | ||
16983 | - * Note: You rarely need to worry about this unless you're in a | ||
16984 | - * virtualised host environments, so use the simpler scsi_add_host() | ||
16985 | - * function instead. | ||
16986 | * | ||
16987 | * Return value: | ||
16988 | * 0 on success / != 0 for error | ||
16989 | **/ | ||
16990 | -int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | ||
16991 | - struct device *dma_dev) | ||
16992 | +int scsi_add_host(struct Scsi_Host *shost, struct device *dev) | ||
16993 | { | ||
16994 | struct scsi_host_template *sht = shost->hostt; | ||
16995 | int error = -EINVAL; | ||
16996 | @@ -213,7 +207,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | ||
16997 | |||
16998 | if (!shost->shost_gendev.parent) | ||
16999 | shost->shost_gendev.parent = dev ? dev : &platform_bus; | ||
17000 | - shost->dma_dev = dma_dev; | ||
17001 | |||
17002 | error = device_add(&shost->shost_gendev); | ||
17003 | if (error) | ||
17004 | @@ -269,7 +262,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, | ||
17005 | fail: | ||
17006 | return error; | ||
17007 | } | ||
17008 | -EXPORT_SYMBOL(scsi_add_host_with_dma); | ||
17009 | +EXPORT_SYMBOL(scsi_add_host); | ||
17010 | |||
17011 | static void scsi_host_dev_release(struct device *dev) | ||
17012 | { | ||
17013 | diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c | ||
17014 | index c3ff9a6..76d294f 100644 | ||
17015 | --- a/drivers/scsi/ipr.c | ||
17016 | +++ b/drivers/scsi/ipr.c | ||
17017 | @@ -6516,7 +6516,6 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | ||
17018 | int rc; | ||
17019 | |||
17020 | ENTER; | ||
17021 | - ioa_cfg->pdev->state_saved = true; | ||
17022 | rc = pci_restore_state(ioa_cfg->pdev); | ||
17023 | |||
17024 | if (rc != PCIBIOS_SUCCESSFUL) { | ||
17025 | diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c | ||
17026 | index d4cb3f9..c48799e 100644 | ||
17027 | --- a/drivers/scsi/libfc/fc_disc.c | ||
17028 | +++ b/drivers/scsi/libfc/fc_disc.c | ||
17029 | @@ -371,7 +371,7 @@ static void fc_disc_gpn_ft_req(struct fc_disc *disc) | ||
17030 | disc, lport->e_d_tov)) | ||
17031 | return; | ||
17032 | err: | ||
17033 | - fc_disc_error(disc, NULL); | ||
17034 | + fc_disc_error(disc, fp); | ||
17035 | } | ||
17036 | |||
17037 | /** | ||
17038 | diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c | ||
17039 | index 9298458..5cfa687 100644 | ||
17040 | --- a/drivers/scsi/libfc/fc_elsct.c | ||
17041 | +++ b/drivers/scsi/libfc/fc_elsct.c | ||
17042 | @@ -53,10 +53,8 @@ static struct fc_seq *fc_elsct_send(struct fc_lport *lport, | ||
17043 | did = FC_FID_DIR_SERV; | ||
17044 | } | ||
17045 | |||
17046 | - if (rc) { | ||
17047 | - fc_frame_free(fp); | ||
17048 | + if (rc) | ||
17049 | return NULL; | ||
17050 | - } | ||
17051 | |||
17052 | fc_fill_fc_hdr(fp, r_ctl, did, fc_host_port_id(lport->host), fh_type, | ||
17053 | FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); | ||
17054 | diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c | ||
17055 | index 7a14402..59a4408 100644 | ||
17056 | --- a/drivers/scsi/libfc/fc_fcp.c | ||
17057 | +++ b/drivers/scsi/libfc/fc_fcp.c | ||
17058 | @@ -302,13 +302,10 @@ static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp) | ||
17059 | if (!fsp) | ||
17060 | return; | ||
17061 | |||
17062 | - if (fsp->xfer_ddp == FC_XID_UNKNOWN) | ||
17063 | - return; | ||
17064 | - | ||
17065 | lp = fsp->lp; | ||
17066 | - if (lp->tt.ddp_done) { | ||
17067 | + if (fsp->xfer_ddp && lp->tt.ddp_done) { | ||
17068 | fsp->xfer_len = lp->tt.ddp_done(lp, fsp->xfer_ddp); | ||
17069 | - fsp->xfer_ddp = FC_XID_UNKNOWN; | ||
17070 | + fsp->xfer_ddp = 0; | ||
17071 | } | ||
17072 | } | ||
17073 | |||
17074 | @@ -575,8 +572,7 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq, | ||
17075 | tlen -= sg_bytes; | ||
17076 | remaining -= sg_bytes; | ||
17077 | |||
17078 | - if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) && | ||
17079 | - (tlen)) | ||
17080 | + if (tlen) | ||
17081 | continue; | ||
17082 | |||
17083 | /* | ||
17084 | @@ -1052,6 +1048,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp, | ||
17085 | |||
17086 | seq = lp->tt.exch_seq_send(lp, fp, resp, fc_fcp_pkt_destroy, fsp, 0); | ||
17087 | if (!seq) { | ||
17088 | + fc_frame_free(fp); | ||
17089 | rc = -1; | ||
17090 | goto unlock; | ||
17091 | } | ||
17092 | @@ -1316,6 +1313,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) | ||
17093 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ | ||
17094 | return; | ||
17095 | } | ||
17096 | + fc_frame_free(fp); | ||
17097 | retry: | ||
17098 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | ||
17099 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | ||
17100 | @@ -1563,9 +1561,10 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | ||
17101 | |||
17102 | seq = lp->tt.exch_seq_send(lp, fp, fc_fcp_srr_resp, NULL, | ||
17103 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); | ||
17104 | - if (!seq) | ||
17105 | + if (!seq) { | ||
17106 | + fc_frame_free(fp); | ||
17107 | goto retry; | ||
17108 | - | ||
17109 | + } | ||
17110 | fsp->recov_seq = seq; | ||
17111 | fsp->xfer_len = offset; | ||
17112 | fsp->xfer_contig_end = offset; | ||
17113 | @@ -1709,7 +1708,6 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) | ||
17114 | fsp->cmd = sc_cmd; /* save the cmd */ | ||
17115 | fsp->lp = lp; /* save the softc ptr */ | ||
17116 | fsp->rport = rport; /* set the remote port ptr */ | ||
17117 | - fsp->xfer_ddp = FC_XID_UNKNOWN; | ||
17118 | sc_cmd->scsi_done = done; | ||
17119 | |||
17120 | /* | ||
17121 | @@ -1848,8 +1846,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | ||
17122 | * scsi status is good but transport level | ||
17123 | * underrun. | ||
17124 | */ | ||
17125 | - sc_cmd->result = (fsp->state & FC_SRB_RCV_STATUS ? | ||
17126 | - DID_OK : DID_ERROR) << 16; | ||
17127 | + sc_cmd->result = DID_OK << 16; | ||
17128 | } else { | ||
17129 | /* | ||
17130 | * scsi got underrun, this is an error | ||
17131 | @@ -2049,16 +2046,18 @@ EXPORT_SYMBOL(fc_eh_host_reset); | ||
17132 | int fc_slave_alloc(struct scsi_device *sdev) | ||
17133 | { | ||
17134 | struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); | ||
17135 | + int queue_depth; | ||
17136 | |||
17137 | if (!rport || fc_remote_port_chkready(rport)) | ||
17138 | return -ENXIO; | ||
17139 | |||
17140 | - if (sdev->tagged_supported) | ||
17141 | - scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH); | ||
17142 | - else | ||
17143 | - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), | ||
17144 | - FC_FCP_DFLT_QUEUE_DEPTH); | ||
17145 | - | ||
17146 | + if (sdev->tagged_supported) { | ||
17147 | + if (sdev->host->hostt->cmd_per_lun) | ||
17148 | + queue_depth = sdev->host->hostt->cmd_per_lun; | ||
17149 | + else | ||
17150 | + queue_depth = FC_FCP_DFLT_QUEUE_DEPTH; | ||
17151 | + scsi_activate_tcq(sdev, queue_depth); | ||
17152 | + } | ||
17153 | return 0; | ||
17154 | } | ||
17155 | EXPORT_SYMBOL(fc_slave_alloc); | ||
17156 | diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c | ||
17157 | index 536492a..bd2f771 100644 | ||
17158 | --- a/drivers/scsi/libfc/fc_lport.c | ||
17159 | +++ b/drivers/scsi/libfc/fc_lport.c | ||
17160 | @@ -329,7 +329,7 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | ||
17161 | * @sp: current sequence in the RLIR exchange | ||
17162 | * @fp: RLIR request frame | ||
17163 | * | ||
17164 | - * Locking Note: The lport lock is expected to be held before calling | ||
17165 | + * Locking Note: The lport lock is exected to be held before calling | ||
17166 | * this function. | ||
17167 | */ | ||
17168 | static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | ||
17169 | @@ -348,7 +348,7 @@ static void fc_lport_recv_rlir_req(struct fc_seq *sp, struct fc_frame *fp, | ||
17170 | * @sp: current sequence in the ECHO exchange | ||
17171 | * @fp: ECHO request frame | ||
17172 | * | ||
17173 | - * Locking Note: The lport lock is expected to be held before calling | ||
17174 | + * Locking Note: The lport lock is exected to be held before calling | ||
17175 | * this function. | ||
17176 | */ | ||
17177 | static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | ||
17178 | @@ -361,7 +361,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | ||
17179 | void *dp; | ||
17180 | u32 f_ctl; | ||
17181 | |||
17182 | - FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", | ||
17183 | + FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", | ||
17184 | fc_lport_state(lport)); | ||
17185 | |||
17186 | len = fr_len(in_fp) - sizeof(struct fc_frame_header); | ||
17187 | @@ -374,7 +374,7 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | ||
17188 | if (fp) { | ||
17189 | dp = fc_frame_payload_get(fp, len); | ||
17190 | memcpy(dp, pp, len); | ||
17191 | - *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); | ||
17192 | + *((u32 *)dp) = htonl(ELS_LS_ACC << 24); | ||
17193 | sp = lport->tt.seq_start_next(sp); | ||
17194 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ; | ||
17195 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, | ||
17196 | @@ -385,12 +385,12 @@ static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp, | ||
17197 | } | ||
17198 | |||
17199 | /** | ||
17200 | - * fc_lport_recv_rnid_req() - Handle received Request Node ID data request | ||
17201 | - * @sp: The sequence in the RNID exchange | ||
17202 | - * @fp: The RNID request frame | ||
17203 | - * @lport: The local port recieving the RNID | ||
17204 | + * fc_lport_recv_echo_req() - Handle received Request Node ID data request | ||
17205 | + * @lport: Fibre Channel local port recieving the RNID | ||
17206 | + * @sp: current sequence in the RNID exchange | ||
17207 | + * @fp: RNID request frame | ||
17208 | * | ||
17209 | - * Locking Note: The lport lock is expected to be held before calling | ||
17210 | + * Locking Note: The lport lock is exected to be held before calling | ||
17211 | * this function. | ||
17212 | */ | ||
17213 | static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp, | ||
17214 | @@ -667,7 +667,7 @@ static void fc_lport_enter_ready(struct fc_lport *lport) | ||
17215 | * Accept it with the common service parameters indicating our N port. | ||
17216 | * Set up to do a PLOGI if we have the higher-number WWPN. | ||
17217 | * | ||
17218 | - * Locking Note: The lport lock is expected to be held before calling | ||
17219 | + * Locking Note: The lport lock is exected to be held before calling | ||
17220 | * this function. | ||
17221 | */ | ||
17222 | static void fc_lport_recv_flogi_req(struct fc_seq *sp_in, | ||
17223 | @@ -1115,7 +1115,7 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | ||
17224 | |||
17225 | if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, | ||
17226 | fc_lport_scr_resp, lport, lport->e_d_tov)) | ||
17227 | - fc_lport_error(lport, NULL); | ||
17228 | + fc_lport_error(lport, fp); | ||
17229 | } | ||
17230 | |||
17231 | /** | ||
17232 | @@ -1186,7 +1186,7 @@ static void fc_lport_enter_rpn_id(struct fc_lport *lport) | ||
17233 | if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID, | ||
17234 | fc_lport_rpn_id_resp, | ||
17235 | lport, lport->e_d_tov)) | ||
17236 | - fc_lport_error(lport, NULL); | ||
17237 | + fc_lport_error(lport, fp); | ||
17238 | } | ||
17239 | |||
17240 | static struct fc_rport_operations fc_lport_rport_ops = { | ||
17241 | @@ -1237,12 +1237,9 @@ static void fc_lport_timeout(struct work_struct *work) | ||
17242 | |||
17243 | switch (lport->state) { | ||
17244 | case LPORT_ST_DISABLED: | ||
17245 | - WARN_ON(1); | ||
17246 | - break; | ||
17247 | case LPORT_ST_READY: | ||
17248 | - WARN_ON(1); | ||
17249 | - break; | ||
17250 | case LPORT_ST_RESET: | ||
17251 | + WARN_ON(1); | ||
17252 | break; | ||
17253 | case LPORT_ST_FLOGI: | ||
17254 | fc_lport_enter_flogi(lport); | ||
17255 | @@ -1340,7 +1337,7 @@ static void fc_lport_enter_logo(struct fc_lport *lport) | ||
17256 | |||
17257 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, | ||
17258 | fc_lport_logo_resp, lport, lport->e_d_tov)) | ||
17259 | - fc_lport_error(lport, NULL); | ||
17260 | + fc_lport_error(lport, fp); | ||
17261 | } | ||
17262 | |||
17263 | /** | ||
17264 | @@ -1456,7 +1453,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) | ||
17265 | |||
17266 | if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI, | ||
17267 | fc_lport_flogi_resp, lport, lport->e_d_tov)) | ||
17268 | - fc_lport_error(lport, NULL); | ||
17269 | + fc_lport_error(lport, fp); | ||
17270 | } | ||
17271 | |||
17272 | /* Configure a fc_lport */ | ||
17273 | diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c | ||
17274 | index ff558a6..03ea674 100644 | ||
17275 | --- a/drivers/scsi/libfc/fc_rport.c | ||
17276 | +++ b/drivers/scsi/libfc/fc_rport.c | ||
17277 | @@ -86,7 +86,6 @@ static const char *fc_rport_state_names[] = { | ||
17278 | [RPORT_ST_LOGO] = "LOGO", | ||
17279 | [RPORT_ST_ADISC] = "ADISC", | ||
17280 | [RPORT_ST_DELETE] = "Delete", | ||
17281 | - [RPORT_ST_RESTART] = "Restart", | ||
17282 | }; | ||
17283 | |||
17284 | /** | ||
17285 | @@ -100,7 +99,8 @@ static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport, | ||
17286 | struct fc_rport_priv *rdata; | ||
17287 | |||
17288 | list_for_each_entry(rdata, &lport->disc.rports, peers) | ||
17289 | - if (rdata->ids.port_id == port_id) | ||
17290 | + if (rdata->ids.port_id == port_id && | ||
17291 | + rdata->rp_state != RPORT_ST_DELETE) | ||
17292 | return rdata; | ||
17293 | return NULL; | ||
17294 | } | ||
17295 | @@ -235,7 +235,6 @@ static void fc_rport_work(struct work_struct *work) | ||
17296 | struct fc_rport_operations *rport_ops; | ||
17297 | struct fc_rport_identifiers ids; | ||
17298 | struct fc_rport *rport; | ||
17299 | - int restart = 0; | ||
17300 | |||
17301 | mutex_lock(&rdata->rp_mutex); | ||
17302 | event = rdata->event; | ||
17303 | @@ -288,20 +287,8 @@ static void fc_rport_work(struct work_struct *work) | ||
17304 | mutex_unlock(&rdata->rp_mutex); | ||
17305 | |||
17306 | if (port_id != FC_FID_DIR_SERV) { | ||
17307 | - /* | ||
17308 | - * We must drop rp_mutex before taking disc_mutex. | ||
17309 | - * Re-evaluate state to allow for restart. | ||
17310 | - * A transition to RESTART state must only happen | ||
17311 | - * while disc_mutex is held and rdata is on the list. | ||
17312 | - */ | ||
17313 | mutex_lock(&lport->disc.disc_mutex); | ||
17314 | - mutex_lock(&rdata->rp_mutex); | ||
17315 | - if (rdata->rp_state == RPORT_ST_RESTART) | ||
17316 | - restart = 1; | ||
17317 | - else | ||
17318 | - list_del(&rdata->peers); | ||
17319 | - rdata->event = RPORT_EV_NONE; | ||
17320 | - mutex_unlock(&rdata->rp_mutex); | ||
17321 | + list_del(&rdata->peers); | ||
17322 | mutex_unlock(&lport->disc.disc_mutex); | ||
17323 | } | ||
17324 | |||
17325 | @@ -325,13 +312,7 @@ static void fc_rport_work(struct work_struct *work) | ||
17326 | mutex_unlock(&rdata->rp_mutex); | ||
17327 | fc_remote_port_delete(rport); | ||
17328 | } | ||
17329 | - if (restart) { | ||
17330 | - mutex_lock(&rdata->rp_mutex); | ||
17331 | - FC_RPORT_DBG(rdata, "work restart\n"); | ||
17332 | - fc_rport_enter_plogi(rdata); | ||
17333 | - mutex_unlock(&rdata->rp_mutex); | ||
17334 | - } else | ||
17335 | - kref_put(&rdata->kref, lport->tt.rport_destroy); | ||
17336 | + kref_put(&rdata->kref, lport->tt.rport_destroy); | ||
17337 | break; | ||
17338 | |||
17339 | default: | ||
17340 | @@ -361,12 +342,6 @@ int fc_rport_login(struct fc_rport_priv *rdata) | ||
17341 | FC_RPORT_DBG(rdata, "ADISC port\n"); | ||
17342 | fc_rport_enter_adisc(rdata); | ||
17343 | break; | ||
17344 | - case RPORT_ST_RESTART: | ||
17345 | - break; | ||
17346 | - case RPORT_ST_DELETE: | ||
17347 | - FC_RPORT_DBG(rdata, "Restart deleted port\n"); | ||
17348 | - fc_rport_state_enter(rdata, RPORT_ST_RESTART); | ||
17349 | - break; | ||
17350 | default: | ||
17351 | FC_RPORT_DBG(rdata, "Login to port\n"); | ||
17352 | fc_rport_enter_plogi(rdata); | ||
17353 | @@ -422,21 +397,20 @@ int fc_rport_logoff(struct fc_rport_priv *rdata) | ||
17354 | |||
17355 | if (rdata->rp_state == RPORT_ST_DELETE) { | ||
17356 | FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); | ||
17357 | + mutex_unlock(&rdata->rp_mutex); | ||
17358 | goto out; | ||
17359 | } | ||
17360 | |||
17361 | - if (rdata->rp_state == RPORT_ST_RESTART) | ||
17362 | - FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n"); | ||
17363 | - else | ||
17364 | - fc_rport_enter_logo(rdata); | ||
17365 | + fc_rport_enter_logo(rdata); | ||
17366 | |||
17367 | /* | ||
17368 | * Change the state to Delete so that we discard | ||
17369 | * the response. | ||
17370 | */ | ||
17371 | fc_rport_enter_delete(rdata, RPORT_EV_STOP); | ||
17372 | -out: | ||
17373 | mutex_unlock(&rdata->rp_mutex); | ||
17374 | + | ||
17375 | +out: | ||
17376 | return 0; | ||
17377 | } | ||
17378 | |||
17379 | @@ -492,7 +466,6 @@ static void fc_rport_timeout(struct work_struct *work) | ||
17380 | case RPORT_ST_READY: | ||
17381 | case RPORT_ST_INIT: | ||
17382 | case RPORT_ST_DELETE: | ||
17383 | - case RPORT_ST_RESTART: | ||
17384 | break; | ||
17385 | } | ||
17386 | |||
17387 | @@ -526,7 +499,6 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) | ||
17388 | fc_rport_enter_logo(rdata); | ||
17389 | break; | ||
17390 | case RPORT_ST_DELETE: | ||
17391 | - case RPORT_ST_RESTART: | ||
17392 | case RPORT_ST_READY: | ||
17393 | case RPORT_ST_INIT: | ||
17394 | break; | ||
17395 | @@ -660,7 +632,7 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) | ||
17396 | |||
17397 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI, | ||
17398 | fc_rport_plogi_resp, rdata, lport->e_d_tov)) | ||
17399 | - fc_rport_error_retry(rdata, NULL); | ||
17400 | + fc_rport_error_retry(rdata, fp); | ||
17401 | else | ||
17402 | kref_get(&rdata->kref); | ||
17403 | } | ||
17404 | @@ -821,7 +793,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) | ||
17405 | |||
17406 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, | ||
17407 | fc_rport_prli_resp, rdata, lport->e_d_tov)) | ||
17408 | - fc_rport_error_retry(rdata, NULL); | ||
17409 | + fc_rport_error_retry(rdata, fp); | ||
17410 | else | ||
17411 | kref_get(&rdata->kref); | ||
17412 | } | ||
17413 | @@ -917,7 +889,7 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) | ||
17414 | |||
17415 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV, | ||
17416 | fc_rport_rtv_resp, rdata, lport->e_d_tov)) | ||
17417 | - fc_rport_error_retry(rdata, NULL); | ||
17418 | + fc_rport_error_retry(rdata, fp); | ||
17419 | else | ||
17420 | kref_get(&rdata->kref); | ||
17421 | } | ||
17422 | @@ -947,7 +919,7 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) | ||
17423 | |||
17424 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO, | ||
17425 | fc_rport_logo_resp, rdata, lport->e_d_tov)) | ||
17426 | - fc_rport_error_retry(rdata, NULL); | ||
17427 | + fc_rport_error_retry(rdata, fp); | ||
17428 | else | ||
17429 | kref_get(&rdata->kref); | ||
17430 | } | ||
17431 | @@ -1034,7 +1006,7 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) | ||
17432 | } | ||
17433 | if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC, | ||
17434 | fc_rport_adisc_resp, rdata, lport->e_d_tov)) | ||
17435 | - fc_rport_error_retry(rdata, NULL); | ||
17436 | + fc_rport_error_retry(rdata, fp); | ||
17437 | else | ||
17438 | kref_get(&rdata->kref); | ||
17439 | } | ||
17440 | @@ -1276,7 +1248,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, | ||
17441 | } | ||
17442 | break; | ||
17443 | case RPORT_ST_PRLI: | ||
17444 | - case RPORT_ST_RTV: | ||
17445 | case RPORT_ST_READY: | ||
17446 | case RPORT_ST_ADISC: | ||
17447 | FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d " | ||
17448 | @@ -1284,14 +1255,11 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, | ||
17449 | /* XXX TBD - should reset */ | ||
17450 | break; | ||
17451 | case RPORT_ST_DELETE: | ||
17452 | - case RPORT_ST_LOGO: | ||
17453 | - case RPORT_ST_RESTART: | ||
17454 | - FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n", | ||
17455 | - fc_rport_state(rdata)); | ||
17456 | - mutex_unlock(&rdata->rp_mutex); | ||
17457 | - rjt_data.reason = ELS_RJT_BUSY; | ||
17458 | - rjt_data.explan = ELS_EXPL_NONE; | ||
17459 | - goto reject; | ||
17460 | + default: | ||
17461 | + FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n", | ||
17462 | + rdata->rp_state); | ||
17463 | + fc_frame_free(rx_fp); | ||
17464 | + goto out; | ||
17465 | } | ||
17466 | |||
17467 | /* | ||
17468 | @@ -1434,7 +1402,7 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, | ||
17469 | break; | ||
17470 | case FC_TYPE_FCP: | ||
17471 | fcp_parm = ntohl(rspp->spp_params); | ||
17472 | - if (fcp_parm & FCP_SPPF_RETRY) | ||
17473 | + if (fcp_parm * FCP_SPPF_RETRY) | ||
17474 | rdata->flags |= FC_RP_FLAGS_RETRY; | ||
17475 | rdata->supported_classes = FC_COS_CLASS3; | ||
17476 | if (fcp_parm & FCP_SPPF_INIT_FCN) | ||
17477 | @@ -1542,14 +1510,14 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, | ||
17478 | FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", | ||
17479 | fc_rport_state(rdata)); | ||
17480 | |||
17481 | - fc_rport_enter_delete(rdata, RPORT_EV_LOGO); | ||
17482 | - | ||
17483 | /* | ||
17484 | - * If the remote port was created due to discovery, set state | ||
17485 | - * to log back in. It may have seen a stale RSCN about us. | ||
17486 | + * If the remote port was created due to discovery, | ||
17487 | + * log back in. It may have seen a stale RSCN about us. | ||
17488 | */ | ||
17489 | - if (rdata->disc_id) | ||
17490 | - fc_rport_state_enter(rdata, RPORT_ST_RESTART); | ||
17491 | + if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id) | ||
17492 | + fc_rport_enter_plogi(rdata); | ||
17493 | + else | ||
17494 | + fc_rport_enter_delete(rdata, RPORT_EV_LOGO); | ||
17495 | mutex_unlock(&rdata->rp_mutex); | ||
17496 | } else | ||
17497 | FC_RPORT_ID_DBG(lport, sid, | ||
17498 | diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c | ||
17499 | index 549bc7d..562d8ce 100644 | ||
17500 | --- a/drivers/scsi/lpfc/lpfc_init.c | ||
17501 | +++ b/drivers/scsi/lpfc/lpfc_init.c | ||
17502 | @@ -2408,7 +2408,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | ||
17503 | vport->els_tmofunc.function = lpfc_els_timeout; | ||
17504 | vport->els_tmofunc.data = (unsigned long)vport; | ||
17505 | |||
17506 | - error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); | ||
17507 | + error = scsi_add_host(shost, dev); | ||
17508 | if (error) | ||
17509 | goto out_put_shost; | ||
17510 | |||
17511 | @@ -4384,13 +4384,9 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) | ||
17512 | pdev = phba->pcidev; | ||
17513 | |||
17514 | /* Set the device DMA mask size */ | ||
17515 | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 | ||
17516 | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { | ||
17517 | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 | ||
17518 | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { | ||
17519 | + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) | ||
17520 | + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) | ||
17521 | return error; | ||
17522 | - } | ||
17523 | - } | ||
17524 | |||
17525 | /* Get the bus address of Bar0 and Bar2 and the number of bytes | ||
17526 | * required by each mapping. | ||
17527 | @@ -5944,13 +5940,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | ||
17528 | pdev = phba->pcidev; | ||
17529 | |||
17530 | /* Set the device DMA mask size */ | ||
17531 | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 | ||
17532 | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(64)) != 0) { | ||
17533 | - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 | ||
17534 | - || pci_set_consistent_dma_mask(pdev,DMA_BIT_MASK(32)) != 0) { | ||
17535 | + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) | ||
17536 | + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) | ||
17537 | return error; | ||
17538 | - } | ||
17539 | - } | ||
17540 | |||
17541 | /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the | ||
17542 | * number of bytes required by each mapping. They are actually | ||
17543 | diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c | ||
17544 | index 518712c..a39addc 100644 | ||
17545 | --- a/drivers/scsi/megaraid/megaraid_sas.c | ||
17546 | +++ b/drivers/scsi/megaraid/megaraid_sas.c | ||
17547 | @@ -3032,7 +3032,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | ||
17548 | int error = 0, i; | ||
17549 | void *sense = NULL; | ||
17550 | dma_addr_t sense_handle; | ||
17551 | - unsigned long *sense_ptr; | ||
17552 | + u32 *sense_ptr; | ||
17553 | |||
17554 | memset(kbuff_arr, 0, sizeof(kbuff_arr)); | ||
17555 | |||
17556 | @@ -3109,7 +3109,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | ||
17557 | } | ||
17558 | |||
17559 | sense_ptr = | ||
17560 | - (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); | ||
17561 | + (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); | ||
17562 | *sense_ptr = sense_handle; | ||
17563 | } | ||
17564 | |||
17565 | @@ -3140,8 +3140,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | ||
17566 | * sense_ptr points to the location that has the user | ||
17567 | * sense buffer address | ||
17568 | */ | ||
17569 | - sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw + | ||
17570 | - ioc->sense_off); | ||
17571 | + sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + | ||
17572 | + ioc->sense_off); | ||
17573 | |||
17574 | if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), | ||
17575 | sense, ioc->sense_len)) { | ||
17576 | @@ -3451,7 +3451,7 @@ out: | ||
17577 | return retval; | ||
17578 | } | ||
17579 | |||
17580 | -static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR, | ||
17581 | +static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUGO, | ||
17582 | megasas_sysfs_show_poll_mode_io, | ||
17583 | megasas_sysfs_set_poll_mode_io); | ||
17584 | |||
17585 | diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | ||
17586 | index 5af66db..ab47c46 100644 | ||
17587 | --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | ||
17588 | +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | ||
17589 | @@ -348,14 +348,6 @@ typedef struct _MPI2_CONFIG_REPLY | ||
17590 | #define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) | ||
17591 | #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) | ||
17592 | #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) | ||
17593 | -#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) | ||
17594 | -#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) | ||
17595 | -#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) | ||
17596 | -#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) | ||
17597 | -#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) | ||
17598 | -#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) | ||
17599 | -#define MPI2_MFGPAGE_DEVID_SAS2208_7 (0x0086) | ||
17600 | -#define MPI2_MFGPAGE_DEVID_SAS2208_8 (0x0087) | ||
17601 | |||
17602 | |||
17603 | /* Manufacturing Page 0 */ | ||
17604 | diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | ||
17605 | index 1743640..86ab32d 100644 | ||
17606 | --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c | ||
17607 | +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | ||
17608 | @@ -196,28 +196,10 @@ static struct pci_device_id scsih_pci_table[] = { | ||
17609 | PCI_ANY_ID, PCI_ANY_ID }, | ||
17610 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, | ||
17611 | PCI_ANY_ID, PCI_ANY_ID }, | ||
17612 | - /* Meteor ~ 2116 */ | ||
17613 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, | ||
17614 | PCI_ANY_ID, PCI_ANY_ID }, | ||
17615 | { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, | ||
17616 | PCI_ANY_ID, PCI_ANY_ID }, | ||
17617 | - /* Thunderbolt ~ 2208 */ | ||
17618 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, | ||
17619 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17620 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, | ||
17621 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17622 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, | ||
17623 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17624 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, | ||
17625 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17626 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, | ||
17627 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17628 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, | ||
17629 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17630 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_7, | ||
17631 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17632 | - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_8, | ||
17633 | - PCI_ANY_ID, PCI_ANY_ID }, | ||
17634 | {0} /* Terminating entry */ | ||
17635 | }; | ||
17636 | MODULE_DEVICE_TABLE(pci, scsih_pci_table); | ||
17637 | diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c | ||
17638 | index 21e2bc4..fbcb82a 100644 | ||
17639 | --- a/drivers/scsi/qla2xxx/qla_attr.c | ||
17640 | +++ b/drivers/scsi/qla2xxx/qla_attr.c | ||
17641 | @@ -1654,8 +1654,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) | ||
17642 | fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); | ||
17643 | } | ||
17644 | |||
17645 | - if (scsi_add_host_with_dma(vha->host, &fc_vport->dev, | ||
17646 | - &ha->pdev->dev)) { | ||
17647 | + if (scsi_add_host(vha->host, &fc_vport->dev)) { | ||
17648 | DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n", | ||
17649 | vha->host_no, vha->vp_idx)); | ||
17650 | goto vport_create_failed_2; | ||
17651 | diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c | ||
17652 | index 06bbe0d..b79fca7 100644 | ||
17653 | --- a/drivers/scsi/qla2xxx/qla_os.c | ||
17654 | +++ b/drivers/scsi/qla2xxx/qla_os.c | ||
17655 | @@ -2016,13 +2016,13 @@ skip_dpc: | ||
17656 | DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", | ||
17657 | base_vha->host_no, ha)); | ||
17658 | |||
17659 | + base_vha->flags.init_done = 1; | ||
17660 | + base_vha->flags.online = 1; | ||
17661 | + | ||
17662 | ret = scsi_add_host(host, &pdev->dev); | ||
17663 | if (ret) | ||
17664 | goto probe_failed; | ||
17665 | |||
17666 | - base_vha->flags.init_done = 1; | ||
17667 | - base_vha->flags.online = 1; | ||
17668 | - | ||
17669 | ha->isp_ops->enable_intrs(ha); | ||
17670 | |||
17671 | scsi_scan_host(host); | ||
17672 | diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c | ||
17673 | index 802e91c..93c2622 100644 | ||
17674 | --- a/drivers/scsi/scsi_devinfo.c | ||
17675 | +++ b/drivers/scsi/scsi_devinfo.c | ||
17676 | @@ -168,10 +168,11 @@ static struct { | ||
17677 | {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, | ||
17678 | {"Generic", "USB Storage-SMC", "0180", BLIST_FORCELUN | BLIST_INQUIRY_36}, | ||
17679 | {"Generic", "USB Storage-SMC", "0207", BLIST_FORCELUN | BLIST_INQUIRY_36}, | ||
17680 | - {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, | ||
17681 | - {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, | ||
17682 | - {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, | ||
17683 | - {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2}, | ||
17684 | + {"HITACHI", "DF400", "*", BLIST_SPARSELUN}, | ||
17685 | + {"HITACHI", "DF500", "*", BLIST_SPARSELUN}, | ||
17686 | + {"HITACHI", "DF600", "*", BLIST_SPARSELUN}, | ||
17687 | + {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
17688 | + {"HITACHI", "OPEN-E", "*", BLIST_ATTACH_PQ3 | BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
17689 | {"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
17690 | {"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
17691 | {"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, | ||
17692 | diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c | ||
17693 | index bc9a881..5987da8 100644 | ||
17694 | --- a/drivers/scsi/scsi_lib.c | ||
17695 | +++ b/drivers/scsi/scsi_lib.c | ||
17696 | @@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | ||
17697 | */ | ||
17698 | req->next_rq->resid_len = scsi_in(cmd)->resid; | ||
17699 | |||
17700 | - scsi_release_buffers(cmd); | ||
17701 | blk_end_request_all(req, 0); | ||
17702 | |||
17703 | + scsi_release_buffers(cmd); | ||
17704 | scsi_next_command(cmd); | ||
17705 | return; | ||
17706 | } | ||
17707 | diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c | ||
17708 | index dcd1285..ac6855c 100644 | ||
17709 | --- a/drivers/scsi/scsi_lib_dma.c | ||
17710 | +++ b/drivers/scsi/scsi_lib_dma.c | ||
17711 | @@ -23,7 +23,7 @@ int scsi_dma_map(struct scsi_cmnd *cmd) | ||
17712 | int nseg = 0; | ||
17713 | |||
17714 | if (scsi_sg_count(cmd)) { | ||
17715 | - struct device *dev = cmd->device->host->dma_dev; | ||
17716 | + struct device *dev = cmd->device->host->shost_gendev.parent; | ||
17717 | |||
17718 | nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), | ||
17719 | cmd->sc_data_direction); | ||
17720 | @@ -41,7 +41,7 @@ EXPORT_SYMBOL(scsi_dma_map); | ||
17721 | void scsi_dma_unmap(struct scsi_cmnd *cmd) | ||
17722 | { | ||
17723 | if (scsi_sg_count(cmd)) { | ||
17724 | - struct device *dev = cmd->device->host->dma_dev; | ||
17725 | + struct device *dev = cmd->device->host->shost_gendev.parent; | ||
17726 | |||
17727 | dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd), | ||
17728 | cmd->sc_data_direction); | ||
17729 | diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c | ||
17730 | index bf52dec..c6f70da 100644 | ||
17731 | --- a/drivers/scsi/scsi_transport_fc.c | ||
17732 | +++ b/drivers/scsi/scsi_transport_fc.c | ||
17733 | @@ -648,22 +648,11 @@ static __init int fc_transport_init(void) | ||
17734 | return error; | ||
17735 | error = transport_class_register(&fc_vport_class); | ||
17736 | if (error) | ||
17737 | - goto unreg_host_class; | ||
17738 | + return error; | ||
17739 | error = transport_class_register(&fc_rport_class); | ||
17740 | if (error) | ||
17741 | - goto unreg_vport_class; | ||
17742 | - error = transport_class_register(&fc_transport_class); | ||
17743 | - if (error) | ||
17744 | - goto unreg_rport_class; | ||
17745 | - return 0; | ||
17746 | - | ||
17747 | -unreg_rport_class: | ||
17748 | - transport_class_unregister(&fc_rport_class); | ||
17749 | -unreg_vport_class: | ||
17750 | - transport_class_unregister(&fc_vport_class); | ||
17751 | -unreg_host_class: | ||
17752 | - transport_class_unregister(&fc_host_class); | ||
17753 | - return error; | ||
17754 | + return error; | ||
17755 | + return transport_class_register(&fc_transport_class); | ||
17756 | } | ||
17757 | |||
17758 | static void __exit fc_transport_exit(void) | ||
17759 | @@ -2395,7 +2384,6 @@ fc_rport_final_delete(struct work_struct *work) | ||
17760 | struct Scsi_Host *shost = rport_to_shost(rport); | ||
17761 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
17762 | unsigned long flags; | ||
17763 | - int do_callback = 0; | ||
17764 | |||
17765 | /* | ||
17766 | * if a scan is pending, flush the SCSI Host work_q so that | ||
17767 | @@ -2434,15 +2422,8 @@ fc_rport_final_delete(struct work_struct *work) | ||
17768 | * Avoid this call if we already called it when we preserved the | ||
17769 | * rport for the binding. | ||
17770 | */ | ||
17771 | - spin_lock_irqsave(shost->host_lock, flags); | ||
17772 | if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) && | ||
17773 | - (i->f->dev_loss_tmo_callbk)) { | ||
17774 | - rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
17775 | - do_callback = 1; | ||
17776 | - } | ||
17777 | - spin_unlock_irqrestore(shost->host_lock, flags); | ||
17778 | - | ||
17779 | - if (do_callback) | ||
17780 | + (i->f->dev_loss_tmo_callbk)) | ||
17781 | i->f->dev_loss_tmo_callbk(rport); | ||
17782 | |||
17783 | fc_bsg_remove(rport->rqst_q); | ||
17784 | @@ -2989,7 +2970,6 @@ fc_timeout_deleted_rport(struct work_struct *work) | ||
17785 | struct fc_internal *i = to_fc_internal(shost->transportt); | ||
17786 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | ||
17787 | unsigned long flags; | ||
17788 | - int do_callback = 0; | ||
17789 | |||
17790 | spin_lock_irqsave(shost->host_lock, flags); | ||
17791 | |||
17792 | @@ -3055,6 +3035,7 @@ fc_timeout_deleted_rport(struct work_struct *work) | ||
17793 | rport->roles = FC_PORT_ROLE_UNKNOWN; | ||
17794 | rport->port_state = FC_PORTSTATE_NOTPRESENT; | ||
17795 | rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; | ||
17796 | + rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
17797 | |||
17798 | /* | ||
17799 | * Pre-emptively kill I/O rather than waiting for the work queue | ||
17800 | @@ -3064,40 +3045,32 @@ fc_timeout_deleted_rport(struct work_struct *work) | ||
17801 | spin_unlock_irqrestore(shost->host_lock, flags); | ||
17802 | fc_terminate_rport_io(rport); | ||
17803 | |||
17804 | - spin_lock_irqsave(shost->host_lock, flags); | ||
17805 | - | ||
17806 | - if (rport->port_state == FC_PORTSTATE_NOTPRESENT) { /* still missing */ | ||
17807 | - | ||
17808 | - /* remove the identifiers that aren't used in the consisting binding */ | ||
17809 | - switch (fc_host->tgtid_bind_type) { | ||
17810 | - case FC_TGTID_BIND_BY_WWPN: | ||
17811 | - rport->node_name = -1; | ||
17812 | - rport->port_id = -1; | ||
17813 | - break; | ||
17814 | - case FC_TGTID_BIND_BY_WWNN: | ||
17815 | - rport->port_name = -1; | ||
17816 | - rport->port_id = -1; | ||
17817 | - break; | ||
17818 | - case FC_TGTID_BIND_BY_ID: | ||
17819 | - rport->node_name = -1; | ||
17820 | - rport->port_name = -1; | ||
17821 | - break; | ||
17822 | - case FC_TGTID_BIND_NONE: /* to keep compiler happy */ | ||
17823 | - break; | ||
17824 | - } | ||
17825 | - | ||
17826 | - /* | ||
17827 | - * As this only occurs if the remote port (scsi target) | ||
17828 | - * went away and didn't come back - we'll remove | ||
17829 | - * all attached scsi devices. | ||
17830 | - */ | ||
17831 | - rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE; | ||
17832 | - fc_queue_work(shost, &rport->stgt_delete_work); | ||
17833 | + BUG_ON(rport->port_state != FC_PORTSTATE_NOTPRESENT); | ||
17834 | |||
17835 | - do_callback = 1; | ||
17836 | + /* remove the identifiers that aren't used in the consisting binding */ | ||
17837 | + switch (fc_host->tgtid_bind_type) { | ||
17838 | + case FC_TGTID_BIND_BY_WWPN: | ||
17839 | + rport->node_name = -1; | ||
17840 | + rport->port_id = -1; | ||
17841 | + break; | ||
17842 | + case FC_TGTID_BIND_BY_WWNN: | ||
17843 | + rport->port_name = -1; | ||
17844 | + rport->port_id = -1; | ||
17845 | + break; | ||
17846 | + case FC_TGTID_BIND_BY_ID: | ||
17847 | + rport->node_name = -1; | ||
17848 | + rport->port_name = -1; | ||
17849 | + break; | ||
17850 | + case FC_TGTID_BIND_NONE: /* to keep compiler happy */ | ||
17851 | + break; | ||
17852 | } | ||
17853 | |||
17854 | - spin_unlock_irqrestore(shost->host_lock, flags); | ||
17855 | + /* | ||
17856 | + * As this only occurs if the remote port (scsi target) | ||
17857 | + * went away and didn't come back - we'll remove | ||
17858 | + * all attached scsi devices. | ||
17859 | + */ | ||
17860 | + fc_queue_work(shost, &rport->stgt_delete_work); | ||
17861 | |||
17862 | /* | ||
17863 | * Notify the driver that the rport is now dead. The LLDD will | ||
17864 | @@ -3105,7 +3078,7 @@ fc_timeout_deleted_rport(struct work_struct *work) | ||
17865 | * | ||
17866 | * Note: we set the CALLBK_DONE flag above to correspond | ||
17867 | */ | ||
17868 | - if (do_callback && i->f->dev_loss_tmo_callbk) | ||
17869 | + if (i->f->dev_loss_tmo_callbk) | ||
17870 | i->f->dev_loss_tmo_callbk(rport); | ||
17871 | } | ||
17872 | |||
17873 | diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c | ||
17874 | index de2f8c4..ad897df 100644 | ||
17875 | --- a/drivers/scsi/scsi_transport_iscsi.c | ||
17876 | +++ b/drivers/scsi/scsi_transport_iscsi.c | ||
17877 | @@ -627,10 +627,8 @@ static void __iscsi_block_session(struct work_struct *work) | ||
17878 | spin_unlock_irqrestore(&session->lock, flags); | ||
17879 | scsi_target_block(&session->dev); | ||
17880 | ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); | ||
17881 | - if (session->recovery_tmo >= 0) | ||
17882 | - queue_delayed_work(iscsi_eh_timer_workq, | ||
17883 | - &session->recovery_work, | ||
17884 | - session->recovery_tmo * HZ); | ||
17885 | + queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, | ||
17886 | + session->recovery_tmo * HZ); | ||
17887 | } | ||
17888 | |||
17889 | void iscsi_block_session(struct iscsi_cls_session *session) | ||
17890 | @@ -1350,7 +1348,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) | ||
17891 | switch (ev->u.set_param.param) { | ||
17892 | case ISCSI_PARAM_SESS_RECOVERY_TMO: | ||
17893 | sscanf(data, "%d", &value); | ||
17894 | - session->recovery_tmo = value; | ||
17895 | + if (value != 0) | ||
17896 | + session->recovery_tmo = value; | ||
17897 | break; | ||
17898 | default: | ||
17899 | err = transport->set_param(conn, ev->u.set_param.param, | ||
17900 | diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c | ||
17901 | index 5081f97..12d58a7 100644 | ||
17902 | --- a/drivers/scsi/st.c | ||
17903 | +++ b/drivers/scsi/st.c | ||
17904 | @@ -552,15 +552,13 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd | ||
17905 | SRpnt->waiting = waiting; | ||
17906 | |||
17907 | if (STp->buffer->do_dio) { | ||
17908 | - mdata->page_order = 0; | ||
17909 | mdata->nr_entries = STp->buffer->sg_segs; | ||
17910 | mdata->pages = STp->buffer->mapped_pages; | ||
17911 | } else { | ||
17912 | - mdata->page_order = STp->buffer->reserved_page_order; | ||
17913 | mdata->nr_entries = | ||
17914 | DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order); | ||
17915 | - mdata->pages = STp->buffer->reserved_pages; | ||
17916 | - mdata->offset = 0; | ||
17917 | + STp->buffer->map_data.pages = STp->buffer->reserved_pages; | ||
17918 | + STp->buffer->map_data.offset = 0; | ||
17919 | } | ||
17920 | |||
17921 | memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd)); | ||
17922 | @@ -3720,7 +3718,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | ||
17923 | priority |= __GFP_ZERO; | ||
17924 | |||
17925 | if (STbuffer->frp_segs) { | ||
17926 | - order = STbuffer->reserved_page_order; | ||
17927 | + order = STbuffer->map_data.page_order; | ||
17928 | b_size = PAGE_SIZE << order; | ||
17929 | } else { | ||
17930 | for (b_size = PAGE_SIZE, order = 0; | ||
17931 | @@ -3753,7 +3751,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | ||
17932 | segs++; | ||
17933 | } | ||
17934 | STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); | ||
17935 | - STbuffer->reserved_page_order = order; | ||
17936 | + STbuffer->map_data.page_order = order; | ||
17937 | |||
17938 | return 1; | ||
17939 | } | ||
17940 | @@ -3766,7 +3764,7 @@ static void clear_buffer(struct st_buffer * st_bp) | ||
17941 | |||
17942 | for (i=0; i < st_bp->frp_segs; i++) | ||
17943 | memset(page_address(st_bp->reserved_pages[i]), 0, | ||
17944 | - PAGE_SIZE << st_bp->reserved_page_order); | ||
17945 | + PAGE_SIZE << st_bp->map_data.page_order); | ||
17946 | st_bp->cleared = 1; | ||
17947 | } | ||
17948 | |||
17949 | @@ -3774,7 +3772,7 @@ static void clear_buffer(struct st_buffer * st_bp) | ||
17950 | /* Release the extra buffer */ | ||
17951 | static void normalize_buffer(struct st_buffer * STbuffer) | ||
17952 | { | ||
17953 | - int i, order = STbuffer->reserved_page_order; | ||
17954 | + int i, order = STbuffer->map_data.page_order; | ||
17955 | |||
17956 | for (i = 0; i < STbuffer->frp_segs; i++) { | ||
17957 | __free_pages(STbuffer->reserved_pages[i], order); | ||
17958 | @@ -3782,7 +3780,7 @@ static void normalize_buffer(struct st_buffer * STbuffer) | ||
17959 | } | ||
17960 | STbuffer->frp_segs = 0; | ||
17961 | STbuffer->sg_segs = 0; | ||
17962 | - STbuffer->reserved_page_order = 0; | ||
17963 | + STbuffer->map_data.page_order = 0; | ||
17964 | STbuffer->map_data.offset = 0; | ||
17965 | } | ||
17966 | |||
17967 | @@ -3792,7 +3790,7 @@ static void normalize_buffer(struct st_buffer * STbuffer) | ||
17968 | static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count) | ||
17969 | { | ||
17970 | int i, cnt, res, offset; | ||
17971 | - int length = PAGE_SIZE << st_bp->reserved_page_order; | ||
17972 | + int length = PAGE_SIZE << st_bp->map_data.page_order; | ||
17973 | |||
17974 | for (i = 0, offset = st_bp->buffer_bytes; | ||
17975 | i < st_bp->frp_segs && offset >= length; i++) | ||
17976 | @@ -3824,7 +3822,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in | ||
17977 | static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count) | ||
17978 | { | ||
17979 | int i, cnt, res, offset; | ||
17980 | - int length = PAGE_SIZE << st_bp->reserved_page_order; | ||
17981 | + int length = PAGE_SIZE << st_bp->map_data.page_order; | ||
17982 | |||
17983 | for (i = 0, offset = st_bp->read_pointer; | ||
17984 | i < st_bp->frp_segs && offset >= length; i++) | ||
17985 | @@ -3857,7 +3855,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset) | ||
17986 | { | ||
17987 | int src_seg, dst_seg, src_offset = 0, dst_offset; | ||
17988 | int count, total; | ||
17989 | - int length = PAGE_SIZE << st_bp->reserved_page_order; | ||
17990 | + int length = PAGE_SIZE << st_bp->map_data.page_order; | ||
17991 | |||
17992 | if (offset == 0) | ||
17993 | return; | ||
17994 | @@ -4579,6 +4577,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp, | ||
17995 | } | ||
17996 | |||
17997 | mdata->offset = uaddr & ~PAGE_MASK; | ||
17998 | + mdata->page_order = 0; | ||
17999 | STbp->mapped_pages = pages; | ||
18000 | |||
18001 | return nr_pages; | ||
18002 | diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h | ||
18003 | index f91a67c..544dc6b 100644 | ||
18004 | --- a/drivers/scsi/st.h | ||
18005 | +++ b/drivers/scsi/st.h | ||
18006 | @@ -46,7 +46,6 @@ struct st_buffer { | ||
18007 | struct st_request *last_SRpnt; | ||
18008 | struct st_cmdstatus cmdstat; | ||
18009 | struct page **reserved_pages; | ||
18010 | - int reserved_page_order; | ||
18011 | struct page **mapped_pages; | ||
18012 | struct rq_map_data map_data; | ||
18013 | unsigned char *b_data; | ||
18014 | diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c | ||
18015 | index 5ed1b82..737b4c9 100644 | ||
18016 | --- a/drivers/serial/8250.c | ||
18017 | +++ b/drivers/serial/8250.c | ||
18018 | @@ -83,9 +83,6 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */ | ||
18019 | |||
18020 | #define PASS_LIMIT 256 | ||
18021 | |||
18022 | -#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
18023 | - | ||
18024 | - | ||
18025 | /* | ||
18026 | * We default to IRQ0 for the "no irq" hack. Some | ||
18027 | * machine types want others as well - they're free | ||
18028 | @@ -1342,12 +1339,14 @@ static void serial8250_start_tx(struct uart_port *port) | ||
18029 | serial_out(up, UART_IER, up->ier); | ||
18030 | |||
18031 | if (up->bugs & UART_BUG_TXEN) { | ||
18032 | - unsigned char lsr; | ||
18033 | + unsigned char lsr, iir; | ||
18034 | lsr = serial_in(up, UART_LSR); | ||
18035 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | ||
18036 | + iir = serial_in(up, UART_IIR) & 0x0f; | ||
18037 | if ((up->port.type == PORT_RM9000) ? | ||
18038 | - (lsr & UART_LSR_THRE) : | ||
18039 | - (lsr & UART_LSR_TEMT)) | ||
18040 | + (lsr & UART_LSR_THRE && | ||
18041 | + (iir == UART_IIR_NO_INT || iir == UART_IIR_THRI)) : | ||
18042 | + (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT)) | ||
18043 | transmit_chars(up); | ||
18044 | } | ||
18045 | } | ||
18046 | @@ -1795,7 +1794,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) | ||
18047 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; | ||
18048 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
18049 | |||
18050 | - return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; | ||
18051 | + return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; | ||
18052 | } | ||
18053 | |||
18054 | static unsigned int serial8250_get_mctrl(struct uart_port *port) | ||
18055 | @@ -1853,6 +1852,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) | ||
18056 | spin_unlock_irqrestore(&up->port.lock, flags); | ||
18057 | } | ||
18058 | |||
18059 | +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) | ||
18060 | + | ||
18061 | /* | ||
18062 | * Wait for transmitter & holding register to empty | ||
18063 | */ | ||
18064 | diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c | ||
18065 | index deac67e..d71dfe3 100644 | ||
18066 | --- a/drivers/serial/8250_pnp.c | ||
18067 | +++ b/drivers/serial/8250_pnp.c | ||
18068 | @@ -328,7 +328,15 @@ static const struct pnp_device_id pnp_dev_table[] = { | ||
18069 | /* U.S. Robotics 56K Voice INT PnP*/ | ||
18070 | { "USR9190", 0 }, | ||
18071 | /* Wacom tablets */ | ||
18072 | - { "WACFXXX", 0 }, | ||
18073 | + { "WACF004", 0 }, | ||
18074 | + { "WACF005", 0 }, | ||
18075 | + { "WACF006", 0 }, | ||
18076 | + { "WACF007", 0 }, | ||
18077 | + { "WACF008", 0 }, | ||
18078 | + { "WACF009", 0 }, | ||
18079 | + { "WACF00A", 0 }, | ||
18080 | + { "WACF00B", 0 }, | ||
18081 | + { "WACF00C", 0 }, | ||
18082 | /* Compaq touchscreen */ | ||
18083 | { "FPI2002", 0 }, | ||
18084 | /* Fujitsu Stylistic touchscreens */ | ||
18085 | @@ -346,8 +354,6 @@ static const struct pnp_device_id pnp_dev_table[] = { | ||
18086 | { "FUJ02E5", 0 }, | ||
18087 | /* Fujitsu P-series tablet PC device */ | ||
18088 | { "FUJ02E6", 0 }, | ||
18089 | - /* Fujitsu Wacom 2FGT Tablet PC device */ | ||
18090 | - { "FUJ02E7", 0 }, | ||
18091 | /* | ||
18092 | * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in | ||
18093 | * disguise) | ||
18094 | diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c | ||
18095 | index ab2ab3c..377f271 100644 | ||
18096 | --- a/drivers/serial/uartlite.c | ||
18097 | +++ b/drivers/serial/uartlite.c | ||
18098 | @@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s, | ||
18099 | spin_unlock_irqrestore(&port->lock, flags); | ||
18100 | } | ||
18101 | |||
18102 | -static int __devinit ulite_console_setup(struct console *co, char *options) | ||
18103 | +static int __init ulite_console_setup(struct console *co, char *options) | ||
18104 | { | ||
18105 | struct uart_port *port; | ||
18106 | int baud = 9600; | ||
18107 | diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c | ||
18108 | index eb70843..8943015 100644 | ||
18109 | --- a/drivers/ssb/sprom.c | ||
18110 | +++ b/drivers/ssb/sprom.c | ||
18111 | @@ -13,8 +13,6 @@ | ||
18112 | |||
18113 | #include "ssb_private.h" | ||
18114 | |||
18115 | -#include <linux/ctype.h> | ||
18116 | - | ||
18117 | |||
18118 | static const struct ssb_sprom *fallback_sprom; | ||
18119 | |||
18120 | @@ -35,27 +33,17 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, | ||
18121 | static int hex2sprom(u16 *sprom, const char *dump, size_t len, | ||
18122 | size_t sprom_size_words) | ||
18123 | { | ||
18124 | - char c, tmp[5] = { 0 }; | ||
18125 | - int err, cnt = 0; | ||
18126 | + char tmp[5] = { 0 }; | ||
18127 | + int cnt = 0; | ||
18128 | unsigned long parsed; | ||
18129 | |||
18130 | - /* Strip whitespace at the end. */ | ||
18131 | - while (len) { | ||
18132 | - c = dump[len - 1]; | ||
18133 | - if (!isspace(c) && c != '\0') | ||
18134 | - break; | ||
18135 | - len--; | ||
18136 | - } | ||
18137 | - /* Length must match exactly. */ | ||
18138 | - if (len != sprom_size_words * 4) | ||
18139 | + if (len < sprom_size_words * 2) | ||
18140 | return -EINVAL; | ||
18141 | |||
18142 | while (cnt < sprom_size_words) { | ||
18143 | memcpy(tmp, dump, 4); | ||
18144 | dump += 4; | ||
18145 | - err = strict_strtoul(tmp, 16, &parsed); | ||
18146 | - if (err) | ||
18147 | - return err; | ||
18148 | + parsed = simple_strtoul(tmp, NULL, 16); | ||
18149 | sprom[cnt++] = swab16((u16)parsed); | ||
18150 | } | ||
18151 | |||
18152 | diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c | ||
18153 | index 43c57b7..f4c2657 100644 | ||
18154 | --- a/drivers/staging/asus_oled/asus_oled.c | ||
18155 | +++ b/drivers/staging/asus_oled/asus_oled.c | ||
18156 | @@ -194,11 +194,9 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr, | ||
18157 | { | ||
18158 | struct usb_interface *intf = to_usb_interface(dev); | ||
18159 | struct asus_oled_dev *odev = usb_get_intfdata(intf); | ||
18160 | - unsigned long value; | ||
18161 | - if (strict_strtoul(buf, 10, &value)) | ||
18162 | - return -EINVAL; | ||
18163 | + int temp = strict_strtoul(buf, 10, NULL); | ||
18164 | |||
18165 | - enable_oled(odev, value); | ||
18166 | + enable_oled(odev, temp); | ||
18167 | |||
18168 | return count; | ||
18169 | } | ||
18170 | @@ -209,12 +207,10 @@ static ssize_t class_set_enabled(struct device *device, | ||
18171 | { | ||
18172 | struct asus_oled_dev *odev = | ||
18173 | (struct asus_oled_dev *) dev_get_drvdata(device); | ||
18174 | - unsigned long value; | ||
18175 | |||
18176 | - if (strict_strtoul(buf, 10, &value)) | ||
18177 | - return -EINVAL; | ||
18178 | + int temp = strict_strtoul(buf, 10, NULL); | ||
18179 | |||
18180 | - enable_oled(odev, value); | ||
18181 | + enable_oled(odev, temp); | ||
18182 | |||
18183 | return count; | ||
18184 | } | ||
18185 | diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c | ||
18186 | index c2809f2..c5b6613 100644 | ||
18187 | --- a/drivers/staging/hv/Hv.c | ||
18188 | +++ b/drivers/staging/hv/Hv.c | ||
18189 | @@ -386,7 +386,7 @@ u16 HvSignalEvent(void) | ||
18190 | * retrieve the initialized message and event pages. Otherwise, we create and | ||
18191 | * initialize the message and event pages. | ||
18192 | */ | ||
18193 | -void HvSynicInit(void *irqarg) | ||
18194 | +int HvSynicInit(u32 irqVector) | ||
18195 | { | ||
18196 | u64 version; | ||
18197 | union hv_synic_simp simp; | ||
18198 | @@ -394,14 +394,13 @@ void HvSynicInit(void *irqarg) | ||
18199 | union hv_synic_sint sharedSint; | ||
18200 | union hv_synic_scontrol sctrl; | ||
18201 | u64 guestID; | ||
18202 | - u32 irqVector = *((u32 *)(irqarg)); | ||
18203 | - int cpu = smp_processor_id(); | ||
18204 | + int ret = 0; | ||
18205 | |||
18206 | DPRINT_ENTER(VMBUS); | ||
18207 | |||
18208 | if (!gHvContext.HypercallPage) { | ||
18209 | DPRINT_EXIT(VMBUS); | ||
18210 | - return; | ||
18211 | + return ret; | ||
18212 | } | ||
18213 | |||
18214 | /* Check the version */ | ||
18215 | @@ -426,27 +425,27 @@ void HvSynicInit(void *irqarg) | ||
18216 | */ | ||
18217 | rdmsrl(HV_X64_MSR_GUEST_OS_ID, guestID); | ||
18218 | if (guestID == HV_LINUX_GUEST_ID) { | ||
18219 | - gHvContext.synICMessagePage[cpu] = | ||
18220 | + gHvContext.synICMessagePage[0] = | ||
18221 | phys_to_virt(simp.BaseSimpGpa << PAGE_SHIFT); | ||
18222 | - gHvContext.synICEventPage[cpu] = | ||
18223 | + gHvContext.synICEventPage[0] = | ||
18224 | phys_to_virt(siefp.BaseSiefpGpa << PAGE_SHIFT); | ||
18225 | } else { | ||
18226 | DPRINT_ERR(VMBUS, "unknown guest id!!"); | ||
18227 | goto Cleanup; | ||
18228 | } | ||
18229 | DPRINT_DBG(VMBUS, "MAPPED: Simp: %p, Sifep: %p", | ||
18230 | - gHvContext.synICMessagePage[cpu], | ||
18231 | - gHvContext.synICEventPage[cpu]); | ||
18232 | + gHvContext.synICMessagePage[0], | ||
18233 | + gHvContext.synICEventPage[0]); | ||
18234 | } else { | ||
18235 | - gHvContext.synICMessagePage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); | ||
18236 | - if (gHvContext.synICMessagePage[cpu] == NULL) { | ||
18237 | + gHvContext.synICMessagePage[0] = osd_PageAlloc(1); | ||
18238 | + if (gHvContext.synICMessagePage[0] == NULL) { | ||
18239 | DPRINT_ERR(VMBUS, | ||
18240 | "unable to allocate SYNIC message page!!"); | ||
18241 | goto Cleanup; | ||
18242 | } | ||
18243 | |||
18244 | - gHvContext.synICEventPage[cpu] = (void *)get_zeroed_page(GFP_ATOMIC); | ||
18245 | - if (gHvContext.synICEventPage[cpu] == NULL) { | ||
18246 | + gHvContext.synICEventPage[0] = osd_PageAlloc(1); | ||
18247 | + if (gHvContext.synICEventPage[0] == NULL) { | ||
18248 | DPRINT_ERR(VMBUS, | ||
18249 | "unable to allocate SYNIC event page!!"); | ||
18250 | goto Cleanup; | ||
18251 | @@ -455,7 +454,7 @@ void HvSynicInit(void *irqarg) | ||
18252 | /* Setup the Synic's message page */ | ||
18253 | rdmsrl(HV_X64_MSR_SIMP, simp.AsUINT64); | ||
18254 | simp.SimpEnabled = 1; | ||
18255 | - simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[cpu]) | ||
18256 | + simp.BaseSimpGpa = virt_to_phys(gHvContext.synICMessagePage[0]) | ||
18257 | >> PAGE_SHIFT; | ||
18258 | |||
18259 | DPRINT_DBG(VMBUS, "HV_X64_MSR_SIMP msr set to: %llx", | ||
18260 | @@ -466,7 +465,7 @@ void HvSynicInit(void *irqarg) | ||
18261 | /* Setup the Synic's event page */ | ||
18262 | rdmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); | ||
18263 | siefp.SiefpEnabled = 1; | ||
18264 | - siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[cpu]) | ||
18265 | + siefp.BaseSiefpGpa = virt_to_phys(gHvContext.synICEventPage[0]) | ||
18266 | >> PAGE_SHIFT; | ||
18267 | |||
18268 | DPRINT_DBG(VMBUS, "HV_X64_MSR_SIEFP msr set to: %llx", | ||
18269 | @@ -502,30 +501,32 @@ void HvSynicInit(void *irqarg) | ||
18270 | |||
18271 | DPRINT_EXIT(VMBUS); | ||
18272 | |||
18273 | - return; | ||
18274 | + return ret; | ||
18275 | |||
18276 | Cleanup: | ||
18277 | + ret = -1; | ||
18278 | + | ||
18279 | if (gHvContext.GuestId == HV_LINUX_GUEST_ID) { | ||
18280 | - if (gHvContext.synICEventPage[cpu]) | ||
18281 | - osd_PageFree(gHvContext.synICEventPage[cpu], 1); | ||
18282 | + if (gHvContext.synICEventPage[0]) | ||
18283 | + osd_PageFree(gHvContext.synICEventPage[0], 1); | ||
18284 | |||
18285 | - if (gHvContext.synICMessagePage[cpu]) | ||
18286 | - osd_PageFree(gHvContext.synICMessagePage[cpu], 1); | ||
18287 | + if (gHvContext.synICMessagePage[0]) | ||
18288 | + osd_PageFree(gHvContext.synICMessagePage[0], 1); | ||
18289 | } | ||
18290 | |||
18291 | DPRINT_EXIT(VMBUS); | ||
18292 | - return; | ||
18293 | + | ||
18294 | + return ret; | ||
18295 | } | ||
18296 | |||
18297 | /** | ||
18298 | * HvSynicCleanup - Cleanup routine for HvSynicInit(). | ||
18299 | */ | ||
18300 | -void HvSynicCleanup(void *arg) | ||
18301 | +void HvSynicCleanup(void) | ||
18302 | { | ||
18303 | union hv_synic_sint sharedSint; | ||
18304 | union hv_synic_simp simp; | ||
18305 | union hv_synic_siefp siefp; | ||
18306 | - int cpu = smp_processor_id(); | ||
18307 | |||
18308 | DPRINT_ENTER(VMBUS); | ||
18309 | |||
18310 | @@ -538,7 +539,6 @@ void HvSynicCleanup(void *arg) | ||
18311 | |||
18312 | sharedSint.Masked = 1; | ||
18313 | |||
18314 | - /* Need to correctly cleanup in the case of SMP!!! */ | ||
18315 | /* Disable the interrupt */ | ||
18316 | wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, sharedSint.AsUINT64); | ||
18317 | |||
18318 | @@ -560,8 +560,8 @@ void HvSynicCleanup(void *arg) | ||
18319 | |||
18320 | wrmsrl(HV_X64_MSR_SIEFP, siefp.AsUINT64); | ||
18321 | |||
18322 | - osd_PageFree(gHvContext.synICMessagePage[cpu], 1); | ||
18323 | - osd_PageFree(gHvContext.synICEventPage[cpu], 1); | ||
18324 | + osd_PageFree(gHvContext.synICMessagePage[0], 1); | ||
18325 | + osd_PageFree(gHvContext.synICEventPage[0], 1); | ||
18326 | } | ||
18327 | |||
18328 | DPRINT_EXIT(VMBUS); | ||
18329 | diff --git a/drivers/staging/hv/Hv.h b/drivers/staging/hv/Hv.h | ||
18330 | index fce4b5c..5379e4b 100644 | ||
18331 | --- a/drivers/staging/hv/Hv.h | ||
18332 | +++ b/drivers/staging/hv/Hv.h | ||
18333 | @@ -93,7 +93,7 @@ static const struct hv_guid VMBUS_SERVICE_ID = { | ||
18334 | }, | ||
18335 | }; | ||
18336 | |||
18337 | -#define MAX_NUM_CPUS 32 | ||
18338 | +#define MAX_NUM_CPUS 1 | ||
18339 | |||
18340 | |||
18341 | struct hv_input_signal_event_buffer { | ||
18342 | @@ -137,8 +137,8 @@ extern u16 HvPostMessage(union hv_connection_id connectionId, | ||
18343 | |||
18344 | extern u16 HvSignalEvent(void); | ||
18345 | |||
18346 | -extern void HvSynicInit(void *irqarg); | ||
18347 | +extern int HvSynicInit(u32 irqVector); | ||
18348 | |||
18349 | -extern void HvSynicCleanup(void *arg); | ||
18350 | +extern void HvSynicCleanup(void); | ||
18351 | |||
18352 | #endif /* __HV_H__ */ | ||
18353 | diff --git a/drivers/staging/hv/Vmbus.c b/drivers/staging/hv/Vmbus.c | ||
18354 | index 35a023e..a4dd06f 100644 | ||
18355 | --- a/drivers/staging/hv/Vmbus.c | ||
18356 | +++ b/drivers/staging/hv/Vmbus.c | ||
18357 | @@ -129,7 +129,7 @@ static int VmbusOnDeviceAdd(struct hv_device *dev, void *AdditionalInfo) | ||
18358 | |||
18359 | /* strcpy(dev->name, "vmbus"); */ | ||
18360 | /* SynIC setup... */ | ||
18361 | - on_each_cpu(HvSynicInit, (void *)irqvector, 1); | ||
18362 | + ret = HvSynicInit(*irqvector); | ||
18363 | |||
18364 | /* Connect to VMBus in the root partition */ | ||
18365 | ret = VmbusConnect(); | ||
18366 | @@ -150,7 +150,7 @@ static int VmbusOnDeviceRemove(struct hv_device *dev) | ||
18367 | DPRINT_ENTER(VMBUS); | ||
18368 | VmbusChannelReleaseUnattachedChannels(); | ||
18369 | VmbusDisconnect(); | ||
18370 | - on_each_cpu(HvSynicCleanup, NULL, 1); | ||
18371 | + HvSynicCleanup(); | ||
18372 | DPRINT_EXIT(VMBUS); | ||
18373 | |||
18374 | return ret; | ||
18375 | @@ -173,8 +173,7 @@ static void VmbusOnCleanup(struct hv_driver *drv) | ||
18376 | */ | ||
18377 | static void VmbusOnMsgDPC(struct hv_driver *drv) | ||
18378 | { | ||
18379 | - int cpu = smp_processor_id(); | ||
18380 | - void *page_addr = gHvContext.synICMessagePage[cpu]; | ||
18381 | + void *page_addr = gHvContext.synICMessagePage[0]; | ||
18382 | struct hv_message *msg = (struct hv_message *)page_addr + | ||
18383 | VMBUS_MESSAGE_SINT; | ||
18384 | struct hv_message *copied; | ||
18385 | @@ -231,12 +230,11 @@ static void VmbusOnEventDPC(struct hv_driver *drv) | ||
18386 | static int VmbusOnISR(struct hv_driver *drv) | ||
18387 | { | ||
18388 | int ret = 0; | ||
18389 | - int cpu = smp_processor_id(); | ||
18390 | void *page_addr; | ||
18391 | struct hv_message *msg; | ||
18392 | union hv_synic_event_flags *event; | ||
18393 | |||
18394 | - page_addr = gHvContext.synICMessagePage[cpu]; | ||
18395 | + page_addr = gHvContext.synICMessagePage[0]; | ||
18396 | msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; | ||
18397 | |||
18398 | DPRINT_ENTER(VMBUS); | ||
18399 | @@ -250,7 +248,7 @@ static int VmbusOnISR(struct hv_driver *drv) | ||
18400 | } | ||
18401 | |||
18402 | /* TODO: Check if there are events to be process */ | ||
18403 | - page_addr = gHvContext.synICEventPage[cpu]; | ||
18404 | + page_addr = gHvContext.synICEventPage[0]; | ||
18405 | event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; | ||
18406 | |||
18407 | /* Since we are a child, we only need to check bit 0 */ | ||
18408 | diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h | ||
18409 | index 0d490c1..3222c22 100644 | ||
18410 | --- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h | ||
18411 | +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h | ||
18412 | @@ -1318,13 +1318,13 @@ extern int ieee80211_encrypt_fragment( | ||
18413 | struct sk_buff *frag, | ||
18414 | int hdr_len); | ||
18415 | |||
18416 | -extern int ieee80211_rtl_xmit(struct sk_buff *skb, | ||
18417 | +extern int ieee80211_xmit(struct sk_buff *skb, | ||
18418 | struct net_device *dev); | ||
18419 | extern void ieee80211_txb_free(struct ieee80211_txb *); | ||
18420 | |||
18421 | |||
18422 | /* ieee80211_rx.c */ | ||
18423 | -extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | ||
18424 | +extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | ||
18425 | struct ieee80211_rx_stats *rx_stats); | ||
18426 | extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, | ||
18427 | struct ieee80211_hdr_4addr *header, | ||
18428 | @@ -1376,8 +1376,8 @@ extern void ieee80211_stop_protocol(struct ieee80211_device *ieee); | ||
18429 | extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee); | ||
18430 | extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee); | ||
18431 | extern void ieee80211_reset_queue(struct ieee80211_device *ieee); | ||
18432 | -extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee); | ||
18433 | -extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee); | ||
18434 | +extern void ieee80211_wake_queue(struct ieee80211_device *ieee); | ||
18435 | +extern void ieee80211_stop_queue(struct ieee80211_device *ieee); | ||
18436 | extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee); | ||
18437 | extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee); | ||
18438 | extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee); | ||
18439 | @@ -1385,7 +1385,7 @@ extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct | ||
18440 | extern void notify_wx_assoc_event(struct ieee80211_device *ieee); | ||
18441 | extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success); | ||
18442 | extern void SendDisassociation(struct ieee80211_device *ieee,u8* asSta,u8 asRsn); | ||
18443 | -extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee); | ||
18444 | +extern void ieee80211_start_scan(struct ieee80211_device *ieee); | ||
18445 | |||
18446 | //Add for RF power on power off by lizhaoming 080512 | ||
18447 | extern void SendDisassociation(struct ieee80211_device *ieee, | ||
18448 | diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c | ||
18449 | index 7ad305b..5e2e79b 100644 | ||
18450 | --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c | ||
18451 | +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c | ||
18452 | @@ -470,7 +470,7 @@ drop: | ||
18453 | /* All received frames are sent to this function. @skb contains the frame in | ||
18454 | * IEEE 802.11 format, i.e., in the format it was sent over air. | ||
18455 | * This function is called only as a tasklet (software IRQ). */ | ||
18456 | -int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | ||
18457 | +int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, | ||
18458 | struct ieee80211_rx_stats *rx_stats) | ||
18459 | { | ||
18460 | struct net_device *dev = ieee->dev; | ||
18461 | diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c | ||
18462 | index a2fa9a9..334e4c7 100644 | ||
18463 | --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c | ||
18464 | +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c | ||
18465 | @@ -689,7 +689,7 @@ void ieee80211_stop_scan(struct ieee80211_device *ieee) | ||
18466 | } | ||
18467 | |||
18468 | /* called with ieee->lock held */ | ||
18469 | -void ieee80211_rtl_start_scan(struct ieee80211_device *ieee) | ||
18470 | +void ieee80211_start_scan(struct ieee80211_device *ieee) | ||
18471 | { | ||
18472 | if(IS_DOT11D_ENABLE(ieee) ) | ||
18473 | { | ||
18474 | @@ -1196,7 +1196,7 @@ void ieee80211_associate_step1(struct ieee80211_device *ieee) | ||
18475 | } | ||
18476 | } | ||
18477 | |||
18478 | -void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) | ||
18479 | +void ieee80211_auth_challenge(struct ieee80211_device *ieee, u8 *challenge, int chlen) | ||
18480 | { | ||
18481 | u8 *c; | ||
18482 | struct sk_buff *skb; | ||
18483 | @@ -1898,7 +1898,7 @@ associate_complete: | ||
18484 | |||
18485 | ieee80211_associate_step2(ieee); | ||
18486 | }else{ | ||
18487 | - ieee80211_rtl_auth_challenge(ieee, challenge, chlen); | ||
18488 | + ieee80211_auth_challenge(ieee, challenge, chlen); | ||
18489 | } | ||
18490 | }else{ | ||
18491 | ieee->softmac_stats.rx_auth_rs_err++; | ||
18492 | @@ -2047,7 +2047,7 @@ void ieee80211_reset_queue(struct ieee80211_device *ieee) | ||
18493 | |||
18494 | } | ||
18495 | |||
18496 | -void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee) | ||
18497 | +void ieee80211_wake_queue(struct ieee80211_device *ieee) | ||
18498 | { | ||
18499 | |||
18500 | unsigned long flags; | ||
18501 | @@ -2089,7 +2089,7 @@ exit : | ||
18502 | } | ||
18503 | |||
18504 | |||
18505 | -void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee) | ||
18506 | +void ieee80211_stop_queue(struct ieee80211_device *ieee) | ||
18507 | { | ||
18508 | //unsigned long flags; | ||
18509 | //spin_lock_irqsave(&ieee->lock,flags); | ||
18510 | @@ -2301,7 +2301,7 @@ void ieee80211_start_bss(struct ieee80211_device *ieee) | ||
18511 | //#else | ||
18512 | if (ieee->state == IEEE80211_NOLINK){ | ||
18513 | ieee->actscanning = true; | ||
18514 | - ieee80211_rtl_start_scan(ieee); | ||
18515 | + ieee80211_start_scan(ieee); | ||
18516 | } | ||
18517 | //#endif | ||
18518 | spin_unlock_irqrestore(&ieee->lock, flags); | ||
18519 | @@ -2357,7 +2357,7 @@ void ieee80211_associate_retry_wq(struct work_struct *work) | ||
18520 | if(ieee->state == IEEE80211_NOLINK){ | ||
18521 | ieee->beinretry = false; | ||
18522 | ieee->actscanning = true; | ||
18523 | - ieee80211_rtl_start_scan(ieee); | ||
18524 | + ieee80211_start_scan(ieee); | ||
18525 | } | ||
18526 | //YJ,add,080828, notify os here | ||
18527 | if(ieee->state == IEEE80211_NOLINK) | ||
18528 | diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c | ||
18529 | index c7996ea..e2945db 100644 | ||
18530 | --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c | ||
18531 | +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c | ||
18532 | @@ -305,7 +305,7 @@ ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network) | ||
18533 | } | ||
18534 | |||
18535 | /* SKBs are added to the ieee->tx_queue. */ | ||
18536 | -int ieee80211_rtl_xmit(struct sk_buff *skb, | ||
18537 | +int ieee80211_xmit(struct sk_buff *skb, | ||
18538 | struct net_device *dev) | ||
18539 | { | ||
18540 | struct ieee80211_device *ieee = netdev_priv(dev); | ||
18541 | diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c | ||
18542 | index 3f19143..53e654d 100644 | ||
18543 | --- a/drivers/staging/rtl8187se/r8180_core.c | ||
18544 | +++ b/drivers/staging/rtl8187se/r8180_core.c | ||
18545 | @@ -1830,7 +1830,7 @@ void rtl8180_rx(struct net_device *dev) | ||
18546 | if(priv->rx_skb->len > 4) | ||
18547 | skb_trim(priv->rx_skb,priv->rx_skb->len-4); | ||
18548 | #ifndef RX_DONT_PASS_UL | ||
18549 | - if(!ieee80211_rtl_rx(priv->ieee80211, | ||
18550 | + if(!ieee80211_rx(priv->ieee80211, | ||
18551 | priv->rx_skb, &stats)){ | ||
18552 | #endif // RX_DONT_PASS_UL | ||
18553 | |||
18554 | @@ -1936,11 +1936,11 @@ rate) | ||
18555 | if (!check_nic_enought_desc(dev, priority)){ | ||
18556 | DMESGW("Error: no descriptor left by previous TX (avail %d) ", | ||
18557 | get_curr_tx_free_desc(dev, priority)); | ||
18558 | - ieee80211_rtl_stop_queue(priv->ieee80211); | ||
18559 | + ieee80211_stop_queue(priv->ieee80211); | ||
18560 | } | ||
18561 | rtl8180_tx(dev, skb->data, skb->len, priority, morefrag,0,rate); | ||
18562 | if (!check_nic_enought_desc(dev, priority)) | ||
18563 | - ieee80211_rtl_stop_queue(priv->ieee80211); | ||
18564 | + ieee80211_stop_queue(priv->ieee80211); | ||
18565 | |||
18566 | spin_unlock_irqrestore(&priv->tx_lock,flags); | ||
18567 | } | ||
18568 | @@ -3846,7 +3846,7 @@ static const struct net_device_ops rtl8180_netdev_ops = { | ||
18569 | .ndo_set_mac_address = r8180_set_mac_adr, | ||
18570 | .ndo_validate_addr = eth_validate_addr, | ||
18571 | .ndo_change_mtu = eth_change_mtu, | ||
18572 | - .ndo_start_xmit = ieee80211_rtl_xmit, | ||
18573 | + .ndo_start_xmit = ieee80211_xmit, | ||
18574 | }; | ||
18575 | |||
18576 | static int __devinit rtl8180_pci_probe(struct pci_dev *pdev, | ||
18577 | @@ -4066,7 +4066,7 @@ void rtl8180_try_wake_queue(struct net_device *dev, int pri) | ||
18578 | spin_unlock_irqrestore(&priv->tx_lock,flags); | ||
18579 | |||
18580 | if(enough_desc) | ||
18581 | - ieee80211_rtl_wake_queue(priv->ieee80211); | ||
18582 | + ieee80211_wake_queue(priv->ieee80211); | ||
18583 | } | ||
18584 | |||
18585 | void rtl8180_tx_isr(struct net_device *dev, int pri,short error) | ||
18586 | diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c | ||
18587 | index 637ee8e..766892e 100644 | ||
18588 | --- a/drivers/staging/rtl8187se/r8180_wx.c | ||
18589 | +++ b/drivers/staging/rtl8187se/r8180_wx.c | ||
18590 | @@ -377,7 +377,7 @@ static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a, | ||
18591 | // queue_work(priv->ieee80211->wq, &priv->ieee80211->wx_sync_scan_wq); | ||
18592 | //printk("start scan============================>\n"); | ||
18593 | ieee80211_softmac_ips_scan_syncro(priv->ieee80211); | ||
18594 | -//ieee80211_rtl_start_scan(priv->ieee80211); | ||
18595 | +//ieee80211_start_scan(priv->ieee80211); | ||
18596 | /* intentionally forget to up sem */ | ||
18597 | // up(&priv->ieee80211->wx_sem); | ||
18598 | ret = 0; | ||
18599 | diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c | ||
18600 | index d9461c9..2473cf0 100644 | ||
18601 | --- a/drivers/usb/class/usbtmc.c | ||
18602 | +++ b/drivers/usb/class/usbtmc.c | ||
18603 | @@ -562,16 +562,10 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf, | ||
18604 | n_bytes = roundup(12 + this_part, 4); | ||
18605 | memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part)); | ||
18606 | |||
18607 | - do { | ||
18608 | - retval = usb_bulk_msg(data->usb_dev, | ||
18609 | - usb_sndbulkpipe(data->usb_dev, | ||
18610 | - data->bulk_out), | ||
18611 | - buffer, n_bytes, | ||
18612 | - &actual, USBTMC_TIMEOUT); | ||
18613 | - if (retval != 0) | ||
18614 | - break; | ||
18615 | - n_bytes -= actual; | ||
18616 | - } while (n_bytes); | ||
18617 | + retval = usb_bulk_msg(data->usb_dev, | ||
18618 | + usb_sndbulkpipe(data->usb_dev, | ||
18619 | + data->bulk_out), | ||
18620 | + buffer, n_bytes, &actual, USBTMC_TIMEOUT); | ||
18621 | |||
18622 | data->bTag_last_write = data->bTag; | ||
18623 | data->bTag++; | ||
18624 | diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c | ||
18625 | index 355dffc..96f1171 100644 | ||
18626 | --- a/drivers/usb/core/devices.c | ||
18627 | +++ b/drivers/usb/core/devices.c | ||
18628 | @@ -494,7 +494,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, | ||
18629 | return 0; | ||
18630 | /* allocate 2^1 pages = 8K (on i386); | ||
18631 | * should be more than enough for one device */ | ||
18632 | - pages_start = (char *)__get_free_pages(GFP_NOIO, 1); | ||
18633 | + pages_start = (char *)__get_free_pages(GFP_KERNEL, 1); | ||
18634 | if (!pages_start) | ||
18635 | return -ENOMEM; | ||
18636 | |||
18637 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c | ||
18638 | index 24120db..181f78c 100644 | ||
18639 | --- a/drivers/usb/core/devio.c | ||
18640 | +++ b/drivers/usb/core/devio.c | ||
18641 | @@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg) | ||
18642 | void __user *addr = as->userurb; | ||
18643 | unsigned int i; | ||
18644 | |||
18645 | - if (as->userbuffer && urb->actual_length) | ||
18646 | + if (as->userbuffer) | ||
18647 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | ||
18648 | - urb->actual_length)) | ||
18649 | + urb->transfer_buffer_length)) | ||
18650 | goto err_out; | ||
18651 | if (put_user(as->status, &userurb->status)) | ||
18652 | goto err_out; | ||
18653 | @@ -1334,11 +1334,14 @@ static int processcompl(struct async *as, void __user * __user *arg) | ||
18654 | } | ||
18655 | } | ||
18656 | |||
18657 | + free_async(as); | ||
18658 | + | ||
18659 | if (put_user(addr, (void __user * __user *)arg)) | ||
18660 | return -EFAULT; | ||
18661 | return 0; | ||
18662 | |||
18663 | err_out: | ||
18664 | + free_async(as); | ||
18665 | return -EFAULT; | ||
18666 | } | ||
18667 | |||
18668 | @@ -1368,11 +1371,8 @@ static struct async *reap_as(struct dev_state *ps) | ||
18669 | static int proc_reapurb(struct dev_state *ps, void __user *arg) | ||
18670 | { | ||
18671 | struct async *as = reap_as(ps); | ||
18672 | - if (as) { | ||
18673 | - int retval = processcompl(as, (void __user * __user *)arg); | ||
18674 | - free_async(as); | ||
18675 | - return retval; | ||
18676 | - } | ||
18677 | + if (as) | ||
18678 | + return processcompl(as, (void __user * __user *)arg); | ||
18679 | if (signal_pending(current)) | ||
18680 | return -EINTR; | ||
18681 | return -EIO; | ||
18682 | @@ -1380,16 +1380,11 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) | ||
18683 | |||
18684 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) | ||
18685 | { | ||
18686 | - int retval; | ||
18687 | struct async *as; | ||
18688 | |||
18689 | - as = async_getcompleted(ps); | ||
18690 | - retval = -EAGAIN; | ||
18691 | - if (as) { | ||
18692 | - retval = processcompl(as, (void __user * __user *)arg); | ||
18693 | - free_async(as); | ||
18694 | - } | ||
18695 | - return retval; | ||
18696 | + if (!(as = async_getcompleted(ps))) | ||
18697 | + return -EAGAIN; | ||
18698 | + return processcompl(as, (void __user * __user *)arg); | ||
18699 | } | ||
18700 | |||
18701 | #ifdef CONFIG_COMPAT | ||
18702 | @@ -1440,9 +1435,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | ||
18703 | void __user *addr = as->userurb; | ||
18704 | unsigned int i; | ||
18705 | |||
18706 | - if (as->userbuffer && urb->actual_length) | ||
18707 | + if (as->userbuffer) | ||
18708 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, | ||
18709 | - urb->actual_length)) | ||
18710 | + urb->transfer_buffer_length)) | ||
18711 | return -EFAULT; | ||
18712 | if (put_user(as->status, &userurb->status)) | ||
18713 | return -EFAULT; | ||
18714 | @@ -1462,6 +1457,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | ||
18715 | } | ||
18716 | } | ||
18717 | |||
18718 | + free_async(as); | ||
18719 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) | ||
18720 | return -EFAULT; | ||
18721 | return 0; | ||
18722 | @@ -1470,11 +1466,8 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) | ||
18723 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | ||
18724 | { | ||
18725 | struct async *as = reap_as(ps); | ||
18726 | - if (as) { | ||
18727 | - int retval = processcompl_compat(as, (void __user * __user *)arg); | ||
18728 | - free_async(as); | ||
18729 | - return retval; | ||
18730 | - } | ||
18731 | + if (as) | ||
18732 | + return processcompl_compat(as, (void __user * __user *)arg); | ||
18733 | if (signal_pending(current)) | ||
18734 | return -EINTR; | ||
18735 | return -EIO; | ||
18736 | @@ -1482,16 +1475,11 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) | ||
18737 | |||
18738 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) | ||
18739 | { | ||
18740 | - int retval; | ||
18741 | struct async *as; | ||
18742 | |||
18743 | - retval = -EAGAIN; | ||
18744 | - as = async_getcompleted(ps); | ||
18745 | - if (as) { | ||
18746 | - retval = processcompl_compat(as, (void __user * __user *)arg); | ||
18747 | - free_async(as); | ||
18748 | - } | ||
18749 | - return retval; | ||
18750 | + if (!(as = async_getcompleted(ps))) | ||
18751 | + return -EAGAIN; | ||
18752 | + return processcompl_compat(as, (void __user * __user *)arg); | ||
18753 | } | ||
18754 | |||
18755 | #endif | ||
18756 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c | ||
18757 | index 1a7d54b..0f857e6 100644 | ||
18758 | --- a/drivers/usb/core/hub.c | ||
18759 | +++ b/drivers/usb/core/hub.c | ||
18760 | @@ -1612,12 +1612,12 @@ static inline void announce_device(struct usb_device *udev) { } | ||
18761 | #endif | ||
18762 | |||
18763 | /** | ||
18764 | - * usb_enumerate_device_otg - FIXME (usbcore-internal) | ||
18765 | + * usb_configure_device_otg - FIXME (usbcore-internal) | ||
18766 | * @udev: newly addressed device (in ADDRESS state) | ||
18767 | * | ||
18768 | - * Finish enumeration for On-The-Go devices | ||
18769 | + * Do configuration for On-The-Go devices | ||
18770 | */ | ||
18771 | -static int usb_enumerate_device_otg(struct usb_device *udev) | ||
18772 | +static int usb_configure_device_otg(struct usb_device *udev) | ||
18773 | { | ||
18774 | int err = 0; | ||
18775 | |||
18776 | @@ -1688,7 +1688,7 @@ fail: | ||
18777 | |||
18778 | |||
18779 | /** | ||
18780 | - * usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal) | ||
18781 | + * usb_configure_device - Detect and probe device intfs/otg (usbcore-internal) | ||
18782 | * @udev: newly addressed device (in ADDRESS state) | ||
18783 | * | ||
18784 | * This is only called by usb_new_device() and usb_authorize_device() | ||
18785 | @@ -1699,7 +1699,7 @@ fail: | ||
18786 | * the string descriptors, as they will be errored out by the device | ||
18787 | * until it has been authorized. | ||
18788 | */ | ||
18789 | -static int usb_enumerate_device(struct usb_device *udev) | ||
18790 | +static int usb_configure_device(struct usb_device *udev) | ||
18791 | { | ||
18792 | int err; | ||
18793 | |||
18794 | @@ -1723,7 +1723,7 @@ static int usb_enumerate_device(struct usb_device *udev) | ||
18795 | udev->descriptor.iManufacturer); | ||
18796 | udev->serial = usb_cache_string(udev, udev->descriptor.iSerialNumber); | ||
18797 | } | ||
18798 | - err = usb_enumerate_device_otg(udev); | ||
18799 | + err = usb_configure_device_otg(udev); | ||
18800 | fail: | ||
18801 | return err; | ||
18802 | } | ||
18803 | @@ -1733,8 +1733,8 @@ fail: | ||
18804 | * usb_new_device - perform initial device setup (usbcore-internal) | ||
18805 | * @udev: newly addressed device (in ADDRESS state) | ||
18806 | * | ||
18807 | - * This is called with devices which have been detected but not fully | ||
18808 | - * enumerated. The device descriptor is available, but not descriptors | ||
18809 | + * This is called with devices which have been enumerated, but not yet | ||
18810 | + * configured. The device descriptor is available, but not descriptors | ||
18811 | * for any device configuration. The caller must have locked either | ||
18812 | * the parent hub (if udev is a normal device) or else the | ||
18813 | * usb_bus_list_lock (if udev is a root hub). The parent's pointer to | ||
18814 | @@ -1757,8 +1757,8 @@ int usb_new_device(struct usb_device *udev) | ||
18815 | if (udev->parent) | ||
18816 | usb_autoresume_device(udev->parent); | ||
18817 | |||
18818 | - usb_detect_quirks(udev); | ||
18819 | - err = usb_enumerate_device(udev); /* Read descriptors */ | ||
18820 | + usb_detect_quirks(udev); /* Determine quirks */ | ||
18821 | + err = usb_configure_device(udev); /* detect & probe dev/intfs */ | ||
18822 | if (err < 0) | ||
18823 | goto fail; | ||
18824 | dev_dbg(&udev->dev, "udev %d, busnum %d, minor = %d\n", | ||
18825 | @@ -1803,23 +1803,21 @@ fail: | ||
18826 | */ | ||
18827 | int usb_deauthorize_device(struct usb_device *usb_dev) | ||
18828 | { | ||
18829 | + unsigned cnt; | ||
18830 | usb_lock_device(usb_dev); | ||
18831 | if (usb_dev->authorized == 0) | ||
18832 | goto out_unauthorized; | ||
18833 | - | ||
18834 | usb_dev->authorized = 0; | ||
18835 | usb_set_configuration(usb_dev, -1); | ||
18836 | - | ||
18837 | - kfree(usb_dev->product); | ||
18838 | usb_dev->product = kstrdup("n/a (unauthorized)", GFP_KERNEL); | ||
18839 | - kfree(usb_dev->manufacturer); | ||
18840 | usb_dev->manufacturer = kstrdup("n/a (unauthorized)", GFP_KERNEL); | ||
18841 | - kfree(usb_dev->serial); | ||
18842 | usb_dev->serial = kstrdup("n/a (unauthorized)", GFP_KERNEL); | ||
18843 | - | ||
18844 | - usb_destroy_configuration(usb_dev); | ||
18845 | + kfree(usb_dev->config); | ||
18846 | + usb_dev->config = NULL; | ||
18847 | + for (cnt = 0; cnt < usb_dev->descriptor.bNumConfigurations; cnt++) | ||
18848 | + kfree(usb_dev->rawdescriptors[cnt]); | ||
18849 | usb_dev->descriptor.bNumConfigurations = 0; | ||
18850 | - | ||
18851 | + kfree(usb_dev->rawdescriptors); | ||
18852 | out_unauthorized: | ||
18853 | usb_unlock_device(usb_dev); | ||
18854 | return 0; | ||
18855 | @@ -1829,11 +1827,15 @@ out_unauthorized: | ||
18856 | int usb_authorize_device(struct usb_device *usb_dev) | ||
18857 | { | ||
18858 | int result = 0, c; | ||
18859 | - | ||
18860 | usb_lock_device(usb_dev); | ||
18861 | if (usb_dev->authorized == 1) | ||
18862 | goto out_authorized; | ||
18863 | - | ||
18864 | + kfree(usb_dev->product); | ||
18865 | + usb_dev->product = NULL; | ||
18866 | + kfree(usb_dev->manufacturer); | ||
18867 | + usb_dev->manufacturer = NULL; | ||
18868 | + kfree(usb_dev->serial); | ||
18869 | + usb_dev->serial = NULL; | ||
18870 | result = usb_autoresume_device(usb_dev); | ||
18871 | if (result < 0) { | ||
18872 | dev_err(&usb_dev->dev, | ||
18873 | @@ -1846,18 +1848,10 @@ int usb_authorize_device(struct usb_device *usb_dev) | ||
18874 | "authorization: %d\n", result); | ||
18875 | goto error_device_descriptor; | ||
18876 | } | ||
18877 | - | ||
18878 | - kfree(usb_dev->product); | ||
18879 | - usb_dev->product = NULL; | ||
18880 | - kfree(usb_dev->manufacturer); | ||
18881 | - usb_dev->manufacturer = NULL; | ||
18882 | - kfree(usb_dev->serial); | ||
18883 | - usb_dev->serial = NULL; | ||
18884 | - | ||
18885 | usb_dev->authorized = 1; | ||
18886 | - result = usb_enumerate_device(usb_dev); | ||
18887 | + result = usb_configure_device(usb_dev); | ||
18888 | if (result < 0) | ||
18889 | - goto error_enumerate; | ||
18890 | + goto error_configure; | ||
18891 | /* Choose and set the configuration. This registers the interfaces | ||
18892 | * with the driver core and lets interface drivers bind to them. | ||
18893 | */ | ||
18894 | @@ -1872,10 +1866,8 @@ int usb_authorize_device(struct usb_device *usb_dev) | ||
18895 | } | ||
18896 | } | ||
18897 | dev_info(&usb_dev->dev, "authorized to connect\n"); | ||
18898 | - | ||
18899 | -error_enumerate: | ||
18900 | +error_configure: | ||
18901 | error_device_descriptor: | ||
18902 | - usb_autosuspend_device(usb_dev); | ||
18903 | error_autoresume: | ||
18904 | out_authorized: | ||
18905 | usb_unlock_device(usb_dev); // complements locktree | ||
18906 | @@ -3286,9 +3278,6 @@ static void hub_events(void) | ||
18907 | USB_PORT_FEAT_C_SUSPEND); | ||
18908 | udev = hdev->children[i-1]; | ||
18909 | if (udev) { | ||
18910 | - /* TRSMRCY = 10 msec */ | ||
18911 | - msleep(10); | ||
18912 | - | ||
18913 | usb_lock_device(udev); | ||
18914 | ret = remote_wakeup(hdev-> | ||
18915 | children[i-1]); | ||
18916 | diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c | ||
18917 | index 980a8d2..da718e8 100644 | ||
18918 | --- a/drivers/usb/core/message.c | ||
18919 | +++ b/drivers/usb/core/message.c | ||
18920 | @@ -911,11 +911,11 @@ char *usb_cache_string(struct usb_device *udev, int index) | ||
18921 | if (index <= 0) | ||
18922 | return NULL; | ||
18923 | |||
18924 | - buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); | ||
18925 | + buf = kmalloc(MAX_USB_STRING_SIZE, GFP_KERNEL); | ||
18926 | if (buf) { | ||
18927 | len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); | ||
18928 | if (len > 0) { | ||
18929 | - smallbuf = kmalloc(++len, GFP_NOIO); | ||
18930 | + smallbuf = kmalloc(++len, GFP_KERNEL); | ||
18931 | if (!smallbuf) | ||
18932 | return buf; | ||
18933 | memcpy(smallbuf, buf, len); | ||
18934 | @@ -1682,7 +1682,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) | ||
18935 | if (cp) { | ||
18936 | nintf = cp->desc.bNumInterfaces; | ||
18937 | new_interfaces = kmalloc(nintf * sizeof(*new_interfaces), | ||
18938 | - GFP_NOIO); | ||
18939 | + GFP_KERNEL); | ||
18940 | if (!new_interfaces) { | ||
18941 | dev_err(&dev->dev, "Out of memory\n"); | ||
18942 | return -ENOMEM; | ||
18943 | @@ -1691,7 +1691,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration) | ||
18944 | for (; n < nintf; ++n) { | ||
18945 | new_interfaces[n] = kzalloc( | ||
18946 | sizeof(struct usb_interface), | ||
18947 | - GFP_NOIO); | ||
18948 | + GFP_KERNEL); | ||
18949 | if (!new_interfaces[n]) { | ||
18950 | dev_err(&dev->dev, "Out of memory\n"); | ||
18951 | ret = -ENOMEM; | ||
18952 | diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c | ||
18953 | index fcdcad4..7ec3041 100644 | ||
18954 | --- a/drivers/usb/core/sysfs.c | ||
18955 | +++ b/drivers/usb/core/sysfs.c | ||
18956 | @@ -82,13 +82,9 @@ static ssize_t show_##name(struct device *dev, \ | ||
18957 | struct device_attribute *attr, char *buf) \ | ||
18958 | { \ | ||
18959 | struct usb_device *udev; \ | ||
18960 | - int retval; \ | ||
18961 | \ | ||
18962 | udev = to_usb_device(dev); \ | ||
18963 | - usb_lock_device(udev); \ | ||
18964 | - retval = sprintf(buf, "%s\n", udev->name); \ | ||
18965 | - usb_unlock_device(udev); \ | ||
18966 | - return retval; \ | ||
18967 | + return sprintf(buf, "%s\n", udev->name); \ | ||
18968 | } \ | ||
18969 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); | ||
18970 | |||
18971 | @@ -115,12 +111,6 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf) | ||
18972 | case USB_SPEED_HIGH: | ||
18973 | speed = "480"; | ||
18974 | break; | ||
18975 | - case USB_SPEED_VARIABLE: | ||
18976 | - speed = "480"; | ||
18977 | - break; | ||
18978 | - case USB_SPEED_SUPER: | ||
18979 | - speed = "5000"; | ||
18980 | - break; | ||
18981 | default: | ||
18982 | speed = "unknown"; | ||
18983 | } | ||
18984 | diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c | ||
18985 | index 52e5e31..b1b85ab 100644 | ||
18986 | --- a/drivers/usb/core/usb.c | ||
18987 | +++ b/drivers/usb/core/usb.c | ||
18988 | @@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); | ||
18989 | |||
18990 | struct find_interface_arg { | ||
18991 | int minor; | ||
18992 | - struct device_driver *drv; | ||
18993 | + struct usb_interface *interface; | ||
18994 | }; | ||
18995 | |||
18996 | static int __find_interface(struct device *dev, void *data) | ||
18997 | @@ -143,10 +143,12 @@ static int __find_interface(struct device *dev, void *data) | ||
18998 | if (!is_usb_interface(dev)) | ||
18999 | return 0; | ||
19000 | |||
19001 | - if (dev->driver != arg->drv) | ||
19002 | - return 0; | ||
19003 | intf = to_usb_interface(dev); | ||
19004 | - return intf->minor == arg->minor; | ||
19005 | + if (intf->minor != -1 && intf->minor == arg->minor) { | ||
19006 | + arg->interface = intf; | ||
19007 | + return 1; | ||
19008 | + } | ||
19009 | + return 0; | ||
19010 | } | ||
19011 | |||
19012 | /** | ||
19013 | @@ -154,24 +156,21 @@ static int __find_interface(struct device *dev, void *data) | ||
19014 | * @drv: the driver whose current configuration is considered | ||
19015 | * @minor: the minor number of the desired device | ||
19016 | * | ||
19017 | - * This walks the bus device list and returns a pointer to the interface | ||
19018 | - * with the matching minor and driver. Note, this only works for devices | ||
19019 | - * that share the USB major number. | ||
19020 | + * This walks the driver device list and returns a pointer to the interface | ||
19021 | + * with the matching minor. Note, this only works for devices that share the | ||
19022 | + * USB major number. | ||
19023 | */ | ||
19024 | struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) | ||
19025 | { | ||
19026 | struct find_interface_arg argb; | ||
19027 | - struct device *dev; | ||
19028 | + int retval; | ||
19029 | |||
19030 | argb.minor = minor; | ||
19031 | - argb.drv = &drv->drvwrap.driver; | ||
19032 | - | ||
19033 | - dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface); | ||
19034 | - | ||
19035 | - /* Drop reference count from bus_find_device */ | ||
19036 | - put_device(dev); | ||
19037 | - | ||
19038 | - return dev ? to_usb_interface(dev) : NULL; | ||
19039 | + argb.interface = NULL; | ||
19040 | + /* eat the error, it will be in argb.interface */ | ||
19041 | + retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb, | ||
19042 | + __find_interface); | ||
19043 | + return argb.interface; | ||
19044 | } | ||
19045 | EXPORT_SYMBOL_GPL(usb_find_interface); | ||
19046 | |||
19047 | diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c | ||
19048 | index e18c677..f5f5601 100644 | ||
19049 | --- a/drivers/usb/host/ehci-hcd.c | ||
19050 | +++ b/drivers/usb/host/ehci-hcd.c | ||
19051 | @@ -785,10 +785,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) | ||
19052 | |||
19053 | /* start 20 msec resume signaling from this port, | ||
19054 | * and make khubd collect PORT_STAT_C_SUSPEND to | ||
19055 | - * stop that signaling. Use 5 ms extra for safety, | ||
19056 | - * like usb_port_resume() does. | ||
19057 | + * stop that signaling. | ||
19058 | */ | ||
19059 | - ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); | ||
19060 | + ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); | ||
19061 | ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); | ||
19062 | mod_timer(&hcd->rh_timer, ehci->reset_done[i]); | ||
19063 | } | ||
19064 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c | ||
19065 | index 698f461..1b6f1c0 100644 | ||
19066 | --- a/drivers/usb/host/ehci-hub.c | ||
19067 | +++ b/drivers/usb/host/ehci-hub.c | ||
19068 | @@ -120,26 +120,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | ||
19069 | del_timer_sync(&ehci->watchdog); | ||
19070 | del_timer_sync(&ehci->iaa_watchdog); | ||
19071 | |||
19072 | + port = HCS_N_PORTS (ehci->hcs_params); | ||
19073 | spin_lock_irq (&ehci->lock); | ||
19074 | |||
19075 | - /* Once the controller is stopped, port resumes that are already | ||
19076 | - * in progress won't complete. Hence if remote wakeup is enabled | ||
19077 | - * for the root hub and any ports are in the middle of a resume or | ||
19078 | - * remote wakeup, we must fail the suspend. | ||
19079 | - */ | ||
19080 | - if (hcd->self.root_hub->do_remote_wakeup) { | ||
19081 | - port = HCS_N_PORTS(ehci->hcs_params); | ||
19082 | - while (port--) { | ||
19083 | - if (ehci->reset_done[port] != 0) { | ||
19084 | - spin_unlock_irq(&ehci->lock); | ||
19085 | - ehci_dbg(ehci, "suspend failed because " | ||
19086 | - "port %d is resuming\n", | ||
19087 | - port + 1); | ||
19088 | - return -EBUSY; | ||
19089 | - } | ||
19090 | - } | ||
19091 | - } | ||
19092 | - | ||
19093 | /* stop schedules, clean any completed work */ | ||
19094 | if (HC_IS_RUNNING(hcd->state)) { | ||
19095 | ehci_quiesce (ehci); | ||
19096 | @@ -155,7 +138,6 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | ||
19097 | */ | ||
19098 | ehci->bus_suspended = 0; | ||
19099 | ehci->owned_ports = 0; | ||
19100 | - port = HCS_N_PORTS(ehci->hcs_params); | ||
19101 | while (port--) { | ||
19102 | u32 __iomem *reg = &ehci->regs->port_status [port]; | ||
19103 | u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; | ||
19104 | diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c | ||
19105 | index c0d4b39..139a2cc 100644 | ||
19106 | --- a/drivers/usb/host/ehci-q.c | ||
19107 | +++ b/drivers/usb/host/ehci-q.c | ||
19108 | @@ -827,10 +827,9 @@ qh_make ( | ||
19109 | * But interval 1 scheduling is simpler, and | ||
19110 | * includes high bandwidth. | ||
19111 | */ | ||
19112 | - urb->interval = 1; | ||
19113 | - } else if (qh->period > ehci->periodic_size) { | ||
19114 | - qh->period = ehci->periodic_size; | ||
19115 | - urb->interval = qh->period << 3; | ||
19116 | + dbg ("intr period %d uframes, NYET!", | ||
19117 | + urb->interval); | ||
19118 | + goto done; | ||
19119 | } | ||
19120 | } else { | ||
19121 | int think_time; | ||
19122 | @@ -853,10 +852,6 @@ qh_make ( | ||
19123 | usb_calc_bus_time (urb->dev->speed, | ||
19124 | is_input, 0, max_packet (maxp))); | ||
19125 | qh->period = urb->interval; | ||
19126 | - if (qh->period > ehci->periodic_size) { | ||
19127 | - qh->period = ehci->periodic_size; | ||
19128 | - urb->interval = qh->period; | ||
19129 | - } | ||
19130 | } | ||
19131 | } | ||
19132 | |||
19133 | diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c | ||
19134 | index 9260c74..e33d362 100644 | ||
19135 | --- a/drivers/usb/host/r8a66597-hcd.c | ||
19136 | +++ b/drivers/usb/host/r8a66597-hcd.c | ||
19137 | @@ -35,9 +35,7 @@ | ||
19138 | #include <linux/usb.h> | ||
19139 | #include <linux/platform_device.h> | ||
19140 | #include <linux/io.h> | ||
19141 | -#include <linux/mm.h> | ||
19142 | #include <linux/irq.h> | ||
19143 | -#include <asm/cacheflush.h> | ||
19144 | |||
19145 | #include "../core/hcd.h" | ||
19146 | #include "r8a66597.h" | ||
19147 | @@ -218,17 +216,8 @@ static void disable_controller(struct r8a66597 *r8a66597) | ||
19148 | { | ||
19149 | int port; | ||
19150 | |||
19151 | - /* disable interrupts */ | ||
19152 | r8a66597_write(r8a66597, 0, INTENB0); | ||
19153 | - r8a66597_write(r8a66597, 0, INTENB1); | ||
19154 | - r8a66597_write(r8a66597, 0, BRDYENB); | ||
19155 | - r8a66597_write(r8a66597, 0, BEMPENB); | ||
19156 | - r8a66597_write(r8a66597, 0, NRDYENB); | ||
19157 | - | ||
19158 | - /* clear status */ | ||
19159 | - r8a66597_write(r8a66597, 0, BRDYSTS); | ||
19160 | - r8a66597_write(r8a66597, 0, NRDYSTS); | ||
19161 | - r8a66597_write(r8a66597, 0, BEMPSTS); | ||
19162 | + r8a66597_write(r8a66597, 0, INTSTS0); | ||
19163 | |||
19164 | for (port = 0; port < r8a66597->max_root_hub; port++) | ||
19165 | r8a66597_disable_port(r8a66597, port); | ||
19166 | @@ -822,26 +811,6 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb, | ||
19167 | enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); | ||
19168 | } | ||
19169 | |||
19170 | -static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb, | ||
19171 | - int status) | ||
19172 | -__releases(r8a66597->lock) | ||
19173 | -__acquires(r8a66597->lock) | ||
19174 | -{ | ||
19175 | - if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { | ||
19176 | - void *ptr; | ||
19177 | - | ||
19178 | - for (ptr = urb->transfer_buffer; | ||
19179 | - ptr < urb->transfer_buffer + urb->transfer_buffer_length; | ||
19180 | - ptr += PAGE_SIZE) | ||
19181 | - flush_dcache_page(virt_to_page(ptr)); | ||
19182 | - } | ||
19183 | - | ||
19184 | - usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | ||
19185 | - spin_unlock(&r8a66597->lock); | ||
19186 | - usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status); | ||
19187 | - spin_lock(&r8a66597->lock); | ||
19188 | -} | ||
19189 | - | ||
19190 | /* this function must be called with interrupt disabled */ | ||
19191 | static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | ||
19192 | { | ||
19193 | @@ -862,9 +831,15 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) | ||
19194 | list_del(&td->queue); | ||
19195 | kfree(td); | ||
19196 | |||
19197 | - if (urb) | ||
19198 | - r8a66597_urb_done(r8a66597, urb, -ENODEV); | ||
19199 | + if (urb) { | ||
19200 | + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), | ||
19201 | + urb); | ||
19202 | |||
19203 | + spin_unlock(&r8a66597->lock); | ||
19204 | + usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, | ||
19205 | + -ENODEV); | ||
19206 | + spin_lock(&r8a66597->lock); | ||
19207 | + } | ||
19208 | break; | ||
19209 | } | ||
19210 | } | ||
19211 | @@ -1301,7 +1276,10 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock) | ||
19212 | if (usb_pipeisoc(urb->pipe)) | ||
19213 | urb->start_frame = r8a66597_get_frame(hcd); | ||
19214 | |||
19215 | - r8a66597_urb_done(r8a66597, urb, status); | ||
19216 | + usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); | ||
19217 | + spin_unlock(&r8a66597->lock); | ||
19218 | + usb_hcd_giveback_urb(hcd, urb, status); | ||
19219 | + spin_lock(&r8a66597->lock); | ||
19220 | } | ||
19221 | |||
19222 | if (restart) { | ||
19223 | @@ -2492,12 +2470,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) | ||
19224 | r8a66597->rh_timer.data = (unsigned long)r8a66597; | ||
19225 | r8a66597->reg = (unsigned long)reg; | ||
19226 | |||
19227 | - /* make sure no interrupts are pending */ | ||
19228 | - ret = r8a66597_clock_enable(r8a66597); | ||
19229 | - if (ret < 0) | ||
19230 | - goto clean_up3; | ||
19231 | - disable_controller(r8a66597); | ||
19232 | - | ||
19233 | for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) { | ||
19234 | INIT_LIST_HEAD(&r8a66597->pipe_queue[i]); | ||
19235 | init_timer(&r8a66597->td_timer[i]); | ||
19236 | diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c | ||
19237 | index 99cd00f..5cd0e48 100644 | ||
19238 | --- a/drivers/usb/host/uhci-hcd.c | ||
19239 | +++ b/drivers/usb/host/uhci-hcd.c | ||
19240 | @@ -749,20 +749,7 @@ static int uhci_rh_suspend(struct usb_hcd *hcd) | ||
19241 | spin_lock_irq(&uhci->lock); | ||
19242 | if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) | ||
19243 | rc = -ESHUTDOWN; | ||
19244 | - else if (uhci->dead) | ||
19245 | - ; /* Dead controllers tell no tales */ | ||
19246 | - | ||
19247 | - /* Once the controller is stopped, port resumes that are already | ||
19248 | - * in progress won't complete. Hence if remote wakeup is enabled | ||
19249 | - * for the root hub and any ports are in the middle of a resume or | ||
19250 | - * remote wakeup, we must fail the suspend. | ||
19251 | - */ | ||
19252 | - else if (hcd->self.root_hub->do_remote_wakeup && | ||
19253 | - uhci->resuming_ports) { | ||
19254 | - dev_dbg(uhci_dev(uhci), "suspend failed because a port " | ||
19255 | - "is resuming\n"); | ||
19256 | - rc = -EBUSY; | ||
19257 | - } else | ||
19258 | + else if (!uhci->dead) | ||
19259 | suspend_rh(uhci, UHCI_RH_SUSPENDED); | ||
19260 | spin_unlock_irq(&uhci->lock); | ||
19261 | return rc; | ||
19262 | diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c | ||
19263 | index 8270055..885b585 100644 | ||
19264 | --- a/drivers/usb/host/uhci-hub.c | ||
19265 | +++ b/drivers/usb/host/uhci-hub.c | ||
19266 | @@ -167,7 +167,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci) | ||
19267 | /* Port received a wakeup request */ | ||
19268 | set_bit(port, &uhci->resuming_ports); | ||
19269 | uhci->ports_timeout = jiffies + | ||
19270 | - msecs_to_jiffies(25); | ||
19271 | + msecs_to_jiffies(20); | ||
19272 | |||
19273 | /* Make sure we see the port again | ||
19274 | * after the resuming period is over. */ | ||
19275 | diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c | ||
19276 | index 62ff5e7..1d8e39a 100644 | ||
19277 | --- a/drivers/usb/misc/appledisplay.c | ||
19278 | +++ b/drivers/usb/misc/appledisplay.c | ||
19279 | @@ -72,8 +72,8 @@ struct appledisplay { | ||
19280 | struct usb_device *udev; /* usb device */ | ||
19281 | struct urb *urb; /* usb request block */ | ||
19282 | struct backlight_device *bd; /* backlight device */ | ||
19283 | - u8 *urbdata; /* interrupt URB data buffer */ | ||
19284 | - u8 *msgdata; /* control message data buffer */ | ||
19285 | + char *urbdata; /* interrupt URB data buffer */ | ||
19286 | + char *msgdata; /* control message data buffer */ | ||
19287 | |||
19288 | struct delayed_work work; | ||
19289 | int button_pressed; | ||
19290 | diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c | ||
19291 | index 59860b3..602ee05 100644 | ||
19292 | --- a/drivers/usb/misc/emi62.c | ||
19293 | +++ b/drivers/usb/misc/emi62.c | ||
19294 | @@ -167,7 +167,7 @@ static int emi62_load_firmware (struct usb_device *dev) | ||
19295 | err("%s - error loading firmware: error = %d", __func__, err); | ||
19296 | goto wraperr; | ||
19297 | } | ||
19298 | - } while (rec); | ||
19299 | + } while (i > 0); | ||
19300 | |||
19301 | /* Assert reset (stop the CPU in the EMI) */ | ||
19302 | err = emi62_set_reset(dev,1); | ||
19303 | diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c | ||
19304 | index 067e5a9..522efb3 100644 | ||
19305 | --- a/drivers/usb/musb/musb_gadget_ep0.c | ||
19306 | +++ b/drivers/usb/musb/musb_gadget_ep0.c | ||
19307 | @@ -199,6 +199,7 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest) | ||
19308 | static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) | ||
19309 | { | ||
19310 | musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); | ||
19311 | + musb->ep0_state = MUSB_EP0_STAGE_SETUP; | ||
19312 | } | ||
19313 | |||
19314 | /* | ||
19315 | @@ -647,7 +648,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb) | ||
19316 | musb->ep0_state = MUSB_EP0_STAGE_STATUSIN; | ||
19317 | break; | ||
19318 | default: | ||
19319 | - ERR("SetupEnd came in a wrong ep0stage %s\n", | ||
19320 | + ERR("SetupEnd came in a wrong ep0stage %s", | ||
19321 | decode_ep0stage(musb->ep0_state)); | ||
19322 | } | ||
19323 | csr = musb_readw(regs, MUSB_CSR0); | ||
19324 | @@ -770,18 +771,12 @@ setup: | ||
19325 | handled = service_zero_data_request( | ||
19326 | musb, &setup); | ||
19327 | |||
19328 | - /* | ||
19329 | - * We're expecting no data in any case, so | ||
19330 | - * always set the DATAEND bit -- doing this | ||
19331 | - * here helps avoid SetupEnd interrupt coming | ||
19332 | - * in the idle stage when we're stalling... | ||
19333 | - */ | ||
19334 | - musb->ackpend |= MUSB_CSR0_P_DATAEND; | ||
19335 | - | ||
19336 | /* status stage might be immediate */ | ||
19337 | - if (handled > 0) | ||
19338 | + if (handled > 0) { | ||
19339 | + musb->ackpend |= MUSB_CSR0_P_DATAEND; | ||
19340 | musb->ep0_state = | ||
19341 | MUSB_EP0_STAGE_STATUSIN; | ||
19342 | + } | ||
19343 | break; | ||
19344 | |||
19345 | /* sequence #1 (IN to host), includes GET_STATUS | ||
19346 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c | ||
19347 | index 13a1b39..ebcc6d0 100644 | ||
19348 | --- a/drivers/usb/serial/ftdi_sio.c | ||
19349 | +++ b/drivers/usb/serial/ftdi_sio.c | ||
19350 | @@ -598,20 +598,6 @@ static struct usb_device_id id_table_combined [] = { | ||
19351 | { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, | ||
19352 | { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, | ||
19353 | { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, | ||
19354 | - { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) }, | ||
19355 | - { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) }, | ||
19356 | - { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) }, | ||
19357 | - { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) }, | ||
19358 | - { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) }, | ||
19359 | - { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) }, | ||
19360 | - { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) }, | ||
19361 | - { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) }, | ||
19362 | - { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) }, | ||
19363 | - { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) }, | ||
19364 | - { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) }, | ||
19365 | - { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) }, | ||
19366 | - { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) }, | ||
19367 | - { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) }, | ||
19368 | { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, | ||
19369 | { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, | ||
19370 | { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, | ||
19371 | diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h | ||
19372 | index 4586a24..6f31e0d 100644 | ||
19373 | --- a/drivers/usb/serial/ftdi_sio.h | ||
19374 | +++ b/drivers/usb/serial/ftdi_sio.h | ||
19375 | @@ -662,20 +662,6 @@ | ||
19376 | #define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */ | ||
19377 | #define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */ | ||
19378 | #define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */ | ||
19379 | -#define BANDB_USOPTL4_PID 0xAC11 | ||
19380 | -#define BANDB_USPTL4_PID 0xAC12 | ||
19381 | -#define BANDB_USO9ML2DR_2_PID 0xAC16 | ||
19382 | -#define BANDB_USO9ML2DR_PID 0xAC17 | ||
19383 | -#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */ | ||
19384 | -#define BANDB_USOPTL4DR_PID 0xAC19 | ||
19385 | -#define BANDB_485USB9F_2W_PID 0xAC25 | ||
19386 | -#define BANDB_485USB9F_4W_PID 0xAC26 | ||
19387 | -#define BANDB_232USB9M_PID 0xAC27 | ||
19388 | -#define BANDB_485USBTB_2W_PID 0xAC33 | ||
19389 | -#define BANDB_485USBTB_4W_PID 0xAC34 | ||
19390 | -#define BANDB_TTL5USB9M_PID 0xAC49 | ||
19391 | -#define BANDB_TTL3USB9M_PID 0xAC50 | ||
19392 | -#define BANDB_ZZ_PROG1_USB_PID 0xBA02 | ||
19393 | |||
19394 | /* | ||
19395 | * RM Michaelides CANview USB (http://www.rmcan.com) | ||
19396 | diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c | ||
19397 | index e0fb294..bbe005c 100644 | ||
19398 | --- a/drivers/usb/serial/generic.c | ||
19399 | +++ b/drivers/usb/serial/generic.c | ||
19400 | @@ -489,8 +489,6 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb) | ||
19401 | dbg("%s - port %d", __func__, port->number); | ||
19402 | |||
19403 | if (port->serial->type->max_in_flight_urbs) { | ||
19404 | - kfree(urb->transfer_buffer); | ||
19405 | - | ||
19406 | spin_lock_irqsave(&port->lock, flags); | ||
19407 | --port->urbs_in_flight; | ||
19408 | port->tx_bytes_flight -= urb->transfer_buffer_length; | ||
19409 | diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c | ||
19410 | index 485fa9c..f11abf5 100644 | ||
19411 | --- a/drivers/usb/serial/mos7840.c | ||
19412 | +++ b/drivers/usb/serial/mos7840.c | ||
19413 | @@ -121,14 +121,8 @@ | ||
19414 | * moschip_id_table_combined | ||
19415 | */ | ||
19416 | #define USB_VENDOR_ID_BANDB 0x0856 | ||
19417 | -#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 | ||
19418 | -#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 | ||
19419 | -#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 | ||
19420 | -#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 | ||
19421 | -#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 | ||
19422 | -#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 | ||
19423 | -#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
19424 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
19425 | +#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
19426 | |||
19427 | /* This driver also supports | ||
19428 | * ATEN UC2324 device using Moschip MCS7840 | ||
19429 | @@ -183,14 +177,8 @@ | ||
19430 | static struct usb_device_id moschip_port_id_table[] = { | ||
19431 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | ||
19432 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | ||
19433 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | ||
19434 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | ||
19435 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | ||
19436 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | ||
19437 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | ||
19438 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | ||
19439 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
19440 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
19441 | + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
19442 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | ||
19443 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
19444 | {} /* terminating entry */ | ||
19445 | @@ -199,14 +187,8 @@ static struct usb_device_id moschip_port_id_table[] = { | ||
19446 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | ||
19447 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | ||
19448 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | ||
19449 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, | ||
19450 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, | ||
19451 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, | ||
19452 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, | ||
19453 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, | ||
19454 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, | ||
19455 | - {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
19456 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
19457 | + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
19458 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, | ||
19459 | {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, | ||
19460 | {} /* terminating entry */ | ||
19461 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c | ||
19462 | index be3dff1..0577e4b 100644 | ||
19463 | --- a/drivers/usb/serial/option.c | ||
19464 | +++ b/drivers/usb/serial/option.c | ||
19465 | @@ -340,10 +340,6 @@ static int option_resume(struct usb_serial *serial); | ||
19466 | #define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e | ||
19467 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 | ||
19468 | |||
19469 | -/* Haier products */ | ||
19470 | -#define HAIER_VENDOR_ID 0x201e | ||
19471 | -#define HAIER_PRODUCT_CE100 0x2009 | ||
19472 | - | ||
19473 | static struct usb_device_id option_ids[] = { | ||
19474 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | ||
19475 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | ||
19476 | @@ -584,48 +580,12 @@ static struct usb_device_id option_ids[] = { | ||
19477 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, | ||
19478 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, | ||
19479 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, | ||
19480 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, | ||
19481 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, | ||
19482 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, | ||
19483 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, | ||
19484 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, | ||
19485 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, | ||
19486 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, | ||
19487 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, | ||
19488 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, | ||
19489 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, | ||
19490 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, | ||
19491 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, | ||
19492 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, | ||
19493 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, | ||
19494 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, | ||
19495 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, | ||
19496 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, | ||
19497 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, | ||
19498 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, | ||
19499 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, | ||
19500 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, | ||
19501 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, | ||
19502 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, | ||
19503 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, | ||
19504 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, | ||
19505 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) }, | ||
19506 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, | ||
19507 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, | ||
19508 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, | ||
19509 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, | ||
19510 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, | ||
19511 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, | ||
19512 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, | ||
19513 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, | ||
19514 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ | ||
19515 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, | ||
19516 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, | ||
19517 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, | ||
19518 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, | ||
19519 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, | ||
19520 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, | ||
19521 | - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, | ||
19522 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, | ||
19523 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, | ||
19524 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, | ||
19525 | @@ -639,13 +599,11 @@ static struct usb_device_id option_ids[] = { | ||
19526 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, | ||
19527 | { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ | ||
19528 | { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, | ||
19529 | - { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) }, | ||
19530 | { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, | ||
19531 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, | ||
19532 | { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, | ||
19533 | { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, | ||
19534 | { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, | ||
19535 | - { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, | ||
19536 | { } /* Terminating entry */ | ||
19537 | }; | ||
19538 | MODULE_DEVICE_TABLE(usb, option_ids); | ||
19539 | diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c | ||
19540 | index cc313d1..589f6b4 100644 | ||
19541 | --- a/drivers/usb/storage/transport.c | ||
19542 | +++ b/drivers/usb/storage/transport.c | ||
19543 | @@ -666,11 +666,10 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) | ||
19544 | * to wait for at least one CHECK_CONDITION to determine | ||
19545 | * SANE_SENSE support | ||
19546 | */ | ||
19547 | - if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && | ||
19548 | + if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && | ||
19549 | result == USB_STOR_TRANSPORT_GOOD && | ||
19550 | !(us->fflags & US_FL_SANE_SENSE) && | ||
19551 | - !(us->fflags & US_FL_BAD_SENSE) && | ||
19552 | - !(srb->cmnd[2] & 0x20))) { | ||
19553 | + !(srb->cmnd[2] & 0x20)) { | ||
19554 | US_DEBUGP("-- SAT supported, increasing auto-sense\n"); | ||
19555 | us->fflags |= US_FL_SANE_SENSE; | ||
19556 | } | ||
19557 | @@ -719,12 +718,6 @@ Retry_Sense: | ||
19558 | if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { | ||
19559 | US_DEBUGP("-- auto-sense aborted\n"); | ||
19560 | srb->result = DID_ABORT << 16; | ||
19561 | - | ||
19562 | - /* If SANE_SENSE caused this problem, disable it */ | ||
19563 | - if (sense_size != US_SENSE_SIZE) { | ||
19564 | - us->fflags &= ~US_FL_SANE_SENSE; | ||
19565 | - us->fflags |= US_FL_BAD_SENSE; | ||
19566 | - } | ||
19567 | goto Handle_Errors; | ||
19568 | } | ||
19569 | |||
19570 | @@ -734,11 +727,10 @@ Retry_Sense: | ||
19571 | * (small) sense request. This fixes some USB GSM modems | ||
19572 | */ | ||
19573 | if (temp_result == USB_STOR_TRANSPORT_FAILED && | ||
19574 | - sense_size != US_SENSE_SIZE) { | ||
19575 | + (us->fflags & US_FL_SANE_SENSE) && | ||
19576 | + sense_size != US_SENSE_SIZE) { | ||
19577 | US_DEBUGP("-- auto-sense failure, retry small sense\n"); | ||
19578 | sense_size = US_SENSE_SIZE; | ||
19579 | - us->fflags &= ~US_FL_SANE_SENSE; | ||
19580 | - us->fflags |= US_FL_BAD_SENSE; | ||
19581 | goto Retry_Sense; | ||
19582 | } | ||
19583 | |||
19584 | @@ -762,7 +754,6 @@ Retry_Sense: | ||
19585 | */ | ||
19586 | if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) && | ||
19587 | !(us->fflags & US_FL_SANE_SENSE) && | ||
19588 | - !(us->fflags & US_FL_BAD_SENSE) && | ||
19589 | (srb->sense_buffer[0] & 0x7C) == 0x70) { | ||
19590 | US_DEBUGP("-- SANE_SENSE support enabled\n"); | ||
19591 | us->fflags |= US_FL_SANE_SENSE; | ||
19592 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h | ||
19593 | index c932f90..d4f034e 100644 | ||
19594 | --- a/drivers/usb/storage/unusual_devs.h | ||
19595 | +++ b/drivers/usb/storage/unusual_devs.h | ||
19596 | @@ -818,13 +818,6 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001, | ||
19597 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
19598 | US_FL_FIX_CAPACITY ), | ||
19599 | |||
19600 | -/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */ | ||
19601 | -UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100, | ||
19602 | - "Prolific Technology, Inc.", | ||
19603 | - "Prolific Storage Gadget", | ||
19604 | - US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
19605 | - US_FL_BAD_SENSE ), | ||
19606 | - | ||
19607 | /* Reported by Rogerio Brito <rbrito@ime.usp.br> */ | ||
19608 | UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, | ||
19609 | "Prolific Technology, Inc.", | ||
19610 | @@ -1807,6 +1800,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999, | ||
19611 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
19612 | US_FL_GO_SLOW ), | ||
19613 | |||
19614 | +/* Reported by Rohan Hart <rohan.hart17@gmail.com> */ | ||
19615 | +UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010, | ||
19616 | + "INTOVA", | ||
19617 | + "Pixtreme", | ||
19618 | + US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
19619 | + US_FL_FIX_CAPACITY ), | ||
19620 | + | ||
19621 | /* Reported by Frederic Marchal <frederic.marchal@wowcompany.com> | ||
19622 | * Mio Moov 330 | ||
19623 | */ | ||
19624 | diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c | ||
19625 | index 33197fa..8060b85 100644 | ||
19626 | --- a/drivers/usb/storage/usb.c | ||
19627 | +++ b/drivers/usb/storage/usb.c | ||
19628 | @@ -228,7 +228,6 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, | ||
19629 | if (data_len<36) // You lose. | ||
19630 | return; | ||
19631 | |||
19632 | - memset(data+8, ' ', 28); | ||
19633 | if(data[0]&0x20) { /* USB device currently not connected. Return | ||
19634 | peripheral qualifier 001b ("...however, the | ||
19635 | physical device is not currently connected | ||
19636 | @@ -238,15 +237,15 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data, | ||
19637 | device, it may return zeros or ASCII spaces | ||
19638 | (20h) in those fields until the data is | ||
19639 | available from the device."). */ | ||
19640 | + memset(data+8,0,28); | ||
19641 | } else { | ||
19642 | u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice); | ||
19643 | - int n; | ||
19644 | - | ||
19645 | - n = strlen(us->unusual_dev->vendorName); | ||
19646 | - memcpy(data+8, us->unusual_dev->vendorName, min(8, n)); | ||
19647 | - n = strlen(us->unusual_dev->productName); | ||
19648 | - memcpy(data+16, us->unusual_dev->productName, min(16, n)); | ||
19649 | - | ||
19650 | + memcpy(data+8, us->unusual_dev->vendorName, | ||
19651 | + strlen(us->unusual_dev->vendorName) > 8 ? 8 : | ||
19652 | + strlen(us->unusual_dev->vendorName)); | ||
19653 | + memcpy(data+16, us->unusual_dev->productName, | ||
19654 | + strlen(us->unusual_dev->productName) > 16 ? 16 : | ||
19655 | + strlen(us->unusual_dev->productName)); | ||
19656 | data[32] = 0x30 + ((bcdDevice>>12) & 0x0F); | ||
19657 | data[33] = 0x30 + ((bcdDevice>>8) & 0x0F); | ||
19658 | data[34] = 0x30 + ((bcdDevice>>4) & 0x0F); | ||
19659 | @@ -430,8 +429,7 @@ static void adjust_quirks(struct us_data *us) | ||
19660 | u16 vid = le16_to_cpu(us->pusb_dev->descriptor.idVendor); | ||
19661 | u16 pid = le16_to_cpu(us->pusb_dev->descriptor.idProduct); | ||
19662 | unsigned f = 0; | ||
19663 | - unsigned int mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE | | ||
19664 | - US_FL_FIX_CAPACITY | | ||
19665 | + unsigned int mask = (US_FL_SANE_SENSE | US_FL_FIX_CAPACITY | | ||
19666 | US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE | | ||
19667 | US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 | | ||
19668 | US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE | | ||
19669 | @@ -461,9 +459,6 @@ static void adjust_quirks(struct us_data *us) | ||
19670 | case 'a': | ||
19671 | f |= US_FL_SANE_SENSE; | ||
19672 | break; | ||
19673 | - case 'b': | ||
19674 | - f |= US_FL_BAD_SENSE; | ||
19675 | - break; | ||
19676 | case 'c': | ||
19677 | f |= US_FL_FIX_CAPACITY; | ||
19678 | break; | ||
19679 | diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c | ||
19680 | index b4b6dec..66358fa 100644 | ||
19681 | --- a/drivers/video/imxfb.c | ||
19682 | +++ b/drivers/video/imxfb.c | ||
19683 | @@ -593,8 +593,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf | ||
19684 | */ | ||
19685 | static int imxfb_suspend(struct platform_device *dev, pm_message_t state) | ||
19686 | { | ||
19687 | - struct fb_info *info = platform_get_drvdata(dev); | ||
19688 | - struct imxfb_info *fbi = info->par; | ||
19689 | + struct imxfb_info *fbi = platform_get_drvdata(dev); | ||
19690 | |||
19691 | pr_debug("%s\n", __func__); | ||
19692 | |||
19693 | @@ -604,8 +603,7 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state) | ||
19694 | |||
19695 | static int imxfb_resume(struct platform_device *dev) | ||
19696 | { | ||
19697 | - struct fb_info *info = platform_get_drvdata(dev); | ||
19698 | - struct imxfb_info *fbi = info->par; | ||
19699 | + struct imxfb_info *fbi = platform_get_drvdata(dev); | ||
19700 | |||
19701 | pr_debug("%s\n", __func__); | ||
19702 | |||
19703 | diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c | ||
19704 | index c15f8a5..09f6e04 100644 | ||
19705 | --- a/drivers/video/matrox/g450_pll.c | ||
19706 | +++ b/drivers/video/matrox/g450_pll.c | ||
19707 | @@ -368,8 +368,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout, | ||
19708 | M1064_XDVICLKCTRL_C1DVICLKEN | | ||
19709 | M1064_XDVICLKCTRL_DVILOOPCTL | | ||
19710 | M1064_XDVICLKCTRL_P1LOOPBWDTCTL; | ||
19711 | - /* Setting this breaks PC systems so don't do it */ | ||
19712 | - /* matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); */ | ||
19713 | + matroxfb_DAC_out(minfo, M1064_XDVICLKCTRL, tmp); | ||
19714 | matroxfb_DAC_out(minfo, M1064_XPWRCTRL, | ||
19715 | xpwrctrl); | ||
19716 | |||
19717 | diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c | ||
19718 | index 772ba3f..054ef29 100644 | ||
19719 | --- a/drivers/video/mx3fb.c | ||
19720 | +++ b/drivers/video/mx3fb.c | ||
19721 | @@ -324,11 +324,8 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) | ||
19722 | unsigned long flags; | ||
19723 | dma_cookie_t cookie; | ||
19724 | |||
19725 | - if (mx3_fbi->txd) | ||
19726 | - dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, | ||
19727 | - to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); | ||
19728 | - else | ||
19729 | - dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi); | ||
19730 | + dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, | ||
19731 | + to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); | ||
19732 | |||
19733 | /* This enables the channel */ | ||
19734 | if (mx3_fbi->cookie < 0) { | ||
19735 | @@ -649,7 +646,6 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a | ||
19736 | |||
19737 | static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) | ||
19738 | { | ||
19739 | - dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value); | ||
19740 | /* This might be board-specific */ | ||
19741 | mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); | ||
19742 | return; | ||
19743 | @@ -1490,12 +1486,12 @@ static int mx3fb_probe(struct platform_device *pdev) | ||
19744 | goto ersdc0; | ||
19745 | } | ||
19746 | |||
19747 | - mx3fb->backlight_level = 255; | ||
19748 | - | ||
19749 | ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); | ||
19750 | if (ret < 0) | ||
19751 | goto eisdc0; | ||
19752 | |||
19753 | + mx3fb->backlight_level = 255; | ||
19754 | + | ||
19755 | return 0; | ||
19756 | |||
19757 | eisdc0: | ||
19758 | diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c | ||
19759 | index 53cb722..adf9632 100644 | ||
19760 | --- a/drivers/video/s3c-fb.c | ||
19761 | +++ b/drivers/video/s3c-fb.c | ||
19762 | @@ -211,23 +211,21 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, | ||
19763 | |||
19764 | /** | ||
19765 | * s3c_fb_calc_pixclk() - calculate the divider to create the pixel clock. | ||
19766 | + * @id: window id. | ||
19767 | * @sfb: The hardware state. | ||
19768 | * @pixclock: The pixel clock wanted, in picoseconds. | ||
19769 | * | ||
19770 | * Given the specified pixel clock, work out the necessary divider to get | ||
19771 | * close to the output frequency. | ||
19772 | */ | ||
19773 | -static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk) | ||
19774 | +static int s3c_fb_calc_pixclk(unsigned char id, struct s3c_fb *sfb, unsigned int pixclk) | ||
19775 | { | ||
19776 | + struct s3c_fb_pd_win *win = sfb->pdata->win[id]; | ||
19777 | unsigned long clk = clk_get_rate(sfb->bus_clk); | ||
19778 | - unsigned long long tmp; | ||
19779 | unsigned int result; | ||
19780 | |||
19781 | - tmp = (unsigned long long)clk; | ||
19782 | - tmp *= pixclk; | ||
19783 | - | ||
19784 | - do_div(tmp, 1000000000UL); | ||
19785 | - result = (unsigned int)tmp / 1000; | ||
19786 | + pixclk *= win->win_mode.refresh; | ||
19787 | + result = clk / pixclk; | ||
19788 | |||
19789 | dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", | ||
19790 | pixclk, clk, result, clk / result); | ||
19791 | @@ -303,7 +301,7 @@ static int s3c_fb_set_par(struct fb_info *info) | ||
19792 | /* use window 0 as the basis for the lcd output timings */ | ||
19793 | |||
19794 | if (win_no == 0) { | ||
19795 | - clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock); | ||
19796 | + clkdiv = s3c_fb_calc_pixclk(win_no, sfb, var->pixclock); | ||
19797 | |||
19798 | data = sfb->pdata->vidcon0; | ||
19799 | data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR); | ||
19800 | diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c | ||
19801 | index 4bdb7f1..6a51edd 100644 | ||
19802 | --- a/drivers/watchdog/iTCO_wdt.c | ||
19803 | +++ b/drivers/watchdog/iTCO_wdt.c | ||
19804 | @@ -1,5 +1,5 @@ | ||
19805 | /* | ||
19806 | - * intel TCO Watchdog Driver | ||
19807 | + * intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets) | ||
19808 | * | ||
19809 | * (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>. | ||
19810 | * | ||
19811 | @@ -14,24 +14,47 @@ | ||
19812 | * | ||
19813 | * The TCO watchdog is implemented in the following I/O controller hubs: | ||
19814 | * (See the intel documentation on http://developer.intel.com.) | ||
19815 | - * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO) | ||
19816 | - * document number 290687-002, 298242-027: 82801BA (ICH2) | ||
19817 | - * document number 290733-003, 290739-013: 82801CA (ICH3-S) | ||
19818 | - * document number 290716-001, 290718-007: 82801CAM (ICH3-M) | ||
19819 | - * document number 290744-001, 290745-025: 82801DB (ICH4) | ||
19820 | - * document number 252337-001, 252663-008: 82801DBM (ICH4-M) | ||
19821 | - * document number 273599-001, 273645-002: 82801E (C-ICH) | ||
19822 | - * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R) | ||
19823 | - * document number 300641-004, 300884-013: 6300ESB | ||
19824 | - * document number 301473-002, 301474-026: 82801F (ICH6) | ||
19825 | - * document number 313082-001, 313075-006: 631xESB, 632xESB | ||
19826 | - * document number 307013-003, 307014-024: 82801G (ICH7) | ||
19827 | - * document number 313056-003, 313057-017: 82801H (ICH8) | ||
19828 | - * document number 316972-004, 316973-012: 82801I (ICH9) | ||
19829 | - * document number 319973-002, 319974-002: 82801J (ICH10) | ||
19830 | - * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) | ||
19831 | - * document number 320066-003, 320257-008: EP80597 (IICH) | ||
19832 | - * document number TBD : Cougar Point (CPT) | ||
19833 | + * 82801AA (ICH) : document number 290655-003, 290677-014, | ||
19834 | + * 82801AB (ICHO) : document number 290655-003, 290677-014, | ||
19835 | + * 82801BA (ICH2) : document number 290687-002, 298242-027, | ||
19836 | + * 82801BAM (ICH2-M) : document number 290687-002, 298242-027, | ||
19837 | + * 82801CA (ICH3-S) : document number 290733-003, 290739-013, | ||
19838 | + * 82801CAM (ICH3-M) : document number 290716-001, 290718-007, | ||
19839 | + * 82801DB (ICH4) : document number 290744-001, 290745-025, | ||
19840 | + * 82801DBM (ICH4-M) : document number 252337-001, 252663-008, | ||
19841 | + * 82801E (C-ICH) : document number 273599-001, 273645-002, | ||
19842 | + * 82801EB (ICH5) : document number 252516-001, 252517-028, | ||
19843 | + * 82801ER (ICH5R) : document number 252516-001, 252517-028, | ||
19844 | + * 6300ESB (6300ESB) : document number 300641-004, 300884-013, | ||
19845 | + * 82801FB (ICH6) : document number 301473-002, 301474-026, | ||
19846 | + * 82801FR (ICH6R) : document number 301473-002, 301474-026, | ||
19847 | + * 82801FBM (ICH6-M) : document number 301473-002, 301474-026, | ||
19848 | + * 82801FW (ICH6W) : document number 301473-001, 301474-026, | ||
19849 | + * 82801FRW (ICH6RW) : document number 301473-001, 301474-026, | ||
19850 | + * 631xESB (631xESB) : document number 313082-001, 313075-006, | ||
19851 | + * 632xESB (632xESB) : document number 313082-001, 313075-006, | ||
19852 | + * 82801GB (ICH7) : document number 307013-003, 307014-024, | ||
19853 | + * 82801GR (ICH7R) : document number 307013-003, 307014-024, | ||
19854 | + * 82801GDH (ICH7DH) : document number 307013-003, 307014-024, | ||
19855 | + * 82801GBM (ICH7-M) : document number 307013-003, 307014-024, | ||
19856 | + * 82801GHM (ICH7-M DH) : document number 307013-003, 307014-024, | ||
19857 | + * 82801GU (ICH7-U) : document number 307013-003, 307014-024, | ||
19858 | + * 82801HB (ICH8) : document number 313056-003, 313057-017, | ||
19859 | + * 82801HR (ICH8R) : document number 313056-003, 313057-017, | ||
19860 | + * 82801HBM (ICH8M) : document number 313056-003, 313057-017, | ||
19861 | + * 82801HH (ICH8DH) : document number 313056-003, 313057-017, | ||
19862 | + * 82801HO (ICH8DO) : document number 313056-003, 313057-017, | ||
19863 | + * 82801HEM (ICH8M-E) : document number 313056-003, 313057-017, | ||
19864 | + * 82801IB (ICH9) : document number 316972-004, 316973-012, | ||
19865 | + * 82801IR (ICH9R) : document number 316972-004, 316973-012, | ||
19866 | + * 82801IH (ICH9DH) : document number 316972-004, 316973-012, | ||
19867 | + * 82801IO (ICH9DO) : document number 316972-004, 316973-012, | ||
19868 | + * 82801IBM (ICH9M) : document number 316972-004, 316973-012, | ||
19869 | + * 82801IEM (ICH9M-E) : document number 316972-004, 316973-012, | ||
19870 | + * 82801JIB (ICH10) : document number 319973-002, 319974-002, | ||
19871 | + * 82801JIR (ICH10R) : document number 319973-002, 319974-002, | ||
19872 | + * 82801JD (ICH10D) : document number 319973-002, 319974-002, | ||
19873 | + * 82801JDO (ICH10DO) : document number 319973-002, 319974-002 | ||
19874 | */ | ||
19875 | |||
19876 | /* | ||
19877 | @@ -99,24 +122,6 @@ enum iTCO_chipsets { | ||
19878 | TCO_ICH10R, /* ICH10R */ | ||
19879 | TCO_ICH10D, /* ICH10D */ | ||
19880 | TCO_ICH10DO, /* ICH10DO */ | ||
19881 | - TCO_PCH, /* PCH Desktop Full Featured */ | ||
19882 | - TCO_PCHM, /* PCH Mobile Full Featured */ | ||
19883 | - TCO_P55, /* P55 */ | ||
19884 | - TCO_PM55, /* PM55 */ | ||
19885 | - TCO_H55, /* H55 */ | ||
19886 | - TCO_QM57, /* QM57 */ | ||
19887 | - TCO_H57, /* H57 */ | ||
19888 | - TCO_HM55, /* HM55 */ | ||
19889 | - TCO_Q57, /* Q57 */ | ||
19890 | - TCO_HM57, /* HM57 */ | ||
19891 | - TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */ | ||
19892 | - TCO_QS57, /* QS57 */ | ||
19893 | - TCO_3400, /* 3400 */ | ||
19894 | - TCO_3420, /* 3420 */ | ||
19895 | - TCO_3450, /* 3450 */ | ||
19896 | - TCO_EP80579, /* EP80579 */ | ||
19897 | - TCO_CPTD, /* CPT Desktop */ | ||
19898 | - TCO_CPTM, /* CPT Mobile */ | ||
19899 | }; | ||
19900 | |||
19901 | static struct { | ||
19902 | @@ -157,24 +162,6 @@ static struct { | ||
19903 | {"ICH10R", 2}, | ||
19904 | {"ICH10D", 2}, | ||
19905 | {"ICH10DO", 2}, | ||
19906 | - {"PCH Desktop Full Featured", 2}, | ||
19907 | - {"PCH Mobile Full Featured", 2}, | ||
19908 | - {"P55", 2}, | ||
19909 | - {"PM55", 2}, | ||
19910 | - {"H55", 2}, | ||
19911 | - {"QM57", 2}, | ||
19912 | - {"H57", 2}, | ||
19913 | - {"HM55", 2}, | ||
19914 | - {"Q57", 2}, | ||
19915 | - {"HM57", 2}, | ||
19916 | - {"PCH Mobile SFF Full Featured", 2}, | ||
19917 | - {"QS57", 2}, | ||
19918 | - {"3400", 2}, | ||
19919 | - {"3420", 2}, | ||
19920 | - {"3450", 2}, | ||
19921 | - {"EP80579", 2}, | ||
19922 | - {"CPT Desktop", 2}, | ||
19923 | - {"CPT Mobile", 2}, | ||
19924 | {NULL, 0} | ||
19925 | }; | ||
19926 | |||
19927 | @@ -243,24 +230,6 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { | ||
19928 | { ITCO_PCI_DEVICE(0x3a16, TCO_ICH10R)}, | ||
19929 | { ITCO_PCI_DEVICE(0x3a1a, TCO_ICH10D)}, | ||
19930 | { ITCO_PCI_DEVICE(0x3a14, TCO_ICH10DO)}, | ||
19931 | - { ITCO_PCI_DEVICE(0x3b00, TCO_PCH)}, | ||
19932 | - { ITCO_PCI_DEVICE(0x3b01, TCO_PCHM)}, | ||
19933 | - { ITCO_PCI_DEVICE(0x3b02, TCO_P55)}, | ||
19934 | - { ITCO_PCI_DEVICE(0x3b03, TCO_PM55)}, | ||
19935 | - { ITCO_PCI_DEVICE(0x3b06, TCO_H55)}, | ||
19936 | - { ITCO_PCI_DEVICE(0x3b07, TCO_QM57)}, | ||
19937 | - { ITCO_PCI_DEVICE(0x3b08, TCO_H57)}, | ||
19938 | - { ITCO_PCI_DEVICE(0x3b09, TCO_HM55)}, | ||
19939 | - { ITCO_PCI_DEVICE(0x3b0a, TCO_Q57)}, | ||
19940 | - { ITCO_PCI_DEVICE(0x3b0b, TCO_HM57)}, | ||
19941 | - { ITCO_PCI_DEVICE(0x3b0d, TCO_PCHMSFF)}, | ||
19942 | - { ITCO_PCI_DEVICE(0x3b0f, TCO_QS57)}, | ||
19943 | - { ITCO_PCI_DEVICE(0x3b12, TCO_3400)}, | ||
19944 | - { ITCO_PCI_DEVICE(0x3b14, TCO_3420)}, | ||
19945 | - { ITCO_PCI_DEVICE(0x3b16, TCO_3450)}, | ||
19946 | - { ITCO_PCI_DEVICE(0x5031, TCO_EP80579)}, | ||
19947 | - { ITCO_PCI_DEVICE(0x1c42, TCO_CPTD)}, | ||
19948 | - { ITCO_PCI_DEVICE(0x1c43, TCO_CPTM)}, | ||
19949 | { 0, }, /* End of list */ | ||
19950 | }; | ||
19951 | MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); | ||
19952 | diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c | ||
19953 | index 4204336..d31505b 100644 | ||
19954 | --- a/drivers/xen/balloon.c | ||
19955 | +++ b/drivers/xen/balloon.c | ||
19956 | @@ -66,6 +66,8 @@ struct balloon_stats { | ||
19957 | /* We aim for 'current allocation' == 'target allocation'. */ | ||
19958 | unsigned long current_pages; | ||
19959 | unsigned long target_pages; | ||
19960 | + /* We may hit the hard limit in Xen. If we do then we remember it. */ | ||
19961 | + unsigned long hard_limit; | ||
19962 | /* | ||
19963 | * Drivers may alter the memory reservation independently, but they | ||
19964 | * must inform the balloon driver so we avoid hitting the hard limit. | ||
19965 | @@ -134,8 +136,6 @@ static void balloon_append(struct page *page) | ||
19966 | list_add(&page->lru, &ballooned_pages); | ||
19967 | balloon_stats.balloon_low++; | ||
19968 | } | ||
19969 | - | ||
19970 | - totalram_pages--; | ||
19971 | } | ||
19972 | |||
19973 | /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ | ||
19974 | @@ -156,8 +156,6 @@ static struct page *balloon_retrieve(void) | ||
19975 | else | ||
19976 | balloon_stats.balloon_low--; | ||
19977 | |||
19978 | - totalram_pages++; | ||
19979 | - | ||
19980 | return page; | ||
19981 | } | ||
19982 | |||
19983 | @@ -183,7 +181,7 @@ static void balloon_alarm(unsigned long unused) | ||
19984 | |||
19985 | static unsigned long current_target(void) | ||
19986 | { | ||
19987 | - unsigned long target = balloon_stats.target_pages; | ||
19988 | + unsigned long target = min(balloon_stats.target_pages, balloon_stats.hard_limit); | ||
19989 | |||
19990 | target = min(target, | ||
19991 | balloon_stats.current_pages + | ||
19992 | @@ -219,10 +217,23 @@ static int increase_reservation(unsigned long nr_pages) | ||
19993 | set_xen_guest_handle(reservation.extent_start, frame_list); | ||
19994 | reservation.nr_extents = nr_pages; | ||
19995 | rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); | ||
19996 | - if (rc < 0) | ||
19997 | + if (rc < nr_pages) { | ||
19998 | + if (rc > 0) { | ||
19999 | + int ret; | ||
20000 | + | ||
20001 | + /* We hit the Xen hard limit: reprobe. */ | ||
20002 | + reservation.nr_extents = rc; | ||
20003 | + ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, | ||
20004 | + &reservation); | ||
20005 | + BUG_ON(ret != rc); | ||
20006 | + } | ||
20007 | + if (rc >= 0) | ||
20008 | + balloon_stats.hard_limit = (balloon_stats.current_pages + rc - | ||
20009 | + balloon_stats.driver_pages); | ||
20010 | goto out; | ||
20011 | + } | ||
20012 | |||
20013 | - for (i = 0; i < rc; i++) { | ||
20014 | + for (i = 0; i < nr_pages; i++) { | ||
20015 | page = balloon_retrieve(); | ||
20016 | BUG_ON(page == NULL); | ||
20017 | |||
20018 | @@ -248,12 +259,13 @@ static int increase_reservation(unsigned long nr_pages) | ||
20019 | __free_page(page); | ||
20020 | } | ||
20021 | |||
20022 | - balloon_stats.current_pages += rc; | ||
20023 | + balloon_stats.current_pages += nr_pages; | ||
20024 | + totalram_pages = balloon_stats.current_pages; | ||
20025 | |||
20026 | out: | ||
20027 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
20028 | |||
20029 | - return rc < 0 ? rc : rc != nr_pages; | ||
20030 | + return 0; | ||
20031 | } | ||
20032 | |||
20033 | static int decrease_reservation(unsigned long nr_pages) | ||
20034 | @@ -311,6 +323,7 @@ static int decrease_reservation(unsigned long nr_pages) | ||
20035 | BUG_ON(ret != nr_pages); | ||
20036 | |||
20037 | balloon_stats.current_pages -= nr_pages; | ||
20038 | + totalram_pages = balloon_stats.current_pages; | ||
20039 | |||
20040 | spin_unlock_irqrestore(&balloon_lock, flags); | ||
20041 | |||
20042 | @@ -354,6 +367,7 @@ static void balloon_process(struct work_struct *work) | ||
20043 | static void balloon_set_new_target(unsigned long target) | ||
20044 | { | ||
20045 | /* No need for lock. Not read-modify-write updates. */ | ||
20046 | + balloon_stats.hard_limit = ~0UL; | ||
20047 | balloon_stats.target_pages = target; | ||
20048 | schedule_work(&balloon_worker); | ||
20049 | } | ||
20050 | @@ -408,10 +422,12 @@ static int __init balloon_init(void) | ||
20051 | pr_info("xen_balloon: Initialising balloon driver.\n"); | ||
20052 | |||
20053 | balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn); | ||
20054 | + totalram_pages = balloon_stats.current_pages; | ||
20055 | balloon_stats.target_pages = balloon_stats.current_pages; | ||
20056 | balloon_stats.balloon_low = 0; | ||
20057 | balloon_stats.balloon_high = 0; | ||
20058 | balloon_stats.driver_pages = 0UL; | ||
20059 | + balloon_stats.hard_limit = ~0UL; | ||
20060 | |||
20061 | init_timer(&balloon_timer); | ||
20062 | balloon_timer.data = 0; | ||
20063 | @@ -456,6 +472,9 @@ module_exit(balloon_exit); | ||
20064 | BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); | ||
20065 | BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); | ||
20066 | BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); | ||
20067 | +BALLOON_SHOW(hard_limit_kb, | ||
20068 | + (balloon_stats.hard_limit!=~0UL) ? "%lu\n" : "???\n", | ||
20069 | + (balloon_stats.hard_limit!=~0UL) ? PAGES2KB(balloon_stats.hard_limit) : 0); | ||
20070 | BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); | ||
20071 | |||
20072 | static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, | ||
20073 | @@ -525,6 +544,7 @@ static struct attribute *balloon_info_attrs[] = { | ||
20074 | &attr_current_kb.attr, | ||
20075 | &attr_low_kb.attr, | ||
20076 | &attr_high_kb.attr, | ||
20077 | + &attr_hard_limit_kb.attr, | ||
20078 | &attr_driver_kb.attr, | ||
20079 | NULL | ||
20080 | }; | ||
20081 | diff --git a/drivers/xen/events.c b/drivers/xen/events.c | ||
20082 | index ce602dd..2f57276 100644 | ||
20083 | --- a/drivers/xen/events.c | ||
20084 | +++ b/drivers/xen/events.c | ||
20085 | @@ -474,9 +474,6 @@ static void unbind_from_irq(unsigned int irq) | ||
20086 | bind_evtchn_to_cpu(evtchn, 0); | ||
20087 | |||
20088 | evtchn_to_irq[evtchn] = -1; | ||
20089 | - } | ||
20090 | - | ||
20091 | - if (irq_info[irq].type != IRQT_UNBOUND) { | ||
20092 | irq_info[irq] = mk_unbound_info(); | ||
20093 | |||
20094 | dynamic_irq_cleanup(irq); | ||
20095 | diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c | ||
20096 | index 5d42d55..10d03d7 100644 | ||
20097 | --- a/drivers/xen/manage.c | ||
20098 | +++ b/drivers/xen/manage.c | ||
20099 | @@ -43,6 +43,7 @@ static int xen_suspend(void *data) | ||
20100 | if (err) { | ||
20101 | printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", | ||
20102 | err); | ||
20103 | + dpm_resume_noirq(PMSG_RESUME); | ||
20104 | return err; | ||
20105 | } | ||
20106 | |||
20107 | @@ -68,6 +69,7 @@ static int xen_suspend(void *data) | ||
20108 | } | ||
20109 | |||
20110 | sysdev_resume(); | ||
20111 | + dpm_resume_noirq(PMSG_RESUME); | ||
20112 | |||
20113 | return 0; | ||
20114 | } | ||
20115 | @@ -79,12 +81,6 @@ static void do_suspend(void) | ||
20116 | |||
20117 | shutting_down = SHUTDOWN_SUSPEND; | ||
20118 | |||
20119 | - err = stop_machine_create(); | ||
20120 | - if (err) { | ||
20121 | - printk(KERN_ERR "xen suspend: failed to setup stop_machine %d\n", err); | ||
20122 | - goto out; | ||
20123 | - } | ||
20124 | - | ||
20125 | #ifdef CONFIG_PREEMPT | ||
20126 | /* If the kernel is preemptible, we need to freeze all the processes | ||
20127 | to prevent them from being in the middle of a pagetable update | ||
20128 | @@ -92,14 +88,14 @@ static void do_suspend(void) | ||
20129 | err = freeze_processes(); | ||
20130 | if (err) { | ||
20131 | printk(KERN_ERR "xen suspend: freeze failed %d\n", err); | ||
20132 | - goto out_destroy_sm; | ||
20133 | + return; | ||
20134 | } | ||
20135 | #endif | ||
20136 | |||
20137 | err = dpm_suspend_start(PMSG_SUSPEND); | ||
20138 | if (err) { | ||
20139 | printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); | ||
20140 | - goto out_thaw; | ||
20141 | + goto out; | ||
20142 | } | ||
20143 | |||
20144 | printk(KERN_DEBUG "suspending xenstore...\n"); | ||
20145 | @@ -108,39 +104,32 @@ static void do_suspend(void) | ||
20146 | err = dpm_suspend_noirq(PMSG_SUSPEND); | ||
20147 | if (err) { | ||
20148 | printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); | ||
20149 | - goto out_resume; | ||
20150 | + goto resume_devices; | ||
20151 | } | ||
20152 | |||
20153 | err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); | ||
20154 | - | ||
20155 | - dpm_resume_noirq(PMSG_RESUME); | ||
20156 | - | ||
20157 | if (err) { | ||
20158 | printk(KERN_ERR "failed to start xen_suspend: %d\n", err); | ||
20159 | - cancelled = 1; | ||
20160 | + goto out; | ||
20161 | } | ||
20162 | |||
20163 | -out_resume: | ||
20164 | if (!cancelled) { | ||
20165 | xen_arch_resume(); | ||
20166 | xs_resume(); | ||
20167 | } else | ||
20168 | xs_suspend_cancel(); | ||
20169 | |||
20170 | + dpm_resume_noirq(PMSG_RESUME); | ||
20171 | + | ||
20172 | +resume_devices: | ||
20173 | dpm_resume_end(PMSG_RESUME); | ||
20174 | |||
20175 | /* Make sure timer events get retriggered on all CPUs */ | ||
20176 | clock_was_set(); | ||
20177 | - | ||
20178 | -out_thaw: | ||
20179 | +out: | ||
20180 | #ifdef CONFIG_PREEMPT | ||
20181 | thaw_processes(); | ||
20182 | - | ||
20183 | -out_destroy_sm: | ||
20184 | #endif | ||
20185 | - stop_machine_destroy(); | ||
20186 | - | ||
20187 | -out: | ||
20188 | shutting_down = SHUTDOWN_INVALID; | ||
20189 | } | ||
20190 | #endif /* CONFIG_PM_SLEEP */ | ||
20191 | diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c | ||
20192 | index 649fcdf..d42e25d 100644 | ||
20193 | --- a/drivers/xen/xenbus/xenbus_probe.c | ||
20194 | +++ b/drivers/xen/xenbus/xenbus_probe.c | ||
20195 | @@ -454,21 +454,21 @@ static ssize_t xendev_show_nodename(struct device *dev, | ||
20196 | { | ||
20197 | return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename); | ||
20198 | } | ||
20199 | -static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); | ||
20200 | +DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL); | ||
20201 | |||
20202 | static ssize_t xendev_show_devtype(struct device *dev, | ||
20203 | struct device_attribute *attr, char *buf) | ||
20204 | { | ||
20205 | return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype); | ||
20206 | } | ||
20207 | -static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); | ||
20208 | +DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL); | ||
20209 | |||
20210 | static ssize_t xendev_show_modalias(struct device *dev, | ||
20211 | struct device_attribute *attr, char *buf) | ||
20212 | { | ||
20213 | return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype); | ||
20214 | } | ||
20215 | -static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); | ||
20216 | +DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL); | ||
20217 | |||
20218 | int xenbus_probe_node(struct xen_bus_type *bus, | ||
20219 | const char *type, | ||
20220 | @@ -843,7 +843,7 @@ postcore_initcall(xenbus_probe_init); | ||
20221 | |||
20222 | MODULE_LICENSE("GPL"); | ||
20223 | |||
20224 | -static int is_device_connecting(struct device *dev, void *data) | ||
20225 | +static int is_disconnected_device(struct device *dev, void *data) | ||
20226 | { | ||
20227 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
20228 | struct device_driver *drv = data; | ||
20229 | @@ -861,15 +861,14 @@ static int is_device_connecting(struct device *dev, void *data) | ||
20230 | return 0; | ||
20231 | |||
20232 | xendrv = to_xenbus_driver(dev->driver); | ||
20233 | - return (xendev->state < XenbusStateConnected || | ||
20234 | - (xendev->state == XenbusStateConnected && | ||
20235 | - xendrv->is_ready && !xendrv->is_ready(xendev))); | ||
20236 | + return (xendev->state != XenbusStateConnected || | ||
20237 | + (xendrv->is_ready && !xendrv->is_ready(xendev))); | ||
20238 | } | ||
20239 | |||
20240 | -static int exists_connecting_device(struct device_driver *drv) | ||
20241 | +static int exists_disconnected_device(struct device_driver *drv) | ||
20242 | { | ||
20243 | return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
20244 | - is_device_connecting); | ||
20245 | + is_disconnected_device); | ||
20246 | } | ||
20247 | |||
20248 | static int print_device_status(struct device *dev, void *data) | ||
20249 | @@ -885,13 +884,10 @@ static int print_device_status(struct device *dev, void *data) | ||
20250 | /* Information only: is this too noisy? */ | ||
20251 | printk(KERN_INFO "XENBUS: Device with no driver: %s\n", | ||
20252 | xendev->nodename); | ||
20253 | - } else if (xendev->state < XenbusStateConnected) { | ||
20254 | - enum xenbus_state rstate = XenbusStateUnknown; | ||
20255 | - if (xendev->otherend) | ||
20256 | - rstate = xenbus_read_driver_state(xendev->otherend); | ||
20257 | + } else if (xendev->state != XenbusStateConnected) { | ||
20258 | printk(KERN_WARNING "XENBUS: Timeout connecting " | ||
20259 | - "to device: %s (local state %d, remote state %d)\n", | ||
20260 | - xendev->nodename, xendev->state, rstate); | ||
20261 | + "to device: %s (state %d)\n", | ||
20262 | + xendev->nodename, xendev->state); | ||
20263 | } | ||
20264 | |||
20265 | return 0; | ||
20266 | @@ -901,7 +897,7 @@ static int print_device_status(struct device *dev, void *data) | ||
20267 | static int ready_to_wait_for_devices; | ||
20268 | |||
20269 | /* | ||
20270 | - * On a 5-minute timeout, wait for all devices currently configured. We need | ||
20271 | + * On a 10 second timeout, wait for all devices currently configured. We need | ||
20272 | * to do this to guarantee that the filesystems and / or network devices | ||
20273 | * needed for boot are available, before we can allow the boot to proceed. | ||
20274 | * | ||
20275 | @@ -916,30 +912,18 @@ static int ready_to_wait_for_devices; | ||
20276 | */ | ||
20277 | static void wait_for_devices(struct xenbus_driver *xendrv) | ||
20278 | { | ||
20279 | - unsigned long start = jiffies; | ||
20280 | + unsigned long timeout = jiffies + 10*HZ; | ||
20281 | struct device_driver *drv = xendrv ? &xendrv->driver : NULL; | ||
20282 | - unsigned int seconds_waited = 0; | ||
20283 | |||
20284 | if (!ready_to_wait_for_devices || !xen_domain()) | ||
20285 | return; | ||
20286 | |||
20287 | - while (exists_connecting_device(drv)) { | ||
20288 | - if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { | ||
20289 | - if (!seconds_waited) | ||
20290 | - printk(KERN_WARNING "XENBUS: Waiting for " | ||
20291 | - "devices to initialise: "); | ||
20292 | - seconds_waited += 5; | ||
20293 | - printk("%us...", 300 - seconds_waited); | ||
20294 | - if (seconds_waited == 300) | ||
20295 | - break; | ||
20296 | - } | ||
20297 | - | ||
20298 | + while (exists_disconnected_device(drv)) { | ||
20299 | + if (time_after(jiffies, timeout)) | ||
20300 | + break; | ||
20301 | schedule_timeout_interruptible(HZ/10); | ||
20302 | } | ||
20303 | |||
20304 | - if (seconds_waited) | ||
20305 | - printk("\n"); | ||
20306 | - | ||
20307 | bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, | ||
20308 | print_device_status); | ||
20309 | } | ||
20310 | diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c | ||
20311 | index 69357c0..14a8644 100644 | ||
20312 | --- a/fs/9p/vfs_super.c | ||
20313 | +++ b/fs/9p/vfs_super.c | ||
20314 | @@ -188,8 +188,7 @@ static void v9fs_kill_super(struct super_block *s) | ||
20315 | |||
20316 | P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); | ||
20317 | |||
20318 | - if (s->s_root) | ||
20319 | - v9fs_dentry_release(s->s_root); /* clunk root */ | ||
20320 | + v9fs_dentry_release(s->s_root); /* clunk root */ | ||
20321 | |||
20322 | kill_anon_super(s); | ||
20323 | |||
20324 | diff --git a/fs/affs/affs.h b/fs/affs/affs.h | ||
20325 | index 0e40caa..e511dc6 100644 | ||
20326 | --- a/fs/affs/affs.h | ||
20327 | +++ b/fs/affs/affs.h | ||
20328 | @@ -106,8 +106,8 @@ struct affs_sb_info { | ||
20329 | u32 s_last_bmap; | ||
20330 | struct buffer_head *s_bmap_bh; | ||
20331 | char *s_prefix; /* Prefix for volumes and assigns. */ | ||
20332 | + int s_prefix_len; /* Length of prefix. */ | ||
20333 | char s_volume[32]; /* Volume prefix for absolute symlinks. */ | ||
20334 | - spinlock_t symlink_lock; /* protects the previous two */ | ||
20335 | }; | ||
20336 | |||
20337 | #define SF_INTL 0x0001 /* International filesystem. */ | ||
20338 | diff --git a/fs/affs/namei.c b/fs/affs/namei.c | ||
20339 | index d70bbba..960d336 100644 | ||
20340 | --- a/fs/affs/namei.c | ||
20341 | +++ b/fs/affs/namei.c | ||
20342 | @@ -341,13 +341,10 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | ||
20343 | p = (char *)AFFS_HEAD(bh)->table; | ||
20344 | lc = '/'; | ||
20345 | if (*symname == '/') { | ||
20346 | - struct affs_sb_info *sbi = AFFS_SB(sb); | ||
20347 | while (*symname == '/') | ||
20348 | symname++; | ||
20349 | - spin_lock(&sbi->symlink_lock); | ||
20350 | - while (sbi->s_volume[i]) /* Cannot overflow */ | ||
20351 | - *p++ = sbi->s_volume[i++]; | ||
20352 | - spin_unlock(&sbi->symlink_lock); | ||
20353 | + while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */ | ||
20354 | + *p++ = AFFS_SB(sb)->s_volume[i++]; | ||
20355 | } | ||
20356 | while (i < maxlen && (c = *symname++)) { | ||
20357 | if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') { | ||
20358 | diff --git a/fs/affs/super.c b/fs/affs/super.c | ||
20359 | index d41e967..104fdcb 100644 | ||
20360 | --- a/fs/affs/super.c | ||
20361 | +++ b/fs/affs/super.c | ||
20362 | @@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | ||
20363 | switch (token) { | ||
20364 | case Opt_bs: | ||
20365 | if (match_int(&args[0], &n)) | ||
20366 | - return 0; | ||
20367 | + return -EINVAL; | ||
20368 | if (n != 512 && n != 1024 && n != 2048 | ||
20369 | && n != 4096) { | ||
20370 | printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n"); | ||
20371 | @@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | ||
20372 | break; | ||
20373 | case Opt_mode: | ||
20374 | if (match_octal(&args[0], &option)) | ||
20375 | - return 0; | ||
20376 | + return 1; | ||
20377 | *mode = option & 0777; | ||
20378 | *mount_opts |= SF_SETMODE; | ||
20379 | break; | ||
20380 | @@ -221,6 +221,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | ||
20381 | *mount_opts |= SF_MUFS; | ||
20382 | break; | ||
20383 | case Opt_prefix: | ||
20384 | + /* Free any previous prefix */ | ||
20385 | + kfree(*prefix); | ||
20386 | *prefix = match_strdup(&args[0]); | ||
20387 | if (!*prefix) | ||
20388 | return 0; | ||
20389 | @@ -231,21 +233,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | ||
20390 | break; | ||
20391 | case Opt_reserved: | ||
20392 | if (match_int(&args[0], reserved)) | ||
20393 | - return 0; | ||
20394 | + return 1; | ||
20395 | break; | ||
20396 | case Opt_root: | ||
20397 | if (match_int(&args[0], root)) | ||
20398 | - return 0; | ||
20399 | + return 1; | ||
20400 | break; | ||
20401 | case Opt_setgid: | ||
20402 | if (match_int(&args[0], &option)) | ||
20403 | - return 0; | ||
20404 | + return 1; | ||
20405 | *gid = option; | ||
20406 | *mount_opts |= SF_SETGID; | ||
20407 | break; | ||
20408 | case Opt_setuid: | ||
20409 | if (match_int(&args[0], &option)) | ||
20410 | - return 0; | ||
20411 | + return -EINVAL; | ||
20412 | *uid = option; | ||
20413 | *mount_opts |= SF_SETUID; | ||
20414 | break; | ||
20415 | @@ -309,14 +311,11 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent) | ||
20416 | return -ENOMEM; | ||
20417 | sb->s_fs_info = sbi; | ||
20418 | mutex_init(&sbi->s_bmlock); | ||
20419 | - spin_lock_init(&sbi->symlink_lock); | ||
20420 | |||
20421 | if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block, | ||
20422 | &blocksize,&sbi->s_prefix, | ||
20423 | sbi->s_volume, &mount_flags)) { | ||
20424 | printk(KERN_ERR "AFFS: Error parsing options\n"); | ||
20425 | - kfree(sbi->s_prefix); | ||
20426 | - kfree(sbi); | ||
20427 | return -EINVAL; | ||
20428 | } | ||
20429 | /* N.B. after this point s_prefix must be released */ | ||
20430 | @@ -517,18 +516,14 @@ affs_remount(struct super_block *sb, int *flags, char *data) | ||
20431 | unsigned long mount_flags; | ||
20432 | int res = 0; | ||
20433 | char *new_opts = kstrdup(data, GFP_KERNEL); | ||
20434 | - char volume[32]; | ||
20435 | - char *prefix = NULL; | ||
20436 | |||
20437 | pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data); | ||
20438 | |||
20439 | *flags |= MS_NODIRATIME; | ||
20440 | |||
20441 | - memcpy(volume, sbi->s_volume, 32); | ||
20442 | if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block, | ||
20443 | - &blocksize, &prefix, volume, | ||
20444 | + &blocksize, &sbi->s_prefix, sbi->s_volume, | ||
20445 | &mount_flags)) { | ||
20446 | - kfree(prefix); | ||
20447 | kfree(new_opts); | ||
20448 | return -EINVAL; | ||
20449 | } | ||
20450 | @@ -539,14 +534,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) | ||
20451 | sbi->s_mode = mode; | ||
20452 | sbi->s_uid = uid; | ||
20453 | sbi->s_gid = gid; | ||
20454 | - /* protect against readers */ | ||
20455 | - spin_lock(&sbi->symlink_lock); | ||
20456 | - if (prefix) { | ||
20457 | - kfree(sbi->s_prefix); | ||
20458 | - sbi->s_prefix = prefix; | ||
20459 | - } | ||
20460 | - memcpy(sbi->s_volume, volume, 32); | ||
20461 | - spin_unlock(&sbi->symlink_lock); | ||
20462 | |||
20463 | if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { | ||
20464 | unlock_kernel(); | ||
20465 | diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c | ||
20466 | index ee00f08..4178253 100644 | ||
20467 | --- a/fs/affs/symlink.c | ||
20468 | +++ b/fs/affs/symlink.c | ||
20469 | @@ -20,6 +20,7 @@ static int affs_symlink_readpage(struct file *file, struct page *page) | ||
20470 | int i, j; | ||
20471 | char c; | ||
20472 | char lc; | ||
20473 | + char *pf; | ||
20474 | |||
20475 | pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino); | ||
20476 | |||
20477 | @@ -31,15 +32,11 @@ static int affs_symlink_readpage(struct file *file, struct page *page) | ||
20478 | j = 0; | ||
20479 | lf = (struct slink_front *)bh->b_data; | ||
20480 | lc = 0; | ||
20481 | + pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/"; | ||
20482 | |||
20483 | if (strchr(lf->symname,':')) { /* Handle assign or volume name */ | ||
20484 | - struct affs_sb_info *sbi = AFFS_SB(inode->i_sb); | ||
20485 | - char *pf; | ||
20486 | - spin_lock(&sbi->symlink_lock); | ||
20487 | - pf = sbi->s_prefix ? sbi->s_prefix : "/"; | ||
20488 | while (i < 1023 && (c = pf[i])) | ||
20489 | link[i++] = c; | ||
20490 | - spin_unlock(&sbi->symlink_lock); | ||
20491 | while (i < 1023 && lf->symname[j] != ':') | ||
20492 | link[i++] = lf->symname[j++]; | ||
20493 | if (i < 1023) | ||
20494 | diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c | ||
20495 | index 34ddda8..33baf27 100644 | ||
20496 | --- a/fs/befs/linuxvfs.c | ||
20497 | +++ b/fs/befs/linuxvfs.c | ||
20498 | @@ -873,7 +873,6 @@ befs_fill_super(struct super_block *sb, void *data, int silent) | ||
20499 | brelse(bh); | ||
20500 | |||
20501 | unacquire_priv_sbp: | ||
20502 | - kfree(befs_sb->mount_opts.iocharset); | ||
20503 | kfree(sb->s_fs_info); | ||
20504 | |||
20505 | unacquire_none: | ||
20506 | diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c | ||
20507 | index 8f3d9fd..6f60336 100644 | ||
20508 | --- a/fs/bfs/inode.c | ||
20509 | +++ b/fs/bfs/inode.c | ||
20510 | @@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20511 | struct inode *inode; | ||
20512 | unsigned i, imap_len; | ||
20513 | struct bfs_sb_info *info; | ||
20514 | - int ret = -EINVAL; | ||
20515 | + long ret = -EINVAL; | ||
20516 | unsigned long i_sblock, i_eblock, i_eoff, s_size; | ||
20517 | |||
20518 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
20519 | if (!info) | ||
20520 | return -ENOMEM; | ||
20521 | - mutex_init(&info->bfs_lock); | ||
20522 | s->s_fs_info = info; | ||
20523 | |||
20524 | sb_set_blocksize(s, BFS_BSIZE); | ||
20525 | |||
20526 | - info->si_sbh = sb_bread(s, 0); | ||
20527 | - if (!info->si_sbh) | ||
20528 | + bh = sb_bread(s, 0); | ||
20529 | + if(!bh) | ||
20530 | goto out; | ||
20531 | - bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data; | ||
20532 | + bfs_sb = (struct bfs_super_block *)bh->b_data; | ||
20533 | if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) { | ||
20534 | if (!silent) | ||
20535 | printf("No BFS filesystem on %s (magic=%08x)\n", | ||
20536 | s->s_id, le32_to_cpu(bfs_sb->s_magic)); | ||
20537 | - goto out1; | ||
20538 | + goto out; | ||
20539 | } | ||
20540 | if (BFS_UNCLEAN(bfs_sb, s) && !silent) | ||
20541 | printf("%s is unclean, continuing\n", s->s_id); | ||
20542 | |||
20543 | s->s_magic = BFS_MAGIC; | ||
20544 | + info->si_sbh = bh; | ||
20545 | |||
20546 | if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { | ||
20547 | printf("Superblock is corrupted\n"); | ||
20548 | - goto out1; | ||
20549 | + goto out; | ||
20550 | } | ||
20551 | |||
20552 | info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) / | ||
20553 | @@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20554 | imap_len = (info->si_lasti / 8) + 1; | ||
20555 | info->si_imap = kzalloc(imap_len, GFP_KERNEL); | ||
20556 | if (!info->si_imap) | ||
20557 | - goto out1; | ||
20558 | + goto out; | ||
20559 | for (i = 0; i < BFS_ROOT_INO; i++) | ||
20560 | set_bit(i, info->si_imap); | ||
20561 | |||
20562 | @@ -398,13 +398,15 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20563 | inode = bfs_iget(s, BFS_ROOT_INO); | ||
20564 | if (IS_ERR(inode)) { | ||
20565 | ret = PTR_ERR(inode); | ||
20566 | - goto out2; | ||
20567 | + kfree(info->si_imap); | ||
20568 | + goto out; | ||
20569 | } | ||
20570 | s->s_root = d_alloc_root(inode); | ||
20571 | if (!s->s_root) { | ||
20572 | iput(inode); | ||
20573 | ret = -ENOMEM; | ||
20574 | - goto out2; | ||
20575 | + kfree(info->si_imap); | ||
20576 | + goto out; | ||
20577 | } | ||
20578 | |||
20579 | info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS; | ||
20580 | @@ -417,8 +419,10 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20581 | bh = sb_bread(s, info->si_blocks - 1); | ||
20582 | if (!bh) { | ||
20583 | printf("Last block not available: %lu\n", info->si_blocks - 1); | ||
20584 | + iput(inode); | ||
20585 | ret = -EIO; | ||
20586 | - goto out3; | ||
20587 | + kfree(info->si_imap); | ||
20588 | + goto out; | ||
20589 | } | ||
20590 | brelse(bh); | ||
20591 | |||
20592 | @@ -455,8 +459,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20593 | printf("Inode 0x%08x corrupted\n", i); | ||
20594 | |||
20595 | brelse(bh); | ||
20596 | - ret = -EIO; | ||
20597 | - goto out3; | ||
20598 | + s->s_root = NULL; | ||
20599 | + kfree(info->si_imap); | ||
20600 | + kfree(info); | ||
20601 | + s->s_fs_info = NULL; | ||
20602 | + return -EIO; | ||
20603 | } | ||
20604 | |||
20605 | if (!di->i_ino) { | ||
20606 | @@ -476,17 +483,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) | ||
20607 | s->s_dirt = 1; | ||
20608 | } | ||
20609 | dump_imap("read_super", s); | ||
20610 | + mutex_init(&info->bfs_lock); | ||
20611 | return 0; | ||
20612 | |||
20613 | -out3: | ||
20614 | - dput(s->s_root); | ||
20615 | - s->s_root = NULL; | ||
20616 | -out2: | ||
20617 | - kfree(info->si_imap); | ||
20618 | -out1: | ||
20619 | - brelse(info->si_sbh); | ||
20620 | out: | ||
20621 | - mutex_destroy(&info->bfs_lock); | ||
20622 | + brelse(bh); | ||
20623 | kfree(info); | ||
20624 | s->s_fs_info = NULL; | ||
20625 | return ret; | ||
20626 | diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c | ||
20627 | index 0133b5a..b639dcf 100644 | ||
20628 | --- a/fs/binfmt_aout.c | ||
20629 | +++ b/fs/binfmt_aout.c | ||
20630 | @@ -263,7 +263,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) | ||
20631 | #else | ||
20632 | set_personality(PER_LINUX); | ||
20633 | #endif | ||
20634 | - setup_new_exec(bprm); | ||
20635 | |||
20636 | current->mm->end_code = ex.a_text + | ||
20637 | (current->mm->start_code = N_TXTADDR(ex)); | ||
20638 | diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c | ||
20639 | index 1ed37ba..b9b3bb5 100644 | ||
20640 | --- a/fs/binfmt_elf.c | ||
20641 | +++ b/fs/binfmt_elf.c | ||
20642 | @@ -662,6 +662,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | ||
20643 | if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') | ||
20644 | goto out_free_interp; | ||
20645 | |||
20646 | + /* | ||
20647 | + * The early SET_PERSONALITY here is so that the lookup | ||
20648 | + * for the interpreter happens in the namespace of the | ||
20649 | + * to-be-execed image. SET_PERSONALITY can select an | ||
20650 | + * alternate root. | ||
20651 | + * | ||
20652 | + * However, SET_PERSONALITY is NOT allowed to switch | ||
20653 | + * this task into the new images's memory mapping | ||
20654 | + * policy - that is, TASK_SIZE must still evaluate to | ||
20655 | + * that which is appropriate to the execing application. | ||
20656 | + * This is because exit_mmap() needs to have TASK_SIZE | ||
20657 | + * evaluate to the size of the old image. | ||
20658 | + * | ||
20659 | + * So if (say) a 64-bit application is execing a 32-bit | ||
20660 | + * application it is the architecture's responsibility | ||
20661 | + * to defer changing the value of TASK_SIZE until the | ||
20662 | + * switch really is going to happen - do this in | ||
20663 | + * flush_thread(). - akpm | ||
20664 | + */ | ||
20665 | + SET_PERSONALITY(loc->elf_ex); | ||
20666 | + | ||
20667 | interpreter = open_exec(elf_interpreter); | ||
20668 | retval = PTR_ERR(interpreter); | ||
20669 | if (IS_ERR(interpreter)) | ||
20670 | @@ -709,6 +730,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | ||
20671 | /* Verify the interpreter has a valid arch */ | ||
20672 | if (!elf_check_arch(&loc->interp_elf_ex)) | ||
20673 | goto out_free_dentry; | ||
20674 | + } else { | ||
20675 | + /* Executables without an interpreter also need a personality */ | ||
20676 | + SET_PERSONALITY(loc->elf_ex); | ||
20677 | } | ||
20678 | |||
20679 | /* Flush all traces of the currently running executable */ | ||
20680 | @@ -728,8 +752,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | ||
20681 | |||
20682 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | ||
20683 | current->flags |= PF_RANDOMIZE; | ||
20684 | - | ||
20685 | - setup_new_exec(bprm); | ||
20686 | + arch_pick_mmap_layout(current->mm); | ||
20687 | |||
20688 | /* Do this so that we can load the interpreter, if need be. We will | ||
20689 | change some of these later */ | ||
20690 | diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c | ||
20691 | index e7a0bb4..38502c6 100644 | ||
20692 | --- a/fs/binfmt_elf_fdpic.c | ||
20693 | +++ b/fs/binfmt_elf_fdpic.c | ||
20694 | @@ -171,9 +171,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | ||
20695 | #ifdef ELF_FDPIC_PLAT_INIT | ||
20696 | unsigned long dynaddr; | ||
20697 | #endif | ||
20698 | -#ifndef CONFIG_MMU | ||
20699 | - unsigned long stack_prot; | ||
20700 | -#endif | ||
20701 | struct file *interpreter = NULL; /* to shut gcc up */ | ||
20702 | char *interpreter_name = NULL; | ||
20703 | int executable_stack; | ||
20704 | @@ -319,11 +316,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | ||
20705 | * defunct, deceased, etc. after this point we have to exit via | ||
20706 | * error_kill */ | ||
20707 | set_personality(PER_LINUX_FDPIC); | ||
20708 | - if (elf_read_implies_exec(&exec_params.hdr, executable_stack)) | ||
20709 | - current->personality |= READ_IMPLIES_EXEC; | ||
20710 | - | ||
20711 | - setup_new_exec(bprm); | ||
20712 | - | ||
20713 | set_binfmt(&elf_fdpic_format); | ||
20714 | |||
20715 | current->mm->start_code = 0; | ||
20716 | @@ -385,13 +377,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | ||
20717 | if (stack_size < PAGE_SIZE * 2) | ||
20718 | stack_size = PAGE_SIZE * 2; | ||
20719 | |||
20720 | - stack_prot = PROT_READ | PROT_WRITE; | ||
20721 | - if (executable_stack == EXSTACK_ENABLE_X || | ||
20722 | - (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC)) | ||
20723 | - stack_prot |= PROT_EXEC; | ||
20724 | - | ||
20725 | down_write(¤t->mm->mmap_sem); | ||
20726 | - current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot, | ||
20727 | + current->mm->start_brk = do_mmap(NULL, 0, stack_size, | ||
20728 | + PROT_READ | PROT_WRITE | PROT_EXEC, | ||
20729 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, | ||
20730 | 0); | ||
20731 | |||
20732 | diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c | ||
20733 | index ca88c46..a279665 100644 | ||
20734 | --- a/fs/binfmt_flat.c | ||
20735 | +++ b/fs/binfmt_flat.c | ||
20736 | @@ -519,7 +519,6 @@ static int load_flat_file(struct linux_binprm * bprm, | ||
20737 | |||
20738 | /* OK, This is the point of no return */ | ||
20739 | set_personality(PER_LINUX_32BIT); | ||
20740 | - setup_new_exec(bprm); | ||
20741 | } | ||
20742 | |||
20743 | /* | ||
20744 | diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c | ||
20745 | index 35cf002..eff74b9 100644 | ||
20746 | --- a/fs/binfmt_som.c | ||
20747 | +++ b/fs/binfmt_som.c | ||
20748 | @@ -227,7 +227,6 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) | ||
20749 | /* OK, This is the point of no return */ | ||
20750 | current->flags &= ~PF_FORKNOEXEC; | ||
20751 | current->personality = PER_HPUX; | ||
20752 | - setup_new_exec(bprm); | ||
20753 | |||
20754 | /* Set the task size for HP-UX processes such that | ||
20755 | * the gateway page is outside the address space. | ||
20756 | diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c | ||
20757 | index a16f29e..49a34e7 100644 | ||
20758 | --- a/fs/bio-integrity.c | ||
20759 | +++ b/fs/bio-integrity.c | ||
20760 | @@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr) | ||
20761 | |||
20762 | static inline int use_bip_pool(unsigned int idx) | ||
20763 | { | ||
20764 | - if (idx == BIOVEC_MAX_IDX) | ||
20765 | + if (idx == BIOVEC_NR_POOLS) | ||
20766 | return 1; | ||
20767 | |||
20768 | return 0; | ||
20769 | @@ -95,7 +95,6 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio, | ||
20770 | |||
20771 | /* Use mempool if lower order alloc failed or max vecs were requested */ | ||
20772 | if (bip == NULL) { | ||
20773 | - idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ | ||
20774 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); | ||
20775 | |||
20776 | if (unlikely(bip == NULL)) { | ||
20777 | diff --git a/fs/bio.c b/fs/bio.c | ||
20778 | index e0c9e71..12da5db 100644 | ||
20779 | --- a/fs/bio.c | ||
20780 | +++ b/fs/bio.c | ||
20781 | @@ -542,18 +542,13 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | ||
20782 | |||
20783 | if (page == prev->bv_page && | ||
20784 | offset == prev->bv_offset + prev->bv_len) { | ||
20785 | - unsigned int prev_bv_len = prev->bv_len; | ||
20786 | prev->bv_len += len; | ||
20787 | |||
20788 | if (q->merge_bvec_fn) { | ||
20789 | struct bvec_merge_data bvm = { | ||
20790 | - /* prev_bvec is already charged in | ||
20791 | - bi_size, discharge it in order to | ||
20792 | - simulate merging updated prev_bvec | ||
20793 | - as new bvec. */ | ||
20794 | .bi_bdev = bio->bi_bdev, | ||
20795 | .bi_sector = bio->bi_sector, | ||
20796 | - .bi_size = bio->bi_size - prev_bv_len, | ||
20797 | + .bi_size = bio->bi_size, | ||
20798 | .bi_rw = bio->bi_rw, | ||
20799 | }; | ||
20800 | |||
20801 | diff --git a/fs/block_dev.c b/fs/block_dev.c | ||
20802 | index 34e2d20..8bed055 100644 | ||
20803 | --- a/fs/block_dev.c | ||
20804 | +++ b/fs/block_dev.c | ||
20805 | @@ -246,8 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) | ||
20806 | if (!sb) | ||
20807 | goto out; | ||
20808 | if (sb->s_flags & MS_RDONLY) { | ||
20809 | - sb->s_frozen = SB_FREEZE_TRANS; | ||
20810 | - up_write(&sb->s_umount); | ||
20811 | + deactivate_locked_super(sb); | ||
20812 | mutex_unlock(&bdev->bd_fsfreeze_mutex); | ||
20813 | return sb; | ||
20814 | } | ||
20815 | @@ -308,7 +307,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) | ||
20816 | BUG_ON(sb->s_bdev != bdev); | ||
20817 | down_write(&sb->s_umount); | ||
20818 | if (sb->s_flags & MS_RDONLY) | ||
20819 | - goto out_unfrozen; | ||
20820 | + goto out_deactivate; | ||
20821 | |||
20822 | if (sb->s_op->unfreeze_fs) { | ||
20823 | error = sb->s_op->unfreeze_fs(sb); | ||
20824 | @@ -322,11 +321,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb) | ||
20825 | } | ||
20826 | } | ||
20827 | |||
20828 | -out_unfrozen: | ||
20829 | sb->s_frozen = SB_UNFROZEN; | ||
20830 | smp_wmb(); | ||
20831 | wake_up(&sb->s_wait_unfrozen); | ||
20832 | |||
20833 | +out_deactivate: | ||
20834 | if (sb) | ||
20835 | deactivate_locked_super(sb); | ||
20836 | out_unlock: | ||
20837 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c | ||
20838 | index 3bbcaa7..63ea83f 100644 | ||
20839 | --- a/fs/cifs/connect.c | ||
20840 | +++ b/fs/cifs/connect.c | ||
20841 | @@ -2287,12 +2287,12 @@ int | ||
20842 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | ||
20843 | char *mount_data_global, const char *devname) | ||
20844 | { | ||
20845 | - int rc; | ||
20846 | + int rc = 0; | ||
20847 | int xid; | ||
20848 | struct smb_vol *volume_info; | ||
20849 | - struct cifsSesInfo *pSesInfo; | ||
20850 | - struct cifsTconInfo *tcon; | ||
20851 | - struct TCP_Server_Info *srvTcp; | ||
20852 | + struct cifsSesInfo *pSesInfo = NULL; | ||
20853 | + struct cifsTconInfo *tcon = NULL; | ||
20854 | + struct TCP_Server_Info *srvTcp = NULL; | ||
20855 | char *full_path; | ||
20856 | char *mount_data = mount_data_global; | ||
20857 | #ifdef CONFIG_CIFS_DFS_UPCALL | ||
20858 | @@ -2301,10 +2301,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | ||
20859 | int referral_walks_count = 0; | ||
20860 | try_mount_again: | ||
20861 | #endif | ||
20862 | - rc = 0; | ||
20863 | - tcon = NULL; | ||
20864 | - pSesInfo = NULL; | ||
20865 | - srvTcp = NULL; | ||
20866 | full_path = NULL; | ||
20867 | |||
20868 | xid = GetXid(); | ||
20869 | @@ -2601,7 +2597,6 @@ remote_path_check: | ||
20870 | |||
20871 | cleanup_volume_info(&volume_info); | ||
20872 | referral_walks_count++; | ||
20873 | - FreeXid(xid); | ||
20874 | goto try_mount_again; | ||
20875 | } | ||
20876 | #else /* No DFS support, return error on mount */ | ||
20877 | diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c | ||
20878 | index f5618f8..f84062f 100644 | ||
20879 | --- a/fs/cifs/readdir.c | ||
20880 | +++ b/fs/cifs/readdir.c | ||
20881 | @@ -666,7 +666,6 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst, | ||
20882 | min(len, max_len), nlt, | ||
20883 | cifs_sb->mnt_cifs_flags & | ||
20884 | CIFS_MOUNT_MAP_SPECIAL_CHR); | ||
20885 | - pqst->len -= nls_nullsize(nlt); | ||
20886 | } else { | ||
20887 | pqst->name = filename; | ||
20888 | pqst->len = len; | ||
20889 | diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c | ||
20890 | index 39c6ee8..d22438e 100644 | ||
20891 | --- a/fs/debugfs/inode.c | ||
20892 | +++ b/fs/debugfs/inode.c | ||
20893 | @@ -32,9 +32,7 @@ static struct vfsmount *debugfs_mount; | ||
20894 | static int debugfs_mount_count; | ||
20895 | static bool debugfs_registered; | ||
20896 | |||
20897 | -static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev, | ||
20898 | - void *data, const struct file_operations *fops) | ||
20899 | - | ||
20900 | +static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev) | ||
20901 | { | ||
20902 | struct inode *inode = new_inode(sb); | ||
20903 | |||
20904 | @@ -46,18 +44,14 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d | ||
20905 | init_special_inode(inode, mode, dev); | ||
20906 | break; | ||
20907 | case S_IFREG: | ||
20908 | - inode->i_fop = fops ? fops : &debugfs_file_operations; | ||
20909 | - inode->i_private = data; | ||
20910 | + inode->i_fop = &debugfs_file_operations; | ||
20911 | break; | ||
20912 | case S_IFLNK: | ||
20913 | inode->i_op = &debugfs_link_operations; | ||
20914 | - inode->i_fop = fops; | ||
20915 | - inode->i_private = data; | ||
20916 | break; | ||
20917 | case S_IFDIR: | ||
20918 | inode->i_op = &simple_dir_inode_operations; | ||
20919 | - inode->i_fop = fops ? fops : &simple_dir_operations; | ||
20920 | - inode->i_private = data; | ||
20921 | + inode->i_fop = &simple_dir_operations; | ||
20922 | |||
20923 | /* directory inodes start off with i_nlink == 2 | ||
20924 | * (for "." entry) */ | ||
20925 | @@ -70,8 +64,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d | ||
20926 | |||
20927 | /* SMP-safe */ | ||
20928 | static int debugfs_mknod(struct inode *dir, struct dentry *dentry, | ||
20929 | - int mode, dev_t dev, void *data, | ||
20930 | - const struct file_operations *fops) | ||
20931 | + int mode, dev_t dev) | ||
20932 | { | ||
20933 | struct inode *inode; | ||
20934 | int error = -EPERM; | ||
20935 | @@ -79,7 +72,7 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, | ||
20936 | if (dentry->d_inode) | ||
20937 | return -EEXIST; | ||
20938 | |||
20939 | - inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops); | ||
20940 | + inode = debugfs_get_inode(dir->i_sb, mode, dev); | ||
20941 | if (inode) { | ||
20942 | d_instantiate(dentry, inode); | ||
20943 | dget(dentry); | ||
20944 | @@ -88,13 +81,12 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, | ||
20945 | return error; | ||
20946 | } | ||
20947 | |||
20948 | -static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode, | ||
20949 | - void *data, const struct file_operations *fops) | ||
20950 | +static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
20951 | { | ||
20952 | int res; | ||
20953 | |||
20954 | mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; | ||
20955 | - res = debugfs_mknod(dir, dentry, mode, 0, data, fops); | ||
20956 | + res = debugfs_mknod(dir, dentry, mode, 0); | ||
20957 | if (!res) { | ||
20958 | inc_nlink(dir); | ||
20959 | fsnotify_mkdir(dir, dentry); | ||
20960 | @@ -102,20 +94,18 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, int mode, | ||
20961 | return res; | ||
20962 | } | ||
20963 | |||
20964 | -static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode, | ||
20965 | - void *data, const struct file_operations *fops) | ||
20966 | +static int debugfs_link(struct inode *dir, struct dentry *dentry, int mode) | ||
20967 | { | ||
20968 | mode = (mode & S_IALLUGO) | S_IFLNK; | ||
20969 | - return debugfs_mknod(dir, dentry, mode, 0, data, fops); | ||
20970 | + return debugfs_mknod(dir, dentry, mode, 0); | ||
20971 | } | ||
20972 | |||
20973 | -static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode, | ||
20974 | - void *data, const struct file_operations *fops) | ||
20975 | +static int debugfs_create(struct inode *dir, struct dentry *dentry, int mode) | ||
20976 | { | ||
20977 | int res; | ||
20978 | |||
20979 | mode = (mode & S_IALLUGO) | S_IFREG; | ||
20980 | - res = debugfs_mknod(dir, dentry, mode, 0, data, fops); | ||
20981 | + res = debugfs_mknod(dir, dentry, mode, 0); | ||
20982 | if (!res) | ||
20983 | fsnotify_create(dir, dentry); | ||
20984 | return res; | ||
20985 | @@ -149,9 +139,7 @@ static struct file_system_type debug_fs_type = { | ||
20986 | |||
20987 | static int debugfs_create_by_name(const char *name, mode_t mode, | ||
20988 | struct dentry *parent, | ||
20989 | - struct dentry **dentry, | ||
20990 | - void *data, | ||
20991 | - const struct file_operations *fops) | ||
20992 | + struct dentry **dentry) | ||
20993 | { | ||
20994 | int error = 0; | ||
20995 | |||
20996 | @@ -176,16 +164,13 @@ static int debugfs_create_by_name(const char *name, mode_t mode, | ||
20997 | if (!IS_ERR(*dentry)) { | ||
20998 | switch (mode & S_IFMT) { | ||
20999 | case S_IFDIR: | ||
21000 | - error = debugfs_mkdir(parent->d_inode, *dentry, mode, | ||
21001 | - data, fops); | ||
21002 | + error = debugfs_mkdir(parent->d_inode, *dentry, mode); | ||
21003 | break; | ||
21004 | case S_IFLNK: | ||
21005 | - error = debugfs_link(parent->d_inode, *dentry, mode, | ||
21006 | - data, fops); | ||
21007 | + error = debugfs_link(parent->d_inode, *dentry, mode); | ||
21008 | break; | ||
21009 | default: | ||
21010 | - error = debugfs_create(parent->d_inode, *dentry, mode, | ||
21011 | - data, fops); | ||
21012 | + error = debugfs_create(parent->d_inode, *dentry, mode); | ||
21013 | break; | ||
21014 | } | ||
21015 | dput(*dentry); | ||
21016 | @@ -236,13 +221,19 @@ struct dentry *debugfs_create_file(const char *name, mode_t mode, | ||
21017 | if (error) | ||
21018 | goto exit; | ||
21019 | |||
21020 | - error = debugfs_create_by_name(name, mode, parent, &dentry, | ||
21021 | - data, fops); | ||
21022 | + error = debugfs_create_by_name(name, mode, parent, &dentry); | ||
21023 | if (error) { | ||
21024 | dentry = NULL; | ||
21025 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | ||
21026 | goto exit; | ||
21027 | } | ||
21028 | + | ||
21029 | + if (dentry->d_inode) { | ||
21030 | + if (data) | ||
21031 | + dentry->d_inode->i_private = data; | ||
21032 | + if (fops) | ||
21033 | + dentry->d_inode->i_fop = fops; | ||
21034 | + } | ||
21035 | exit: | ||
21036 | return dentry; | ||
21037 | } | ||
21038 | diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c | ||
21039 | index 8882ecc..d5f8c96 100644 | ||
21040 | --- a/fs/devpts/inode.c | ||
21041 | +++ b/fs/devpts/inode.c | ||
21042 | @@ -517,23 +517,11 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) | ||
21043 | |||
21044 | struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number) | ||
21045 | { | ||
21046 | - struct dentry *dentry; | ||
21047 | - struct tty_struct *tty; | ||
21048 | - | ||
21049 | BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); | ||
21050 | |||
21051 | - /* Ensure dentry has not been deleted by devpts_pty_kill() */ | ||
21052 | - dentry = d_find_alias(pts_inode); | ||
21053 | - if (!dentry) | ||
21054 | - return NULL; | ||
21055 | - | ||
21056 | - tty = NULL; | ||
21057 | if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) | ||
21058 | - tty = (struct tty_struct *)pts_inode->i_private; | ||
21059 | - | ||
21060 | - dput(dentry); | ||
21061 | - | ||
21062 | - return tty; | ||
21063 | + return (struct tty_struct *)pts_inode->i_private; | ||
21064 | + return NULL; | ||
21065 | } | ||
21066 | |||
21067 | void devpts_pty_kill(struct tty_struct *tty) | ||
21068 | diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c | ||
21069 | index 7cb0a59..fbb6e5e 100644 | ||
21070 | --- a/fs/ecryptfs/crypto.c | ||
21071 | +++ b/fs/ecryptfs/crypto.c | ||
21072 | @@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, | ||
21073 | char *cipher_name, size_t *key_size) | ||
21074 | { | ||
21075 | char dummy_key[ECRYPTFS_MAX_KEY_BYTES]; | ||
21076 | - char *full_alg_name = NULL; | ||
21077 | + char *full_alg_name; | ||
21078 | int rc; | ||
21079 | |||
21080 | *key_tfm = NULL; | ||
21081 | @@ -1763,6 +1763,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, | ||
21082 | if (rc) | ||
21083 | goto out; | ||
21084 | *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC); | ||
21085 | + kfree(full_alg_name); | ||
21086 | if (IS_ERR(*key_tfm)) { | ||
21087 | rc = PTR_ERR(*key_tfm); | ||
21088 | printk(KERN_ERR "Unable to allocate crypto cipher with name " | ||
21089 | @@ -1785,7 +1786,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm, | ||
21090 | goto out; | ||
21091 | } | ||
21092 | out: | ||
21093 | - kfree(full_alg_name); | ||
21094 | return rc; | ||
21095 | } | ||
21096 | |||
21097 | diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c | ||
21098 | index 1744f17..9e94405 100644 | ||
21099 | --- a/fs/ecryptfs/file.c | ||
21100 | +++ b/fs/ecryptfs/file.c | ||
21101 | @@ -191,6 +191,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | ||
21102 | | ECRYPTFS_ENCRYPTED); | ||
21103 | } | ||
21104 | mutex_unlock(&crypt_stat->cs_mutex); | ||
21105 | + if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) | ||
21106 | + && !(file->f_flags & O_RDONLY)) { | ||
21107 | + rc = -EPERM; | ||
21108 | + printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " | ||
21109 | + "file must hence be opened RO\n", __func__); | ||
21110 | + goto out; | ||
21111 | + } | ||
21112 | if (!ecryptfs_inode_to_private(inode)->lower_file) { | ||
21113 | rc = ecryptfs_init_persistent_file(ecryptfs_dentry); | ||
21114 | if (rc) { | ||
21115 | @@ -201,13 +208,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | ||
21116 | goto out; | ||
21117 | } | ||
21118 | } | ||
21119 | - if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) | ||
21120 | - && !(file->f_flags & O_RDONLY)) { | ||
21121 | - rc = -EPERM; | ||
21122 | - printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " | ||
21123 | - "file must hence be opened RO\n", __func__); | ||
21124 | - goto out; | ||
21125 | - } | ||
21126 | ecryptfs_set_file_lower( | ||
21127 | file, ecryptfs_inode_to_private(inode)->lower_file); | ||
21128 | if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { | ||
21129 | diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c | ||
21130 | index 728f07e..056fed6 100644 | ||
21131 | --- a/fs/ecryptfs/inode.c | ||
21132 | +++ b/fs/ecryptfs/inode.c | ||
21133 | @@ -971,21 +971,6 @@ out: | ||
21134 | return rc; | ||
21135 | } | ||
21136 | |||
21137 | -int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | ||
21138 | - struct kstat *stat) | ||
21139 | -{ | ||
21140 | - struct kstat lower_stat; | ||
21141 | - int rc; | ||
21142 | - | ||
21143 | - rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), | ||
21144 | - ecryptfs_dentry_to_lower(dentry), &lower_stat); | ||
21145 | - if (!rc) { | ||
21146 | - generic_fillattr(dentry->d_inode, stat); | ||
21147 | - stat->blocks = lower_stat.blocks; | ||
21148 | - } | ||
21149 | - return rc; | ||
21150 | -} | ||
21151 | - | ||
21152 | int | ||
21153 | ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | ||
21154 | size_t size, int flags) | ||
21155 | @@ -1115,7 +1100,6 @@ const struct inode_operations ecryptfs_dir_iops = { | ||
21156 | const struct inode_operations ecryptfs_main_iops = { | ||
21157 | .permission = ecryptfs_permission, | ||
21158 | .setattr = ecryptfs_setattr, | ||
21159 | - .getattr = ecryptfs_getattr, | ||
21160 | .setxattr = ecryptfs_setxattr, | ||
21161 | .getxattr = ecryptfs_getxattr, | ||
21162 | .listxattr = ecryptfs_listxattr, | ||
21163 | diff --git a/fs/exec.c b/fs/exec.c | ||
21164 | index da36c20..606cf96 100644 | ||
21165 | --- a/fs/exec.c | ||
21166 | +++ b/fs/exec.c | ||
21167 | @@ -19,7 +19,7 @@ | ||
21168 | * current->executable is only used by the procfs. This allows a dispatch | ||
21169 | * table to check for several different types of binary formats. We keep | ||
21170 | * trying until we recognize the file or we run out of supported binary | ||
21171 | - * formats. | ||
21172 | + * formats. | ||
21173 | */ | ||
21174 | |||
21175 | #include <linux/slab.h> | ||
21176 | @@ -57,6 +57,8 @@ | ||
21177 | #include <linux/fs_struct.h> | ||
21178 | #include <linux/pipe_fs_i.h> | ||
21179 | |||
21180 | +#include <litmus/litmus.h> | ||
21181 | + | ||
21182 | #include <asm/uaccess.h> | ||
21183 | #include <asm/mmu_context.h> | ||
21184 | #include <asm/tlb.h> | ||
21185 | @@ -80,7 +82,7 @@ int __register_binfmt(struct linux_binfmt * fmt, int insert) | ||
21186 | insert ? list_add(&fmt->lh, &formats) : | ||
21187 | list_add_tail(&fmt->lh, &formats); | ||
21188 | write_unlock(&binfmt_lock); | ||
21189 | - return 0; | ||
21190 | + return 0; | ||
21191 | } | ||
21192 | |||
21193 | EXPORT_SYMBOL(__register_binfmt); | ||
21194 | @@ -572,9 +574,6 @@ int setup_arg_pages(struct linux_binprm *bprm, | ||
21195 | struct vm_area_struct *prev = NULL; | ||
21196 | unsigned long vm_flags; | ||
21197 | unsigned long stack_base; | ||
21198 | - unsigned long stack_size; | ||
21199 | - unsigned long stack_expand; | ||
21200 | - unsigned long rlim_stack; | ||
21201 | |||
21202 | #ifdef CONFIG_STACK_GROWSUP | ||
21203 | /* Limit stack size to 1GB */ | ||
21204 | @@ -631,24 +630,10 @@ int setup_arg_pages(struct linux_binprm *bprm, | ||
21205 | goto out_unlock; | ||
21206 | } | ||
21207 | |||
21208 | - stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE; | ||
21209 | - stack_size = vma->vm_end - vma->vm_start; | ||
21210 | - /* | ||
21211 | - * Align this down to a page boundary as expand_stack | ||
21212 | - * will align it up. | ||
21213 | - */ | ||
21214 | - rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK; | ||
21215 | - rlim_stack = min(rlim_stack, stack_size); | ||
21216 | #ifdef CONFIG_STACK_GROWSUP | ||
21217 | - if (stack_size + stack_expand > rlim_stack) | ||
21218 | - stack_base = vma->vm_start + rlim_stack; | ||
21219 | - else | ||
21220 | - stack_base = vma->vm_end + stack_expand; | ||
21221 | + stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; | ||
21222 | #else | ||
21223 | - if (stack_size + stack_expand > rlim_stack) | ||
21224 | - stack_base = vma->vm_end - rlim_stack; | ||
21225 | - else | ||
21226 | - stack_base = vma->vm_start - stack_expand; | ||
21227 | + stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; | ||
21228 | #endif | ||
21229 | ret = expand_stack(vma, stack_base); | ||
21230 | if (ret) | ||
21231 | @@ -948,7 +933,9 @@ void set_task_comm(struct task_struct *tsk, char *buf) | ||
21232 | |||
21233 | int flush_old_exec(struct linux_binprm * bprm) | ||
21234 | { | ||
21235 | - int retval; | ||
21236 | + char * name; | ||
21237 | + int i, ch, retval; | ||
21238 | + char tcomm[sizeof(current->comm)]; | ||
21239 | |||
21240 | /* | ||
21241 | * Make sure we have a private signal table and that | ||
21242 | @@ -969,25 +956,6 @@ int flush_old_exec(struct linux_binprm * bprm) | ||
21243 | |||
21244 | bprm->mm = NULL; /* We're using it now */ | ||
21245 | |||
21246 | - current->flags &= ~PF_RANDOMIZE; | ||
21247 | - flush_thread(); | ||
21248 | - current->personality &= ~bprm->per_clear; | ||
21249 | - | ||
21250 | - return 0; | ||
21251 | - | ||
21252 | -out: | ||
21253 | - return retval; | ||
21254 | -} | ||
21255 | -EXPORT_SYMBOL(flush_old_exec); | ||
21256 | - | ||
21257 | -void setup_new_exec(struct linux_binprm * bprm) | ||
21258 | -{ | ||
21259 | - int i, ch; | ||
21260 | - char * name; | ||
21261 | - char tcomm[sizeof(current->comm)]; | ||
21262 | - | ||
21263 | - arch_pick_mmap_layout(current->mm); | ||
21264 | - | ||
21265 | /* This is the point of no return */ | ||
21266 | current->sas_ss_sp = current->sas_ss_size = 0; | ||
21267 | |||
21268 | @@ -1009,6 +977,9 @@ void setup_new_exec(struct linux_binprm * bprm) | ||
21269 | tcomm[i] = '\0'; | ||
21270 | set_task_comm(current, tcomm); | ||
21271 | |||
21272 | + current->flags &= ~PF_RANDOMIZE; | ||
21273 | + flush_thread(); | ||
21274 | + | ||
21275 | /* Set the new mm task size. We have to do that late because it may | ||
21276 | * depend on TIF_32BIT which is only updated in flush_thread() on | ||
21277 | * some architectures like powerpc | ||
21278 | @@ -1024,6 +995,8 @@ void setup_new_exec(struct linux_binprm * bprm) | ||
21279 | set_dumpable(current->mm, suid_dumpable); | ||
21280 | } | ||
21281 | |||
21282 | + current->personality &= ~bprm->per_clear; | ||
21283 | + | ||
21284 | /* | ||
21285 | * Flush performance counters when crossing a | ||
21286 | * security domain: | ||
21287 | @@ -1035,11 +1008,17 @@ void setup_new_exec(struct linux_binprm * bprm) | ||
21288 | group */ | ||
21289 | |||
21290 | current->self_exec_id++; | ||
21291 | - | ||
21292 | + | ||
21293 | flush_signal_handlers(current, 0); | ||
21294 | flush_old_files(current->files); | ||
21295 | + | ||
21296 | + return 0; | ||
21297 | + | ||
21298 | +out: | ||
21299 | + return retval; | ||
21300 | } | ||
21301 | -EXPORT_SYMBOL(setup_new_exec); | ||
21302 | + | ||
21303 | +EXPORT_SYMBOL(flush_old_exec); | ||
21304 | |||
21305 | /* | ||
21306 | * Prepare credentials and lock ->cred_guard_mutex. | ||
21307 | @@ -1125,8 +1104,8 @@ int check_unsafe_exec(struct linux_binprm *bprm) | ||
21308 | return res; | ||
21309 | } | ||
21310 | |||
21311 | -/* | ||
21312 | - * Fill the binprm structure from the inode. | ||
21313 | +/* | ||
21314 | + * Fill the binprm structure from the inode. | ||
21315 | * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes | ||
21316 | * | ||
21317 | * This may be called multiple times for binary chains (scripts for example). | ||
21318 | @@ -1341,6 +1320,7 @@ int do_execve(char * filename, | ||
21319 | goto out_unmark; | ||
21320 | |||
21321 | sched_exec(); | ||
21322 | + litmus_exec(); | ||
21323 | |||
21324 | bprm->file = file; | ||
21325 | bprm->filename = filename; | ||
21326 | diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c | ||
21327 | index 6f7df0f..6c10f74 100644 | ||
21328 | --- a/fs/exofs/inode.c | ||
21329 | +++ b/fs/exofs/inode.c | ||
21330 | @@ -731,28 +731,13 @@ static int exofs_write_begin_export(struct file *file, | ||
21331 | fsdata); | ||
21332 | } | ||
21333 | |||
21334 | -static int exofs_write_end(struct file *file, struct address_space *mapping, | ||
21335 | - loff_t pos, unsigned len, unsigned copied, | ||
21336 | - struct page *page, void *fsdata) | ||
21337 | -{ | ||
21338 | - struct inode *inode = mapping->host; | ||
21339 | - /* According to comment in simple_write_end i_mutex is held */ | ||
21340 | - loff_t i_size = inode->i_size; | ||
21341 | - int ret; | ||
21342 | - | ||
21343 | - ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata); | ||
21344 | - if (i_size != inode->i_size) | ||
21345 | - mark_inode_dirty(inode); | ||
21346 | - return ret; | ||
21347 | -} | ||
21348 | - | ||
21349 | const struct address_space_operations exofs_aops = { | ||
21350 | .readpage = exofs_readpage, | ||
21351 | .readpages = exofs_readpages, | ||
21352 | .writepage = exofs_writepage, | ||
21353 | .writepages = exofs_writepages, | ||
21354 | .write_begin = exofs_write_begin_export, | ||
21355 | - .write_end = exofs_write_end, | ||
21356 | + .write_end = simple_write_end, | ||
21357 | }; | ||
21358 | |||
21359 | /****************************************************************************** | ||
21360 | diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c | ||
21361 | index f9d6937..354ed3b 100644 | ||
21362 | --- a/fs/ext3/inode.c | ||
21363 | +++ b/fs/ext3/inode.c | ||
21364 | @@ -1151,16 +1151,6 @@ static int do_journal_get_write_access(handle_t *handle, | ||
21365 | return ext3_journal_get_write_access(handle, bh); | ||
21366 | } | ||
21367 | |||
21368 | -/* | ||
21369 | - * Truncate blocks that were not used by write. We have to truncate the | ||
21370 | - * pagecache as well so that corresponding buffers get properly unmapped. | ||
21371 | - */ | ||
21372 | -static void ext3_truncate_failed_write(struct inode *inode) | ||
21373 | -{ | ||
21374 | - truncate_inode_pages(inode->i_mapping, inode->i_size); | ||
21375 | - ext3_truncate(inode); | ||
21376 | -} | ||
21377 | - | ||
21378 | static int ext3_write_begin(struct file *file, struct address_space *mapping, | ||
21379 | loff_t pos, unsigned len, unsigned flags, | ||
21380 | struct page **pagep, void **fsdata) | ||
21381 | @@ -1219,7 +1209,7 @@ write_begin_failed: | ||
21382 | unlock_page(page); | ||
21383 | page_cache_release(page); | ||
21384 | if (pos + len > inode->i_size) | ||
21385 | - ext3_truncate_failed_write(inode); | ||
21386 | + ext3_truncate(inode); | ||
21387 | } | ||
21388 | if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) | ||
21389 | goto retry; | ||
21390 | @@ -1314,7 +1304,7 @@ static int ext3_ordered_write_end(struct file *file, | ||
21391 | page_cache_release(page); | ||
21392 | |||
21393 | if (pos + len > inode->i_size) | ||
21394 | - ext3_truncate_failed_write(inode); | ||
21395 | + ext3_truncate(inode); | ||
21396 | return ret ? ret : copied; | ||
21397 | } | ||
21398 | |||
21399 | @@ -1340,7 +1330,7 @@ static int ext3_writeback_write_end(struct file *file, | ||
21400 | page_cache_release(page); | ||
21401 | |||
21402 | if (pos + len > inode->i_size) | ||
21403 | - ext3_truncate_failed_write(inode); | ||
21404 | + ext3_truncate(inode); | ||
21405 | return ret ? ret : copied; | ||
21406 | } | ||
21407 | |||
21408 | @@ -1393,7 +1383,7 @@ static int ext3_journalled_write_end(struct file *file, | ||
21409 | page_cache_release(page); | ||
21410 | |||
21411 | if (pos + len > inode->i_size) | ||
21412 | - ext3_truncate_failed_write(inode); | ||
21413 | + ext3_truncate(inode); | ||
21414 | return ret ? ret : copied; | ||
21415 | } | ||
21416 | |||
21417 | diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c | ||
21418 | index f3032c9..1d04189 100644 | ||
21419 | --- a/fs/ext4/balloc.c | ||
21420 | +++ b/fs/ext4/balloc.c | ||
21421 | @@ -761,13 +761,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, | ||
21422 | static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, | ||
21423 | ext4_group_t group) | ||
21424 | { | ||
21425 | - if (!ext4_bg_has_super(sb, group)) | ||
21426 | - return 0; | ||
21427 | - | ||
21428 | - if (EXT4_HAS_INCOMPAT_FEATURE(sb,EXT4_FEATURE_INCOMPAT_META_BG)) | ||
21429 | - return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg); | ||
21430 | - else | ||
21431 | - return EXT4_SB(sb)->s_gdb_count; | ||
21432 | + return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0; | ||
21433 | } | ||
21434 | |||
21435 | /** | ||
21436 | diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c | ||
21437 | index dc79b75..50784ef 100644 | ||
21438 | --- a/fs/ext4/block_validity.c | ||
21439 | +++ b/fs/ext4/block_validity.c | ||
21440 | @@ -160,7 +160,7 @@ int ext4_setup_system_zone(struct super_block *sb) | ||
21441 | if (ext4_bg_has_super(sb, i) && | ||
21442 | ((i < 5) || ((i % flex_size) == 0))) | ||
21443 | add_system_zone(sbi, ext4_group_first_block_no(sb, i), | ||
21444 | - ext4_bg_num_gdb(sb, i) + 1); | ||
21445 | + sbi->s_gdb_count + 1); | ||
21446 | gdp = ext4_get_group_desc(sb, i, NULL); | ||
21447 | ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1); | ||
21448 | if (ret) | ||
21449 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h | ||
21450 | index d0a2afb..8825515 100644 | ||
21451 | --- a/fs/ext4/ext4.h | ||
21452 | +++ b/fs/ext4/ext4.h | ||
21453 | @@ -698,22 +698,11 @@ struct ext4_inode_info { | ||
21454 | __u16 i_extra_isize; | ||
21455 | |||
21456 | spinlock_t i_block_reservation_lock; | ||
21457 | -#ifdef CONFIG_QUOTA | ||
21458 | - /* quota space reservation, managed internally by quota code */ | ||
21459 | - qsize_t i_reserved_quota; | ||
21460 | -#endif | ||
21461 | |||
21462 | /* completed async DIOs that might need unwritten extents handling */ | ||
21463 | struct list_head i_aio_dio_complete_list; | ||
21464 | /* current io_end structure for async DIO write*/ | ||
21465 | ext4_io_end_t *cur_aio_dio; | ||
21466 | - | ||
21467 | - /* | ||
21468 | - * Transactions that contain inode's metadata needed to complete | ||
21469 | - * fsync and fdatasync, respectively. | ||
21470 | - */ | ||
21471 | - tid_t i_sync_tid; | ||
21472 | - tid_t i_datasync_tid; | ||
21473 | }; | ||
21474 | |||
21475 | /* | ||
21476 | @@ -761,7 +750,6 @@ struct ext4_inode_info { | ||
21477 | #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ | ||
21478 | #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ | ||
21479 | #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ | ||
21480 | -#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */ | ||
21481 | |||
21482 | #define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt | ||
21483 | #define set_opt(o, opt) o |= EXT4_MOUNT_##opt | ||
21484 | @@ -1436,7 +1424,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | ||
21485 | extern int ext4_block_truncate_page(handle_t *handle, | ||
21486 | struct address_space *mapping, loff_t from); | ||
21487 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | ||
21488 | -extern qsize_t *ext4_get_reserved_space(struct inode *inode); | ||
21489 | +extern qsize_t ext4_get_reserved_space(struct inode *inode); | ||
21490 | extern int flush_aio_dio_completed_IO(struct inode *inode); | ||
21491 | /* ioctl.c */ | ||
21492 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | ||
21493 | diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h | ||
21494 | index 1892a77..a286598 100644 | ||
21495 | --- a/fs/ext4/ext4_jbd2.h | ||
21496 | +++ b/fs/ext4/ext4_jbd2.h | ||
21497 | @@ -49,7 +49,7 @@ | ||
21498 | |||
21499 | #define EXT4_DATA_TRANS_BLOCKS(sb) (EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \ | ||
21500 | EXT4_XATTR_TRANS_BLOCKS - 2 + \ | ||
21501 | - EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) | ||
21502 | + 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) | ||
21503 | |||
21504 | /* | ||
21505 | * Define the number of metadata blocks we need to account to modify data. | ||
21506 | @@ -57,7 +57,7 @@ | ||
21507 | * This include super block, inode block, quota blocks and xattr blocks | ||
21508 | */ | ||
21509 | #define EXT4_META_TRANS_BLOCKS(sb) (EXT4_XATTR_TRANS_BLOCKS + \ | ||
21510 | - EXT4_MAXQUOTAS_TRANS_BLOCKS(sb)) | ||
21511 | + 2*EXT4_QUOTA_TRANS_BLOCKS(sb)) | ||
21512 | |||
21513 | /* Delete operations potentially hit one directory's namespace plus an | ||
21514 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be | ||
21515 | @@ -92,7 +92,6 @@ | ||
21516 | * but inode, sb and group updates are done only once */ | ||
21517 | #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ | ||
21518 | (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0) | ||
21519 | - | ||
21520 | #define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\ | ||
21521 | (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0) | ||
21522 | #else | ||
21523 | @@ -100,9 +99,6 @@ | ||
21524 | #define EXT4_QUOTA_INIT_BLOCKS(sb) 0 | ||
21525 | #define EXT4_QUOTA_DEL_BLOCKS(sb) 0 | ||
21526 | #endif | ||
21527 | -#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) | ||
21528 | -#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) | ||
21529 | -#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) | ||
21530 | |||
21531 | int | ||
21532 | ext4_mark_iloc_dirty(handle_t *handle, | ||
21533 | @@ -258,19 +254,6 @@ static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode) | ||
21534 | return 0; | ||
21535 | } | ||
21536 | |||
21537 | -static inline void ext4_update_inode_fsync_trans(handle_t *handle, | ||
21538 | - struct inode *inode, | ||
21539 | - int datasync) | ||
21540 | -{ | ||
21541 | - struct ext4_inode_info *ei = EXT4_I(inode); | ||
21542 | - | ||
21543 | - if (ext4_handle_valid(handle)) { | ||
21544 | - ei->i_sync_tid = handle->h_transaction->t_tid; | ||
21545 | - if (datasync) | ||
21546 | - ei->i_datasync_tid = handle->h_transaction->t_tid; | ||
21547 | - } | ||
21548 | -} | ||
21549 | - | ||
21550 | /* super.c */ | ||
21551 | int ext4_force_commit(struct super_block *sb); | ||
21552 | |||
21553 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c | ||
21554 | index 8b8bae4..715264b 100644 | ||
21555 | --- a/fs/ext4/extents.c | ||
21556 | +++ b/fs/ext4/extents.c | ||
21557 | @@ -1761,9 +1761,7 @@ int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | ||
21558 | while (block < last && block != EXT_MAX_BLOCK) { | ||
21559 | num = last - block; | ||
21560 | /* find extent for this block */ | ||
21561 | - down_read(&EXT4_I(inode)->i_data_sem); | ||
21562 | path = ext4_ext_find_extent(inode, block, path); | ||
21563 | - up_read(&EXT4_I(inode)->i_data_sem); | ||
21564 | if (IS_ERR(path)) { | ||
21565 | err = PTR_ERR(path); | ||
21566 | path = NULL; | ||
21567 | @@ -2076,7 +2074,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | ||
21568 | ext_debug("free last %u blocks starting %llu\n", num, start); | ||
21569 | for (i = 0; i < num; i++) { | ||
21570 | bh = sb_find_get_block(inode->i_sb, start + i); | ||
21571 | - ext4_forget(handle, metadata, inode, bh, start + i); | ||
21572 | + ext4_forget(handle, 0, inode, bh, start + i); | ||
21573 | } | ||
21574 | ext4_free_blocks(handle, inode, start, num, metadata); | ||
21575 | } else if (from == le32_to_cpu(ex->ee_block) | ||
21576 | @@ -2169,7 +2167,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | ||
21577 | correct_index = 1; | ||
21578 | credits += (ext_depth(inode)) + 1; | ||
21579 | } | ||
21580 | - credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); | ||
21581 | + credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); | ||
21582 | |||
21583 | err = ext4_ext_truncate_extend_restart(handle, inode, credits); | ||
21584 | if (err) | ||
21585 | @@ -3066,8 +3064,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | ||
21586 | if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) { | ||
21587 | ret = ext4_convert_unwritten_extents_dio(handle, inode, | ||
21588 | path); | ||
21589 | - if (ret >= 0) | ||
21590 | - ext4_update_inode_fsync_trans(handle, inode, 1); | ||
21591 | goto out2; | ||
21592 | } | ||
21593 | /* buffered IO case */ | ||
21594 | @@ -3095,8 +3091,6 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | ||
21595 | ret = ext4_ext_convert_to_initialized(handle, inode, | ||
21596 | path, iblock, | ||
21597 | max_blocks); | ||
21598 | - if (ret >= 0) | ||
21599 | - ext4_update_inode_fsync_trans(handle, inode, 1); | ||
21600 | out: | ||
21601 | if (ret <= 0) { | ||
21602 | err = ret; | ||
21603 | @@ -3335,16 +3329,10 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode, | ||
21604 | allocated = ext4_ext_get_actual_len(&newex); | ||
21605 | set_buffer_new(bh_result); | ||
21606 | |||
21607 | - /* | ||
21608 | - * Cache the extent and update transaction to commit on fdatasync only | ||
21609 | - * when it is _not_ an uninitialized extent. | ||
21610 | - */ | ||
21611 | - if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { | ||
21612 | + /* Cache only when it is _not_ an uninitialized extent */ | ||
21613 | + if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) | ||
21614 | ext4_ext_put_in_cache(inode, iblock, allocated, newblock, | ||
21615 | EXT4_EXT_CACHE_EXTENT); | ||
21616 | - ext4_update_inode_fsync_trans(handle, inode, 1); | ||
21617 | - } else | ||
21618 | - ext4_update_inode_fsync_trans(handle, inode, 0); | ||
21619 | out: | ||
21620 | if (allocated > max_blocks) | ||
21621 | allocated = max_blocks; | ||
21622 | @@ -3732,8 +3720,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | ||
21623 | * Walk the extent tree gathering extent information. | ||
21624 | * ext4_ext_fiemap_cb will push extents back to user. | ||
21625 | */ | ||
21626 | + down_read(&EXT4_I(inode)->i_data_sem); | ||
21627 | error = ext4_ext_walk_space(inode, start_blk, len_blks, | ||
21628 | ext4_ext_fiemap_cb, fieinfo); | ||
21629 | + up_read(&EXT4_I(inode)->i_data_sem); | ||
21630 | } | ||
21631 | |||
21632 | return error; | ||
21633 | diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c | ||
21634 | index d6049e4..2b15312 100644 | ||
21635 | --- a/fs/ext4/fsync.c | ||
21636 | +++ b/fs/ext4/fsync.c | ||
21637 | @@ -51,30 +51,25 @@ | ||
21638 | int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) | ||
21639 | { | ||
21640 | struct inode *inode = dentry->d_inode; | ||
21641 | - struct ext4_inode_info *ei = EXT4_I(inode); | ||
21642 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; | ||
21643 | - int ret; | ||
21644 | - tid_t commit_tid; | ||
21645 | + int err, ret = 0; | ||
21646 | |||
21647 | J_ASSERT(ext4_journal_current_handle() == NULL); | ||
21648 | |||
21649 | trace_ext4_sync_file(file, dentry, datasync); | ||
21650 | |||
21651 | - if (inode->i_sb->s_flags & MS_RDONLY) | ||
21652 | - return 0; | ||
21653 | - | ||
21654 | ret = flush_aio_dio_completed_IO(inode); | ||
21655 | if (ret < 0) | ||
21656 | - return ret; | ||
21657 | - | ||
21658 | - if (!journal) | ||
21659 | - return simple_fsync(file, dentry, datasync); | ||
21660 | - | ||
21661 | + goto out; | ||
21662 | /* | ||
21663 | - * data=writeback,ordered: | ||
21664 | + * data=writeback: | ||
21665 | * The caller's filemap_fdatawrite()/wait will sync the data. | ||
21666 | - * Metadata is in the journal, we wait for proper transaction to | ||
21667 | - * commit here. | ||
21668 | + * sync_inode() will sync the metadata | ||
21669 | + * | ||
21670 | + * data=ordered: | ||
21671 | + * The caller's filemap_fdatawrite() will write the data and | ||
21672 | + * sync_inode() will write the inode if it is dirty. Then the caller's | ||
21673 | + * filemap_fdatawait() will wait on the pages. | ||
21674 | * | ||
21675 | * data=journal: | ||
21676 | * filemap_fdatawrite won't do anything (the buffers are clean). | ||
21677 | @@ -84,13 +79,32 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync) | ||
21678 | * (they were dirtied by commit). But that's OK - the blocks are | ||
21679 | * safe in-journal, which is all fsync() needs to ensure. | ||
21680 | */ | ||
21681 | - if (ext4_should_journal_data(inode)) | ||
21682 | - return ext4_force_commit(inode->i_sb); | ||
21683 | + if (ext4_should_journal_data(inode)) { | ||
21684 | + ret = ext4_force_commit(inode->i_sb); | ||
21685 | + goto out; | ||
21686 | + } | ||
21687 | |||
21688 | - commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; | ||
21689 | - if (jbd2_log_start_commit(journal, commit_tid)) | ||
21690 | - jbd2_log_wait_commit(journal, commit_tid); | ||
21691 | - else if (journal->j_flags & JBD2_BARRIER) | ||
21692 | + if (!journal) | ||
21693 | + ret = sync_mapping_buffers(inode->i_mapping); | ||
21694 | + | ||
21695 | + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) | ||
21696 | + goto out; | ||
21697 | + | ||
21698 | + /* | ||
21699 | + * The VFS has written the file data. If the inode is unaltered | ||
21700 | + * then we need not start a commit. | ||
21701 | + */ | ||
21702 | + if (inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC)) { | ||
21703 | + struct writeback_control wbc = { | ||
21704 | + .sync_mode = WB_SYNC_ALL, | ||
21705 | + .nr_to_write = 0, /* sys_fsync did this */ | ||
21706 | + }; | ||
21707 | + err = sync_inode(inode, &wbc); | ||
21708 | + if (ret == 0) | ||
21709 | + ret = err; | ||
21710 | + } | ||
21711 | +out: | ||
21712 | + if (journal && (journal->j_flags & JBD2_BARRIER)) | ||
21713 | blkdev_issue_flush(inode->i_sb->s_bdev, NULL); | ||
21714 | return ret; | ||
21715 | } | ||
21716 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c | ||
21717 | index e233879..2c8caa5 100644 | ||
21718 | --- a/fs/ext4/inode.c | ||
21719 | +++ b/fs/ext4/inode.c | ||
21720 | @@ -1021,12 +1021,10 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode, | ||
21721 | if (!err) | ||
21722 | err = ext4_splice_branch(handle, inode, iblock, | ||
21723 | partial, indirect_blks, count); | ||
21724 | - if (err) | ||
21725 | + else | ||
21726 | goto cleanup; | ||
21727 | |||
21728 | set_buffer_new(bh_result); | ||
21729 | - | ||
21730 | - ext4_update_inode_fsync_trans(handle, inode, 1); | ||
21731 | got_it: | ||
21732 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); | ||
21733 | if (count > blocks_to_boundary) | ||
21734 | @@ -1045,12 +1043,17 @@ out: | ||
21735 | return err; | ||
21736 | } | ||
21737 | |||
21738 | -#ifdef CONFIG_QUOTA | ||
21739 | -qsize_t *ext4_get_reserved_space(struct inode *inode) | ||
21740 | +qsize_t ext4_get_reserved_space(struct inode *inode) | ||
21741 | { | ||
21742 | - return &EXT4_I(inode)->i_reserved_quota; | ||
21743 | + unsigned long long total; | ||
21744 | + | ||
21745 | + spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21746 | + total = EXT4_I(inode)->i_reserved_data_blocks + | ||
21747 | + EXT4_I(inode)->i_reserved_meta_blocks; | ||
21748 | + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21749 | + | ||
21750 | + return total; | ||
21751 | } | ||
21752 | -#endif | ||
21753 | /* | ||
21754 | * Calculate the number of metadata blocks need to reserve | ||
21755 | * to allocate @blocks for non extent file based file | ||
21756 | @@ -1531,16 +1534,6 @@ static int do_journal_get_write_access(handle_t *handle, | ||
21757 | return ext4_journal_get_write_access(handle, bh); | ||
21758 | } | ||
21759 | |||
21760 | -/* | ||
21761 | - * Truncate blocks that were not used by write. We have to truncate the | ||
21762 | - * pagecache as well so that corresponding buffers get properly unmapped. | ||
21763 | - */ | ||
21764 | -static void ext4_truncate_failed_write(struct inode *inode) | ||
21765 | -{ | ||
21766 | - truncate_inode_pages(inode->i_mapping, inode->i_size); | ||
21767 | - ext4_truncate(inode); | ||
21768 | -} | ||
21769 | - | ||
21770 | static int ext4_write_begin(struct file *file, struct address_space *mapping, | ||
21771 | loff_t pos, unsigned len, unsigned flags, | ||
21772 | struct page **pagep, void **fsdata) | ||
21773 | @@ -1606,7 +1599,7 @@ retry: | ||
21774 | |||
21775 | ext4_journal_stop(handle); | ||
21776 | if (pos + len > inode->i_size) { | ||
21777 | - ext4_truncate_failed_write(inode); | ||
21778 | + ext4_truncate(inode); | ||
21779 | /* | ||
21780 | * If truncate failed early the inode might | ||
21781 | * still be on the orphan list; we need to | ||
21782 | @@ -1716,7 +1709,7 @@ static int ext4_ordered_write_end(struct file *file, | ||
21783 | ret = ret2; | ||
21784 | |||
21785 | if (pos + len > inode->i_size) { | ||
21786 | - ext4_truncate_failed_write(inode); | ||
21787 | + ext4_truncate(inode); | ||
21788 | /* | ||
21789 | * If truncate failed early the inode might still be | ||
21790 | * on the orphan list; we need to make sure the inode | ||
21791 | @@ -1758,7 +1751,7 @@ static int ext4_writeback_write_end(struct file *file, | ||
21792 | ret = ret2; | ||
21793 | |||
21794 | if (pos + len > inode->i_size) { | ||
21795 | - ext4_truncate_failed_write(inode); | ||
21796 | + ext4_truncate(inode); | ||
21797 | /* | ||
21798 | * If truncate failed early the inode might still be | ||
21799 | * on the orphan list; we need to make sure the inode | ||
21800 | @@ -1821,7 +1814,7 @@ static int ext4_journalled_write_end(struct file *file, | ||
21801 | if (!ret) | ||
21802 | ret = ret2; | ||
21803 | if (pos + len > inode->i_size) { | ||
21804 | - ext4_truncate_failed_write(inode); | ||
21805 | + ext4_truncate(inode); | ||
21806 | /* | ||
21807 | * If truncate failed early the inode might still be | ||
21808 | * on the orphan list; we need to make sure the inode | ||
21809 | @@ -1853,17 +1846,19 @@ repeat: | ||
21810 | |||
21811 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; | ||
21812 | total = md_needed + nrblocks; | ||
21813 | - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21814 | |||
21815 | /* | ||
21816 | * Make quota reservation here to prevent quota overflow | ||
21817 | * later. Real quota accounting is done at pages writeout | ||
21818 | * time. | ||
21819 | */ | ||
21820 | - if (vfs_dq_reserve_block(inode, total)) | ||
21821 | + if (vfs_dq_reserve_block(inode, total)) { | ||
21822 | + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21823 | return -EDQUOT; | ||
21824 | + } | ||
21825 | |||
21826 | if (ext4_claim_free_blocks(sbi, total)) { | ||
21827 | + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21828 | vfs_dq_release_reservation_block(inode, total); | ||
21829 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
21830 | yield(); | ||
21831 | @@ -1871,11 +1866,10 @@ repeat: | ||
21832 | } | ||
21833 | return -ENOSPC; | ||
21834 | } | ||
21835 | - spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21836 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; | ||
21837 | - EXT4_I(inode)->i_reserved_meta_blocks += md_needed; | ||
21838 | - spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21839 | + EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; | ||
21840 | |||
21841 | + spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
21842 | return 0; /* success */ | ||
21843 | } | ||
21844 | |||
21845 | @@ -2794,7 +2788,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode) | ||
21846 | * number of contiguous block. So we will limit | ||
21847 | * number of contiguous block to a sane value | ||
21848 | */ | ||
21849 | - if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) && | ||
21850 | + if (!(inode->i_flags & EXT4_EXTENTS_FL) && | ||
21851 | (max_blocks > EXT4_MAX_TRANS_DATA)) | ||
21852 | max_blocks = EXT4_MAX_TRANS_DATA; | ||
21853 | |||
21854 | @@ -3097,7 +3091,7 @@ retry: | ||
21855 | * i_size_read because we hold i_mutex. | ||
21856 | */ | ||
21857 | if (pos + len > inode->i_size) | ||
21858 | - ext4_truncate_failed_write(inode); | ||
21859 | + ext4_truncate(inode); | ||
21860 | } | ||
21861 | |||
21862 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | ||
21863 | @@ -4126,8 +4120,6 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | ||
21864 | __le32 *last) | ||
21865 | { | ||
21866 | __le32 *p; | ||
21867 | - int is_metadata = S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode); | ||
21868 | - | ||
21869 | if (try_to_extend_transaction(handle, inode)) { | ||
21870 | if (bh) { | ||
21871 | BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); | ||
21872 | @@ -4158,11 +4150,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode, | ||
21873 | |||
21874 | *p = 0; | ||
21875 | tbh = sb_find_get_block(inode->i_sb, nr); | ||
21876 | - ext4_forget(handle, is_metadata, inode, tbh, nr); | ||
21877 | + ext4_forget(handle, 0, inode, tbh, nr); | ||
21878 | } | ||
21879 | } | ||
21880 | |||
21881 | - ext4_free_blocks(handle, inode, block_to_free, count, is_metadata); | ||
21882 | + ext4_free_blocks(handle, inode, block_to_free, count, 0); | ||
21883 | } | ||
21884 | |||
21885 | /** | ||
21886 | @@ -4789,8 +4781,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21887 | struct ext4_iloc iloc; | ||
21888 | struct ext4_inode *raw_inode; | ||
21889 | struct ext4_inode_info *ei; | ||
21890 | + struct buffer_head *bh; | ||
21891 | struct inode *inode; | ||
21892 | - journal_t *journal = EXT4_SB(sb)->s_journal; | ||
21893 | long ret; | ||
21894 | int block; | ||
21895 | |||
21896 | @@ -4801,11 +4793,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21897 | return inode; | ||
21898 | |||
21899 | ei = EXT4_I(inode); | ||
21900 | - iloc.bh = 0; | ||
21901 | |||
21902 | ret = __ext4_get_inode_loc(inode, &iloc, 0); | ||
21903 | if (ret < 0) | ||
21904 | goto bad_inode; | ||
21905 | + bh = iloc.bh; | ||
21906 | raw_inode = ext4_raw_inode(&iloc); | ||
21907 | inode->i_mode = le16_to_cpu(raw_inode->i_mode); | ||
21908 | inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); | ||
21909 | @@ -4828,6 +4820,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21910 | if (inode->i_mode == 0 || | ||
21911 | !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) { | ||
21912 | /* this inode is deleted */ | ||
21913 | + brelse(bh); | ||
21914 | ret = -ESTALE; | ||
21915 | goto bad_inode; | ||
21916 | } | ||
21917 | @@ -4844,9 +4837,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21918 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; | ||
21919 | inode->i_size = ext4_isize(raw_inode); | ||
21920 | ei->i_disksize = inode->i_size; | ||
21921 | -#ifdef CONFIG_QUOTA | ||
21922 | - ei->i_reserved_quota = 0; | ||
21923 | -#endif | ||
21924 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); | ||
21925 | ei->i_block_group = iloc.block_group; | ||
21926 | ei->i_last_alloc_group = ~0; | ||
21927 | @@ -4858,35 +4848,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21928 | ei->i_data[block] = raw_inode->i_block[block]; | ||
21929 | INIT_LIST_HEAD(&ei->i_orphan); | ||
21930 | |||
21931 | - /* | ||
21932 | - * Set transaction id's of transactions that have to be committed | ||
21933 | - * to finish f[data]sync. We set them to currently running transaction | ||
21934 | - * as we cannot be sure that the inode or some of its metadata isn't | ||
21935 | - * part of the transaction - the inode could have been reclaimed and | ||
21936 | - * now it is reread from disk. | ||
21937 | - */ | ||
21938 | - if (journal) { | ||
21939 | - transaction_t *transaction; | ||
21940 | - tid_t tid; | ||
21941 | - | ||
21942 | - spin_lock(&journal->j_state_lock); | ||
21943 | - if (journal->j_running_transaction) | ||
21944 | - transaction = journal->j_running_transaction; | ||
21945 | - else | ||
21946 | - transaction = journal->j_committing_transaction; | ||
21947 | - if (transaction) | ||
21948 | - tid = transaction->t_tid; | ||
21949 | - else | ||
21950 | - tid = journal->j_commit_sequence; | ||
21951 | - spin_unlock(&journal->j_state_lock); | ||
21952 | - ei->i_sync_tid = tid; | ||
21953 | - ei->i_datasync_tid = tid; | ||
21954 | - } | ||
21955 | - | ||
21956 | if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { | ||
21957 | ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize); | ||
21958 | if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > | ||
21959 | EXT4_INODE_SIZE(inode->i_sb)) { | ||
21960 | + brelse(bh); | ||
21961 | ret = -EIO; | ||
21962 | goto bad_inode; | ||
21963 | } | ||
21964 | @@ -4918,7 +4884,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21965 | |||
21966 | ret = 0; | ||
21967 | if (ei->i_file_acl && | ||
21968 | - !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { | ||
21969 | + ((ei->i_file_acl < | ||
21970 | + (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + | ||
21971 | + EXT4_SB(sb)->s_gdb_count)) || | ||
21972 | + (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) { | ||
21973 | ext4_error(sb, __func__, | ||
21974 | "bad extended attribute block %llu in inode #%lu", | ||
21975 | ei->i_file_acl, inode->i_ino); | ||
21976 | @@ -4936,8 +4905,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21977 | /* Validate block references which are part of inode */ | ||
21978 | ret = ext4_check_inode_blockref(inode); | ||
21979 | } | ||
21980 | - if (ret) | ||
21981 | + if (ret) { | ||
21982 | + brelse(bh); | ||
21983 | goto bad_inode; | ||
21984 | + } | ||
21985 | |||
21986 | if (S_ISREG(inode->i_mode)) { | ||
21987 | inode->i_op = &ext4_file_inode_operations; | ||
21988 | @@ -4965,6 +4936,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21989 | init_special_inode(inode, inode->i_mode, | ||
21990 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | ||
21991 | } else { | ||
21992 | + brelse(bh); | ||
21993 | ret = -EIO; | ||
21994 | ext4_error(inode->i_sb, __func__, | ||
21995 | "bogus i_mode (%o) for inode=%lu", | ||
21996 | @@ -4977,7 +4949,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | ||
21997 | return inode; | ||
21998 | |||
21999 | bad_inode: | ||
22000 | - brelse(iloc.bh); | ||
22001 | iget_failed(inode); | ||
22002 | return ERR_PTR(ret); | ||
22003 | } | ||
22004 | @@ -5137,7 +5108,6 @@ static int ext4_do_update_inode(handle_t *handle, | ||
22005 | err = rc; | ||
22006 | ei->i_state &= ~EXT4_STATE_NEW; | ||
22007 | |||
22008 | - ext4_update_inode_fsync_trans(handle, inode, 0); | ||
22009 | out_brelse: | ||
22010 | brelse(bh); | ||
22011 | ext4_std_error(inode->i_sb, err); | ||
22012 | @@ -5257,8 +5227,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | ||
22013 | |||
22014 | /* (user+group)*(old+new) structure, inode write (sb, | ||
22015 | * inode block, ? - but truncate inode update has it) */ | ||
22016 | - handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ | ||
22017 | - EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3); | ||
22018 | + handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+ | ||
22019 | + EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3); | ||
22020 | if (IS_ERR(handle)) { | ||
22021 | error = PTR_ERR(handle); | ||
22022 | goto err_out; | ||
22023 | diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c | ||
22024 | index b63d193..c1cdf61 100644 | ||
22025 | --- a/fs/ext4/ioctl.c | ||
22026 | +++ b/fs/ext4/ioctl.c | ||
22027 | @@ -221,38 +221,31 @@ setversion_out: | ||
22028 | struct file *donor_filp; | ||
22029 | int err; | ||
22030 | |||
22031 | - if (!(filp->f_mode & FMODE_READ) || | ||
22032 | - !(filp->f_mode & FMODE_WRITE)) | ||
22033 | - return -EBADF; | ||
22034 | - | ||
22035 | if (copy_from_user(&me, | ||
22036 | (struct move_extent __user *)arg, sizeof(me))) | ||
22037 | return -EFAULT; | ||
22038 | - me.moved_len = 0; | ||
22039 | |||
22040 | donor_filp = fget(me.donor_fd); | ||
22041 | if (!donor_filp) | ||
22042 | return -EBADF; | ||
22043 | |||
22044 | - if (!(donor_filp->f_mode & FMODE_WRITE)) { | ||
22045 | - err = -EBADF; | ||
22046 | - goto mext_out; | ||
22047 | + if (!capable(CAP_DAC_OVERRIDE)) { | ||
22048 | + if ((current->real_cred->fsuid != inode->i_uid) || | ||
22049 | + !(inode->i_mode & S_IRUSR) || | ||
22050 | + !(donor_filp->f_dentry->d_inode->i_mode & | ||
22051 | + S_IRUSR)) { | ||
22052 | + fput(donor_filp); | ||
22053 | + return -EACCES; | ||
22054 | + } | ||
22055 | } | ||
22056 | |||
22057 | - err = mnt_want_write(filp->f_path.mnt); | ||
22058 | - if (err) | ||
22059 | - goto mext_out; | ||
22060 | - | ||
22061 | err = ext4_move_extents(filp, donor_filp, me.orig_start, | ||
22062 | me.donor_start, me.len, &me.moved_len); | ||
22063 | - mnt_drop_write(filp->f_path.mnt); | ||
22064 | - if (me.moved_len > 0) | ||
22065 | - file_remove_suid(donor_filp); | ||
22066 | + fput(donor_filp); | ||
22067 | |||
22068 | if (copy_to_user((struct move_extent *)arg, &me, sizeof(me))) | ||
22069 | - err = -EFAULT; | ||
22070 | -mext_out: | ||
22071 | - fput(donor_filp); | ||
22072 | + return -EFAULT; | ||
22073 | + | ||
22074 | return err; | ||
22075 | } | ||
22076 | |||
22077 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c | ||
22078 | index 7d71148..bba1282 100644 | ||
22079 | --- a/fs/ext4/mballoc.c | ||
22080 | +++ b/fs/ext4/mballoc.c | ||
22081 | @@ -2529,6 +2529,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | ||
22082 | struct ext4_group_info *db; | ||
22083 | int err, count = 0, count2 = 0; | ||
22084 | struct ext4_free_data *entry; | ||
22085 | + ext4_fsblk_t discard_block; | ||
22086 | struct list_head *l, *ltmp; | ||
22087 | |||
22088 | list_for_each_safe(l, ltmp, &txn->t_private_list) { | ||
22089 | @@ -2558,19 +2559,13 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | ||
22090 | page_cache_release(e4b.bd_bitmap_page); | ||
22091 | } | ||
22092 | ext4_unlock_group(sb, entry->group); | ||
22093 | - if (test_opt(sb, DISCARD)) { | ||
22094 | - ext4_fsblk_t discard_block; | ||
22095 | - struct ext4_super_block *es = EXT4_SB(sb)->s_es; | ||
22096 | - | ||
22097 | - discard_block = (ext4_fsblk_t)entry->group * | ||
22098 | - EXT4_BLOCKS_PER_GROUP(sb) | ||
22099 | - + entry->start_blk | ||
22100 | - + le32_to_cpu(es->s_first_data_block); | ||
22101 | - trace_ext4_discard_blocks(sb, | ||
22102 | - (unsigned long long)discard_block, | ||
22103 | - entry->count); | ||
22104 | - sb_issue_discard(sb, discard_block, entry->count); | ||
22105 | - } | ||
22106 | + discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb) | ||
22107 | + + entry->start_blk | ||
22108 | + + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | ||
22109 | + trace_ext4_discard_blocks(sb, (unsigned long long)discard_block, | ||
22110 | + entry->count); | ||
22111 | + sb_issue_discard(sb, discard_block, entry->count); | ||
22112 | + | ||
22113 | kmem_cache_free(ext4_free_ext_cachep, entry); | ||
22114 | ext4_mb_release_desc(&e4b); | ||
22115 | } | ||
22116 | @@ -3011,24 +3006,6 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) | ||
22117 | } | ||
22118 | |||
22119 | /* | ||
22120 | - * Called on failure; free up any blocks from the inode PA for this | ||
22121 | - * context. We don't need this for MB_GROUP_PA because we only change | ||
22122 | - * pa_free in ext4_mb_release_context(), but on failure, we've already | ||
22123 | - * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. | ||
22124 | - */ | ||
22125 | -static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) | ||
22126 | -{ | ||
22127 | - struct ext4_prealloc_space *pa = ac->ac_pa; | ||
22128 | - int len; | ||
22129 | - | ||
22130 | - if (pa && pa->pa_type == MB_INODE_PA) { | ||
22131 | - len = ac->ac_b_ex.fe_len; | ||
22132 | - pa->pa_free += len; | ||
22133 | - } | ||
22134 | - | ||
22135 | -} | ||
22136 | - | ||
22137 | -/* | ||
22138 | * use blocks preallocated to inode | ||
22139 | */ | ||
22140 | static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, | ||
22141 | @@ -4313,7 +4290,6 @@ repeat: | ||
22142 | ac->ac_status = AC_STATUS_CONTINUE; | ||
22143 | goto repeat; | ||
22144 | } else if (*errp) { | ||
22145 | - ext4_discard_allocated_blocks(ac); | ||
22146 | ac->ac_b_ex.fe_len = 0; | ||
22147 | ar->len = 0; | ||
22148 | ext4_mb_show_ac(ac); | ||
22149 | diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c | ||
22150 | index 8646149..a93d5b8 100644 | ||
22151 | --- a/fs/ext4/migrate.c | ||
22152 | +++ b/fs/ext4/migrate.c | ||
22153 | @@ -238,7 +238,7 @@ static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) | ||
22154 | * So allocate a credit of 3. We may update | ||
22155 | * quota (user and group). | ||
22156 | */ | ||
22157 | - needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); | ||
22158 | + needed = 3 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb); | ||
22159 | |||
22160 | if (ext4_journal_extend(handle, needed) != 0) | ||
22161 | retval = ext4_journal_restart(handle, needed); | ||
22162 | @@ -477,7 +477,7 @@ int ext4_ext_migrate(struct inode *inode) | ||
22163 | handle = ext4_journal_start(inode, | ||
22164 | EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + | ||
22165 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | ||
22166 | - EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) | ||
22167 | + 2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb) | ||
22168 | + 1); | ||
22169 | if (IS_ERR(handle)) { | ||
22170 | retval = PTR_ERR(handle); | ||
22171 | diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c | ||
22172 | index f5b03a1..25b6b14 100644 | ||
22173 | --- a/fs/ext4/move_extent.c | ||
22174 | +++ b/fs/ext4/move_extent.c | ||
22175 | @@ -77,14 +77,12 @@ static int | ||
22176 | mext_next_extent(struct inode *inode, struct ext4_ext_path *path, | ||
22177 | struct ext4_extent **extent) | ||
22178 | { | ||
22179 | - struct ext4_extent_header *eh; | ||
22180 | int ppos, leaf_ppos = path->p_depth; | ||
22181 | |||
22182 | ppos = leaf_ppos; | ||
22183 | if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { | ||
22184 | /* leaf block */ | ||
22185 | *extent = ++path[ppos].p_ext; | ||
22186 | - path[ppos].p_block = ext_pblock(path[ppos].p_ext); | ||
22187 | return 0; | ||
22188 | } | ||
22189 | |||
22190 | @@ -121,18 +119,9 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path, | ||
22191 | ext_block_hdr(path[cur_ppos+1].p_bh); | ||
22192 | } | ||
22193 | |||
22194 | - path[leaf_ppos].p_ext = *extent = NULL; | ||
22195 | - | ||
22196 | - eh = path[leaf_ppos].p_hdr; | ||
22197 | - if (le16_to_cpu(eh->eh_entries) == 0) | ||
22198 | - /* empty leaf is found */ | ||
22199 | - return -ENODATA; | ||
22200 | - | ||
22201 | /* leaf block */ | ||
22202 | path[leaf_ppos].p_ext = *extent = | ||
22203 | EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); | ||
22204 | - path[leaf_ppos].p_block = | ||
22205 | - ext_pblock(path[leaf_ppos].p_ext); | ||
22206 | return 0; | ||
22207 | } | ||
22208 | } | ||
22209 | @@ -166,15 +155,40 @@ mext_check_null_inode(struct inode *inode1, struct inode *inode2, | ||
22210 | } | ||
22211 | |||
22212 | /** | ||
22213 | - * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem | ||
22214 | + * mext_double_down_read - Acquire two inodes' read semaphore | ||
22215 | + * | ||
22216 | + * @orig_inode: original inode structure | ||
22217 | + * @donor_inode: donor inode structure | ||
22218 | + * Acquire read semaphore of the two inodes (orig and donor) by i_ino order. | ||
22219 | + */ | ||
22220 | +static void | ||
22221 | +mext_double_down_read(struct inode *orig_inode, struct inode *donor_inode) | ||
22222 | +{ | ||
22223 | + struct inode *first = orig_inode, *second = donor_inode; | ||
22224 | + | ||
22225 | + /* | ||
22226 | + * Use the inode number to provide the stable locking order instead | ||
22227 | + * of its address, because the C language doesn't guarantee you can | ||
22228 | + * compare pointers that don't come from the same array. | ||
22229 | + */ | ||
22230 | + if (donor_inode->i_ino < orig_inode->i_ino) { | ||
22231 | + first = donor_inode; | ||
22232 | + second = orig_inode; | ||
22233 | + } | ||
22234 | + | ||
22235 | + down_read(&EXT4_I(first)->i_data_sem); | ||
22236 | + down_read(&EXT4_I(second)->i_data_sem); | ||
22237 | +} | ||
22238 | + | ||
22239 | +/** | ||
22240 | + * mext_double_down_write - Acquire two inodes' write semaphore | ||
22241 | * | ||
22242 | * @orig_inode: original inode structure | ||
22243 | * @donor_inode: donor inode structure | ||
22244 | - * Acquire write lock of i_data_sem of the two inodes (orig and donor) by | ||
22245 | - * i_ino order. | ||
22246 | + * Acquire write semaphore of the two inodes (orig and donor) by i_ino order. | ||
22247 | */ | ||
22248 | static void | ||
22249 | -double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) | ||
22250 | +mext_double_down_write(struct inode *orig_inode, struct inode *donor_inode) | ||
22251 | { | ||
22252 | struct inode *first = orig_inode, *second = donor_inode; | ||
22253 | |||
22254 | @@ -189,18 +203,32 @@ double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) | ||
22255 | } | ||
22256 | |||
22257 | down_write(&EXT4_I(first)->i_data_sem); | ||
22258 | - down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); | ||
22259 | + down_write(&EXT4_I(second)->i_data_sem); | ||
22260 | } | ||
22261 | |||
22262 | /** | ||
22263 | - * double_up_write_data_sem - Release two inodes' write lock of i_data_sem | ||
22264 | + * mext_double_up_read - Release two inodes' read semaphore | ||
22265 | * | ||
22266 | * @orig_inode: original inode structure to be released its lock first | ||
22267 | * @donor_inode: donor inode structure to be released its lock second | ||
22268 | - * Release write lock of i_data_sem of two inodes (orig and donor). | ||
22269 | + * Release read semaphore of two inodes (orig and donor). | ||
22270 | */ | ||
22271 | static void | ||
22272 | -double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) | ||
22273 | +mext_double_up_read(struct inode *orig_inode, struct inode *donor_inode) | ||
22274 | +{ | ||
22275 | + up_read(&EXT4_I(orig_inode)->i_data_sem); | ||
22276 | + up_read(&EXT4_I(donor_inode)->i_data_sem); | ||
22277 | +} | ||
22278 | + | ||
22279 | +/** | ||
22280 | + * mext_double_up_write - Release two inodes' write semaphore | ||
22281 | + * | ||
22282 | + * @orig_inode: original inode structure to be released its lock first | ||
22283 | + * @donor_inode: donor inode structure to be released its lock second | ||
22284 | + * Release write semaphore of two inodes (orig and donor). | ||
22285 | + */ | ||
22286 | +static void | ||
22287 | +mext_double_up_write(struct inode *orig_inode, struct inode *donor_inode) | ||
22288 | { | ||
22289 | up_write(&EXT4_I(orig_inode)->i_data_sem); | ||
22290 | up_write(&EXT4_I(donor_inode)->i_data_sem); | ||
22291 | @@ -633,7 +661,6 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, | ||
22292 | * @donor_inode: donor inode | ||
22293 | * @from: block offset of orig_inode | ||
22294 | * @count: block count to be replaced | ||
22295 | - * @err: pointer to save return value | ||
22296 | * | ||
22297 | * Replace original inode extents and donor inode extents page by page. | ||
22298 | * We implement this replacement in the following three steps: | ||
22299 | @@ -644,33 +671,33 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext, | ||
22300 | * 3. Change the block information of donor inode to point at the saved | ||
22301 | * original inode blocks in the dummy extents. | ||
22302 | * | ||
22303 | - * Return replaced block count. | ||
22304 | + * Return 0 on success, or a negative error value on failure. | ||
22305 | */ | ||
22306 | static int | ||
22307 | mext_replace_branches(handle_t *handle, struct inode *orig_inode, | ||
22308 | struct inode *donor_inode, ext4_lblk_t from, | ||
22309 | - ext4_lblk_t count, int *err) | ||
22310 | + ext4_lblk_t count) | ||
22311 | { | ||
22312 | struct ext4_ext_path *orig_path = NULL; | ||
22313 | struct ext4_ext_path *donor_path = NULL; | ||
22314 | struct ext4_extent *oext, *dext; | ||
22315 | struct ext4_extent tmp_dext, tmp_oext; | ||
22316 | ext4_lblk_t orig_off = from, donor_off = from; | ||
22317 | + int err = 0; | ||
22318 | int depth; | ||
22319 | int replaced_count = 0; | ||
22320 | int dext_alen; | ||
22321 | |||
22322 | - /* Protect extent trees against block allocations via delalloc */ | ||
22323 | - double_down_write_data_sem(orig_inode, donor_inode); | ||
22324 | + mext_double_down_write(orig_inode, donor_inode); | ||
22325 | |||
22326 | /* Get the original extent for the block "orig_off" */ | ||
22327 | - *err = get_ext_path(orig_inode, orig_off, &orig_path); | ||
22328 | - if (*err) | ||
22329 | + err = get_ext_path(orig_inode, orig_off, &orig_path); | ||
22330 | + if (err) | ||
22331 | goto out; | ||
22332 | |||
22333 | /* Get the donor extent for the head */ | ||
22334 | - *err = get_ext_path(donor_inode, donor_off, &donor_path); | ||
22335 | - if (*err) | ||
22336 | + err = get_ext_path(donor_inode, donor_off, &donor_path); | ||
22337 | + if (err) | ||
22338 | goto out; | ||
22339 | depth = ext_depth(orig_inode); | ||
22340 | oext = orig_path[depth].p_ext; | ||
22341 | @@ -680,9 +707,9 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | ||
22342 | dext = donor_path[depth].p_ext; | ||
22343 | tmp_dext = *dext; | ||
22344 | |||
22345 | - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, | ||
22346 | + err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, | ||
22347 | donor_off, count); | ||
22348 | - if (*err) | ||
22349 | + if (err) | ||
22350 | goto out; | ||
22351 | |||
22352 | /* Loop for the donor extents */ | ||
22353 | @@ -691,7 +718,7 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | ||
22354 | if (!dext) { | ||
22355 | ext4_error(donor_inode->i_sb, __func__, | ||
22356 | "The extent for donor must be found"); | ||
22357 | - *err = -EIO; | ||
22358 | + err = -EIO; | ||
22359 | goto out; | ||
22360 | } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { | ||
22361 | ext4_error(donor_inode->i_sb, __func__, | ||
22362 | @@ -699,20 +726,20 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | ||
22363 | "extent(%u) should be equal", | ||
22364 | donor_off, | ||
22365 | le32_to_cpu(tmp_dext.ee_block)); | ||
22366 | - *err = -EIO; | ||
22367 | + err = -EIO; | ||
22368 | goto out; | ||
22369 | } | ||
22370 | |||
22371 | /* Set donor extent to orig extent */ | ||
22372 | - *err = mext_leaf_block(handle, orig_inode, | ||
22373 | + err = mext_leaf_block(handle, orig_inode, | ||
22374 | orig_path, &tmp_dext, &orig_off); | ||
22375 | - if (*err) | ||
22376 | + if (err < 0) | ||
22377 | goto out; | ||
22378 | |||
22379 | /* Set orig extent to donor extent */ | ||
22380 | - *err = mext_leaf_block(handle, donor_inode, | ||
22381 | + err = mext_leaf_block(handle, donor_inode, | ||
22382 | donor_path, &tmp_oext, &donor_off); | ||
22383 | - if (*err) | ||
22384 | + if (err < 0) | ||
22385 | goto out; | ||
22386 | |||
22387 | dext_alen = ext4_ext_get_actual_len(&tmp_dext); | ||
22388 | @@ -726,25 +753,35 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | ||
22389 | |||
22390 | if (orig_path) | ||
22391 | ext4_ext_drop_refs(orig_path); | ||
22392 | - *err = get_ext_path(orig_inode, orig_off, &orig_path); | ||
22393 | - if (*err) | ||
22394 | + err = get_ext_path(orig_inode, orig_off, &orig_path); | ||
22395 | + if (err) | ||
22396 | goto out; | ||
22397 | depth = ext_depth(orig_inode); | ||
22398 | oext = orig_path[depth].p_ext; | ||
22399 | + if (le32_to_cpu(oext->ee_block) + | ||
22400 | + ext4_ext_get_actual_len(oext) <= orig_off) { | ||
22401 | + err = 0; | ||
22402 | + goto out; | ||
22403 | + } | ||
22404 | tmp_oext = *oext; | ||
22405 | |||
22406 | if (donor_path) | ||
22407 | ext4_ext_drop_refs(donor_path); | ||
22408 | - *err = get_ext_path(donor_inode, donor_off, &donor_path); | ||
22409 | - if (*err) | ||
22410 | + err = get_ext_path(donor_inode, donor_off, &donor_path); | ||
22411 | + if (err) | ||
22412 | goto out; | ||
22413 | depth = ext_depth(donor_inode); | ||
22414 | dext = donor_path[depth].p_ext; | ||
22415 | + if (le32_to_cpu(dext->ee_block) + | ||
22416 | + ext4_ext_get_actual_len(dext) <= donor_off) { | ||
22417 | + err = 0; | ||
22418 | + goto out; | ||
22419 | + } | ||
22420 | tmp_dext = *dext; | ||
22421 | |||
22422 | - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, | ||
22423 | + err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, | ||
22424 | donor_off, count - replaced_count); | ||
22425 | - if (*err) | ||
22426 | + if (err) | ||
22427 | goto out; | ||
22428 | } | ||
22429 | |||
22430 | @@ -758,12 +795,8 @@ out: | ||
22431 | kfree(donor_path); | ||
22432 | } | ||
22433 | |||
22434 | - ext4_ext_invalidate_cache(orig_inode); | ||
22435 | - ext4_ext_invalidate_cache(donor_inode); | ||
22436 | - | ||
22437 | - double_up_write_data_sem(orig_inode, donor_inode); | ||
22438 | - | ||
22439 | - return replaced_count; | ||
22440 | + mext_double_up_write(orig_inode, donor_inode); | ||
22441 | + return err; | ||
22442 | } | ||
22443 | |||
22444 | /** | ||
22445 | @@ -775,17 +808,16 @@ out: | ||
22446 | * @data_offset_in_page: block index where data swapping starts | ||
22447 | * @block_len_in_page: the number of blocks to be swapped | ||
22448 | * @uninit: orig extent is uninitialized or not | ||
22449 | - * @err: pointer to save return value | ||
22450 | * | ||
22451 | * Save the data in original inode blocks and replace original inode extents | ||
22452 | * with donor inode extents by calling mext_replace_branches(). | ||
22453 | - * Finally, write out the saved data in new original inode blocks. Return | ||
22454 | - * replaced block count. | ||
22455 | + * Finally, write out the saved data in new original inode blocks. Return 0 | ||
22456 | + * on success, or a negative error value on failure. | ||
22457 | */ | ||
22458 | static int | ||
22459 | move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22460 | pgoff_t orig_page_offset, int data_offset_in_page, | ||
22461 | - int block_len_in_page, int uninit, int *err) | ||
22462 | + int block_len_in_page, int uninit) | ||
22463 | { | ||
22464 | struct inode *orig_inode = o_filp->f_dentry->d_inode; | ||
22465 | struct address_space *mapping = orig_inode->i_mapping; | ||
22466 | @@ -797,11 +829,9 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22467 | long long offs = orig_page_offset << PAGE_CACHE_SHIFT; | ||
22468 | unsigned long blocksize = orig_inode->i_sb->s_blocksize; | ||
22469 | unsigned int w_flags = 0; | ||
22470 | - unsigned int tmp_data_size, data_size, replaced_size; | ||
22471 | + unsigned int tmp_data_len, data_len; | ||
22472 | void *fsdata; | ||
22473 | - int i, jblocks; | ||
22474 | - int err2 = 0; | ||
22475 | - int replaced_count = 0; | ||
22476 | + int ret, i, jblocks; | ||
22477 | int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; | ||
22478 | |||
22479 | /* | ||
22480 | @@ -811,8 +841,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22481 | jblocks = ext4_writepage_trans_blocks(orig_inode) * 2; | ||
22482 | handle = ext4_journal_start(orig_inode, jblocks); | ||
22483 | if (IS_ERR(handle)) { | ||
22484 | - *err = PTR_ERR(handle); | ||
22485 | - return 0; | ||
22486 | + ret = PTR_ERR(handle); | ||
22487 | + return ret; | ||
22488 | } | ||
22489 | |||
22490 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
22491 | @@ -828,36 +858,39 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22492 | * Just swap data blocks between orig and donor. | ||
22493 | */ | ||
22494 | if (uninit) { | ||
22495 | - replaced_count = mext_replace_branches(handle, orig_inode, | ||
22496 | - donor_inode, orig_blk_offset, | ||
22497 | - block_len_in_page, err); | ||
22498 | + ret = mext_replace_branches(handle, orig_inode, | ||
22499 | + donor_inode, orig_blk_offset, | ||
22500 | + block_len_in_page); | ||
22501 | + | ||
22502 | + /* Clear the inode cache not to refer to the old data */ | ||
22503 | + ext4_ext_invalidate_cache(orig_inode); | ||
22504 | + ext4_ext_invalidate_cache(donor_inode); | ||
22505 | goto out2; | ||
22506 | } | ||
22507 | |||
22508 | offs = (long long)orig_blk_offset << orig_inode->i_blkbits; | ||
22509 | |||
22510 | - /* Calculate data_size */ | ||
22511 | + /* Calculate data_len */ | ||
22512 | if ((orig_blk_offset + block_len_in_page - 1) == | ||
22513 | ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { | ||
22514 | /* Replace the last block */ | ||
22515 | - tmp_data_size = orig_inode->i_size & (blocksize - 1); | ||
22516 | + tmp_data_len = orig_inode->i_size & (blocksize - 1); | ||
22517 | /* | ||
22518 | - * If data_size equal zero, it shows data_size is multiples of | ||
22519 | + * If data_len equal zero, it shows data_len is multiples of | ||
22520 | * blocksize. So we set appropriate value. | ||
22521 | */ | ||
22522 | - if (tmp_data_size == 0) | ||
22523 | - tmp_data_size = blocksize; | ||
22524 | + if (tmp_data_len == 0) | ||
22525 | + tmp_data_len = blocksize; | ||
22526 | |||
22527 | - data_size = tmp_data_size + | ||
22528 | + data_len = tmp_data_len + | ||
22529 | ((block_len_in_page - 1) << orig_inode->i_blkbits); | ||
22530 | - } else | ||
22531 | - data_size = block_len_in_page << orig_inode->i_blkbits; | ||
22532 | - | ||
22533 | - replaced_size = data_size; | ||
22534 | + } else { | ||
22535 | + data_len = block_len_in_page << orig_inode->i_blkbits; | ||
22536 | + } | ||
22537 | |||
22538 | - *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags, | ||
22539 | + ret = a_ops->write_begin(o_filp, mapping, offs, data_len, w_flags, | ||
22540 | &page, &fsdata); | ||
22541 | - if (unlikely(*err < 0)) | ||
22542 | + if (unlikely(ret < 0)) | ||
22543 | goto out; | ||
22544 | |||
22545 | if (!PageUptodate(page)) { | ||
22546 | @@ -878,17 +911,14 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22547 | /* Release old bh and drop refs */ | ||
22548 | try_to_release_page(page, 0); | ||
22549 | |||
22550 | - replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, | ||
22551 | - orig_blk_offset, block_len_in_page, | ||
22552 | - &err2); | ||
22553 | - if (err2) { | ||
22554 | - if (replaced_count) { | ||
22555 | - block_len_in_page = replaced_count; | ||
22556 | - replaced_size = | ||
22557 | - block_len_in_page << orig_inode->i_blkbits; | ||
22558 | - } else | ||
22559 | - goto out; | ||
22560 | - } | ||
22561 | + ret = mext_replace_branches(handle, orig_inode, donor_inode, | ||
22562 | + orig_blk_offset, block_len_in_page); | ||
22563 | + if (ret < 0) | ||
22564 | + goto out; | ||
22565 | + | ||
22566 | + /* Clear the inode cache not to refer to the old data */ | ||
22567 | + ext4_ext_invalidate_cache(orig_inode); | ||
22568 | + ext4_ext_invalidate_cache(donor_inode); | ||
22569 | |||
22570 | if (!page_has_buffers(page)) | ||
22571 | create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0); | ||
22572 | @@ -898,16 +928,16 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | ||
22573 | bh = bh->b_this_page; | ||
22574 | |||
22575 | for (i = 0; i < block_len_in_page; i++) { | ||
22576 | - *err = ext4_get_block(orig_inode, | ||
22577 | + ret = ext4_get_block(orig_inode, | ||
22578 | (sector_t)(orig_blk_offset + i), bh, 0); | ||
22579 | - if (*err < 0) | ||
22580 | + if (ret < 0) | ||
22581 | goto out; | ||
22582 | |||
22583 | if (bh->b_this_page != NULL) | ||
22584 | bh = bh->b_this_page; | ||
22585 | } | ||
22586 | |||
22587 | - *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size, | ||
22588 | + ret = a_ops->write_end(o_filp, mapping, offs, data_len, data_len, | ||
22589 | page, fsdata); | ||
22590 | page = NULL; | ||
22591 | |||
22592 | @@ -921,10 +951,7 @@ out: | ||
22593 | out2: | ||
22594 | ext4_journal_stop(handle); | ||
22595 | |||
22596 | - if (err2) | ||
22597 | - *err = err2; | ||
22598 | - | ||
22599 | - return replaced_count; | ||
22600 | + return ret < 0 ? ret : 0; | ||
22601 | } | ||
22602 | |||
22603 | /** | ||
22604 | @@ -935,6 +962,7 @@ out2: | ||
22605 | * @orig_start: logical start offset in block for orig | ||
22606 | * @donor_start: logical start offset in block for donor | ||
22607 | * @len: the number of blocks to be moved | ||
22608 | + * @moved_len: moved block length | ||
22609 | * | ||
22610 | * Check the arguments of ext4_move_extents() whether the files can be | ||
22611 | * exchanged with each other. | ||
22612 | @@ -942,8 +970,8 @@ out2: | ||
22613 | */ | ||
22614 | static int | ||
22615 | mext_check_arguments(struct inode *orig_inode, | ||
22616 | - struct inode *donor_inode, __u64 orig_start, | ||
22617 | - __u64 donor_start, __u64 *len) | ||
22618 | + struct inode *donor_inode, __u64 orig_start, | ||
22619 | + __u64 donor_start, __u64 *len, __u64 moved_len) | ||
22620 | { | ||
22621 | ext4_lblk_t orig_blocks, donor_blocks; | ||
22622 | unsigned int blkbits = orig_inode->i_blkbits; | ||
22623 | @@ -957,13 +985,6 @@ mext_check_arguments(struct inode *orig_inode, | ||
22624 | return -EINVAL; | ||
22625 | } | ||
22626 | |||
22627 | - if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { | ||
22628 | - ext4_debug("ext4 move extent: suid or sgid is set" | ||
22629 | - " to donor file [ino:orig %lu, donor %lu]\n", | ||
22630 | - orig_inode->i_ino, donor_inode->i_ino); | ||
22631 | - return -EINVAL; | ||
22632 | - } | ||
22633 | - | ||
22634 | /* Ext4 move extent does not support swapfile */ | ||
22635 | if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { | ||
22636 | ext4_debug("ext4 move extent: The argument files should " | ||
22637 | @@ -1004,6 +1025,13 @@ mext_check_arguments(struct inode *orig_inode, | ||
22638 | return -EINVAL; | ||
22639 | } | ||
22640 | |||
22641 | + if (moved_len) { | ||
22642 | + ext4_debug("ext4 move extent: moved_len should be 0 " | ||
22643 | + "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, | ||
22644 | + donor_inode->i_ino); | ||
22645 | + return -EINVAL; | ||
22646 | + } | ||
22647 | + | ||
22648 | if ((orig_start > EXT_MAX_BLOCK) || | ||
22649 | (donor_start > EXT_MAX_BLOCK) || | ||
22650 | (*len > EXT_MAX_BLOCK) || | ||
22651 | @@ -1204,16 +1232,16 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | ||
22652 | return -EINVAL; | ||
22653 | } | ||
22654 | |||
22655 | - /* Protect orig and donor inodes against a truncate */ | ||
22656 | + /* protect orig and donor against a truncate */ | ||
22657 | ret1 = mext_inode_double_lock(orig_inode, donor_inode); | ||
22658 | if (ret1 < 0) | ||
22659 | return ret1; | ||
22660 | |||
22661 | - /* Protect extent tree against block allocations via delalloc */ | ||
22662 | - double_down_write_data_sem(orig_inode, donor_inode); | ||
22663 | + mext_double_down_read(orig_inode, donor_inode); | ||
22664 | /* Check the filesystem environment whether move_extent can be done */ | ||
22665 | ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start, | ||
22666 | - donor_start, &len); | ||
22667 | + donor_start, &len, *moved_len); | ||
22668 | + mext_double_up_read(orig_inode, donor_inode); | ||
22669 | if (ret1) | ||
22670 | goto out; | ||
22671 | |||
22672 | @@ -1327,39 +1355,36 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | ||
22673 | seq_start = le32_to_cpu(ext_cur->ee_block); | ||
22674 | rest_blocks = seq_blocks; | ||
22675 | |||
22676 | - /* | ||
22677 | - * Up semaphore to avoid following problems: | ||
22678 | - * a. transaction deadlock among ext4_journal_start, | ||
22679 | - * ->write_begin via pagefault, and jbd2_journal_commit | ||
22680 | - * b. racing with ->readpage, ->write_begin, and ext4_get_block | ||
22681 | - * in move_extent_per_page | ||
22682 | - */ | ||
22683 | - double_up_write_data_sem(orig_inode, donor_inode); | ||
22684 | + /* Discard preallocations of two inodes */ | ||
22685 | + down_write(&EXT4_I(orig_inode)->i_data_sem); | ||
22686 | + ext4_discard_preallocations(orig_inode); | ||
22687 | + up_write(&EXT4_I(orig_inode)->i_data_sem); | ||
22688 | + | ||
22689 | + down_write(&EXT4_I(donor_inode)->i_data_sem); | ||
22690 | + ext4_discard_preallocations(donor_inode); | ||
22691 | + up_write(&EXT4_I(donor_inode)->i_data_sem); | ||
22692 | |||
22693 | while (orig_page_offset <= seq_end_page) { | ||
22694 | |||
22695 | /* Swap original branches with new branches */ | ||
22696 | - block_len_in_page = move_extent_per_page( | ||
22697 | - o_filp, donor_inode, | ||
22698 | + ret1 = move_extent_per_page(o_filp, donor_inode, | ||
22699 | orig_page_offset, | ||
22700 | data_offset_in_page, | ||
22701 | - block_len_in_page, uninit, | ||
22702 | - &ret1); | ||
22703 | - | ||
22704 | + block_len_in_page, uninit); | ||
22705 | + if (ret1 < 0) | ||
22706 | + goto out; | ||
22707 | + orig_page_offset++; | ||
22708 | /* Count how many blocks we have exchanged */ | ||
22709 | *moved_len += block_len_in_page; | ||
22710 | - if (ret1 < 0) | ||
22711 | - break; | ||
22712 | if (*moved_len > len) { | ||
22713 | ext4_error(orig_inode->i_sb, __func__, | ||
22714 | "We replaced blocks too much! " | ||
22715 | "sum of replaced: %llu requested: %llu", | ||
22716 | *moved_len, len); | ||
22717 | ret1 = -EIO; | ||
22718 | - break; | ||
22719 | + goto out; | ||
22720 | } | ||
22721 | |||
22722 | - orig_page_offset++; | ||
22723 | data_offset_in_page = 0; | ||
22724 | rest_blocks -= block_len_in_page; | ||
22725 | if (rest_blocks > blocks_per_page) | ||
22726 | @@ -1368,10 +1393,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | ||
22727 | block_len_in_page = rest_blocks; | ||
22728 | } | ||
22729 | |||
22730 | - double_down_write_data_sem(orig_inode, donor_inode); | ||
22731 | - if (ret1 < 0) | ||
22732 | - break; | ||
22733 | - | ||
22734 | /* Decrease buffer counter */ | ||
22735 | if (holecheck_path) | ||
22736 | ext4_ext_drop_refs(holecheck_path); | ||
22737 | @@ -1393,11 +1414,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, | ||
22738 | |||
22739 | } | ||
22740 | out: | ||
22741 | - if (*moved_len) { | ||
22742 | - ext4_discard_preallocations(orig_inode); | ||
22743 | - ext4_discard_preallocations(donor_inode); | ||
22744 | - } | ||
22745 | - | ||
22746 | if (orig_path) { | ||
22747 | ext4_ext_drop_refs(orig_path); | ||
22748 | kfree(orig_path); | ||
22749 | @@ -1406,7 +1422,7 @@ out: | ||
22750 | ext4_ext_drop_refs(holecheck_path); | ||
22751 | kfree(holecheck_path); | ||
22752 | } | ||
22753 | - double_up_write_data_sem(orig_inode, donor_inode); | ||
22754 | + | ||
22755 | ret2 = mext_inode_double_unlock(orig_inode, donor_inode); | ||
22756 | |||
22757 | if (ret1) | ||
22758 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c | ||
22759 | index 17a17e1..6d2c1b8 100644 | ||
22760 | --- a/fs/ext4/namei.c | ||
22761 | +++ b/fs/ext4/namei.c | ||
22762 | @@ -1292,6 +1292,9 @@ errout: | ||
22763 | * add_dirent_to_buf will attempt search the directory block for | ||
22764 | * space. It will return -ENOSPC if no space is available, and -EIO | ||
22765 | * and -EEXIST if directory entry already exists. | ||
22766 | + * | ||
22767 | + * NOTE! bh is NOT released in the case where ENOSPC is returned. In | ||
22768 | + * all other cases bh is released. | ||
22769 | */ | ||
22770 | static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, | ||
22771 | struct inode *inode, struct ext4_dir_entry_2 *de, | ||
22772 | @@ -1312,10 +1315,14 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, | ||
22773 | top = bh->b_data + blocksize - reclen; | ||
22774 | while ((char *) de <= top) { | ||
22775 | if (!ext4_check_dir_entry("ext4_add_entry", dir, de, | ||
22776 | - bh, offset)) | ||
22777 | + bh, offset)) { | ||
22778 | + brelse(bh); | ||
22779 | return -EIO; | ||
22780 | - if (ext4_match(namelen, name, de)) | ||
22781 | + } | ||
22782 | + if (ext4_match(namelen, name, de)) { | ||
22783 | + brelse(bh); | ||
22784 | return -EEXIST; | ||
22785 | + } | ||
22786 | nlen = EXT4_DIR_REC_LEN(de->name_len); | ||
22787 | rlen = ext4_rec_len_from_disk(de->rec_len, blocksize); | ||
22788 | if ((de->inode? rlen - nlen: rlen) >= reclen) | ||
22789 | @@ -1330,6 +1337,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, | ||
22790 | err = ext4_journal_get_write_access(handle, bh); | ||
22791 | if (err) { | ||
22792 | ext4_std_error(dir->i_sb, err); | ||
22793 | + brelse(bh); | ||
22794 | return err; | ||
22795 | } | ||
22796 | |||
22797 | @@ -1369,6 +1377,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, | ||
22798 | err = ext4_handle_dirty_metadata(handle, dir, bh); | ||
22799 | if (err) | ||
22800 | ext4_std_error(dir->i_sb, err); | ||
22801 | + brelse(bh); | ||
22802 | return 0; | ||
22803 | } | ||
22804 | |||
22805 | @@ -1462,9 +1471,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, | ||
22806 | if (!(de)) | ||
22807 | return retval; | ||
22808 | |||
22809 | - retval = add_dirent_to_buf(handle, dentry, inode, de, bh); | ||
22810 | - brelse(bh); | ||
22811 | - return retval; | ||
22812 | + return add_dirent_to_buf(handle, dentry, inode, de, bh); | ||
22813 | } | ||
22814 | |||
22815 | /* | ||
22816 | @@ -1507,10 +1514,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, | ||
22817 | if(!bh) | ||
22818 | return retval; | ||
22819 | retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); | ||
22820 | - if (retval != -ENOSPC) { | ||
22821 | - brelse(bh); | ||
22822 | + if (retval != -ENOSPC) | ||
22823 | return retval; | ||
22824 | - } | ||
22825 | |||
22826 | if (blocks == 1 && !dx_fallback && | ||
22827 | EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) | ||
22828 | @@ -1523,9 +1528,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, | ||
22829 | de = (struct ext4_dir_entry_2 *) bh->b_data; | ||
22830 | de->inode = 0; | ||
22831 | de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); | ||
22832 | - retval = add_dirent_to_buf(handle, dentry, inode, de, bh); | ||
22833 | - brelse(bh); | ||
22834 | - return retval; | ||
22835 | + return add_dirent_to_buf(handle, dentry, inode, de, bh); | ||
22836 | } | ||
22837 | |||
22838 | /* | ||
22839 | @@ -1558,8 +1561,10 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, | ||
22840 | goto journal_error; | ||
22841 | |||
22842 | err = add_dirent_to_buf(handle, dentry, inode, NULL, bh); | ||
22843 | - if (err != -ENOSPC) | ||
22844 | + if (err != -ENOSPC) { | ||
22845 | + bh = NULL; | ||
22846 | goto cleanup; | ||
22847 | + } | ||
22848 | |||
22849 | /* Block full, should compress but for now just split */ | ||
22850 | dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", | ||
22851 | @@ -1652,6 +1657,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, | ||
22852 | if (!de) | ||
22853 | goto cleanup; | ||
22854 | err = add_dirent_to_buf(handle, dentry, inode, de, bh); | ||
22855 | + bh = NULL; | ||
22856 | goto cleanup; | ||
22857 | |||
22858 | journal_error: | ||
22859 | @@ -1769,7 +1775,7 @@ static int ext4_create(struct inode *dir, struct dentry *dentry, int mode, | ||
22860 | retry: | ||
22861 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
22862 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | ||
22863 | - EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); | ||
22864 | + 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); | ||
22865 | if (IS_ERR(handle)) | ||
22866 | return PTR_ERR(handle); | ||
22867 | |||
22868 | @@ -1803,7 +1809,7 @@ static int ext4_mknod(struct inode *dir, struct dentry *dentry, | ||
22869 | retry: | ||
22870 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
22871 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | ||
22872 | - EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); | ||
22873 | + 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); | ||
22874 | if (IS_ERR(handle)) | ||
22875 | return PTR_ERR(handle); | ||
22876 | |||
22877 | @@ -1840,7 +1846,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
22878 | retry: | ||
22879 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
22880 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | ||
22881 | - EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); | ||
22882 | + 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); | ||
22883 | if (IS_ERR(handle)) | ||
22884 | return PTR_ERR(handle); | ||
22885 | |||
22886 | @@ -2253,7 +2259,7 @@ static int ext4_symlink(struct inode *dir, | ||
22887 | retry: | ||
22888 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
22889 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + | ||
22890 | - EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); | ||
22891 | + 2*EXT4_QUOTA_INIT_BLOCKS(dir->i_sb)); | ||
22892 | if (IS_ERR(handle)) | ||
22893 | return PTR_ERR(handle); | ||
22894 | |||
22895 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c | ||
22896 | index 3b2c554..3cfc343 100644 | ||
22897 | --- a/fs/ext4/resize.c | ||
22898 | +++ b/fs/ext4/resize.c | ||
22899 | @@ -247,7 +247,7 @@ static int setup_new_group_blocks(struct super_block *sb, | ||
22900 | goto exit_bh; | ||
22901 | |||
22902 | if (IS_ERR(gdb = bclean(handle, sb, block))) { | ||
22903 | - err = PTR_ERR(gdb); | ||
22904 | + err = PTR_ERR(bh); | ||
22905 | goto exit_bh; | ||
22906 | } | ||
22907 | ext4_handle_dirty_metadata(handle, NULL, gdb); | ||
22908 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c | ||
22909 | index 92943f2..d4ca92a 100644 | ||
22910 | --- a/fs/ext4/super.c | ||
22911 | +++ b/fs/ext4/super.c | ||
22912 | @@ -603,6 +603,10 @@ static void ext4_put_super(struct super_block *sb) | ||
22913 | if (sb->s_dirt) | ||
22914 | ext4_commit_super(sb, 1); | ||
22915 | |||
22916 | + ext4_release_system_zone(sb); | ||
22917 | + ext4_mb_release(sb); | ||
22918 | + ext4_ext_release(sb); | ||
22919 | + ext4_xattr_put_super(sb); | ||
22920 | if (sbi->s_journal) { | ||
22921 | err = jbd2_journal_destroy(sbi->s_journal); | ||
22922 | sbi->s_journal = NULL; | ||
22923 | @@ -610,12 +614,6 @@ static void ext4_put_super(struct super_block *sb) | ||
22924 | ext4_abort(sb, __func__, | ||
22925 | "Couldn't clean up the journal"); | ||
22926 | } | ||
22927 | - | ||
22928 | - ext4_release_system_zone(sb); | ||
22929 | - ext4_mb_release(sb); | ||
22930 | - ext4_ext_release(sb); | ||
22931 | - ext4_xattr_put_super(sb); | ||
22932 | - | ||
22933 | if (!(sb->s_flags & MS_RDONLY)) { | ||
22934 | EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); | ||
22935 | es->s_state = cpu_to_le16(sbi->s_mount_state); | ||
22936 | @@ -704,13 +702,8 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | ||
22937 | ei->i_allocated_meta_blocks = 0; | ||
22938 | ei->i_delalloc_reserved_flag = 0; | ||
22939 | spin_lock_init(&(ei->i_block_reservation_lock)); | ||
22940 | -#ifdef CONFIG_QUOTA | ||
22941 | - ei->i_reserved_quota = 0; | ||
22942 | -#endif | ||
22943 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | ||
22944 | ei->cur_aio_dio = NULL; | ||
22945 | - ei->i_sync_tid = 0; | ||
22946 | - ei->i_datasync_tid = 0; | ||
22947 | |||
22948 | return &ei->vfs_inode; | ||
22949 | } | ||
22950 | @@ -906,12 +899,6 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) | ||
22951 | if (test_opt(sb, NO_AUTO_DA_ALLOC)) | ||
22952 | seq_puts(seq, ",noauto_da_alloc"); | ||
22953 | |||
22954 | - if (test_opt(sb, DISCARD)) | ||
22955 | - seq_puts(seq, ",discard"); | ||
22956 | - | ||
22957 | - if (test_opt(sb, NOLOAD)) | ||
22958 | - seq_puts(seq, ",norecovery"); | ||
22959 | - | ||
22960 | ext4_show_quota_options(seq, sb); | ||
22961 | |||
22962 | return 0; | ||
22963 | @@ -1004,9 +991,7 @@ static const struct dquot_operations ext4_quota_operations = { | ||
22964 | .reserve_space = dquot_reserve_space, | ||
22965 | .claim_space = dquot_claim_space, | ||
22966 | .release_rsv = dquot_release_reserved_space, | ||
22967 | -#ifdef CONFIG_QUOTA | ||
22968 | .get_reserved_space = ext4_get_reserved_space, | ||
22969 | -#endif | ||
22970 | .alloc_inode = dquot_alloc_inode, | ||
22971 | .free_space = dquot_free_space, | ||
22972 | .free_inode = dquot_free_inode, | ||
22973 | @@ -1094,8 +1079,7 @@ enum { | ||
22974 | Opt_usrquota, Opt_grpquota, Opt_i_version, | ||
22975 | Opt_stripe, Opt_delalloc, Opt_nodelalloc, | ||
22976 | Opt_block_validity, Opt_noblock_validity, | ||
22977 | - Opt_inode_readahead_blks, Opt_journal_ioprio, | ||
22978 | - Opt_discard, Opt_nodiscard, | ||
22979 | + Opt_inode_readahead_blks, Opt_journal_ioprio | ||
22980 | }; | ||
22981 | |||
22982 | static const match_table_t tokens = { | ||
22983 | @@ -1120,7 +1104,6 @@ static const match_table_t tokens = { | ||
22984 | {Opt_acl, "acl"}, | ||
22985 | {Opt_noacl, "noacl"}, | ||
22986 | {Opt_noload, "noload"}, | ||
22987 | - {Opt_noload, "norecovery"}, | ||
22988 | {Opt_nobh, "nobh"}, | ||
22989 | {Opt_bh, "bh"}, | ||
22990 | {Opt_commit, "commit=%u"}, | ||
22991 | @@ -1161,8 +1144,6 @@ static const match_table_t tokens = { | ||
22992 | {Opt_auto_da_alloc, "auto_da_alloc=%u"}, | ||
22993 | {Opt_auto_da_alloc, "auto_da_alloc"}, | ||
22994 | {Opt_noauto_da_alloc, "noauto_da_alloc"}, | ||
22995 | - {Opt_discard, "discard"}, | ||
22996 | - {Opt_nodiscard, "nodiscard"}, | ||
22997 | {Opt_err, NULL}, | ||
22998 | }; | ||
22999 | |||
23000 | @@ -1584,12 +1565,6 @@ set_qf_format: | ||
23001 | else | ||
23002 | set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); | ||
23003 | break; | ||
23004 | - case Opt_discard: | ||
23005 | - set_opt(sbi->s_mount_opt, DISCARD); | ||
23006 | - break; | ||
23007 | - case Opt_nodiscard: | ||
23008 | - clear_opt(sbi->s_mount_opt, DISCARD); | ||
23009 | - break; | ||
23010 | default: | ||
23011 | ext4_msg(sb, KERN_ERR, | ||
23012 | "Unrecognized mount option \"%s\" " | ||
23013 | @@ -1698,14 +1673,14 @@ static int ext4_fill_flex_info(struct super_block *sb) | ||
23014 | size_t size; | ||
23015 | int i; | ||
23016 | |||
23017 | - sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; | ||
23018 | - groups_per_flex = 1 << sbi->s_log_groups_per_flex; | ||
23019 | - | ||
23020 | - if (groups_per_flex < 2) { | ||
23021 | + if (!sbi->s_es->s_log_groups_per_flex) { | ||
23022 | sbi->s_log_groups_per_flex = 0; | ||
23023 | return 1; | ||
23024 | } | ||
23025 | |||
23026 | + sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; | ||
23027 | + groups_per_flex = 1 << sbi->s_log_groups_per_flex; | ||
23028 | + | ||
23029 | /* We allocate both existing and potentially added groups */ | ||
23030 | flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) + | ||
23031 | ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) << | ||
23032 | @@ -3693,11 +3668,13 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) | ||
23033 | buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; | ||
23034 | buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) - | ||
23035 | percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter); | ||
23036 | + ext4_free_blocks_count_set(es, buf->f_bfree); | ||
23037 | buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es); | ||
23038 | if (buf->f_bfree < ext4_r_blocks_count(es)) | ||
23039 | buf->f_bavail = 0; | ||
23040 | buf->f_files = le32_to_cpu(es->s_inodes_count); | ||
23041 | buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); | ||
23042 | + es->s_free_inodes_count = cpu_to_le32(buf->f_ffree); | ||
23043 | buf->f_namelen = EXT4_NAME_LEN; | ||
23044 | fsid = le64_to_cpup((void *)es->s_uuid) ^ | ||
23045 | le64_to_cpup((void *)es->s_uuid + sizeof(u64)); | ||
23046 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c | ||
23047 | index 0257019..fed5b01 100644 | ||
23048 | --- a/fs/ext4/xattr.c | ||
23049 | +++ b/fs/ext4/xattr.c | ||
23050 | @@ -988,10 +988,6 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | ||
23051 | if (error) | ||
23052 | goto cleanup; | ||
23053 | |||
23054 | - error = ext4_journal_get_write_access(handle, is.iloc.bh); | ||
23055 | - if (error) | ||
23056 | - goto cleanup; | ||
23057 | - | ||
23058 | if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) { | ||
23059 | struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc); | ||
23060 | memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size); | ||
23061 | @@ -1017,6 +1013,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index, | ||
23062 | if (flags & XATTR_CREATE) | ||
23063 | goto cleanup; | ||
23064 | } | ||
23065 | + error = ext4_journal_get_write_access(handle, is.iloc.bh); | ||
23066 | + if (error) | ||
23067 | + goto cleanup; | ||
23068 | if (!value) { | ||
23069 | if (!is.s.not_found) | ||
23070 | error = ext4_xattr_ibody_set(handle, inode, &i, &is); | ||
23071 | diff --git a/fs/fcntl.c b/fs/fcntl.c | ||
23072 | index 97e01dc..2cf93ec 100644 | ||
23073 | --- a/fs/fcntl.c | ||
23074 | +++ b/fs/fcntl.c | ||
23075 | @@ -618,90 +618,60 @@ static DEFINE_RWLOCK(fasync_lock); | ||
23076 | static struct kmem_cache *fasync_cache __read_mostly; | ||
23077 | |||
23078 | /* | ||
23079 | - * Remove a fasync entry. If successfully removed, return | ||
23080 | - * positive and clear the FASYNC flag. If no entry exists, | ||
23081 | - * do nothing and return 0. | ||
23082 | - * | ||
23083 | - * NOTE! It is very important that the FASYNC flag always | ||
23084 | - * match the state "is the filp on a fasync list". | ||
23085 | - * | ||
23086 | - * We always take the 'filp->f_lock', in since fasync_lock | ||
23087 | - * needs to be irq-safe. | ||
23088 | + * fasync_helper() is used by almost all character device drivers | ||
23089 | + * to set up the fasync queue. It returns negative on error, 0 if it did | ||
23090 | + * no changes and positive if it added/deleted the entry. | ||
23091 | */ | ||
23092 | -static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) | ||
23093 | +int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) | ||
23094 | { | ||
23095 | struct fasync_struct *fa, **fp; | ||
23096 | + struct fasync_struct *new = NULL; | ||
23097 | int result = 0; | ||
23098 | |||
23099 | - spin_lock(&filp->f_lock); | ||
23100 | - write_lock_irq(&fasync_lock); | ||
23101 | - for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { | ||
23102 | - if (fa->fa_file != filp) | ||
23103 | - continue; | ||
23104 | - *fp = fa->fa_next; | ||
23105 | - kmem_cache_free(fasync_cache, fa); | ||
23106 | - filp->f_flags &= ~FASYNC; | ||
23107 | - result = 1; | ||
23108 | - break; | ||
23109 | + if (on) { | ||
23110 | + new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); | ||
23111 | + if (!new) | ||
23112 | + return -ENOMEM; | ||
23113 | } | ||
23114 | - write_unlock_irq(&fasync_lock); | ||
23115 | - spin_unlock(&filp->f_lock); | ||
23116 | - return result; | ||
23117 | -} | ||
23118 | - | ||
23119 | -/* | ||
23120 | - * Add a fasync entry. Return negative on error, positive if | ||
23121 | - * added, and zero if did nothing but change an existing one. | ||
23122 | - * | ||
23123 | - * NOTE! It is very important that the FASYNC flag always | ||
23124 | - * match the state "is the filp on a fasync list". | ||
23125 | - */ | ||
23126 | -static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) | ||
23127 | -{ | ||
23128 | - struct fasync_struct *new, *fa, **fp; | ||
23129 | - int result = 0; | ||
23130 | - | ||
23131 | - new = kmem_cache_alloc(fasync_cache, GFP_KERNEL); | ||
23132 | - if (!new) | ||
23133 | - return -ENOMEM; | ||
23134 | |||
23135 | + /* | ||
23136 | + * We need to take f_lock first since it's not an IRQ-safe | ||
23137 | + * lock. | ||
23138 | + */ | ||
23139 | spin_lock(&filp->f_lock); | ||
23140 | write_lock_irq(&fasync_lock); | ||
23141 | for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { | ||
23142 | - if (fa->fa_file != filp) | ||
23143 | - continue; | ||
23144 | - fa->fa_fd = fd; | ||
23145 | - kmem_cache_free(fasync_cache, new); | ||
23146 | - goto out; | ||
23147 | + if (fa->fa_file == filp) { | ||
23148 | + if(on) { | ||
23149 | + fa->fa_fd = fd; | ||
23150 | + kmem_cache_free(fasync_cache, new); | ||
23151 | + } else { | ||
23152 | + *fp = fa->fa_next; | ||
23153 | + kmem_cache_free(fasync_cache, fa); | ||
23154 | + result = 1; | ||
23155 | + } | ||
23156 | + goto out; | ||
23157 | + } | ||
23158 | } | ||
23159 | |||
23160 | - new->magic = FASYNC_MAGIC; | ||
23161 | - new->fa_file = filp; | ||
23162 | - new->fa_fd = fd; | ||
23163 | - new->fa_next = *fapp; | ||
23164 | - *fapp = new; | ||
23165 | - result = 1; | ||
23166 | - filp->f_flags |= FASYNC; | ||
23167 | - | ||
23168 | + if (on) { | ||
23169 | + new->magic = FASYNC_MAGIC; | ||
23170 | + new->fa_file = filp; | ||
23171 | + new->fa_fd = fd; | ||
23172 | + new->fa_next = *fapp; | ||
23173 | + *fapp = new; | ||
23174 | + result = 1; | ||
23175 | + } | ||
23176 | out: | ||
23177 | + if (on) | ||
23178 | + filp->f_flags |= FASYNC; | ||
23179 | + else | ||
23180 | + filp->f_flags &= ~FASYNC; | ||
23181 | write_unlock_irq(&fasync_lock); | ||
23182 | spin_unlock(&filp->f_lock); | ||
23183 | return result; | ||
23184 | } | ||
23185 | |||
23186 | -/* | ||
23187 | - * fasync_helper() is used by almost all character device drivers | ||
23188 | - * to set up the fasync queue, and for regular files by the file | ||
23189 | - * lease code. It returns negative on error, 0 if it did no changes | ||
23190 | - * and positive if it added/deleted the entry. | ||
23191 | - */ | ||
23192 | -int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) | ||
23193 | -{ | ||
23194 | - if (!on) | ||
23195 | - return fasync_remove_entry(filp, fapp); | ||
23196 | - return fasync_add_entry(fd, filp, fapp); | ||
23197 | -} | ||
23198 | - | ||
23199 | EXPORT_SYMBOL(fasync_helper); | ||
23200 | |||
23201 | void __kill_fasync(struct fasync_struct *fa, int sig, int band) | ||
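The fcntl.c rework above folds fasync add/remove back into a single fasync_helper(), the kernel side of SIGIO-driven I/O. A minimal userspace sketch of the sequence that ends up in this code, F_SETOWN followed by enabling O_ASYNC; the FIFO path is illustrative only:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_io;

static void on_sigio(int sig)
{
	(void)sig;
	got_io = 1;
}

int main(void)
{
	/* /tmp/example.fifo is a hypothetical FIFO created beforehand with mkfifo(1). */
	int fd = open("/tmp/example.fifo", O_RDONLY | O_NONBLOCK);
	int flags;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	signal(SIGIO, on_sigio);

	/* Direct SIGIO for this descriptor to our process... */
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		perror("F_SETOWN");

	/* ...and turn on O_ASYNC, which reaches fasync_helper(..., on != 0, ...). */
	flags = fcntl(fd, F_GETFL);
	if (fcntl(fd, F_SETFL, flags | O_ASYNC) < 0)
		perror("F_SETFL O_ASYNC");

	pause();	/* wait for SIGIO when data becomes readable */
	if (got_io)
		puts("SIGIO received");
	return 0;
}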
23202 | diff --git a/fs/fuse/file.c b/fs/fuse/file.c | ||
23203 | index a9f5e13..c18913a 100644 | ||
23204 | --- a/fs/fuse/file.c | ||
23205 | +++ b/fs/fuse/file.c | ||
23206 | @@ -828,9 +828,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, | ||
23207 | if (!page) | ||
23208 | break; | ||
23209 | |||
23210 | - if (mapping_writably_mapped(mapping)) | ||
23211 | - flush_dcache_page(page); | ||
23212 | - | ||
23213 | pagefault_disable(); | ||
23214 | tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); | ||
23215 | pagefault_enable(); | ||
23216 | diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c | ||
23217 | index 424b033..6d98f11 100644 | ||
23218 | --- a/fs/hfs/catalog.c | ||
23219 | +++ b/fs/hfs/catalog.c | ||
23220 | @@ -289,10 +289,6 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name, | ||
23221 | err = hfs_brec_find(&src_fd); | ||
23222 | if (err) | ||
23223 | goto out; | ||
23224 | - if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { | ||
23225 | - err = -EIO; | ||
23226 | - goto out; | ||
23227 | - } | ||
23228 | |||
23229 | hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, | ||
23230 | src_fd.entrylength); | ||
23231 | diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c | ||
23232 | index 2b3b861..7c69b98 100644 | ||
23233 | --- a/fs/hfs/dir.c | ||
23234 | +++ b/fs/hfs/dir.c | ||
23235 | @@ -79,11 +79,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
23236 | filp->f_pos++; | ||
23237 | /* fall through */ | ||
23238 | case 1: | ||
23239 | - if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { | ||
23240 | - err = -EIO; | ||
23241 | - goto out; | ||
23242 | - } | ||
23243 | - | ||
23244 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); | ||
23245 | if (entry.type != HFS_CDR_THD) { | ||
23246 | printk(KERN_ERR "hfs: bad catalog folder thread\n"); | ||
23247 | @@ -114,12 +109,6 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | ||
23248 | err = -EIO; | ||
23249 | goto out; | ||
23250 | } | ||
23251 | - | ||
23252 | - if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { | ||
23253 | - err = -EIO; | ||
23254 | - goto out; | ||
23255 | - } | ||
23256 | - | ||
23257 | hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); | ||
23258 | type = entry.type; | ||
23259 | len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); | ||
23260 | diff --git a/fs/hfs/super.c b/fs/hfs/super.c | ||
23261 | index 5ed7252..f7fcbe4 100644 | ||
23262 | --- a/fs/hfs/super.c | ||
23263 | +++ b/fs/hfs/super.c | ||
23264 | @@ -409,13 +409,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) | ||
23265 | /* try to get the root inode */ | ||
23266 | hfs_find_init(HFS_SB(sb)->cat_tree, &fd); | ||
23267 | res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); | ||
23268 | - if (!res) { | ||
23269 | - if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { | ||
23270 | - res = -EIO; | ||
23271 | - goto bail; | ||
23272 | - } | ||
23273 | + if (!res) | ||
23274 | hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); | ||
23275 | - } | ||
23276 | if (res) { | ||
23277 | hfs_find_exit(&fd); | ||
23278 | goto bail_no_root; | ||
23279 | diff --git a/fs/inode.c b/fs/inode.c | ||
23280 | index 4d8e3be..de80bc2 100644 | ||
23281 | --- a/fs/inode.c | ||
23282 | +++ b/fs/inode.c | ||
23283 | @@ -282,6 +282,8 @@ void inode_init_once(struct inode *inode) | ||
23284 | #ifdef CONFIG_FSNOTIFY | ||
23285 | INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries); | ||
23286 | #endif | ||
23287 | + INIT_LIST_HEAD(&inode->i_obj_list); | ||
23288 | + mutex_init(&inode->i_obj_mutex); | ||
23289 | } | ||
23290 | EXPORT_SYMBOL(inode_init_once); | ||
23291 | |||
23292 | diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c | ||
23293 | index 8896c1d..d4cfd6d 100644 | ||
23294 | --- a/fs/jbd2/commit.c | ||
23295 | +++ b/fs/jbd2/commit.c | ||
23296 | @@ -636,10 +636,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) | ||
23297 | JBUFFER_TRACE(jh, "ph3: write metadata"); | ||
23298 | flags = jbd2_journal_write_metadata_buffer(commit_transaction, | ||
23299 | jh, &new_jh, blocknr); | ||
23300 | - if (flags < 0) { | ||
23301 | - jbd2_journal_abort(journal, flags); | ||
23302 | - continue; | ||
23303 | - } | ||
23304 | set_bit(BH_JWrite, &jh2bh(new_jh)->b_state); | ||
23305 | wbuf[bufs++] = jh2bh(new_jh); | ||
23306 | |||
23307 | diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c | ||
23308 | index b7ca3a9..fed8538 100644 | ||
23309 | --- a/fs/jbd2/journal.c | ||
23310 | +++ b/fs/jbd2/journal.c | ||
23311 | @@ -78,7 +78,6 @@ EXPORT_SYMBOL(jbd2_journal_errno); | ||
23312 | EXPORT_SYMBOL(jbd2_journal_ack_err); | ||
23313 | EXPORT_SYMBOL(jbd2_journal_clear_err); | ||
23314 | EXPORT_SYMBOL(jbd2_log_wait_commit); | ||
23315 | -EXPORT_SYMBOL(jbd2_log_start_commit); | ||
23316 | EXPORT_SYMBOL(jbd2_journal_start_commit); | ||
23317 | EXPORT_SYMBOL(jbd2_journal_force_commit_nested); | ||
23318 | EXPORT_SYMBOL(jbd2_journal_wipe); | ||
23319 | @@ -359,10 +358,6 @@ repeat: | ||
23320 | |||
23321 | jbd_unlock_bh_state(bh_in); | ||
23322 | tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS); | ||
23323 | - if (!tmp) { | ||
23324 | - jbd2_journal_put_journal_head(new_jh); | ||
23325 | - return -ENOMEM; | ||
23326 | - } | ||
23327 | jbd_lock_bh_state(bh_in); | ||
23328 | if (jh_in->b_frozen_data) { | ||
23329 | jbd2_free(tmp, bh_in->b_size); | ||
23330 | @@ -1253,13 +1248,6 @@ int jbd2_journal_load(journal_t *journal) | ||
23331 | if (jbd2_journal_recover(journal)) | ||
23332 | goto recovery_error; | ||
23333 | |||
23334 | - if (journal->j_failed_commit) { | ||
23335 | - printk(KERN_ERR "JBD2: journal transaction %u on %s " | ||
23336 | - "is corrupt.\n", journal->j_failed_commit, | ||
23337 | - journal->j_devname); | ||
23338 | - return -EIO; | ||
23339 | - } | ||
23340 | - | ||
23341 | /* OK, we've finished with the dynamic journal bits: | ||
23342 | * reinitialise the dynamic contents of the superblock in memory | ||
23343 | * and reset them on disk. */ | ||
23344 | diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c | ||
23345 | index 3b6f2fa..090c556 100644 | ||
23346 | --- a/fs/jffs2/gc.c | ||
23347 | +++ b/fs/jffs2/gc.c | ||
23348 | @@ -700,8 +700,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | ||
23349 | struct jffs2_raw_inode ri; | ||
23350 | struct jffs2_node_frag *last_frag; | ||
23351 | union jffs2_device_node dev; | ||
23352 | - char *mdata = NULL; | ||
23353 | - int mdatalen = 0; | ||
23354 | + char *mdata = NULL, mdatalen = 0; | ||
23355 | uint32_t alloclen, ilen; | ||
23356 | int ret; | ||
23357 | |||
23358 | diff --git a/fs/namei.c b/fs/namei.c | ||
23359 | index a2b3c28..d11f404 100644 | ||
23360 | --- a/fs/namei.c | ||
23361 | +++ b/fs/namei.c | ||
23362 | @@ -234,7 +234,6 @@ int generic_permission(struct inode *inode, int mask, | ||
23363 | /* | ||
23364 | * Searching includes executable on directories, else just read. | ||
23365 | */ | ||
23366 | - mask &= MAY_READ | MAY_WRITE | MAY_EXEC; | ||
23367 | if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) | ||
23368 | if (capable(CAP_DAC_READ_SEARCH)) | ||
23369 | return 0; | ||
23370 | diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c | ||
23371 | index 0d28982..e1d415e 100644 | ||
23372 | --- a/fs/nfs/direct.c | ||
23373 | +++ b/fs/nfs/direct.c | ||
23374 | @@ -342,7 +342,6 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, | ||
23375 | data->res.fattr = &data->fattr; | ||
23376 | data->res.eof = 0; | ||
23377 | data->res.count = bytes; | ||
23378 | - nfs_fattr_init(&data->fattr); | ||
23379 | msg.rpc_argp = &data->args; | ||
23380 | msg.rpc_resp = &data->res; | ||
23381 | |||
23382 | @@ -576,7 +575,6 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) | ||
23383 | data->res.count = 0; | ||
23384 | data->res.fattr = &data->fattr; | ||
23385 | data->res.verf = &data->verf; | ||
23386 | - nfs_fattr_init(&data->fattr); | ||
23387 | |||
23388 | NFS_PROTO(data->inode)->commit_setup(data, &msg); | ||
23389 | |||
23390 | @@ -768,7 +766,6 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, | ||
23391 | data->res.fattr = &data->fattr; | ||
23392 | data->res.count = bytes; | ||
23393 | data->res.verf = &data->verf; | ||
23394 | - nfs_fattr_init(&data->fattr); | ||
23395 | |||
23396 | task_setup_data.task = &data->task; | ||
23397 | task_setup_data.callback_data = data; | ||
23398 | diff --git a/fs/nfs/file.c b/fs/nfs/file.c | ||
23399 | index 393d40f..f5fdd39 100644 | ||
23400 | --- a/fs/nfs/file.c | ||
23401 | +++ b/fs/nfs/file.c | ||
23402 | @@ -486,8 +486,6 @@ static int nfs_release_page(struct page *page, gfp_t gfp) | ||
23403 | { | ||
23404 | dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); | ||
23405 | |||
23406 | - if (gfp & __GFP_WAIT) | ||
23407 | - nfs_wb_page(page->mapping->host, page); | ||
23408 | /* If PagePrivate() is set, then the page is not freeable */ | ||
23409 | if (PagePrivate(page)) | ||
23410 | return 0; | ||
23411 | diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c | ||
23412 | index 237874f..fa58800 100644 | ||
23413 | --- a/fs/nfs/fscache.c | ||
23414 | +++ b/fs/nfs/fscache.c | ||
23415 | @@ -354,11 +354,12 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode) | ||
23416 | */ | ||
23417 | int nfs_fscache_release_page(struct page *page, gfp_t gfp) | ||
23418 | { | ||
23419 | - if (PageFsCache(page)) { | ||
23420 | - struct nfs_inode *nfsi = NFS_I(page->mapping->host); | ||
23421 | - struct fscache_cookie *cookie = nfsi->fscache; | ||
23422 | + struct nfs_inode *nfsi = NFS_I(page->mapping->host); | ||
23423 | + struct fscache_cookie *cookie = nfsi->fscache; | ||
23424 | |||
23425 | - BUG_ON(!cookie); | ||
23426 | + BUG_ON(!cookie); | ||
23427 | + | ||
23428 | + if (PageFsCache(page)) { | ||
23429 | dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", | ||
23430 | cookie, page, nfsi); | ||
23431 | |||
23432 | diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c | ||
23433 | index 59047f8..0adefc4 100644 | ||
23434 | --- a/fs/nfs/mount_clnt.c | ||
23435 | +++ b/fs/nfs/mount_clnt.c | ||
23436 | @@ -120,7 +120,7 @@ static struct { | ||
23437 | { .status = MNT3ERR_INVAL, .errno = -EINVAL, }, | ||
23438 | { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, }, | ||
23439 | { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, }, | ||
23440 | - { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, }, | ||
23441 | + { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, }, | ||
23442 | }; | ||
23443 | |||
23444 | struct mountres { | ||
23445 | diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c | ||
23446 | index 7bc2da8..5e078b2 100644 | ||
23447 | --- a/fs/nfs/nfs2xdr.c | ||
23448 | +++ b/fs/nfs/nfs2xdr.c | ||
23449 | @@ -699,7 +699,7 @@ static struct { | ||
23450 | { NFSERR_BAD_COOKIE, -EBADCOOKIE }, | ||
23451 | { NFSERR_NOTSUPP, -ENOTSUPP }, | ||
23452 | { NFSERR_TOOSMALL, -ETOOSMALL }, | ||
23453 | - { NFSERR_SERVERFAULT, -EREMOTEIO }, | ||
23454 | + { NFSERR_SERVERFAULT, -ESERVERFAULT }, | ||
23455 | { NFSERR_BADTYPE, -EBADTYPE }, | ||
23456 | { NFSERR_JUKEBOX, -EJUKEBOX }, | ||
23457 | { -1, -EIO } | ||
23458 | diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h | ||
23459 | index b4a6b1a..6ea07a3 100644 | ||
23460 | --- a/fs/nfs/nfs4_fs.h | ||
23461 | +++ b/fs/nfs/nfs4_fs.h | ||
23462 | @@ -141,7 +141,6 @@ enum { | ||
23463 | NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ | ||
23464 | NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ | ||
23465 | NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ | ||
23466 | - NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */ | ||
23467 | }; | ||
23468 | |||
23469 | struct nfs4_state { | ||
23470 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c | ||
23471 | index 6c20059..741a562 100644 | ||
23472 | --- a/fs/nfs/nfs4proc.c | ||
23473 | +++ b/fs/nfs/nfs4proc.c | ||
23474 | @@ -1573,8 +1573,6 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in | ||
23475 | status = PTR_ERR(state); | ||
23476 | if (IS_ERR(state)) | ||
23477 | goto err_opendata_put; | ||
23478 | - if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0) | ||
23479 | - set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); | ||
23480 | nfs4_opendata_put(opendata); | ||
23481 | nfs4_put_state_owner(sp); | ||
23482 | *res = state; | ||
23483 | @@ -3978,22 +3976,6 @@ static const struct rpc_call_ops nfs4_lock_ops = { | ||
23484 | .rpc_release = nfs4_lock_release, | ||
23485 | }; | ||
23486 | |||
23487 | -static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) | ||
23488 | -{ | ||
23489 | - struct nfs_client *clp = server->nfs_client; | ||
23490 | - struct nfs4_state *state = lsp->ls_state; | ||
23491 | - | ||
23492 | - switch (error) { | ||
23493 | - case -NFS4ERR_ADMIN_REVOKED: | ||
23494 | - case -NFS4ERR_BAD_STATEID: | ||
23495 | - case -NFS4ERR_EXPIRED: | ||
23496 | - if (new_lock_owner != 0 || | ||
23497 | - (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | ||
23498 | - nfs4_state_mark_reclaim_nograce(clp, state); | ||
23499 | - lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | ||
23500 | - }; | ||
23501 | -} | ||
23502 | - | ||
23503 | static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim) | ||
23504 | { | ||
23505 | struct nfs4_lockdata *data; | ||
23506 | @@ -4029,9 +4011,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f | ||
23507 | ret = nfs4_wait_for_completion_rpc_task(task); | ||
23508 | if (ret == 0) { | ||
23509 | ret = data->rpc_status; | ||
23510 | - if (ret) | ||
23511 | - nfs4_handle_setlk_error(data->server, data->lsp, | ||
23512 | - data->arg.new_lock_owner, ret); | ||
23513 | } else | ||
23514 | data->cancelled = 1; | ||
23515 | rpc_put_task(task); | ||
23516 | @@ -4081,11 +4060,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock | ||
23517 | { | ||
23518 | struct nfs_inode *nfsi = NFS_I(state->inode); | ||
23519 | unsigned char fl_flags = request->fl_flags; | ||
23520 | - int status = -ENOLCK; | ||
23521 | + int status; | ||
23522 | |||
23523 | - if ((fl_flags & FL_POSIX) && | ||
23524 | - !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) | ||
23525 | - goto out; | ||
23526 | /* Is this a delegated open? */ | ||
23527 | status = nfs4_set_lock_state(state, request); | ||
23528 | if (status != 0) | ||
23529 | diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c | ||
23530 | index a4cd1b7..20b4e30 100644 | ||
23531 | --- a/fs/nfs/nfs4xdr.c | ||
23532 | +++ b/fs/nfs/nfs4xdr.c | ||
23533 | @@ -4554,7 +4554,7 @@ static int decode_sequence(struct xdr_stream *xdr, | ||
23534 | * If the server returns different values for sessionID, slotID or | ||
23535 | * sequence number, the server is looney tunes. | ||
23536 | */ | ||
23537 | - status = -EREMOTEIO; | ||
23538 | + status = -ESERVERFAULT; | ||
23539 | |||
23540 | if (memcmp(id.data, res->sr_session->sess_id.data, | ||
23541 | NFS4_MAX_SESSIONID_LEN)) { | ||
23542 | @@ -5678,7 +5678,7 @@ static struct { | ||
23543 | { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, | ||
23544 | { NFS4ERR_NOTSUPP, -ENOTSUPP }, | ||
23545 | { NFS4ERR_TOOSMALL, -ETOOSMALL }, | ||
23546 | - { NFS4ERR_SERVERFAULT, -EREMOTEIO }, | ||
23547 | + { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, | ||
23548 | { NFS4ERR_BADTYPE, -EBADTYPE }, | ||
23549 | { NFS4ERR_LOCKED, -EAGAIN }, | ||
23550 | { NFS4ERR_SYMLINK, -ELOOP }, | ||
23551 | @@ -5705,7 +5705,7 @@ nfs4_stat_to_errno(int stat) | ||
23552 | } | ||
23553 | if (stat <= 10000 || stat > 10100) { | ||
23554 | /* The server is looney tunes. */ | ||
23555 | - return -EREMOTEIO; | ||
23556 | + return -ESERVERFAULT; | ||
23557 | } | ||
23558 | /* If we cannot translate the error, the recovery routines should | ||
23559 | * handle it. | ||
23560 | diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c | ||
23561 | index a12c45b..e297593 100644 | ||
23562 | --- a/fs/nfs/pagelist.c | ||
23563 | +++ b/fs/nfs/pagelist.c | ||
23564 | @@ -176,12 +176,6 @@ void nfs_release_request(struct nfs_page *req) | ||
23565 | kref_put(&req->wb_kref, nfs_free_request); | ||
23566 | } | ||
23567 | |||
23568 | -static int nfs_wait_bit_uninterruptible(void *word) | ||
23569 | -{ | ||
23570 | - io_schedule(); | ||
23571 | - return 0; | ||
23572 | -} | ||
23573 | - | ||
23574 | /** | ||
23575 | * nfs_wait_on_request - Wait for a request to complete. | ||
23576 | * @req: request to wait upon. | ||
23577 | @@ -192,9 +186,14 @@ static int nfs_wait_bit_uninterruptible(void *word) | ||
23578 | int | ||
23579 | nfs_wait_on_request(struct nfs_page *req) | ||
23580 | { | ||
23581 | - return wait_on_bit(&req->wb_flags, PG_BUSY, | ||
23582 | - nfs_wait_bit_uninterruptible, | ||
23583 | - TASK_UNINTERRUPTIBLE); | ||
23584 | + int ret = 0; | ||
23585 | + | ||
23586 | + if (!test_bit(PG_BUSY, &req->wb_flags)) | ||
23587 | + goto out; | ||
23588 | + ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY, | ||
23589 | + nfs_wait_bit_killable, TASK_KILLABLE); | ||
23590 | +out: | ||
23591 | + return ret; | ||
23592 | } | ||
23593 | |||
23594 | /** | ||
23595 | diff --git a/fs/nfs/super.c b/fs/nfs/super.c | ||
23596 | index 4bf23f6..90be551 100644 | ||
23597 | --- a/fs/nfs/super.c | ||
23598 | +++ b/fs/nfs/super.c | ||
23599 | @@ -241,7 +241,6 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *); | ||
23600 | static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); | ||
23601 | static int nfs_xdev_get_sb(struct file_system_type *fs_type, | ||
23602 | int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); | ||
23603 | -static void nfs_put_super(struct super_block *); | ||
23604 | static void nfs_kill_super(struct super_block *); | ||
23605 | static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); | ||
23606 | |||
23607 | @@ -265,7 +264,6 @@ static const struct super_operations nfs_sops = { | ||
23608 | .alloc_inode = nfs_alloc_inode, | ||
23609 | .destroy_inode = nfs_destroy_inode, | ||
23610 | .write_inode = nfs_write_inode, | ||
23611 | - .put_super = nfs_put_super, | ||
23612 | .statfs = nfs_statfs, | ||
23613 | .clear_inode = nfs_clear_inode, | ||
23614 | .umount_begin = nfs_umount_begin, | ||
23615 | @@ -335,7 +333,6 @@ static const struct super_operations nfs4_sops = { | ||
23616 | .alloc_inode = nfs_alloc_inode, | ||
23617 | .destroy_inode = nfs_destroy_inode, | ||
23618 | .write_inode = nfs_write_inode, | ||
23619 | - .put_super = nfs_put_super, | ||
23620 | .statfs = nfs_statfs, | ||
23621 | .clear_inode = nfs4_clear_inode, | ||
23622 | .umount_begin = nfs_umount_begin, | ||
23623 | @@ -737,6 +734,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve | ||
23624 | |||
23625 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
23626 | if (data) { | ||
23627 | + data->rsize = NFS_MAX_FILE_IO_SIZE; | ||
23628 | + data->wsize = NFS_MAX_FILE_IO_SIZE; | ||
23629 | data->acregmin = NFS_DEF_ACREGMIN; | ||
23630 | data->acregmax = NFS_DEF_ACREGMAX; | ||
23631 | data->acdirmin = NFS_DEF_ACDIRMIN; | ||
23632 | @@ -2199,17 +2198,6 @@ error_splat_super: | ||
23633 | } | ||
23634 | |||
23635 | /* | ||
23636 | - * Ensure that we unregister the bdi before kill_anon_super | ||
23637 | - * releases the device name | ||
23638 | - */ | ||
23639 | -static void nfs_put_super(struct super_block *s) | ||
23640 | -{ | ||
23641 | - struct nfs_server *server = NFS_SB(s); | ||
23642 | - | ||
23643 | - bdi_unregister(&server->backing_dev_info); | ||
23644 | -} | ||
23645 | - | ||
23646 | -/* | ||
23647 | * Destroy an NFS2/3 superblock | ||
23648 | */ | ||
23649 | static void nfs_kill_super(struct super_block *s) | ||
23650 | @@ -2217,6 +2205,7 @@ static void nfs_kill_super(struct super_block *s) | ||
23651 | struct nfs_server *server = NFS_SB(s); | ||
23652 | |||
23653 | kill_anon_super(s); | ||
23654 | + bdi_unregister(&server->backing_dev_info); | ||
23655 | nfs_fscache_release_super_cookie(s); | ||
23656 | nfs_free_server(server); | ||
23657 | } | ||
23658 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c | ||
23659 | index cf6c06f..53eb26c 100644 | ||
23660 | --- a/fs/nfs/write.c | ||
23661 | +++ b/fs/nfs/write.c | ||
23662 | @@ -1542,7 +1542,6 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) | ||
23663 | break; | ||
23664 | } | ||
23665 | ret = nfs_wait_on_request(req); | ||
23666 | - nfs_release_request(req); | ||
23667 | if (ret < 0) | ||
23668 | goto out; | ||
23669 | } | ||
23670 | @@ -1613,16 +1612,15 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage, | ||
23671 | if (ret) | ||
23672 | goto out_unlock; | ||
23673 | page_cache_get(newpage); | ||
23674 | - spin_lock(&mapping->host->i_lock); | ||
23675 | req->wb_page = newpage; | ||
23676 | SetPagePrivate(newpage); | ||
23677 | - set_page_private(newpage, (unsigned long)req); | ||
23678 | + set_page_private(newpage, page_private(page)); | ||
23679 | ClearPagePrivate(page); | ||
23680 | set_page_private(page, 0); | ||
23681 | - spin_unlock(&mapping->host->i_lock); | ||
23682 | page_cache_release(page); | ||
23683 | out_unlock: | ||
23684 | nfs_clear_page_tag_locked(req); | ||
23685 | + nfs_release_request(req); | ||
23686 | out: | ||
23687 | return ret; | ||
23688 | } | ||
23689 | diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c | ||
23690 | index 6d9c6aa..725d02f 100644 | ||
23691 | --- a/fs/nfsd/nfs4acl.c | ||
23692 | +++ b/fs/nfsd/nfs4acl.c | ||
23693 | @@ -389,7 +389,7 @@ sort_pacl(struct posix_acl *pacl) | ||
23694 | sort_pacl_range(pacl, 1, i-1); | ||
23695 | |||
23696 | BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); | ||
23697 | - j = ++i; | ||
23698 | + j = i++; | ||
23699 | while (pacl->a_entries[j].e_tag == ACL_GROUP) | ||
23700 | j++; | ||
23701 | sort_pacl_range(pacl, i, j-1); | ||
23702 | diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c | ||
23703 | index 570dd1c..a293f02 100644 | ||
23704 | --- a/fs/nfsd/vfs.c | ||
23705 | +++ b/fs/nfsd/vfs.c | ||
23706 | @@ -774,9 +774,12 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp, | ||
23707 | int (*fsync) (struct file *, struct dentry *, int); | ||
23708 | int err; | ||
23709 | |||
23710 | - err = filemap_write_and_wait(inode->i_mapping); | ||
23711 | + err = filemap_fdatawrite(inode->i_mapping); | ||
23712 | if (err == 0 && fop && (fsync = fop->fsync)) | ||
23713 | err = fsync(filp, dp, 0); | ||
23714 | + if (err == 0) | ||
23715 | + err = filemap_fdatawait(inode->i_mapping); | ||
23716 | + | ||
23717 | return err; | ||
23718 | } | ||
23719 | |||
23720 | diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c | ||
23721 | index 1afb0a1..c9ee67b 100644 | ||
23722 | --- a/fs/notify/inotify/inotify_fsnotify.c | ||
23723 | +++ b/fs/notify/inotify/inotify_fsnotify.c | ||
23724 | @@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data) | ||
23725 | if (warned) | ||
23726 | return 0; | ||
23727 | |||
23728 | - warned = true; | ||
23729 | + warned = false; | ||
23730 | entry = p; | ||
23731 | ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry); | ||
23732 | |||
23733 | diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c | ||
23734 | index ca44337..dcd2040 100644 | ||
23735 | --- a/fs/notify/inotify/inotify_user.c | ||
23736 | +++ b/fs/notify/inotify/inotify_user.c | ||
23737 | @@ -558,7 +558,7 @@ retry: | ||
23738 | |||
23739 | spin_lock(&group->inotify_data.idr_lock); | ||
23740 | ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry, | ||
23741 | - group->inotify_data.last_wd+1, | ||
23742 | + group->inotify_data.last_wd, | ||
23743 | &tmp_ientry->wd); | ||
23744 | spin_unlock(&group->inotify_data.idr_lock); | ||
23745 | if (ret) { | ||
23746 | @@ -638,7 +638,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign | ||
23747 | |||
23748 | spin_lock_init(&group->inotify_data.idr_lock); | ||
23749 | idr_init(&group->inotify_data.idr); | ||
23750 | - group->inotify_data.last_wd = 0; | ||
23751 | + group->inotify_data.last_wd = 1; | ||
23752 | group->inotify_data.user = user; | ||
23753 | group->inotify_data.fa = NULL; | ||
23754 | |||
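The inotify hunks above change where watch-descriptor allocation starts in the group's idr (the last_wd handling). Those descriptors are the values returned to userspace by inotify_add_watch(2); a minimal sketch, with "/tmp" as an example watch target:

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	int fd, wd;
	ssize_t n;

	fd = inotify_init();
	if (fd < 0) {
		perror("inotify_init");
		return 1;
	}
	/* "/tmp" is only an example; wd is the descriptor handed out by the idr code above. */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}
	printf("watch descriptor: %d\n", wd);

	n = read(fd, buf, sizeof(buf));	/* blocks until something changes under /tmp */
	if (n > 0) {
		struct inotify_event *ev = (struct inotify_event *)buf;
		printf("event mask 0x%x on wd %d\n", ev->mask, ev->wd);
	}
	close(fd);
	return 0;
}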
23755 | diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c | ||
23756 | index 49cfd5f..038a602 100644 | ||
23757 | --- a/fs/partitions/efi.c | ||
23758 | +++ b/fs/partitions/efi.c | ||
23759 | @@ -1,9 +1,7 @@ | ||
23760 | /************************************************************ | ||
23761 | * EFI GUID Partition Table handling | ||
23762 | - * | ||
23763 | - * http://www.uefi.org/specs/ | ||
23764 | - * http://www.intel.com/technology/efi/ | ||
23765 | - * | ||
23766 | + * Per Intel EFI Specification v1.02 | ||
23767 | + * http://developer.intel.com/technology/efi/efi.htm | ||
23768 | * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com> | ||
23769 | * Copyright 2000,2001,2002,2004 Dell Inc. | ||
23770 | * | ||
23771 | @@ -94,7 +92,6 @@ | ||
23772 | * | ||
23773 | ************************************************************/ | ||
23774 | #include <linux/crc32.h> | ||
23775 | -#include <linux/math64.h> | ||
23776 | #include "check.h" | ||
23777 | #include "efi.h" | ||
23778 | |||
23779 | @@ -144,8 +141,7 @@ last_lba(struct block_device *bdev) | ||
23780 | { | ||
23781 | if (!bdev || !bdev->bd_inode) | ||
23782 | return 0; | ||
23783 | - return div_u64(bdev->bd_inode->i_size, | ||
23784 | - bdev_logical_block_size(bdev)) - 1ULL; | ||
23785 | + return (bdev->bd_inode->i_size >> 9) - 1ULL; | ||
23786 | } | ||
23787 | |||
23788 | static inline int | ||
23789 | @@ -192,7 +188,6 @@ static size_t | ||
23790 | read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count) | ||
23791 | { | ||
23792 | size_t totalreadcount = 0; | ||
23793 | - sector_t n = lba * (bdev_logical_block_size(bdev) / 512); | ||
23794 | |||
23795 | if (!bdev || !buffer || lba > last_lba(bdev)) | ||
23796 | return 0; | ||
23797 | @@ -200,7 +195,7 @@ read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count) | ||
23798 | while (count) { | ||
23799 | int copied = 512; | ||
23800 | Sector sect; | ||
23801 | - unsigned char *data = read_dev_sector(bdev, n++, &sect); | ||
23802 | + unsigned char *data = read_dev_sector(bdev, lba++, &sect); | ||
23803 | if (!data) | ||
23804 | break; | ||
23805 | if (copied > count) | ||
23806 | @@ -262,16 +257,15 @@ static gpt_header * | ||
23807 | alloc_read_gpt_header(struct block_device *bdev, u64 lba) | ||
23808 | { | ||
23809 | gpt_header *gpt; | ||
23810 | - unsigned ssz = bdev_logical_block_size(bdev); | ||
23811 | - | ||
23812 | if (!bdev) | ||
23813 | return NULL; | ||
23814 | |||
23815 | - gpt = kzalloc(ssz, GFP_KERNEL); | ||
23816 | + gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL); | ||
23817 | if (!gpt) | ||
23818 | return NULL; | ||
23819 | |||
23820 | - if (read_lba(bdev, lba, (u8 *) gpt, ssz) < ssz) { | ||
23821 | + if (read_lba(bdev, lba, (u8 *) gpt, | ||
23822 | + sizeof (gpt_header)) < sizeof (gpt_header)) { | ||
23823 | kfree(gpt); | ||
23824 | gpt=NULL; | ||
23825 | return NULL; | ||
23826 | @@ -607,7 +601,6 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev) | ||
23827 | gpt_header *gpt = NULL; | ||
23828 | gpt_entry *ptes = NULL; | ||
23829 | u32 i; | ||
23830 | - unsigned ssz = bdev_logical_block_size(bdev) / 512; | ||
23831 | |||
23832 | if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) { | ||
23833 | kfree(gpt); | ||
23834 | @@ -618,14 +611,13 @@ efi_partition(struct parsed_partitions *state, struct block_device *bdev) | ||
23835 | pr_debug("GUID Partition Table is valid! Yea!\n"); | ||
23836 | |||
23837 | for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) { | ||
23838 | - u64 start = le64_to_cpu(ptes[i].starting_lba); | ||
23839 | - u64 size = le64_to_cpu(ptes[i].ending_lba) - | ||
23840 | - le64_to_cpu(ptes[i].starting_lba) + 1ULL; | ||
23841 | - | ||
23842 | if (!is_pte_valid(&ptes[i], last_lba(bdev))) | ||
23843 | continue; | ||
23844 | |||
23845 | - put_partition(state, i+1, start * ssz, size * ssz); | ||
23846 | + put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba), | ||
23847 | + (le64_to_cpu(ptes[i].ending_lba) - | ||
23848 | + le64_to_cpu(ptes[i].starting_lba) + | ||
23849 | + 1ULL)); | ||
23850 | |||
23851 | /* If this is a RAID volume, tell md */ | ||
23852 | if (!efi_guidcmp(ptes[i].partition_type_guid, | ||
23853 | diff --git a/fs/partitions/efi.h b/fs/partitions/efi.h | ||
23854 | index 6998b58..2cc89d0 100644 | ||
23855 | --- a/fs/partitions/efi.h | ||
23856 | +++ b/fs/partitions/efi.h | ||
23857 | @@ -37,6 +37,7 @@ | ||
23858 | #define EFI_PMBR_OSTYPE_EFI 0xEF | ||
23859 | #define EFI_PMBR_OSTYPE_EFI_GPT 0xEE | ||
23860 | |||
23861 | +#define GPT_BLOCK_SIZE 512 | ||
23862 | #define GPT_HEADER_SIGNATURE 0x5452415020494645ULL | ||
23863 | #define GPT_HEADER_REVISION_V1 0x00010000 | ||
23864 | #define GPT_PRIMARY_PARTITION_TABLE_LBA 1 | ||
23865 | @@ -78,12 +79,7 @@ typedef struct _gpt_header { | ||
23866 | __le32 num_partition_entries; | ||
23867 | __le32 sizeof_partition_entry; | ||
23868 | __le32 partition_entry_array_crc32; | ||
23869 | - | ||
23870 | - /* The rest of the logical block is reserved by UEFI and must be zero. | ||
23871 | - * EFI standard handles this by: | ||
23872 | - * | ||
23873 | - * uint8_t reserved2[ BlockSize - 92 ]; | ||
23874 | - */ | ||
23875 | + u8 reserved2[GPT_BLOCK_SIZE - 92]; | ||
23876 | } __attribute__ ((packed)) gpt_header; | ||
23877 | |||
23878 | typedef struct _gpt_entry_attributes { | ||
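The efi.c/efi.h hunks above return the GPT parser to fixed 512-byte sectors and a fixed-size gpt_header with an explicit reserved2 pad. The layout being parsed can be spot-checked from userspace by reading LBA 1 and comparing the 8-byte signature ("EFI PART", the little-endian form of GPT_HEADER_SIGNATURE). A rough sketch, assuming 512-byte logical sectors as the reverted code does; the device path is an example and reading it needs privileges:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SECTOR_SIZE 512		/* matches GPT_BLOCK_SIZE in the hunk above */

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/sda";	/* example device */
	unsigned char lba1[SECTOR_SIZE];
	uint32_t rev;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The primary GPT header lives at LBA 1 (GPT_PRIMARY_PARTITION_TABLE_LBA). */
	if (pread(fd, lba1, sizeof(lba1), 1 * SECTOR_SIZE) != (ssize_t)sizeof(lba1)) {
		perror("pread");
		return 1;
	}
	if (memcmp(lba1, "EFI PART", 8) == 0) {
		memcpy(&rev, lba1 + 8, sizeof(rev));	/* revision field, read on a little-endian host */
		printf("GPT header found, revision 0x%08x\n", rev);
	} else {
		puts("no GPT signature at LBA 1");
	}
	close(fd);
	return 0;
}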
23879 | diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c | ||
23880 | index 2534987..39b49c4 100644 | ||
23881 | --- a/fs/quota/dquot.c | ||
23882 | +++ b/fs/quota/dquot.c | ||
23883 | @@ -1388,70 +1388,6 @@ void vfs_dq_drop(struct inode *inode) | ||
23884 | EXPORT_SYMBOL(vfs_dq_drop); | ||
23885 | |||
23886 | /* | ||
23887 | - * inode_reserved_space is managed internally by quota, and protected by | ||
23888 | - * i_lock similar to i_blocks+i_bytes. | ||
23889 | - */ | ||
23890 | -static qsize_t *inode_reserved_space(struct inode * inode) | ||
23891 | -{ | ||
23892 | - /* Filesystem must explicitly define it's own method in order to use | ||
23893 | - * quota reservation interface */ | ||
23894 | - BUG_ON(!inode->i_sb->dq_op->get_reserved_space); | ||
23895 | - return inode->i_sb->dq_op->get_reserved_space(inode); | ||
23896 | -} | ||
23897 | - | ||
23898 | -static void inode_add_rsv_space(struct inode *inode, qsize_t number) | ||
23899 | -{ | ||
23900 | - spin_lock(&inode->i_lock); | ||
23901 | - *inode_reserved_space(inode) += number; | ||
23902 | - spin_unlock(&inode->i_lock); | ||
23903 | -} | ||
23904 | - | ||
23905 | - | ||
23906 | -static void inode_claim_rsv_space(struct inode *inode, qsize_t number) | ||
23907 | -{ | ||
23908 | - spin_lock(&inode->i_lock); | ||
23909 | - *inode_reserved_space(inode) -= number; | ||
23910 | - __inode_add_bytes(inode, number); | ||
23911 | - spin_unlock(&inode->i_lock); | ||
23912 | -} | ||
23913 | - | ||
23914 | -static void inode_sub_rsv_space(struct inode *inode, qsize_t number) | ||
23915 | -{ | ||
23916 | - spin_lock(&inode->i_lock); | ||
23917 | - *inode_reserved_space(inode) -= number; | ||
23918 | - spin_unlock(&inode->i_lock); | ||
23919 | -} | ||
23920 | - | ||
23921 | -static qsize_t inode_get_rsv_space(struct inode *inode) | ||
23922 | -{ | ||
23923 | - qsize_t ret; | ||
23924 | - | ||
23925 | - if (!inode->i_sb->dq_op->get_reserved_space) | ||
23926 | - return 0; | ||
23927 | - spin_lock(&inode->i_lock); | ||
23928 | - ret = *inode_reserved_space(inode); | ||
23929 | - spin_unlock(&inode->i_lock); | ||
23930 | - return ret; | ||
23931 | -} | ||
23932 | - | ||
23933 | -static void inode_incr_space(struct inode *inode, qsize_t number, | ||
23934 | - int reserve) | ||
23935 | -{ | ||
23936 | - if (reserve) | ||
23937 | - inode_add_rsv_space(inode, number); | ||
23938 | - else | ||
23939 | - inode_add_bytes(inode, number); | ||
23940 | -} | ||
23941 | - | ||
23942 | -static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | ||
23943 | -{ | ||
23944 | - if (reserve) | ||
23945 | - inode_sub_rsv_space(inode, number); | ||
23946 | - else | ||
23947 | - inode_sub_bytes(inode, number); | ||
23948 | -} | ||
23949 | - | ||
23950 | -/* | ||
23951 | * Following four functions update i_blocks+i_bytes fields and | ||
23952 | * quota information (together with appropriate checks) | ||
23953 | * NOTE: We absolutely rely on the fact that caller dirties | ||
23954 | @@ -1469,21 +1405,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
23955 | int cnt, ret = QUOTA_OK; | ||
23956 | char warntype[MAXQUOTAS]; | ||
23957 | |||
23958 | - /* | ||
23959 | - * First test before acquiring mutex - solves deadlocks when we | ||
23960 | - * re-enter the quota code and are already holding the mutex | ||
23961 | - */ | ||
23962 | - if (IS_NOQUOTA(inode)) { | ||
23963 | - inode_incr_space(inode, number, reserve); | ||
23964 | - goto out; | ||
23965 | - } | ||
23966 | - | ||
23967 | - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
23968 | - if (IS_NOQUOTA(inode)) { | ||
23969 | - inode_incr_space(inode, number, reserve); | ||
23970 | - goto out_unlock; | ||
23971 | - } | ||
23972 | - | ||
23973 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
23974 | warntype[cnt] = QUOTA_NL_NOWARN; | ||
23975 | |||
23976 | @@ -1494,8 +1415,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
23977 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | ||
23978 | == NO_QUOTA) { | ||
23979 | ret = NO_QUOTA; | ||
23980 | - spin_unlock(&dq_data_lock); | ||
23981 | - goto out_flush_warn; | ||
23982 | + goto out_unlock; | ||
23983 | } | ||
23984 | } | ||
23985 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
23986 | @@ -1506,32 +1426,64 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | ||
23987 | else | ||
23988 | dquot_incr_space(inode->i_dquot[cnt], number); | ||
23989 | } | ||
23990 | - inode_incr_space(inode, number, reserve); | ||
23991 | + if (!reserve) | ||
23992 | + inode_add_bytes(inode, number); | ||
23993 | +out_unlock: | ||
23994 | spin_unlock(&dq_data_lock); | ||
23995 | + flush_warnings(inode->i_dquot, warntype); | ||
23996 | + return ret; | ||
23997 | +} | ||
23998 | + | ||
23999 | +int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
24000 | +{ | ||
24001 | + int cnt, ret = QUOTA_OK; | ||
24002 | + | ||
24003 | + /* | ||
24004 | + * First test before acquiring mutex - solves deadlocks when we | ||
24005 | + * re-enter the quota code and are already holding the mutex | ||
24006 | + */ | ||
24007 | + if (IS_NOQUOTA(inode)) { | ||
24008 | + inode_add_bytes(inode, number); | ||
24009 | + goto out; | ||
24010 | + } | ||
24011 | + | ||
24012 | + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24013 | + if (IS_NOQUOTA(inode)) { | ||
24014 | + inode_add_bytes(inode, number); | ||
24015 | + goto out_unlock; | ||
24016 | + } | ||
24017 | + | ||
24018 | + ret = __dquot_alloc_space(inode, number, warn, 0); | ||
24019 | + if (ret == NO_QUOTA) | ||
24020 | + goto out_unlock; | ||
24021 | |||
24022 | - if (reserve) | ||
24023 | - goto out_flush_warn; | ||
24024 | /* Dirtify all the dquots - this can block when journalling */ | ||
24025 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
24026 | if (inode->i_dquot[cnt]) | ||
24027 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
24028 | -out_flush_warn: | ||
24029 | - flush_warnings(inode->i_dquot, warntype); | ||
24030 | out_unlock: | ||
24031 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24032 | out: | ||
24033 | return ret; | ||
24034 | } | ||
24035 | - | ||
24036 | -int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | ||
24037 | -{ | ||
24038 | - return __dquot_alloc_space(inode, number, warn, 0); | ||
24039 | -} | ||
24040 | EXPORT_SYMBOL(dquot_alloc_space); | ||
24041 | |||
24042 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | ||
24043 | { | ||
24044 | - return __dquot_alloc_space(inode, number, warn, 1); | ||
24045 | + int ret = QUOTA_OK; | ||
24046 | + | ||
24047 | + if (IS_NOQUOTA(inode)) | ||
24048 | + goto out; | ||
24049 | + | ||
24050 | + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24051 | + if (IS_NOQUOTA(inode)) | ||
24052 | + goto out_unlock; | ||
24053 | + | ||
24054 | + ret = __dquot_alloc_space(inode, number, warn, 1); | ||
24055 | +out_unlock: | ||
24056 | + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24057 | +out: | ||
24058 | + return ret; | ||
24059 | } | ||
24060 | EXPORT_SYMBOL(dquot_reserve_space); | ||
24061 | |||
24062 | @@ -1588,14 +1540,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | ||
24063 | int ret = QUOTA_OK; | ||
24064 | |||
24065 | if (IS_NOQUOTA(inode)) { | ||
24066 | - inode_claim_rsv_space(inode, number); | ||
24067 | + inode_add_bytes(inode, number); | ||
24068 | goto out; | ||
24069 | } | ||
24070 | |||
24071 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24072 | if (IS_NOQUOTA(inode)) { | ||
24073 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24074 | - inode_claim_rsv_space(inode, number); | ||
24075 | + inode_add_bytes(inode, number); | ||
24076 | goto out; | ||
24077 | } | ||
24078 | |||
24079 | @@ -1607,7 +1559,7 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | ||
24080 | number); | ||
24081 | } | ||
24082 | /* Update inode bytes */ | ||
24083 | - inode_claim_rsv_space(inode, number); | ||
24084 | + inode_add_bytes(inode, number); | ||
24085 | spin_unlock(&dq_data_lock); | ||
24086 | /* Dirtify all the dquots - this can block when journalling */ | ||
24087 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
24088 | @@ -1620,9 +1572,38 @@ out: | ||
24089 | EXPORT_SYMBOL(dquot_claim_space); | ||
24090 | |||
24091 | /* | ||
24092 | + * Release reserved quota space | ||
24093 | + */ | ||
24094 | +void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
24095 | +{ | ||
24096 | + int cnt; | ||
24097 | + | ||
24098 | + if (IS_NOQUOTA(inode)) | ||
24099 | + goto out; | ||
24100 | + | ||
24101 | + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24102 | + if (IS_NOQUOTA(inode)) | ||
24103 | + goto out_unlock; | ||
24104 | + | ||
24105 | + spin_lock(&dq_data_lock); | ||
24106 | + /* Release reserved dquots */ | ||
24107 | + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
24108 | + if (inode->i_dquot[cnt]) | ||
24109 | + dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
24110 | + } | ||
24111 | + spin_unlock(&dq_data_lock); | ||
24112 | + | ||
24113 | +out_unlock: | ||
24114 | + up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24115 | +out: | ||
24116 | + return; | ||
24117 | +} | ||
24118 | +EXPORT_SYMBOL(dquot_release_reserved_space); | ||
24119 | + | ||
24120 | +/* | ||
24121 | * This operation can block, but only after everything is updated | ||
24122 | */ | ||
24123 | -int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) | ||
24124 | +int dquot_free_space(struct inode *inode, qsize_t number) | ||
24125 | { | ||
24126 | unsigned int cnt; | ||
24127 | char warntype[MAXQUOTAS]; | ||
24128 | @@ -1631,7 +1612,7 @@ int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) | ||
24129 | * re-enter the quota code and are already holding the mutex */ | ||
24130 | if (IS_NOQUOTA(inode)) { | ||
24131 | out_sub: | ||
24132 | - inode_decr_space(inode, number, reserve); | ||
24133 | + inode_sub_bytes(inode, number); | ||
24134 | return QUOTA_OK; | ||
24135 | } | ||
24136 | |||
24137 | @@ -1646,43 +1627,21 @@ out_sub: | ||
24138 | if (!inode->i_dquot[cnt]) | ||
24139 | continue; | ||
24140 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | ||
24141 | - if (reserve) | ||
24142 | - dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
24143 | - else | ||
24144 | - dquot_decr_space(inode->i_dquot[cnt], number); | ||
24145 | + dquot_decr_space(inode->i_dquot[cnt], number); | ||
24146 | } | ||
24147 | - inode_decr_space(inode, number, reserve); | ||
24148 | + inode_sub_bytes(inode, number); | ||
24149 | spin_unlock(&dq_data_lock); | ||
24150 | - | ||
24151 | - if (reserve) | ||
24152 | - goto out_unlock; | ||
24153 | /* Dirtify all the dquots - this can block when journalling */ | ||
24154 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
24155 | if (inode->i_dquot[cnt]) | ||
24156 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
24157 | -out_unlock: | ||
24158 | flush_warnings(inode->i_dquot, warntype); | ||
24159 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
24160 | return QUOTA_OK; | ||
24161 | } | ||
24162 | - | ||
24163 | -int dquot_free_space(struct inode *inode, qsize_t number) | ||
24164 | -{ | ||
24165 | - return __dquot_free_space(inode, number, 0); | ||
24166 | -} | ||
24167 | EXPORT_SYMBOL(dquot_free_space); | ||
24168 | |||
24169 | /* | ||
24170 | - * Release reserved quota space | ||
24171 | - */ | ||
24172 | -void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
24173 | -{ | ||
24174 | - __dquot_free_space(inode, number, 1); | ||
24175 | - | ||
24176 | -} | ||
24177 | -EXPORT_SYMBOL(dquot_release_reserved_space); | ||
24178 | - | ||
24179 | -/* | ||
24180 | * This operation can block, but only after everything is updated | ||
24181 | */ | ||
24182 | int dquot_free_inode(const struct inode *inode, qsize_t number) | ||
24183 | @@ -1720,6 +1679,19 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | ||
24184 | EXPORT_SYMBOL(dquot_free_inode); | ||
24185 | |||
24186 | /* | ||
24187 | + * call back function, get reserved quota space from underlying fs | ||
24188 | + */ | ||
24189 | +qsize_t dquot_get_reserved_space(struct inode *inode) | ||
24190 | +{ | ||
24191 | + qsize_t reserved_space = 0; | ||
24192 | + | ||
24193 | + if (sb_any_quota_active(inode->i_sb) && | ||
24194 | + inode->i_sb->dq_op->get_reserved_space) | ||
24195 | + reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
24196 | + return reserved_space; | ||
24197 | +} | ||
24198 | + | ||
24199 | +/* | ||
24200 | * Transfer the number of inode and blocks from one diskquota to an other. | ||
24201 | * | ||
24202 | * This operation can block, but only after everything is updated | ||
24203 | @@ -1762,7 +1734,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | ||
24204 | } | ||
24205 | spin_lock(&dq_data_lock); | ||
24206 | cur_space = inode_get_bytes(inode); | ||
24207 | - rsv_space = inode_get_rsv_space(inode); | ||
24208 | + rsv_space = dquot_get_reserved_space(inode); | ||
24209 | space = cur_space + rsv_space; | ||
24210 | /* Build the transfer_from list and check the limits */ | ||
24211 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
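The dquot.c hunks above revert reserved-space tracking from the generic inode_*_rsv_space helpers back to the older per-filesystem get_reserved_space callback; reserved and claimed space ultimately feeds the per-user usage visible through quotactl(2). A rough sketch of querying that usage from userspace; the device path is illustrative and assumes user quotas are enabled on it:

#include <stdio.h>
#include <sys/quota.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	struct dqblk dq;

	/* "/dev/sda1" is only an example of a block device with user quotas enabled. */
	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", getuid(),
		     (caddr_t)&dq) != 0) {
		perror("quotactl");
		return 1;
	}
	printf("space used:  %llu bytes (soft limit %llu blocks)\n",
	       (unsigned long long)dq.dqb_curspace,
	       (unsigned long long)dq.dqb_bsoftlimit);
	printf("inodes used: %llu\n", (unsigned long long)dq.dqb_curinodes);
	return 0;
}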
24212 | diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c | ||
24213 | index d240c15..a14d6cd 100644 | ||
24214 | --- a/fs/reiserfs/inode.c | ||
24215 | +++ b/fs/reiserfs/inode.c | ||
24216 | @@ -2531,12 +2531,6 @@ static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) | ||
24217 | return reiserfs_write_full_page(page, wbc); | ||
24218 | } | ||
24219 | |||
24220 | -static void reiserfs_truncate_failed_write(struct inode *inode) | ||
24221 | -{ | ||
24222 | - truncate_inode_pages(inode->i_mapping, inode->i_size); | ||
24223 | - reiserfs_truncate_file(inode, 0); | ||
24224 | -} | ||
24225 | - | ||
24226 | static int reiserfs_write_begin(struct file *file, | ||
24227 | struct address_space *mapping, | ||
24228 | loff_t pos, unsigned len, unsigned flags, | ||
24229 | @@ -2603,8 +2597,6 @@ static int reiserfs_write_begin(struct file *file, | ||
24230 | if (ret) { | ||
24231 | unlock_page(page); | ||
24232 | page_cache_release(page); | ||
24233 | - /* Truncate allocated blocks */ | ||
24234 | - reiserfs_truncate_failed_write(inode); | ||
24235 | } | ||
24236 | return ret; | ||
24237 | } | ||
24238 | @@ -2697,7 +2689,8 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | ||
24239 | ** transaction tracking stuff when the size changes. So, we have | ||
24240 | ** to do the i_size updates here. | ||
24241 | */ | ||
24242 | - if (pos + copied > inode->i_size) { | ||
24243 | + pos += copied; | ||
24244 | + if (pos > inode->i_size) { | ||
24245 | struct reiserfs_transaction_handle myth; | ||
24246 | reiserfs_write_lock(inode->i_sb); | ||
24247 | /* If the file have grown beyond the border where it | ||
24248 | @@ -2715,7 +2708,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | ||
24249 | goto journal_error; | ||
24250 | } | ||
24251 | reiserfs_update_inode_transaction(inode); | ||
24252 | - inode->i_size = pos + copied; | ||
24253 | + inode->i_size = pos; | ||
24254 | /* | ||
24255 | * this will just nest into our transaction. It's important | ||
24256 | * to use mark_inode_dirty so the inode gets pushed around on the | ||
24257 | @@ -2742,10 +2735,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, | ||
24258 | out: | ||
24259 | unlock_page(page); | ||
24260 | page_cache_release(page); | ||
24261 | - | ||
24262 | - if (pos + len > inode->i_size) | ||
24263 | - reiserfs_truncate_failed_write(inode); | ||
24264 | - | ||
24265 | return ret == 0 ? copied : ret; | ||
24266 | |||
24267 | journal_error: | ||
24268 | diff --git a/fs/romfs/super.c b/fs/romfs/super.c | ||
24269 | index 42d2135..c117fa8 100644 | ||
24270 | --- a/fs/romfs/super.c | ||
24271 | +++ b/fs/romfs/super.c | ||
24272 | @@ -544,7 +544,6 @@ error: | ||
24273 | error_rsb_inval: | ||
24274 | ret = -EINVAL; | ||
24275 | error_rsb: | ||
24276 | - kfree(rsb); | ||
24277 | return ret; | ||
24278 | } | ||
24279 | |||
24280 | diff --git a/fs/stat.c b/fs/stat.c | ||
24281 | index c4ecd52..075694e 100644 | ||
24282 | --- a/fs/stat.c | ||
24283 | +++ b/fs/stat.c | ||
24284 | @@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, | ||
24285 | } | ||
24286 | #endif /* __ARCH_WANT_STAT64 */ | ||
24287 | |||
24288 | -/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ | ||
24289 | -void __inode_add_bytes(struct inode *inode, loff_t bytes) | ||
24290 | +void inode_add_bytes(struct inode *inode, loff_t bytes) | ||
24291 | { | ||
24292 | + spin_lock(&inode->i_lock); | ||
24293 | inode->i_blocks += bytes >> 9; | ||
24294 | bytes &= 511; | ||
24295 | inode->i_bytes += bytes; | ||
24296 | @@ -411,12 +411,6 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes) | ||
24297 | inode->i_blocks++; | ||
24298 | inode->i_bytes -= 512; | ||
24299 | } | ||
24300 | -} | ||
24301 | - | ||
24302 | -void inode_add_bytes(struct inode *inode, loff_t bytes) | ||
24303 | -{ | ||
24304 | - spin_lock(&inode->i_lock); | ||
24305 | - __inode_add_bytes(inode, bytes); | ||
24306 | spin_unlock(&inode->i_lock); | ||
24307 | } | ||
24308 | |||
24309 | diff --git a/fs/super.c b/fs/super.c | ||
24310 | index aff046b..19eb70b 100644 | ||
24311 | --- a/fs/super.c | ||
24312 | +++ b/fs/super.c | ||
24313 | @@ -901,9 +901,8 @@ int get_sb_single(struct file_system_type *fs_type, | ||
24314 | return error; | ||
24315 | } | ||
24316 | s->s_flags |= MS_ACTIVE; | ||
24317 | - } else { | ||
24318 | - do_remount_sb(s, flags, data, 0); | ||
24319 | } | ||
24320 | + do_remount_sb(s, flags, data, 0); | ||
24321 | simple_set_mnt(mnt, s); | ||
24322 | return 0; | ||
24323 | } | ||
24324 | diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c | ||
24325 | index 02a022a..e28cecf 100644 | ||
24326 | --- a/fs/sysfs/inode.c | ||
24327 | +++ b/fs/sysfs/inode.c | ||
24328 | @@ -94,29 +94,30 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) | ||
24329 | if (!sd_attrs) | ||
24330 | return -ENOMEM; | ||
24331 | sd->s_iattr = sd_attrs; | ||
24332 | - } | ||
24333 | - /* attributes were changed at least once in past */ | ||
24334 | - iattrs = &sd_attrs->ia_iattr; | ||
24335 | - | ||
24336 | - if (ia_valid & ATTR_UID) | ||
24337 | - iattrs->ia_uid = iattr->ia_uid; | ||
24338 | - if (ia_valid & ATTR_GID) | ||
24339 | - iattrs->ia_gid = iattr->ia_gid; | ||
24340 | - if (ia_valid & ATTR_ATIME) | ||
24341 | - iattrs->ia_atime = timespec_trunc(iattr->ia_atime, | ||
24342 | - inode->i_sb->s_time_gran); | ||
24343 | - if (ia_valid & ATTR_MTIME) | ||
24344 | - iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime, | ||
24345 | - inode->i_sb->s_time_gran); | ||
24346 | - if (ia_valid & ATTR_CTIME) | ||
24347 | - iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime, | ||
24348 | - inode->i_sb->s_time_gran); | ||
24349 | - if (ia_valid & ATTR_MODE) { | ||
24350 | - umode_t mode = iattr->ia_mode; | ||
24351 | - | ||
24352 | - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) | ||
24353 | - mode &= ~S_ISGID; | ||
24354 | - iattrs->ia_mode = sd->s_mode = mode; | ||
24355 | + } else { | ||
24356 | + /* attributes were changed at least once in past */ | ||
24357 | + iattrs = &sd_attrs->ia_iattr; | ||
24358 | + | ||
24359 | + if (ia_valid & ATTR_UID) | ||
24360 | + iattrs->ia_uid = iattr->ia_uid; | ||
24361 | + if (ia_valid & ATTR_GID) | ||
24362 | + iattrs->ia_gid = iattr->ia_gid; | ||
24363 | + if (ia_valid & ATTR_ATIME) | ||
24364 | + iattrs->ia_atime = timespec_trunc(iattr->ia_atime, | ||
24365 | + inode->i_sb->s_time_gran); | ||
24366 | + if (ia_valid & ATTR_MTIME) | ||
24367 | + iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime, | ||
24368 | + inode->i_sb->s_time_gran); | ||
24369 | + if (ia_valid & ATTR_CTIME) | ||
24370 | + iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime, | ||
24371 | + inode->i_sb->s_time_gran); | ||
24372 | + if (ia_valid & ATTR_MODE) { | ||
24373 | + umode_t mode = iattr->ia_mode; | ||
24374 | + | ||
24375 | + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) | ||
24376 | + mode &= ~S_ISGID; | ||
24377 | + iattrs->ia_mode = sd->s_mode = mode; | ||
24378 | + } | ||
24379 | } | ||
24380 | return error; | ||
24381 | } | ||
24382 | diff --git a/fs/udf/super.c b/fs/udf/super.c | ||
24383 | index 1e4543c..9d1b8c2 100644 | ||
24384 | --- a/fs/udf/super.c | ||
24385 | +++ b/fs/udf/super.c | ||
24386 | @@ -1078,39 +1078,21 @@ static int udf_fill_partdesc_info(struct super_block *sb, | ||
24387 | return 0; | ||
24388 | } | ||
24389 | |||
24390 | -static void udf_find_vat_block(struct super_block *sb, int p_index, | ||
24391 | - int type1_index, sector_t start_block) | ||
24392 | -{ | ||
24393 | - struct udf_sb_info *sbi = UDF_SB(sb); | ||
24394 | - struct udf_part_map *map = &sbi->s_partmaps[p_index]; | ||
24395 | - sector_t vat_block; | ||
24396 | - struct kernel_lb_addr ino; | ||
24397 | - | ||
24398 | - /* | ||
24399 | - * VAT file entry is in the last recorded block. Some broken disks have | ||
24400 | - * it a few blocks before so try a bit harder... | ||
24401 | - */ | ||
24402 | - ino.partitionReferenceNum = type1_index; | ||
24403 | - for (vat_block = start_block; | ||
24404 | - vat_block >= map->s_partition_root && | ||
24405 | - vat_block >= start_block - 3 && | ||
24406 | - !sbi->s_vat_inode; vat_block--) { | ||
24407 | - ino.logicalBlockNum = vat_block - map->s_partition_root; | ||
24408 | - sbi->s_vat_inode = udf_iget(sb, &ino); | ||
24409 | - } | ||
24410 | -} | ||
24411 | - | ||
24412 | static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) | ||
24413 | { | ||
24414 | struct udf_sb_info *sbi = UDF_SB(sb); | ||
24415 | struct udf_part_map *map = &sbi->s_partmaps[p_index]; | ||
24416 | + struct kernel_lb_addr ino; | ||
24417 | struct buffer_head *bh = NULL; | ||
24418 | struct udf_inode_info *vati; | ||
24419 | uint32_t pos; | ||
24420 | struct virtualAllocationTable20 *vat20; | ||
24421 | sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; | ||
24422 | |||
24423 | - udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block); | ||
24424 | + /* VAT file entry is in the last recorded block */ | ||
24425 | + ino.partitionReferenceNum = type1_index; | ||
24426 | + ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root; | ||
24427 | + sbi->s_vat_inode = udf_iget(sb, &ino); | ||
24428 | if (!sbi->s_vat_inode && | ||
24429 | sbi->s_last_block != blocks - 1) { | ||
24430 | printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the" | ||
24431 | @@ -1118,7 +1100,9 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index) | ||
24432 | "block of the device (%lu).\n", | ||
24433 | (unsigned long)sbi->s_last_block, | ||
24434 | (unsigned long)blocks - 1); | ||
24435 | - udf_find_vat_block(sb, p_index, type1_index, blocks - 1); | ||
24436 | + ino.partitionReferenceNum = type1_index; | ||
24437 | + ino.logicalBlockNum = blocks - 1 - map->s_partition_root; | ||
24438 | + sbi->s_vat_inode = udf_iget(sb, &ino); | ||
24439 | } | ||
24440 | if (!sbi->s_vat_inode) | ||
24441 | return 1; | ||
24442 | diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h | ||
24443 | index 0946997..9d7febd 100644 | ||
24444 | --- a/include/acpi/platform/aclinux.h | ||
24445 | +++ b/include/acpi/platform/aclinux.h | ||
24446 | @@ -152,7 +152,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) | ||
24447 | #include <linux/hardirq.h> | ||
24448 | #define ACPI_PREEMPTION_POINT() \ | ||
24449 | do { \ | ||
24450 | - if (!in_atomic_preempt_off() && !irqs_disabled()) \ | ||
24451 | + if (!in_atomic_preempt_off()) \ | ||
24452 | cond_resched(); \ | ||
24453 | } while (0) | ||
24454 | |||
24455 | diff --git a/include/drm/drmP.h b/include/drm/drmP.h | ||
24456 | index 7ad3faa..c8e64bb 100644 | ||
24457 | --- a/include/drm/drmP.h | ||
24458 | +++ b/include/drm/drmP.h | ||
24459 | @@ -1295,7 +1295,6 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc); | ||
24460 | extern void drm_handle_vblank(struct drm_device *dev, int crtc); | ||
24461 | extern int drm_vblank_get(struct drm_device *dev, int crtc); | ||
24462 | extern void drm_vblank_put(struct drm_device *dev, int crtc); | ||
24463 | -extern void drm_vblank_off(struct drm_device *dev, int crtc); | ||
24464 | extern void drm_vblank_cleanup(struct drm_device *dev); | ||
24465 | /* Modesetting support */ | ||
24466 | extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); | ||
24467 | @@ -1402,7 +1401,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev, | ||
24468 | struct drm_ati_pcigart_info * gart_info); | ||
24469 | |||
24470 | extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, | ||
24471 | - size_t align); | ||
24472 | + size_t align, dma_addr_t maxaddr); | ||
24473 | extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); | ||
24474 | extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); | ||
24475 | |||
24476 | diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h | ||
24477 | index 3933691..26641e9 100644 | ||
24478 | --- a/include/drm/drm_os_linux.h | ||
24479 | +++ b/include/drm/drm_os_linux.h | ||
24480 | @@ -123,5 +123,5 @@ do { \ | ||
24481 | remove_wait_queue(&(queue), &entry); \ | ||
24482 | } while (0) | ||
24483 | |||
24484 | -#define DRM_WAKEUP( queue ) wake_up( queue ) | ||
24485 | +#define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) | ||
24486 | #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) | ||
24487 | diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h | ||
24488 | index b199170..6983a7c 100644 | ||
24489 | --- a/include/drm/ttm/ttm_memory.h | ||
24490 | +++ b/include/drm/ttm/ttm_memory.h | ||
24491 | @@ -33,7 +33,6 @@ | ||
24492 | #include <linux/wait.h> | ||
24493 | #include <linux/errno.h> | ||
24494 | #include <linux/kobject.h> | ||
24495 | -#include <linux/mm.h> | ||
24496 | |||
24497 | /** | ||
24498 | * struct ttm_mem_shrink - callback to shrink TTM memory usage. | ||
24499 | diff --git a/include/linux/acpi.h b/include/linux/acpi.h | ||
24500 | index c010b94..dfcd920 100644 | ||
24501 | --- a/include/linux/acpi.h | ||
24502 | +++ b/include/linux/acpi.h | ||
24503 | @@ -253,13 +253,6 @@ void __init acpi_old_suspend_ordering(void); | ||
24504 | void __init acpi_s4_no_nvs(void); | ||
24505 | #endif /* CONFIG_PM_SLEEP */ | ||
24506 | |||
24507 | -struct acpi_osc_context { | ||
24508 | - char *uuid_str; /* uuid string */ | ||
24509 | - int rev; | ||
24510 | - struct acpi_buffer cap; /* arg2/arg3 */ | ||
24511 | - struct acpi_buffer ret; /* free by caller if success */ | ||
24512 | -}; | ||
24513 | - | ||
24514 | #define OSC_QUERY_TYPE 0 | ||
24515 | #define OSC_SUPPORT_TYPE 1 | ||
24516 | #define OSC_CONTROL_TYPE 2 | ||
24517 | @@ -272,15 +265,6 @@ struct acpi_osc_context { | ||
24518 | #define OSC_INVALID_REVISION_ERROR 8 | ||
24519 | #define OSC_CAPABILITIES_MASK_ERROR 16 | ||
24520 | |||
24521 | -acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | ||
24522 | - | ||
24523 | -/* platform-wide _OSC bits */ | ||
24524 | -#define OSC_SB_PAD_SUPPORT 1 | ||
24525 | -#define OSC_SB_PPC_OST_SUPPORT 2 | ||
24526 | -#define OSC_SB_PR3_SUPPORT 4 | ||
24527 | -#define OSC_SB_CPUHP_OST_SUPPORT 8 | ||
24528 | -#define OSC_SB_APEI_SUPPORT 16 | ||
24529 | - | ||
24530 | /* _OSC DW1 Definition (OS Support Fields) */ | ||
24531 | #define OSC_EXT_PCI_CONFIG_SUPPORT 1 | ||
24532 | #define OSC_ACTIVE_STATE_PWR_SUPPORT 2 | ||
24533 | diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h | ||
24534 | index 340f441..aece486 100644 | ||
24535 | --- a/include/linux/binfmts.h | ||
24536 | +++ b/include/linux/binfmts.h | ||
24537 | @@ -101,7 +101,6 @@ extern int prepare_binprm(struct linux_binprm *); | ||
24538 | extern int __must_check remove_arg_zero(struct linux_binprm *); | ||
24539 | extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); | ||
24540 | extern int flush_old_exec(struct linux_binprm * bprm); | ||
24541 | -extern void setup_new_exec(struct linux_binprm * bprm); | ||
24542 | |||
24543 | extern int suid_dumpable; | ||
24544 | #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ | ||
24545 | diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h | ||
24546 | index 912b8ff..221cecd 100644 | ||
24547 | --- a/include/linux/blkdev.h | ||
24548 | +++ b/include/linux/blkdev.h | ||
24549 | @@ -942,8 +942,6 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | ||
24550 | extern void blk_set_default_limits(struct queue_limits *lim); | ||
24551 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | ||
24552 | sector_t offset); | ||
24553 | -extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | ||
24554 | - sector_t offset); | ||
24555 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | ||
24556 | sector_t offset); | ||
24557 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | ||
24558 | @@ -1116,18 +1114,11 @@ static inline int queue_alignment_offset(struct request_queue *q) | ||
24559 | return q->limits.alignment_offset; | ||
24560 | } | ||
24561 | |||
24562 | -static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset) | ||
24563 | -{ | ||
24564 | - unsigned int granularity = max(lim->physical_block_size, lim->io_min); | ||
24565 | - | ||
24566 | - offset &= granularity - 1; | ||
24567 | - return (granularity + lim->alignment_offset - offset) & (granularity - 1); | ||
24568 | -} | ||
24569 | - | ||
24570 | static inline int queue_sector_alignment_offset(struct request_queue *q, | ||
24571 | sector_t sector) | ||
24572 | { | ||
24573 | - return queue_limit_alignment_offset(&q->limits, sector << 9); | ||
24574 | + return ((sector << 9) - q->limits.alignment_offset) | ||
24575 | + & (q->limits.io_min - 1); | ||
24576 | } | ||
24577 | |||
24578 | static inline int bdev_alignment_offset(struct block_device *bdev) | ||
24579 | diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h | ||
24580 | index 64b1a4c..83d2fbd 100644 | ||
24581 | --- a/include/linux/clocksource.h | ||
24582 | +++ b/include/linux/clocksource.h | ||
24583 | @@ -151,7 +151,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | ||
24584 | * subtraction of non 64 bit counters | ||
24585 | * @mult: cycle to nanosecond multiplier | ||
24586 | * @shift: cycle to nanosecond divisor (power of two) | ||
24587 | - * @max_idle_ns: max idle time permitted by the clocksource (nsecs) | ||
24588 | * @flags: flags describing special properties | ||
24589 | * @vread: vsyscall based read | ||
24590 | * @resume: resume function for the clocksource, if necessary | ||
24591 | @@ -169,7 +168,6 @@ struct clocksource { | ||
24592 | cycle_t mask; | ||
24593 | u32 mult; | ||
24594 | u32 shift; | ||
24595 | - u64 max_idle_ns; | ||
24596 | unsigned long flags; | ||
24597 | cycle_t (*vread)(void); | ||
24598 | void (*resume)(void); | ||
24599 | diff --git a/include/linux/completion.h b/include/linux/completion.h | ||
24600 | index 4a6b604..258bec1 100644 | ||
24601 | --- a/include/linux/completion.h | ||
24602 | +++ b/include/linux/completion.h | ||
24603 | @@ -88,6 +88,7 @@ extern bool completion_done(struct completion *x); | ||
24604 | |||
24605 | extern void complete(struct completion *); | ||
24606 | extern void complete_all(struct completion *); | ||
24607 | +extern void complete_n(struct completion *, int n); | ||
24608 | |||
24609 | /** | ||
24610 | * INIT_COMPLETION: - reinitialize a completion structure | ||
24611 | diff --git a/include/linux/connector.h b/include/linux/connector.h | ||
24612 | index ecb61c4..3a14615 100644 | ||
24613 | --- a/include/linux/connector.h | ||
24614 | +++ b/include/linux/connector.h | ||
24615 | @@ -24,6 +24,9 @@ | ||
24616 | |||
24617 | #include <linux/types.h> | ||
24618 | |||
24619 | +#define CN_IDX_CONNECTOR 0xffffffff | ||
24620 | +#define CN_VAL_CONNECTOR 0xffffffff | ||
24621 | + | ||
24622 | /* | ||
24623 | * Process Events connector unique ids -- used for message routing | ||
24624 | */ | ||
24625 | @@ -70,6 +73,30 @@ struct cn_msg { | ||
24626 | __u8 data[0]; | ||
24627 | }; | ||
24628 | |||
24629 | +/* | ||
24630 | + * Notify structure - requests notification about | ||
24631 | + * registering/unregistering idx/val in range [first, first+range]. | ||
24632 | + */ | ||
24633 | +struct cn_notify_req { | ||
24634 | + __u32 first; | ||
24635 | + __u32 range; | ||
24636 | +}; | ||
24637 | + | ||
24638 | +/* | ||
24639 | + * Main notification control message | ||
24640 | + * *_notify_num - number of appropriate cn_notify_req structures after | ||
24641 | + * this struct. | ||
24642 | + * group - notification receiver's idx. | ||
24643 | + * len - total length of the attached data. | ||
24644 | + */ | ||
24645 | +struct cn_ctl_msg { | ||
24646 | + __u32 idx_notify_num; | ||
24647 | + __u32 val_notify_num; | ||
24648 | + __u32 group; | ||
24649 | + __u32 len; | ||
24650 | + __u8 data[0]; | ||
24651 | +}; | ||
24652 | + | ||
24653 | #ifdef __KERNEL__ | ||
24654 | |||
24655 | #include <asm/atomic.h> | ||
24656 | @@ -122,6 +149,11 @@ struct cn_callback_entry { | ||
24657 | u32 seq, group; | ||
24658 | }; | ||
24659 | |||
24660 | +struct cn_ctl_entry { | ||
24661 | + struct list_head notify_entry; | ||
24662 | + struct cn_ctl_msg *msg; | ||
24663 | +}; | ||
24664 | + | ||
24665 | struct cn_dev { | ||
24666 | struct cb_id id; | ||
24667 | |||
24668 | diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h | ||
24669 | index d77b547..789cf5f 100644 | ||
24670 | --- a/include/linux/cpumask.h | ||
24671 | +++ b/include/linux/cpumask.h | ||
24672 | @@ -84,7 +84,6 @@ extern const struct cpumask *const cpu_active_mask; | ||
24673 | #define num_online_cpus() cpumask_weight(cpu_online_mask) | ||
24674 | #define num_possible_cpus() cpumask_weight(cpu_possible_mask) | ||
24675 | #define num_present_cpus() cpumask_weight(cpu_present_mask) | ||
24676 | -#define num_active_cpus() cpumask_weight(cpu_active_mask) | ||
24677 | #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) | ||
24678 | #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) | ||
24679 | #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) | ||
24680 | @@ -93,7 +92,6 @@ extern const struct cpumask *const cpu_active_mask; | ||
24681 | #define num_online_cpus() 1 | ||
24682 | #define num_possible_cpus() 1 | ||
24683 | #define num_present_cpus() 1 | ||
24684 | -#define num_active_cpus() 1 | ||
24685 | #define cpu_online(cpu) ((cpu) == 0) | ||
24686 | #define cpu_possible(cpu) ((cpu) == 0) | ||
24687 | #define cpu_present(cpu) ((cpu) == 0) | ||
24688 | diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h | ||
24689 | index 9a33c5f..90d1c21 100644 | ||
24690 | --- a/include/linux/enclosure.h | ||
24691 | +++ b/include/linux/enclosure.h | ||
24692 | @@ -42,8 +42,6 @@ enum enclosure_status { | ||
24693 | ENCLOSURE_STATUS_NOT_INSTALLED, | ||
24694 | ENCLOSURE_STATUS_UNKNOWN, | ||
24695 | ENCLOSURE_STATUS_UNAVAILABLE, | ||
24696 | - /* last element for counting purposes */ | ||
24697 | - ENCLOSURE_STATUS_MAX | ||
24698 | }; | ||
24699 | |||
24700 | /* SFF-8485 activity light settings */ | ||
24701 | diff --git a/include/linux/fs.h b/include/linux/fs.h | ||
24702 | index 98ea200..5c7e0ff 100644 | ||
24703 | --- a/include/linux/fs.h | ||
24704 | +++ b/include/linux/fs.h | ||
24705 | @@ -15,8 +15,8 @@ | ||
24706 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute | ||
24707 | * upper limit on files-per-process. | ||
24708 | * | ||
24709 | - * Some programs (notably those using select()) may have to be | ||
24710 | - * recompiled to take full advantage of the new limits.. | ||
24711 | + * Some programs (notably those using select()) may have to be | ||
24712 | + * recompiled to take full advantage of the new limits.. | ||
24713 | */ | ||
24714 | |||
24715 | /* Fixed constants first: */ | ||
24716 | @@ -169,7 +169,7 @@ struct inodes_stat_t { | ||
24717 | #define SEL_EX 4 | ||
24718 | |||
24719 | /* public flags for file_system_type */ | ||
24720 | -#define FS_REQUIRES_DEV 1 | ||
24721 | +#define FS_REQUIRES_DEV 1 | ||
24722 | #define FS_BINARY_MOUNTDATA 2 | ||
24723 | #define FS_HAS_SUBTYPE 4 | ||
24724 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ | ||
24725 | @@ -466,7 +466,7 @@ struct iattr { | ||
24726 | */ | ||
24727 | #include <linux/quota.h> | ||
24728 | |||
24729 | -/** | ||
24730 | +/** | ||
24731 | * enum positive_aop_returns - aop return codes with specific semantics | ||
24732 | * | ||
24733 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has | ||
24734 | @@ -476,7 +476,7 @@ struct iattr { | ||
24735 | * be a candidate for writeback again in the near | ||
24736 | * future. Other callers must be careful to unlock | ||
24737 | * the page if they get this return. Returned by | ||
24738 | - * writepage(); | ||
24739 | + * writepage(); | ||
24740 | * | ||
24741 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has | ||
24742 | * unlocked it and the page might have been truncated. | ||
24743 | @@ -715,6 +715,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) | ||
24744 | |||
24745 | struct posix_acl; | ||
24746 | #define ACL_NOT_CACHED ((void *)(-1)) | ||
24747 | +struct inode_obj_id_table; | ||
24748 | |||
24749 | struct inode { | ||
24750 | struct hlist_node i_hash; | ||
24751 | @@ -783,6 +784,8 @@ struct inode { | ||
24752 | struct posix_acl *i_acl; | ||
24753 | struct posix_acl *i_default_acl; | ||
24754 | #endif | ||
24755 | + struct list_head i_obj_list; | ||
24756 | + struct mutex i_obj_mutex; | ||
24757 | void *i_private; /* fs or device private pointer */ | ||
24758 | }; | ||
24759 | |||
24760 | @@ -995,10 +998,10 @@ static inline int file_check_writeable(struct file *filp) | ||
24761 | |||
24762 | #define MAX_NON_LFS ((1UL<<31) - 1) | ||
24763 | |||
24764 | -/* Page cache limit. The filesystems should put that into their s_maxbytes | ||
24765 | - limits, otherwise bad things can happen in VM. */ | ||
24766 | +/* Page cache limit. The filesystems should put that into their s_maxbytes | ||
24767 | + limits, otherwise bad things can happen in VM. */ | ||
24768 | #if BITS_PER_LONG==32 | ||
24769 | -#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | ||
24770 | +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | ||
24771 | #elif BITS_PER_LONG==64 | ||
24772 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL | ||
24773 | #endif | ||
24774 | @@ -2139,7 +2142,7 @@ extern int may_open(struct path *, int, int); | ||
24775 | |||
24776 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); | ||
24777 | extern struct file * open_exec(const char *); | ||
24778 | - | ||
24779 | + | ||
24780 | /* fs/dcache.c -- generic fs support functions */ | ||
24781 | extern int is_subdir(struct dentry *, struct dentry *); | ||
24782 | extern ino_t find_inode_number(struct dentry *, struct qstr *); | ||
24783 | @@ -2314,7 +2317,6 @@ extern const struct inode_operations page_symlink_inode_operations; | ||
24784 | extern int generic_readlink(struct dentry *, char __user *, int); | ||
24785 | extern void generic_fillattr(struct inode *, struct kstat *); | ||
24786 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | ||
24787 | -void __inode_add_bytes(struct inode *inode, loff_t bytes); | ||
24788 | void inode_add_bytes(struct inode *inode, loff_t bytes); | ||
24789 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | ||
24790 | loff_t inode_get_bytes(struct inode *inode); | ||
24791 | diff --git a/include/linux/hid.h b/include/linux/hid.h | ||
24792 | index 8709365..10f6284 100644 | ||
24793 | --- a/include/linux/hid.h | ||
24794 | +++ b/include/linux/hid.h | ||
24795 | @@ -312,7 +312,6 @@ struct hid_item { | ||
24796 | #define HID_QUIRK_MULTI_INPUT 0x00000040 | ||
24797 | #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 | ||
24798 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 | ||
24799 | -#define HID_QUIRK_NO_INIT_REPORTS 0x20000000 | ||
24800 | |||
24801 | /* | ||
24802 | * This is the global environment of the parser. This information is | ||
24803 | diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h | ||
24804 | index 9bace4b..b984b94 100644 | ||
24805 | --- a/include/linux/hrtimer.h | ||
24806 | +++ b/include/linux/hrtimer.h | ||
24807 | @@ -166,6 +166,7 @@ struct hrtimer_clock_base { | ||
24808 | * event devices whether high resolution mode can be | ||
24809 | * activated. | ||
24810 | * @nr_events: Total number of timer interrupt events | ||
24811 | + * @to_pull: LITMUS^RT list of timers to be pulled on this cpu | ||
24812 | */ | ||
24813 | struct hrtimer_cpu_base { | ||
24814 | spinlock_t lock; | ||
24815 | @@ -175,6 +176,26 @@ struct hrtimer_cpu_base { | ||
24816 | int hres_active; | ||
24817 | unsigned long nr_events; | ||
24818 | #endif | ||
24819 | + struct list_head to_pull; | ||
24820 | +}; | ||
24821 | + | ||
24822 | +#define HRTIMER_START_ON_INACTIVE 0 | ||
24823 | +#define HRTIMER_START_ON_QUEUED 1 | ||
24824 | + | ||
24825 | +/* | ||
24826 | + * struct hrtimer_start_on_info - save timer info on remote cpu | ||
24827 | + * @list: list of hrtimer_start_on_info on remote cpu (to_pull) | ||
24828 | + * @timer: timer to be triggered on remote cpu | ||
24829 | + * @time: time event | ||
24830 | + * @mode: timer mode | ||
24831 | + * @state: activity flag | ||
24832 | + */ | ||
24833 | +struct hrtimer_start_on_info { | ||
24834 | + struct list_head list; | ||
24835 | + struct hrtimer *timer; | ||
24836 | + ktime_t time; | ||
24837 | + enum hrtimer_mode mode; | ||
24838 | + atomic_t state; | ||
24839 | }; | ||
24840 | |||
24841 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | ||
24842 | @@ -343,6 +364,10 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | ||
24843 | unsigned long delta_ns, | ||
24844 | const enum hrtimer_mode mode, int wakeup); | ||
24845 | |||
24846 | +extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, | ||
24847 | + struct hrtimer *timer, ktime_t time, | ||
24848 | + const enum hrtimer_mode mode); | ||
24849 | + | ||
24850 | extern int hrtimer_cancel(struct hrtimer *timer); | ||
24851 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | ||
24852 | |||
24853 | @@ -446,7 +471,7 @@ extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf, | ||
24854 | |||
24855 | static inline void timer_stats_account_hrtimer(struct hrtimer *timer) | ||
24856 | { | ||
24857 | - if (likely(!timer_stats_active)) | ||
24858 | + if (likely(!timer->start_site)) | ||
24859 | return; | ||
24860 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | ||
24861 | timer->function, timer->start_comm, 0); | ||
24862 | @@ -457,6 +482,8 @@ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, | ||
24863 | |||
24864 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | ||
24865 | { | ||
24866 | + if (likely(!timer_stats_active)) | ||
24867 | + return; | ||
24868 | __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0)); | ||
24869 | } | ||
24870 | |||
24871 | diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h | ||
24872 | index 9cd0bcf..ad27c7d 100644 | ||
24873 | --- a/include/linux/inetdevice.h | ||
24874 | +++ b/include/linux/inetdevice.h | ||
24875 | @@ -83,7 +83,6 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | ||
24876 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) | ||
24877 | #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) | ||
24878 | #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) | ||
24879 | -#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) | ||
24880 | #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ | ||
24881 | ACCEPT_SOURCE_ROUTE) | ||
24882 | #define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY) | ||
24883 | diff --git a/include/linux/kvm.h b/include/linux/kvm.h | ||
24884 | index 0eadd71..f8f8900 100644 | ||
24885 | --- a/include/linux/kvm.h | ||
24886 | +++ b/include/linux/kvm.h | ||
24887 | @@ -116,11 +116,6 @@ struct kvm_run { | ||
24888 | __u64 cr8; | ||
24889 | __u64 apic_base; | ||
24890 | |||
24891 | -#ifdef __KVM_S390 | ||
24892 | - /* the processor status word for s390 */ | ||
24893 | - __u64 psw_mask; /* psw upper half */ | ||
24894 | - __u64 psw_addr; /* psw lower half */ | ||
24895 | -#endif | ||
24896 | union { | ||
24897 | /* KVM_EXIT_UNKNOWN */ | ||
24898 | struct { | ||
24899 | @@ -172,6 +167,8 @@ struct kvm_run { | ||
24900 | /* KVM_EXIT_S390_SIEIC */ | ||
24901 | struct { | ||
24902 | __u8 icptcode; | ||
24903 | + __u64 mask; /* psw upper half */ | ||
24904 | + __u64 addr; /* psw lower half */ | ||
24905 | __u16 ipa; | ||
24906 | __u32 ipb; | ||
24907 | } s390_sieic; | ||
24908 | @@ -439,7 +436,6 @@ struct kvm_ioeventfd { | ||
24909 | #endif | ||
24910 | #define KVM_CAP_IOEVENTFD 36 | ||
24911 | #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37 | ||
24912 | -#define KVM_CAP_ADJUST_CLOCK 39 | ||
24913 | |||
24914 | #ifdef KVM_CAP_IRQ_ROUTING | ||
24915 | |||
24916 | @@ -478,7 +474,6 @@ struct kvm_irq_routing { | ||
24917 | }; | ||
24918 | |||
24919 | #endif | ||
24920 | -#define KVM_CAP_S390_PSW 42 | ||
24921 | |||
24922 | #ifdef KVM_CAP_MCE | ||
24923 | /* x86 MCE */ | ||
24924 | @@ -502,12 +497,6 @@ struct kvm_irqfd { | ||
24925 | __u8 pad[20]; | ||
24926 | }; | ||
24927 | |||
24928 | -struct kvm_clock_data { | ||
24929 | - __u64 clock; | ||
24930 | - __u32 flags; | ||
24931 | - __u32 pad[9]; | ||
24932 | -}; | ||
24933 | - | ||
24934 | /* | ||
24935 | * ioctls for VM fds | ||
24936 | */ | ||
24937 | @@ -557,8 +546,6 @@ struct kvm_clock_data { | ||
24938 | #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) | ||
24939 | #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) | ||
24940 | #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd) | ||
24941 | -#define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data) | ||
24942 | -#define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data) | ||
24943 | |||
24944 | /* | ||
24945 | * ioctls for vcpu fds | ||
24946 | diff --git a/include/linux/libata.h b/include/linux/libata.h | ||
24947 | index b0f6d97..8769864 100644 | ||
24948 | --- a/include/linux/libata.h | ||
24949 | +++ b/include/linux/libata.h | ||
24950 | @@ -354,9 +354,6 @@ enum { | ||
24951 | /* max tries if error condition is still set after ->error_handler */ | ||
24952 | ATA_EH_MAX_TRIES = 5, | ||
24953 | |||
24954 | - /* sometimes resuming a link requires several retries */ | ||
24955 | - ATA_LINK_RESUME_TRIES = 5, | ||
24956 | - | ||
24957 | /* how hard are we gonna try to probe/recover devices */ | ||
24958 | ATA_PROBE_MAX_TRIES = 3, | ||
24959 | ATA_EH_DEV_TRIES = 3, | ||
24960 | diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h | ||
24961 | index e786fe9..be3264e 100644 | ||
24962 | --- a/include/linux/mfd/wm8350/pmic.h | ||
24963 | +++ b/include/linux/mfd/wm8350/pmic.h | ||
24964 | @@ -666,20 +666,20 @@ | ||
24965 | #define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) | ||
24966 | #define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) | ||
24967 | #define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) | ||
24968 | -#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0) | ||
24969 | -#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0) | ||
24970 | -#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0) | ||
24971 | -#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0) | ||
24972 | -#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0) | ||
24973 | -#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0) | ||
24974 | -#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0) | ||
24975 | -#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4) | ||
24976 | -#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4) | ||
24977 | -#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4) | ||
24978 | -#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4) | ||
24979 | -#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4) | ||
24980 | -#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4) | ||
24981 | -#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4) | ||
24982 | +#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4) | ||
24983 | +#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4) | ||
24984 | +#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4) | ||
24985 | +#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4) | ||
24986 | +#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4) | ||
24987 | +#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4) | ||
24988 | +#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4) | ||
24989 | +#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0) | ||
24990 | +#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0) | ||
24991 | +#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0) | ||
24992 | +#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0) | ||
24993 | +#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0) | ||
24994 | +#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0) | ||
24995 | +#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0) | ||
24996 | |||
24997 | /* | ||
24998 | * Regulator Interrupts. | ||
24999 | diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h | ||
25000 | index 3c62ed4..ed5d750 100644 | ||
25001 | --- a/include/linux/pagemap.h | ||
25002 | +++ b/include/linux/pagemap.h | ||
25003 | @@ -253,8 +253,6 @@ extern struct page * read_cache_page_async(struct address_space *mapping, | ||
25004 | extern struct page * read_cache_page(struct address_space *mapping, | ||
25005 | pgoff_t index, filler_t *filler, | ||
25006 | void *data); | ||
25007 | -extern struct page * read_cache_page_gfp(struct address_space *mapping, | ||
25008 | - pgoff_t index, gfp_t gfp_mask); | ||
25009 | extern int read_cache_pages(struct address_space *mapping, | ||
25010 | struct list_head *pages, filler_t *filler, void *data); | ||
25011 | |||
25012 | diff --git a/include/linux/pci.h b/include/linux/pci.h | ||
25013 | index 2547515..f5c7cd3 100644 | ||
25014 | --- a/include/linux/pci.h | ||
25015 | +++ b/include/linux/pci.h | ||
25016 | @@ -564,9 +564,6 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t, | ||
25017 | resource_size_t); | ||
25018 | void pcibios_update_irq(struct pci_dev *, int irq); | ||
25019 | |||
25020 | -/* Weak but can be overriden by arch */ | ||
25021 | -void pci_fixup_cardbus(struct pci_bus *); | ||
25022 | - | ||
25023 | /* Generic PCI functions used internally */ | ||
25024 | |||
25025 | extern struct pci_bus *pci_find_bus(int domain, int busnr); | ||
25026 | diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h | ||
25027 | index 1b7f2a7..84cf1f3 100644 | ||
25028 | --- a/include/linux/pci_ids.h | ||
25029 | +++ b/include/linux/pci_ids.h | ||
25030 | @@ -2290,20 +2290,6 @@ | ||
25031 | #define PCI_DEVICE_ID_MPC8536 0x0051 | ||
25032 | #define PCI_DEVICE_ID_P2020E 0x0070 | ||
25033 | #define PCI_DEVICE_ID_P2020 0x0071 | ||
25034 | -#define PCI_DEVICE_ID_P2010E 0x0078 | ||
25035 | -#define PCI_DEVICE_ID_P2010 0x0079 | ||
25036 | -#define PCI_DEVICE_ID_P1020E 0x0100 | ||
25037 | -#define PCI_DEVICE_ID_P1020 0x0101 | ||
25038 | -#define PCI_DEVICE_ID_P1011E 0x0108 | ||
25039 | -#define PCI_DEVICE_ID_P1011 0x0109 | ||
25040 | -#define PCI_DEVICE_ID_P1022E 0x0110 | ||
25041 | -#define PCI_DEVICE_ID_P1022 0x0111 | ||
25042 | -#define PCI_DEVICE_ID_P1013E 0x0118 | ||
25043 | -#define PCI_DEVICE_ID_P1013 0x0119 | ||
25044 | -#define PCI_DEVICE_ID_P4080E 0x0400 | ||
25045 | -#define PCI_DEVICE_ID_P4080 0x0401 | ||
25046 | -#define PCI_DEVICE_ID_P4040E 0x0408 | ||
25047 | -#define PCI_DEVICE_ID_P4040 0x0409 | ||
25048 | #define PCI_DEVICE_ID_MPC8641 0x7010 | ||
25049 | #define PCI_DEVICE_ID_MPC8641D 0x7011 | ||
25050 | #define PCI_DEVICE_ID_MPC8610 0x7018 | ||
25051 | diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h | ||
25052 | index 81c9689..9e70126 100644 | ||
25053 | --- a/include/linux/perf_event.h | ||
25054 | +++ b/include/linux/perf_event.h | ||
25055 | @@ -219,7 +219,7 @@ struct perf_event_attr { | ||
25056 | #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) | ||
25057 | #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) | ||
25058 | #define PERF_EVENT_IOC_RESET _IO ('$', 3) | ||
25059 | -#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64) | ||
25060 | +#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) | ||
25061 | #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) | ||
25062 | |||
25063 | enum perf_event_ioc_flags { | ||
25064 | diff --git a/include/linux/quota.h b/include/linux/quota.h | ||
25065 | index 8fd8efc..78c4889 100644 | ||
25066 | --- a/include/linux/quota.h | ||
25067 | +++ b/include/linux/quota.h | ||
25068 | @@ -313,9 +313,8 @@ struct dquot_operations { | ||
25069 | int (*claim_space) (struct inode *, qsize_t); | ||
25070 | /* release rsved quota for delayed alloc */ | ||
25071 | void (*release_rsv) (struct inode *, qsize_t); | ||
25072 | - /* get reserved quota for delayed alloc, value returned is managed by | ||
25073 | - * quota code only */ | ||
25074 | - qsize_t *(*get_reserved_space) (struct inode *); | ||
25075 | + /* get reserved quota for delayed alloc */ | ||
25076 | + qsize_t (*get_reserved_space) (struct inode *); | ||
25077 | }; | ||
25078 | |||
25079 | /* Operations handling requests from userspace */ | ||
25080 | diff --git a/include/linux/sched.h b/include/linux/sched.h | ||
25081 | index e48311e..7248141 100644 | ||
25082 | --- a/include/linux/sched.h | ||
25083 | +++ b/include/linux/sched.h | ||
25084 | @@ -38,6 +38,7 @@ | ||
25085 | #define SCHED_BATCH 3 | ||
25086 | /* SCHED_ISO: reserved but not implemented yet */ | ||
25087 | #define SCHED_IDLE 5 | ||
25088 | +#define SCHED_LITMUS 6 | ||
25089 | /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ | ||
25090 | #define SCHED_RESET_ON_FORK 0x40000000 | ||
25091 | |||
25092 | @@ -94,6 +95,8 @@ struct sched_param { | ||
25093 | |||
25094 | #include <asm/processor.h> | ||
25095 | |||
25096 | +#include <litmus/rt_param.h> | ||
25097 | + | ||
25098 | struct exec_domain; | ||
25099 | struct futex_pi_state; | ||
25100 | struct robust_list_head; | ||
25101 | @@ -1211,6 +1214,7 @@ struct sched_rt_entity { | ||
25102 | }; | ||
25103 | |||
25104 | struct rcu_node; | ||
25105 | +struct od_table_entry; | ||
25106 | |||
25107 | struct task_struct { | ||
25108 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | ||
25109 | @@ -1293,9 +1297,9 @@ struct task_struct { | ||
25110 | unsigned long stack_canary; | ||
25111 | #endif | ||
25112 | |||
25113 | - /* | ||
25114 | + /* | ||
25115 | * pointers to (original) parent process, youngest child, younger sibling, | ||
25116 | - * older sibling, respectively. (p->father can be replaced with | ||
25117 | + * older sibling, respectively. (p->father can be replaced with | ||
25118 | * p->real_parent->pid) | ||
25119 | */ | ||
25120 | struct task_struct *real_parent; /* real parent process */ | ||
25121 | @@ -1354,7 +1358,7 @@ struct task_struct { | ||
25122 | char comm[TASK_COMM_LEN]; /* executable name excluding path | ||
25123 | - access with [gs]et_task_comm (which lock | ||
25124 | it with task_lock()) | ||
25125 | - - initialized normally by setup_new_exec */ | ||
25126 | + - initialized normally by flush_old_exec */ | ||
25127 | /* file system info */ | ||
25128 | int link_count, total_link_count; | ||
25129 | #ifdef CONFIG_SYSVIPC | ||
25130 | @@ -1505,6 +1509,13 @@ struct task_struct { | ||
25131 | int make_it_fail; | ||
25132 | #endif | ||
25133 | struct prop_local_single dirties; | ||
25134 | + | ||
25135 | + /* LITMUS RT parameters and state */ | ||
25136 | + struct rt_param rt_param; | ||
25137 | + | ||
25138 | + /* references to PI semaphores, etc. */ | ||
25139 | + struct od_table_entry *od_table; | ||
25140 | + | ||
25141 | #ifdef CONFIG_LATENCYTOP | ||
25142 | int latency_record_count; | ||
25143 | struct latency_record latency_record[LT_SAVECOUNT]; | ||
25144 | @@ -2044,7 +2055,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s | ||
25145 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | ||
25146 | |||
25147 | return ret; | ||
25148 | -} | ||
25149 | +} | ||
25150 | |||
25151 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, | ||
25152 | sigset_t *mask); | ||
25153 | @@ -2086,18 +2097,11 @@ static inline int is_si_special(const struct siginfo *info) | ||
25154 | return info <= SEND_SIG_FORCED; | ||
25155 | } | ||
25156 | |||
25157 | -/* | ||
25158 | - * True if we are on the alternate signal stack. | ||
25159 | - */ | ||
25160 | +/* True if we are on the alternate signal stack. */ | ||
25161 | + | ||
25162 | static inline int on_sig_stack(unsigned long sp) | ||
25163 | { | ||
25164 | -#ifdef CONFIG_STACK_GROWSUP | ||
25165 | - return sp >= current->sas_ss_sp && | ||
25166 | - sp - current->sas_ss_sp < current->sas_ss_size; | ||
25167 | -#else | ||
25168 | - return sp > current->sas_ss_sp && | ||
25169 | - sp - current->sas_ss_sp <= current->sas_ss_size; | ||
25170 | -#endif | ||
25171 | + return (sp - current->sas_ss_sp < current->sas_ss_size); | ||
25172 | } | ||
25173 | |||
25174 | static inline int sas_ss_flags(unsigned long sp) | ||
25175 | @@ -2583,28 +2587,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | ||
25176 | |||
25177 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" | ||
25178 | |||
25179 | -static inline unsigned long task_rlimit(const struct task_struct *tsk, | ||
25180 | - unsigned int limit) | ||
25181 | -{ | ||
25182 | - return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); | ||
25183 | -} | ||
25184 | - | ||
25185 | -static inline unsigned long task_rlimit_max(const struct task_struct *tsk, | ||
25186 | - unsigned int limit) | ||
25187 | -{ | ||
25188 | - return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); | ||
25189 | -} | ||
25190 | - | ||
25191 | -static inline unsigned long rlimit(unsigned int limit) | ||
25192 | -{ | ||
25193 | - return task_rlimit(current, limit); | ||
25194 | -} | ||
25195 | - | ||
25196 | -static inline unsigned long rlimit_max(unsigned int limit) | ||
25197 | -{ | ||
25198 | - return task_rlimit_max(current, limit); | ||
25199 | -} | ||
25200 | - | ||
25201 | #endif /* __KERNEL__ */ | ||
25202 | |||
25203 | #endif | ||
25204 | diff --git a/include/linux/security.h b/include/linux/security.h | ||
25205 | index d40d23f..239e40d 100644 | ||
25206 | --- a/include/linux/security.h | ||
25207 | +++ b/include/linux/security.h | ||
25208 | @@ -95,13 +95,8 @@ struct seq_file; | ||
25209 | extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); | ||
25210 | extern int cap_netlink_recv(struct sk_buff *skb, int cap); | ||
25211 | |||
25212 | -#ifdef CONFIG_MMU | ||
25213 | extern unsigned long mmap_min_addr; | ||
25214 | extern unsigned long dac_mmap_min_addr; | ||
25215 | -#else | ||
25216 | -#define dac_mmap_min_addr 0UL | ||
25217 | -#endif | ||
25218 | - | ||
25219 | /* | ||
25220 | * Values used in the task_security_ops calls | ||
25221 | */ | ||
25222 | @@ -126,7 +121,6 @@ struct request_sock; | ||
25223 | #define LSM_UNSAFE_PTRACE 2 | ||
25224 | #define LSM_UNSAFE_PTRACE_CAP 4 | ||
25225 | |||
25226 | -#ifdef CONFIG_MMU | ||
25227 | /* | ||
25228 | * If a hint addr is less than mmap_min_addr change hint to be as | ||
25229 | * low as possible but still greater than mmap_min_addr | ||
25230 | @@ -141,7 +135,6 @@ static inline unsigned long round_hint_to_min(unsigned long hint) | ||
25231 | } | ||
25232 | extern int mmap_min_addr_handler(struct ctl_table *table, int write, | ||
25233 | void __user *buffer, size_t *lenp, loff_t *ppos); | ||
25234 | -#endif | ||
25235 | |||
25236 | #ifdef CONFIG_SECURITY | ||
25237 | |||
25238 | diff --git a/include/linux/smp.h b/include/linux/smp.h | ||
25239 | index 39c64ba..76bb3e4 100644 | ||
25240 | --- a/include/linux/smp.h | ||
25241 | +++ b/include/linux/smp.h | ||
25242 | @@ -77,6 +77,11 @@ void __smp_call_function_single(int cpuid, struct call_single_data *data, | ||
25243 | int wait); | ||
25244 | |||
25245 | /* | ||
25246 | + * sends a 'pull timer' event to a remote CPU | ||
25247 | + */ | ||
25248 | +extern void smp_send_pull_timers(int cpu); | ||
25249 | + | ||
25250 | +/* | ||
25251 | * Generic and arch helpers | ||
25252 | */ | ||
25253 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | ||
25254 | diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h | ||
25255 | index 93515c6..a990ace 100644 | ||
25256 | --- a/include/linux/syscalls.h | ||
25257 | +++ b/include/linux/syscalls.h | ||
25258 | @@ -879,8 +879,4 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | ||
25259 | asmlinkage long sys_perf_event_open( | ||
25260 | struct perf_event_attr __user *attr_uptr, | ||
25261 | pid_t pid, int cpu, int group_fd, unsigned long flags); | ||
25262 | - | ||
25263 | -asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len, | ||
25264 | - unsigned long prot, unsigned long flags, | ||
25265 | - unsigned long fd, unsigned long pgoff); | ||
25266 | #endif | ||
25267 | diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h | ||
25268 | index 0eb6942..1e4743e 100644 | ||
25269 | --- a/include/linux/sysctl.h | ||
25270 | +++ b/include/linux/sysctl.h | ||
25271 | @@ -490,7 +490,6 @@ enum | ||
25272 | NET_IPV4_CONF_PROMOTE_SECONDARIES=20, | ||
25273 | NET_IPV4_CONF_ARP_ACCEPT=21, | ||
25274 | NET_IPV4_CONF_ARP_NOTIFY=22, | ||
25275 | - NET_IPV4_CONF_SRC_VMARK=24, | ||
25276 | __NET_IPV4_CONF_MAX | ||
25277 | }; | ||
25278 | |||
25279 | diff --git a/include/linux/tick.h b/include/linux/tick.h | ||
25280 | index 0482229..4f9ba05 100644 | ||
25281 | --- a/include/linux/tick.h | ||
25282 | +++ b/include/linux/tick.h | ||
25283 | @@ -71,6 +71,11 @@ extern int tick_is_oneshot_available(void); | ||
25284 | extern struct tick_device *tick_get_device(int cpu); | ||
25285 | |||
25286 | # ifdef CONFIG_HIGH_RES_TIMERS | ||
25287 | +/* LITMUS^RT tick alignment */ | ||
25288 | +#define LINUX_DEFAULT_TICKS 0 | ||
25289 | +#define LITMUS_ALIGNED_TICKS 1 | ||
25290 | +#define LITMUS_STAGGERED_TICKS 2 | ||
25291 | + | ||
25292 | extern int tick_init_highres(void); | ||
25293 | extern int tick_program_event(ktime_t expires, int force); | ||
25294 | extern void tick_setup_sched_timer(void); | ||
25295 | diff --git a/include/linux/time.h b/include/linux/time.h | ||
25296 | index 6e026e4..fe04e5e 100644 | ||
25297 | --- a/include/linux/time.h | ||
25298 | +++ b/include/linux/time.h | ||
25299 | @@ -148,7 +148,6 @@ extern void monotonic_to_bootbased(struct timespec *ts); | ||
25300 | |||
25301 | extern struct timespec timespec_trunc(struct timespec t, unsigned gran); | ||
25302 | extern int timekeeping_valid_for_hres(void); | ||
25303 | -extern u64 timekeeping_max_deferment(void); | ||
25304 | extern void update_wall_time(void); | ||
25305 | extern void update_xtime_cache(u64 nsec); | ||
25306 | extern void timekeeping_leap_insert(int leapsecond); | ||
25307 | diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h | ||
25308 | index a4b947e..3d15fb9 100644 | ||
25309 | --- a/include/linux/usb_usual.h | ||
25310 | +++ b/include/linux/usb_usual.h | ||
25311 | @@ -56,9 +56,7 @@ | ||
25312 | US_FLAG(SANE_SENSE, 0x00008000) \ | ||
25313 | /* Sane Sense (> 18 bytes) */ \ | ||
25314 | US_FLAG(CAPACITY_OK, 0x00010000) \ | ||
25315 | - /* READ CAPACITY response is correct */ \ | ||
25316 | - US_FLAG(BAD_SENSE, 0x00020000) \ | ||
25317 | - /* Bad Sense (never more than 18 bytes) */ | ||
25318 | + /* READ CAPACITY response is correct */ | ||
25319 | |||
25320 | #define US_FLAG(name, value) US_FL_##name = value , | ||
25321 | enum { US_DO_ALL_FLAGS }; | ||
25322 | diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h | ||
25323 | index 3c123c3..227c2a5 100644 | ||
25324 | --- a/include/linux/vmalloc.h | ||
25325 | +++ b/include/linux/vmalloc.h | ||
25326 | @@ -115,11 +115,9 @@ extern rwlock_t vmlist_lock; | ||
25327 | extern struct vm_struct *vmlist; | ||
25328 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | ||
25329 | |||
25330 | -#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA | ||
25331 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, | ||
25332 | const size_t *sizes, int nr_vms, | ||
25333 | size_t align, gfp_t gfp_mask); | ||
25334 | -#endif | ||
25335 | |||
25336 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); | ||
25337 | |||
25338 | diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h | ||
25339 | new file mode 100644 | ||
25340 | index 0000000..cf4864a | ||
25341 | --- /dev/null | ||
25342 | +++ b/include/litmus/bheap.h | ||
25343 | @@ -0,0 +1,77 @@ | ||
25344 | +/* bheap.h -- Binomial Heaps | ||
25345 | + * | ||
25346 | + * (c) 2008, 2009 Bjoern Brandenburg | ||
25347 | + */ | ||
25348 | + | ||
25349 | +#ifndef BHEAP_H | ||
25350 | +#define BHEAP_H | ||
25351 | + | ||
25352 | +#define NOT_IN_HEAP UINT_MAX | ||
25353 | + | ||
25354 | +struct bheap_node { | ||
25355 | + struct bheap_node* parent; | ||
25356 | + struct bheap_node* next; | ||
25357 | + struct bheap_node* child; | ||
25358 | + | ||
25359 | + unsigned int degree; | ||
25360 | + void* value; | ||
25361 | + struct bheap_node** ref; | ||
25362 | +}; | ||
25363 | + | ||
25364 | +struct bheap { | ||
25365 | + struct bheap_node* head; | ||
25366 | + /* We cache the minimum of the heap. | ||
25367 | + * This speeds up repeated peek operations. | ||
25368 | + */ | ||
25369 | + struct bheap_node* min; | ||
25370 | +}; | ||
25371 | + | ||
25372 | +typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); | ||
25373 | + | ||
25374 | +void bheap_init(struct bheap* heap); | ||
25375 | +void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); | ||
25376 | + | ||
25377 | +static inline int bheap_node_in_heap(struct bheap_node* h) | ||
25378 | +{ | ||
25379 | + return h->degree != NOT_IN_HEAP; | ||
25380 | +} | ||
25381 | + | ||
25382 | +static inline int bheap_empty(struct bheap* heap) | ||
25383 | +{ | ||
25384 | + return heap->head == NULL && heap->min == NULL; | ||
25385 | +} | ||
25386 | + | ||
25387 | +/* insert (and reinitialize) a node into the heap */ | ||
25388 | +void bheap_insert(bheap_prio_t higher_prio, | ||
25389 | + struct bheap* heap, | ||
25390 | + struct bheap_node* node); | ||
25391 | + | ||
25392 | +/* merge addition into target */ | ||
25393 | +void bheap_union(bheap_prio_t higher_prio, | ||
25394 | + struct bheap* target, | ||
25395 | + struct bheap* addition); | ||
25396 | + | ||
25397 | +struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
25398 | + struct bheap* heap); | ||
25399 | + | ||
25400 | +struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
25401 | + struct bheap* heap); | ||
25402 | + | ||
25403 | +void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); | ||
25404 | +int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); | ||
25405 | + | ||
25406 | +void bheap_delete(bheap_prio_t higher_prio, | ||
25407 | + struct bheap* heap, | ||
25408 | + struct bheap_node* node); | ||
25409 | + | ||
25410 | +/* allocate from memcache */ | ||
25411 | +struct bheap_node* bheap_node_alloc(int gfp_flags); | ||
25412 | +void bheap_node_free(struct bheap_node* hn); | ||
25413 | + | ||
25414 | +/* allocate a heap node for value and insert into the heap */ | ||
25415 | +int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
25416 | + void* value, int gfp_flags); | ||
25417 | + | ||
25418 | +void* bheap_take_del(bheap_prio_t higher_prio, | ||
25419 | + struct bheap* heap); | ||
25420 | +#endif | ||
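The bheap API above is priority-agnostic: every operation takes a bheap_prio_t comparator that returns non-zero when its first argument has higher priority, and bheap_node_init() takes the address of the caller's node pointer (presumably stored in node->ref so the heap can keep it up to date). A minimal sketch of typical usage; struct demo_job, by_deadline() and demo_enqueue_dequeue() are invented names, not part of the patch.

    struct demo_job {
            unsigned long long deadline;
    };

    /* non-zero iff a should be dequeued before b */
    static int by_deadline(struct bheap_node* a, struct bheap_node* b)
    {
            struct demo_job* ja = a->value;
            struct demo_job* jb = b->value;
            return ja->deadline < jb->deadline;
    }

    static struct bheap ready_queue;
    static struct demo_job job;

    static void demo_enqueue_dequeue(void)
    {
            struct bheap_node* node = bheap_node_alloc(GFP_ATOMIC);

            bheap_init(&ready_queue);
            bheap_node_init(&node, &job);
            bheap_insert(by_deadline, &ready_queue, node);

            /* ... later: remove the highest-priority element ... */
            node = bheap_take(by_deadline, &ready_queue);
            bheap_node_free(node);
    }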
25421 | diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h | ||
25422 | new file mode 100644 | ||
25423 | index 0000000..80d4321 | ||
25424 | --- /dev/null | ||
25425 | +++ b/include/litmus/edf_common.h | ||
25426 | @@ -0,0 +1,27 @@ | ||
25427 | +/* | ||
25428 | + * EDF common data structures and utility functions shared by all EDF | ||
25429 | + * based scheduler plugins | ||
25430 | + */ | ||
25431 | + | ||
25432 | +/* CLEANUP: Add comments and make it less messy. | ||
25433 | + * | ||
25434 | + */ | ||
25435 | + | ||
25436 | +#ifndef __UNC_EDF_COMMON_H__ | ||
25437 | +#define __UNC_EDF_COMMON_H__ | ||
25438 | + | ||
25439 | +#include <litmus/rt_domain.h> | ||
25440 | + | ||
25441 | +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
25442 | + release_jobs_t release); | ||
25443 | + | ||
25444 | +int edf_higher_prio(struct task_struct* first, | ||
25445 | + struct task_struct* second); | ||
25446 | + | ||
25447 | +int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
25448 | + | ||
25449 | +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
25450 | + | ||
25451 | +int edf_set_hp_task(struct pi_semaphore *sem); | ||
25452 | +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); | ||
25453 | +#endif | ||
25454 | diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h | ||
25455 | new file mode 100644 | ||
25456 | index 0000000..286e10f | ||
25457 | --- /dev/null | ||
25458 | +++ b/include/litmus/fdso.h | ||
25459 | @@ -0,0 +1,69 @@ | ||
25460 | +/* fdso.h - file descriptor attached shared objects | ||
25461 | + * | ||
25462 | + * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
25463 | + */ | ||
25464 | + | ||
25465 | +#ifndef _LINUX_FDSO_H_ | ||
25466 | +#define _LINUX_FDSO_H_ | ||
25467 | + | ||
25468 | +#include <linux/list.h> | ||
25469 | +#include <asm/atomic.h> | ||
25470 | + | ||
25471 | +#include <linux/fs.h> | ||
25472 | + | ||
25473 | +#define MAX_OBJECT_DESCRIPTORS 32 | ||
25474 | + | ||
25475 | +typedef enum { | ||
25476 | + MIN_OBJ_TYPE = 0, | ||
25477 | + | ||
25478 | + FMLP_SEM = 0, | ||
25479 | + SRP_SEM = 1, | ||
25480 | + | ||
25481 | + MAX_OBJ_TYPE = 1 | ||
25482 | +} obj_type_t; | ||
25483 | + | ||
25484 | +struct inode_obj_id { | ||
25485 | + struct list_head list; | ||
25486 | + atomic_t count; | ||
25487 | + struct inode* inode; | ||
25488 | + | ||
25489 | + obj_type_t type; | ||
25490 | + void* obj; | ||
25491 | + unsigned int id; | ||
25492 | +}; | ||
25493 | + | ||
25494 | + | ||
25495 | +struct od_table_entry { | ||
25496 | + unsigned int used; | ||
25497 | + | ||
25498 | + struct inode_obj_id* obj; | ||
25499 | + void* extra; | ||
25500 | +}; | ||
25501 | + | ||
25502 | +struct fdso_ops { | ||
25503 | + void* (*create) (void); | ||
25504 | + void (*destroy)(void*); | ||
25505 | + int (*open) (struct od_table_entry*, void* __user); | ||
25506 | + int (*close) (struct od_table_entry*); | ||
25507 | +}; | ||
25508 | + | ||
25509 | +/* translate a userspace supplied od into the raw table entry | ||
25510 | + * returns NULL if od is invalid | ||
25511 | + */ | ||
25512 | +struct od_table_entry* __od_lookup(int od); | ||
25513 | + | ||
25514 | +/* translate a userspace supplied od into the associated object | ||
25515 | + * returns NULL if od is invalid | ||
25516 | + */ | ||
25517 | +static inline void* od_lookup(int od, obj_type_t type) | ||
25518 | +{ | ||
25519 | + struct od_table_entry* e = __od_lookup(od); | ||
25520 | + return e && e->obj->type == type ? e->obj->obj : NULL; | ||
25521 | +} | ||
25522 | + | ||
25523 | +#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | ||
25524 | +#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | ||
25525 | +#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | ||
25526 | + | ||
25527 | + | ||
25528 | +#endif | ||
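fdso.h above maps user-supplied object descriptors (ods) onto in-kernel shared objects: __od_lookup() resolves an od to its table entry, od_lookup() additionally checks the object type, and the lookup_*() macros specialise it for the known types. A hypothetical sketch of how a system call might use it; sys_demo_lock() and the behaviour after the lookup are illustrative only, not part of the patch.

    asmlinkage long sys_demo_lock(int od)
    {
            struct pi_semaphore* sem = lookup_fmlp_sem(od);

            if (!sem)
                    /* invalid od, or od refers to a different object type */
                    return -EINVAL;

            /* ... block on sem according to the locking protocol ... */
            return 0;
    }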
25529 | diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h | ||
25530 | new file mode 100644 | ||
25531 | index 0000000..6c18277 | ||
25532 | --- /dev/null | ||
25533 | +++ b/include/litmus/feather_buffer.h | ||
25534 | @@ -0,0 +1,94 @@ | ||
25535 | +#ifndef _FEATHER_BUFFER_H_ | ||
25536 | +#define _FEATHER_BUFFER_H_ | ||
25537 | + | ||
25538 | +/* requires UINT_MAX and memcpy */ | ||
25539 | + | ||
25540 | +#define SLOT_FREE 0 | ||
25541 | +#define SLOT_BUSY 1 | ||
25542 | +#define SLOT_READY 2 | ||
25543 | + | ||
25544 | +struct ft_buffer { | ||
25545 | + unsigned int slot_count; | ||
25546 | + unsigned int slot_size; | ||
25547 | + | ||
25548 | + int free_count; | ||
25549 | + unsigned int write_idx; | ||
25550 | + unsigned int read_idx; | ||
25551 | + | ||
25552 | + char* slots; | ||
25553 | + void* buffer_mem; | ||
25554 | + unsigned int failed_writes; | ||
25555 | +}; | ||
25556 | + | ||
25557 | +static inline int init_ft_buffer(struct ft_buffer* buf, | ||
25558 | + unsigned int slot_count, | ||
25559 | + unsigned int slot_size, | ||
25560 | + char* slots, | ||
25561 | + void* buffer_mem) | ||
25562 | +{ | ||
25563 | + int i = 0; | ||
25564 | + if (!slot_count || UINT_MAX % slot_count != slot_count - 1) { | ||
25565 | + /* The slot count must divide UINT_MAX + 1 so that when it | ||
25566 | + * wraps around, the index correctly points to 0. | ||
25567 | + */ | ||
25568 | + return 0; | ||
25569 | + } else { | ||
25570 | + buf->slot_count = slot_count; | ||
25571 | + buf->slot_size = slot_size; | ||
25572 | + buf->slots = slots; | ||
25573 | + buf->buffer_mem = buffer_mem; | ||
25574 | + buf->free_count = slot_count; | ||
25575 | + buf->write_idx = 0; | ||
25576 | + buf->read_idx = 0; | ||
25577 | + buf->failed_writes = 0; | ||
25578 | + for (i = 0; i < slot_count; i++) | ||
25579 | + buf->slots[i] = SLOT_FREE; | ||
25580 | + return 1; | ||
25581 | + } | ||
25582 | +} | ||
25583 | + | ||
25584 | +static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr) | ||
25585 | +{ | ||
25586 | + int free = fetch_and_dec(&buf->free_count); | ||
25587 | + unsigned int idx; | ||
25588 | + if (free <= 0) { | ||
25589 | + fetch_and_inc(&buf->free_count); | ||
25590 | + *ptr = 0; | ||
25591 | + fetch_and_inc(&buf->failed_writes); | ||
25592 | + return 0; | ||
25593 | + } else { | ||
25594 | + idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count; | ||
25595 | + buf->slots[idx] = SLOT_BUSY; | ||
25596 | + *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size; | ||
25597 | + return 1; | ||
25598 | + } | ||
25599 | +} | ||
25600 | + | ||
25601 | +static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr) | ||
25602 | +{ | ||
25603 | + unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size; | ||
25604 | + buf->slots[idx] = SLOT_READY; | ||
25605 | +} | ||
25606 | + | ||
25607 | + | ||
25608 | +/* exclusive reader access is assumed */ | ||
25609 | +static inline int ft_buffer_read(struct ft_buffer* buf, void* dest) | ||
25610 | +{ | ||
25611 | + unsigned int idx; | ||
25612 | + if (buf->free_count == buf->slot_count) | ||
25613 | + /* nothing available */ | ||
25614 | + return 0; | ||
25615 | + idx = buf->read_idx % buf->slot_count; | ||
25616 | + if (buf->slots[idx] == SLOT_READY) { | ||
25617 | + memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size, | ||
25618 | + buf->slot_size); | ||
25619 | + buf->slots[idx] = SLOT_FREE; | ||
25620 | + buf->read_idx++; | ||
25621 | + fetch_and_inc(&buf->free_count); | ||
25622 | + return 1; | ||
25623 | + } else | ||
25624 | + return 0; | ||
25625 | +} | ||
25626 | + | ||
25627 | + | ||
25628 | +#endif | ||
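The slot protocol above (SLOT_FREE -> SLOT_BUSY -> SLOT_READY -> SLOT_FREE) supports concurrent writers and a single exclusive reader. A minimal usage sketch, assuming the caller supplies the slot-state array and the backing storage (all names below are illustrative):

#include <litmus/feather_trace.h>	/* for fetch_and_inc()/fetch_and_dec() */
#include <litmus/feather_buffer.h>

#define DEMO_SLOTS 64			/* 64 divides UINT_MAX + 1, as required */

static char demo_status[DEMO_SLOTS];
static u64  demo_storage[DEMO_SLOTS];
static struct ft_buffer demo_buf;

static int demo_setup(void)
{
	return init_ft_buffer(&demo_buf, DEMO_SLOTS, sizeof(u64),
			      demo_status, demo_storage);
}

/* writer side: may be called concurrently from several CPUs */
static void demo_write(u64 sample)
{
	u64 *slot;
	if (ft_buffer_start_write(&demo_buf, (void **) &slot)) {
		*slot = sample;
		ft_buffer_finish_write(&demo_buf, slot);
	}
	/* on failure the sample is dropped and failed_writes is incremented */
}

/* reader side: the single, exclusive reader drains one record at a time */
static int demo_read(u64 *out)
{
	return ft_buffer_read(&demo_buf, out);
}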
25629 | diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h | ||
25630 | new file mode 100644 | ||
25631 | index 0000000..7d27e76 | ||
25632 | --- /dev/null | ||
25633 | +++ b/include/litmus/feather_trace.h | ||
25634 | @@ -0,0 +1,49 @@ | ||
25635 | +#ifndef _FEATHER_TRACE_H_ | ||
25636 | +#define _FEATHER_TRACE_H_ | ||
25637 | + | ||
25638 | +#include <asm/atomic.h> | ||
25639 | +#include <asm/feather_trace.h> | ||
25640 | + | ||
25641 | +int ft_enable_event(unsigned long id); | ||
25642 | +int ft_disable_event(unsigned long id); | ||
25643 | +int ft_is_event_enabled(unsigned long id); | ||
25644 | +int ft_disable_all_events(void); | ||
25645 | + | ||
25646 | +/* atomic_* functions are inline anyway */ | ||
25647 | +static inline int fetch_and_inc(int *val) | ||
25648 | +{ | ||
25649 | + return atomic_add_return(1, (atomic_t*) val) - 1; | ||
25650 | +} | ||
25651 | + | ||
25652 | +static inline int fetch_and_dec(int *val) | ||
25653 | +{ | ||
25654 | + return atomic_sub_return(1, (atomic_t*) val) + 1; | ||
25655 | +} | ||
25656 | + | ||
25657 | +#ifndef __ARCH_HAS_FEATHER_TRACE | ||
25658 | +/* provide default implementation */ | ||
25659 | + | ||
25660 | +#define feather_callback | ||
25661 | + | ||
25662 | +#define MAX_EVENTS 1024 | ||
25663 | + | ||
25664 | +extern int ft_events[MAX_EVENTS]; | ||
25665 | + | ||
25666 | +#define ft_event(id, callback) \ | ||
25667 | + if (ft_events[id]) callback(); | ||
25668 | + | ||
25669 | +#define ft_event0(id, callback) \ | ||
25670 | + if (ft_events[id]) callback(id); | ||
25671 | + | ||
25672 | +#define ft_event1(id, callback, param) \ | ||
25673 | + if (ft_events[id]) callback(id, param); | ||
25674 | + | ||
25675 | +#define ft_event2(id, callback, param, param2) \ | ||
25676 | + if (ft_events[id]) callback(id, param, param2); | ||
25677 | + | ||
25678 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
25679 | + if (ft_events[id]) callback(id, p, p2, p3); | ||
25680 | + | ||
25681 | +#endif | ||
25682 | + | ||
25683 | +#endif | ||
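In the default implementation above, ft_event0() and friends expand to a check of ft_events[id], so an instrumentation point costs only a load and a branch while its event is disabled. A hedged sketch of wiring up an event; the id 42 and the callback are made up for illustration.

#include <litmus/feather_trace.h>

static int demo_hits;

/* illustrative callback; id is passed by the ft_event0() expansion */
static feather_callback void demo_count_event(unsigned long id)
{
	fetch_and_inc(&demo_hits);	/* count how often the path fired */
}

static void demo_hot_path(void)
{
	/* expands to `if (ft_events[42]) demo_count_event(42);' in the
	 * default implementation, so it is nearly free while disabled */
	ft_event0(42, demo_count_event);
}

/* elsewhere, e.g. from a control interface:
 *	ft_enable_event(42);   ... run the workload ...   ft_disable_event(42);
 */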
25684 | diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h | ||
25685 | new file mode 100644 | ||
25686 | index 0000000..7697b46 | ||
25687 | --- /dev/null | ||
25688 | +++ b/include/litmus/ftdev.h | ||
25689 | @@ -0,0 +1,49 @@ | ||
25690 | +#ifndef _LITMUS_FTDEV_H_ | ||
25691 | +#define _LITMUS_FTDEV_H_ | ||
25692 | + | ||
25693 | +#include <litmus/feather_trace.h> | ||
25694 | +#include <litmus/feather_buffer.h> | ||
25695 | +#include <linux/mutex.h> | ||
25696 | +#include <linux/cdev.h> | ||
25697 | + | ||
25698 | +#define MAX_FTDEV_MINORS NR_CPUS | ||
25699 | + | ||
25700 | +#define FTDEV_ENABLE_CMD 0 | ||
25701 | +#define FTDEV_DISABLE_CMD 1 | ||
25702 | + | ||
25703 | +struct ftdev; | ||
25704 | + | ||
25705 | +/* return 0 if buffer can be opened, otherwise -$REASON */ | ||
25706 | +typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no); | ||
25707 | +/* return 0 on success, otherwise -$REASON */ | ||
25708 | +typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); | ||
25709 | +typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); | ||
25710 | + | ||
25711 | + | ||
25712 | +struct ftdev_event; | ||
25713 | + | ||
25714 | +struct ftdev_minor { | ||
25715 | + struct ft_buffer* buf; | ||
25716 | + unsigned int readers; | ||
25717 | + struct mutex lock; | ||
25718 | + /* FIXME: filter for authorized events */ | ||
25719 | + struct ftdev_event* events; | ||
25720 | +}; | ||
25721 | + | ||
25722 | +struct ftdev { | ||
25723 | + struct cdev cdev; | ||
25724 | + /* FIXME: don't waste memory, allocate dynamically */ | ||
25725 | + struct ftdev_minor minor[MAX_FTDEV_MINORS]; | ||
25726 | + unsigned int minor_cnt; | ||
25727 | + ftdev_alloc_t alloc; | ||
25728 | + ftdev_free_t free; | ||
25729 | + ftdev_can_open_t can_open; | ||
25730 | +}; | ||
25731 | + | ||
25732 | +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); | ||
25733 | +void free_ft_buffer(struct ft_buffer* buf); | ||
25734 | + | ||
25735 | +void ftdev_init(struct ftdev* ftdev, struct module* owner); | ||
25736 | +int register_ftdev(struct ftdev* ftdev, const char* name, int major); | ||
25737 | + | ||
25738 | +#endif | ||
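A buffer provider fills in a struct ftdev and hands it to register_ftdev(). The sketch below is only an outline based on the prototypes above; the device name, buffer sizing, per-CPU minor count, and the meaning of the major argument are assumptions.

#include <linux/module.h>
#include <litmus/ftdev.h>

static struct ftdev demo_dev;

static int demo_alloc(struct ftdev *dev, unsigned int buf_no)
{
	/* 4096 slots of one u64 each; sizing is arbitrary here */
	dev->minor[buf_no].buf = alloc_ft_buffer(4096, sizeof(u64));
	return dev->minor[buf_no].buf ? 0 : -ENOMEM;
}

static void demo_free(struct ftdev *dev, unsigned int buf_no)
{
	free_ft_buffer(dev->minor[buf_no].buf);
	dev->minor[buf_no].buf = NULL;
}

static int __init demo_ftdev_init(void)
{
	ftdev_init(&demo_dev, THIS_MODULE);
	demo_dev.minor_cnt = num_online_cpus();	/* one buffer per CPU */
	demo_dev.alloc = demo_alloc;
	demo_dev.free  = demo_free;
	/* the major-number semantics are an assumption; 0 is used here
	 * only as a placeholder */
	return register_ftdev(&demo_dev, "demo_ftdev", 0);
}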
25739 | diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h | ||
25740 | new file mode 100644 | ||
25741 | index 0000000..9bd361e | ||
25742 | --- /dev/null | ||
25743 | +++ b/include/litmus/jobs.h | ||
25744 | @@ -0,0 +1,9 @@ | ||
25745 | +#ifndef __LITMUS_JOBS_H__ | ||
25746 | +#define __LITMUS_JOBS_H__ | ||
25747 | + | ||
25748 | +void prepare_for_next_period(struct task_struct *t); | ||
25749 | +void release_at(struct task_struct *t, lt_t start); | ||
25750 | +long complete_job(void); | ||
25751 | + | ||
25752 | +#endif | ||
25753 | + | ||
25754 | diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h | ||
25755 | new file mode 100644 | ||
25756 | index 0000000..62107e6 | ||
25757 | --- /dev/null | ||
25758 | +++ b/include/litmus/litmus.h | ||
25759 | @@ -0,0 +1,252 @@ | ||
25760 | +/* | ||
25761 | + * Constant definitions related to | ||
25762 | + * scheduling policy. | ||
25763 | + */ | ||
25764 | + | ||
25765 | +#ifndef _LINUX_LITMUS_H_ | ||
25766 | +#define _LINUX_LITMUS_H_ | ||
25767 | + | ||
25768 | +#include <linux/jiffies.h> | ||
25769 | +#include <litmus/sched_trace.h> | ||
25770 | + | ||
25771 | +extern atomic_t release_master_cpu; | ||
25772 | + | ||
25773 | +extern atomic_t __log_seq_no; | ||
25774 | + | ||
25775 | +#define TRACE(fmt, args...) \ | ||
25776 | + sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \ | ||
25777 | + raw_smp_processor_id(), ## args) | ||
25778 | + | ||
25779 | +#define TRACE_TASK(t, fmt, args...) \ | ||
25780 | + TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args) | ||
25781 | + | ||
25782 | +#define TRACE_CUR(fmt, args...) \ | ||
25783 | + TRACE_TASK(current, fmt, ## args) | ||
25784 | + | ||
25785 | +#define TRACE_BUG_ON(cond) \ | ||
25786 | + do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \ | ||
25787 | + "called from %p current=%s/%d state=%d " \ | ||
25788 | + "flags=%x partition=%d cpu=%d rtflags=%d"\ | ||
25789 | + " job=%u timeslice=%u\n", \ | ||
25790 | + #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \ | ||
25791 | + current->pid, current->state, current->flags, \ | ||
25792 | + get_partition(current), smp_processor_id(), get_rt_flags(current), \ | ||
25793 | + current->rt_param.job_params.job_no, \ | ||
25794 | + current->rt.time_slice\ | ||
25795 | + ); } while(0); | ||
25796 | + | ||
25797 | + | ||
25798 | +/* in_list - is a given list_head queued on some list? | ||
25799 | + */ | ||
25800 | +static inline int in_list(struct list_head* list) | ||
25801 | +{ | ||
25802 | + return !( /* case 1: deleted */ | ||
25803 | + (list->next == LIST_POISON1 && | ||
25804 | + list->prev == LIST_POISON2) | ||
25805 | + || | ||
25806 | + /* case 2: initialized */ | ||
25807 | + (list->next == list && | ||
25808 | + list->prev == list) | ||
25809 | + ); | ||
25810 | +} | ||
25811 | + | ||
25812 | +#define NO_CPU 0xffffffff | ||
25813 | + | ||
25814 | +void litmus_fork(struct task_struct *tsk); | ||
25815 | +void litmus_exec(void); | ||
25816 | +/* clean up real-time state of a task */ | ||
25817 | +void exit_litmus(struct task_struct *dead_tsk); | ||
25818 | + | ||
25819 | +long litmus_admit_task(struct task_struct *tsk); | ||
25820 | +void litmus_exit_task(struct task_struct *tsk); | ||
25821 | + | ||
25822 | +#define is_realtime(t) ((t)->policy == SCHED_LITMUS) | ||
25823 | +#define rt_transition_pending(t) \ | ||
25824 | + ((t)->rt_param.transition_pending) | ||
25825 | + | ||
25826 | +#define tsk_rt(t) (&(t)->rt_param) | ||
25827 | + | ||
25828 | +/* Realtime utility macros */ | ||
25829 | +#define get_rt_flags(t) (tsk_rt(t)->flags) | ||
25830 | +#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f)) | ||
25831 | +#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) | ||
25832 | +#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | ||
25833 | +#define get_rt_period(t) (tsk_rt(t)->task_params.period) | ||
25834 | +#define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | ||
25835 | +#define get_partition(t) (tsk_rt(t)->task_params.cpu) | ||
25836 | +#define get_deadline(t) (tsk_rt(t)->job_params.deadline) | ||
25837 | +#define get_release(t) (tsk_rt(t)->job_params.release) | ||
25838 | +#define get_class(t) (tsk_rt(t)->task_params.cls) | ||
25839 | + | ||
25840 | +inline static int budget_exhausted(struct task_struct* t) | ||
25841 | +{ | ||
25842 | + return get_exec_time(t) >= get_exec_cost(t); | ||
25843 | +} | ||
25844 | + | ||
25845 | + | ||
25846 | +#define is_hrt(t) \ | ||
25847 | + (tsk_rt(t)->task_params.class == RT_CLASS_HARD) | ||
25848 | +#define is_srt(t) \ | ||
25849 | + (tsk_rt(t)->task_params.class == RT_CLASS_SOFT) | ||
25850 | +#define is_be(t) \ | ||
25851 | + (tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT) | ||
25852 | + | ||
25853 | +/* Our notion of time within LITMUS: kernel monotonic time. */ | ||
25854 | +static inline lt_t litmus_clock(void) | ||
25855 | +{ | ||
25856 | + return ktime_to_ns(ktime_get()); | ||
25857 | +} | ||
25858 | + | ||
25859 | +/* A macro to convert from nanoseconds to ktime_t. */ | ||
25860 | +#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | ||
25861 | + | ||
25862 | +#define get_domain(t) (tsk_rt(t)->domain) | ||
25863 | + | ||
25864 | +/* Honor the flag in the preempt_count variable that is set | ||
25865 | + * when scheduling is in progress. | ||
25866 | + */ | ||
25867 | +#define is_running(t) \ | ||
25868 | + ((t)->state == TASK_RUNNING || \ | ||
25869 | + task_thread_info(t)->preempt_count & PREEMPT_ACTIVE) | ||
25870 | + | ||
25871 | +#define is_blocked(t) \ | ||
25872 | + (!is_running(t)) | ||
25873 | +#define is_released(t, now) \ | ||
25874 | + (lt_before_eq(get_release(t), now)) | ||
25875 | +#define is_tardy(t, now) \ | ||
25876 | + (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) | ||
25877 | + | ||
25878 | +/* real-time comparison macros */ | ||
25879 | +#define earlier_deadline(a, b) (lt_before(\ | ||
25880 | + (a)->rt_param.job_params.deadline,\ | ||
25881 | + (b)->rt_param.job_params.deadline)) | ||
25882 | +#define earlier_release(a, b) (lt_before(\ | ||
25883 | + (a)->rt_param.job_params.release,\ | ||
25884 | + (b)->rt_param.job_params.release)) | ||
25885 | + | ||
25886 | +void preempt_if_preemptable(struct task_struct* t, int on_cpu); | ||
25887 | + | ||
25888 | +#ifdef CONFIG_SRP | ||
25889 | +void srp_ceiling_block(void); | ||
25890 | +#else | ||
25891 | +#define srp_ceiling_block() /* nothing */ | ||
25892 | +#endif | ||
25893 | + | ||
25894 | +#define bheap2task(hn) ((struct task_struct*) hn->value) | ||
25895 | + | ||
25896 | +#ifdef CONFIG_NP_SECTION | ||
25897 | + | ||
25898 | +static inline int is_kernel_np(struct task_struct *t) | ||
25899 | +{ | ||
25900 | + return tsk_rt(t)->kernel_np; | ||
25901 | +} | ||
25902 | + | ||
25903 | +static inline int is_user_np(struct task_struct *t) | ||
25904 | +{ | ||
25905 | + return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0; | ||
25906 | +} | ||
25907 | + | ||
25908 | +static inline void request_exit_np(struct task_struct *t) | ||
25909 | +{ | ||
25910 | + if (is_user_np(t)) { | ||
25911 | + /* Set the flag that tells user space to call | ||
25912 | + * into the kernel at the end of a critical section. */ | ||
25913 | + if (likely(tsk_rt(t)->ctrl_page)) { | ||
25914 | + TRACE_TASK(t, "setting delayed_preemption flag\n"); | ||
25915 | + tsk_rt(t)->ctrl_page->delayed_preemption = 1; | ||
25916 | + } | ||
25917 | + } | ||
25918 | +} | ||
25919 | + | ||
25920 | +static inline void clear_exit_np(struct task_struct *t) | ||
25921 | +{ | ||
25922 | + if (likely(tsk_rt(t)->ctrl_page)) | ||
25923 | + tsk_rt(t)->ctrl_page->delayed_preemption = 0; | ||
25924 | +} | ||
25925 | + | ||
25926 | +static inline void make_np(struct task_struct *t) | ||
25927 | +{ | ||
25928 | + tsk_rt(t)->kernel_np++; | ||
25929 | +} | ||
25930 | + | ||
25931 | +/* Caller should check if preemption is necessary when | ||
25932 | + * the function returns 0. | ||
25933 | + */ | ||
25934 | +static inline int take_np(struct task_struct *t) | ||
25935 | +{ | ||
25936 | + return --tsk_rt(t)->kernel_np; | ||
25937 | +} | ||
25938 | + | ||
25939 | +#else | ||
25940 | + | ||
25941 | +static inline int is_kernel_np(struct task_struct* t) | ||
25942 | +{ | ||
25943 | + return 0; | ||
25944 | +} | ||
25945 | + | ||
25946 | +static inline int is_user_np(struct task_struct* t) | ||
25947 | +{ | ||
25948 | + return 0; | ||
25949 | +} | ||
25950 | + | ||
25951 | +static inline void request_exit_np(struct task_struct *t) | ||
25952 | +{ | ||
25953 | + /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */ | ||
25954 | + BUG(); | ||
25955 | +} | ||
25956 | + | ||
25957 | +static inline void clear_exit_np(struct task_struct* t) | ||
25958 | +{ | ||
25959 | +} | ||
25960 | + | ||
25961 | +#endif | ||
25962 | + | ||
25963 | +static inline int is_np(struct task_struct *t) | ||
25964 | +{ | ||
25965 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
25966 | + int kernel, user; | ||
25967 | + kernel = is_kernel_np(t); | ||
25968 | + user = is_user_np(t); | ||
25969 | + if (kernel || user) | ||
25970 | + TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n", | ||
25971 | + kernel, user); | ||
25972 | + | ||
25973 | + return kernel || user; | ||
25974 | +#else | ||
25975 | + return unlikely(is_kernel_np(t) || is_user_np(t)); | ||
25976 | +#endif | ||
25977 | +} | ||
25978 | + | ||
25979 | +static inline int is_present(struct task_struct* t) | ||
25980 | +{ | ||
25981 | + return t && tsk_rt(t)->present; | ||
25982 | +} | ||
25983 | + | ||
25984 | + | ||
25985 | +/* make the unit explicit */ | ||
25986 | +typedef unsigned long quanta_t; | ||
25987 | + | ||
25988 | +enum round { | ||
25989 | + FLOOR, | ||
25990 | + CEIL | ||
25991 | +}; | ||
25992 | + | ||
25993 | + | ||
25994 | +/* Tick period is used to convert ns-specified execution | ||
25995 | + * costs and periods into tick-based equivalents. | ||
25996 | + */ | ||
25997 | +extern ktime_t tick_period; | ||
25998 | + | ||
25999 | +static inline quanta_t time2quanta(lt_t time, enum round round) | ||
26000 | +{ | ||
26001 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
26002 | + | ||
26003 | + if (do_div(time, quantum_length) && round == CEIL) | ||
26004 | + time++; | ||
26005 | + return (quanta_t) time; | ||
26006 | +} | ||
26007 | + | ||
26008 | +/* By how much is cpu staggered behind CPU 0? */ | ||
26009 | +u64 cpu_stagger_offset(int cpu); | ||
26010 | + | ||
26011 | +#endif | ||
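To make time2quanta() concrete, a small worked example (the 1 ms quantum is assumed for illustration; the real value comes from tick_period). do_div() leaves the quotient in `time' and returns the remainder, so a non-zero remainder only matters under CEIL.

/* Assume tick_period = 1 ms = 1,000,000 ns. Then:
 *
 *   time2quanta(2500000, FLOOR) == 2   (quotient 2, remainder 500,000 ignored)
 *   time2quanta(2500000, CEIL)  == 3   (non-zero remainder rounds up)
 *   time2quanta(2000000, CEIL)  == 2   (exact multiple, nothing to round)
 */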
26012 | diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h | ||
26013 | new file mode 100644 | ||
26014 | index 0000000..b452be1 | ||
26015 | --- /dev/null | ||
26016 | +++ b/include/litmus/rt_domain.h | ||
26017 | @@ -0,0 +1,162 @@ | ||
26018 | +/* CLEANUP: Add comments and make it less messy. | ||
26019 | + * | ||
26020 | + */ | ||
26021 | + | ||
26022 | +#ifndef __UNC_RT_DOMAIN_H__ | ||
26023 | +#define __UNC_RT_DOMAIN_H__ | ||
26024 | + | ||
26025 | +#include <litmus/bheap.h> | ||
26026 | + | ||
26027 | +#define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
26028 | + | ||
26029 | +struct _rt_domain; | ||
26030 | + | ||
26031 | +typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
26032 | +typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks); | ||
26033 | + | ||
26034 | +struct release_queue { | ||
26035 | + /* each slot maintains a list of release heaps sorted | ||
26036 | + * by release time */ | ||
26037 | + struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
26038 | +}; | ||
26039 | + | ||
26040 | +typedef struct _rt_domain { | ||
26041 | + /* runnable rt tasks are in here */ | ||
26042 | + spinlock_t ready_lock; | ||
26043 | + struct bheap ready_queue; | ||
26044 | + | ||
26045 | + /* real-time tasks waiting for release are in here */ | ||
26046 | + spinlock_t release_lock; | ||
26047 | + struct release_queue release_queue; | ||
26048 | + int release_master; | ||
26049 | + | ||
26050 | + /* for moving tasks to the release queue */ | ||
26051 | + spinlock_t tobe_lock; | ||
26052 | + struct list_head tobe_released; | ||
26053 | + | ||
26054 | + /* how do we check if we need to kick another CPU? */ | ||
26055 | + check_resched_needed_t check_resched; | ||
26056 | + | ||
26057 | + /* how do we release jobs? */ | ||
26058 | + release_jobs_t release_jobs; | ||
26059 | + | ||
26060 | + /* how are tasks ordered in the ready queue? */ | ||
26061 | + bheap_prio_t order; | ||
26062 | +} rt_domain_t; | ||
26063 | + | ||
26064 | +struct release_heap { | ||
26065 | + /* list_head for per-time-slot list */ | ||
26066 | + struct list_head list; | ||
26067 | + lt_t release_time; | ||
26068 | + /* all tasks to be released at release_time */ | ||
26069 | + struct bheap heap; | ||
26070 | + /* used to trigger the release */ | ||
26071 | + struct hrtimer timer; | ||
26072 | + /* used to delegate releases */ | ||
26073 | + struct hrtimer_start_on_info info; | ||
26074 | + /* required for the timer callback */ | ||
26075 | + rt_domain_t* dom; | ||
26076 | +}; | ||
26077 | + | ||
26078 | + | ||
26079 | +static inline struct task_struct* __next_ready(rt_domain_t* rt) | ||
26080 | +{ | ||
26081 | + struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue); | ||
26082 | + if (hn) | ||
26083 | + return bheap2task(hn); | ||
26084 | + else | ||
26085 | + return NULL; | ||
26086 | +} | ||
26087 | + | ||
26088 | +void rt_domain_init(rt_domain_t *rt, bheap_prio_t order, | ||
26089 | + check_resched_needed_t check, | ||
26090 | + release_jobs_t release); | ||
26091 | + | ||
26092 | +void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
26093 | +void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | ||
26094 | +void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
26095 | + | ||
26096 | +static inline struct task_struct* __take_ready(rt_domain_t* rt) | ||
26097 | +{ | ||
26098 | + struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); | ||
26099 | + if (hn) | ||
26100 | + return bheap2task(hn); | ||
26101 | + else | ||
26102 | + return NULL; | ||
26103 | +} | ||
26104 | + | ||
26105 | +static inline struct task_struct* __peek_ready(rt_domain_t* rt) | ||
26106 | +{ | ||
26107 | + struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue); | ||
26108 | + if (hn) | ||
26109 | + return bheap2task(hn); | ||
26110 | + else | ||
26111 | + return NULL; | ||
26112 | +} | ||
26113 | + | ||
26114 | +static inline int is_queued(struct task_struct *t) | ||
26115 | +{ | ||
26116 | + BUG_ON(!tsk_rt(t)->heap_node); | ||
26117 | + return bheap_node_in_heap(tsk_rt(t)->heap_node); | ||
26118 | +} | ||
26119 | + | ||
26120 | +static inline void remove(rt_domain_t* rt, struct task_struct *t) | ||
26121 | +{ | ||
26122 | + bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); | ||
26123 | +} | ||
26124 | + | ||
26125 | +static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
26126 | +{ | ||
26127 | + unsigned long flags; | ||
26128 | + /* first we need the write lock for rt_ready_queue */ | ||
26129 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
26130 | + __add_ready(rt, new); | ||
26131 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
26132 | +} | ||
26133 | + | ||
26134 | +static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
26135 | +{ | ||
26136 | + unsigned long flags; | ||
26137 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
26138 | + __merge_ready(rt, tasks); | ||
26139 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
26140 | +} | ||
26141 | + | ||
26142 | +static inline struct task_struct* take_ready(rt_domain_t* rt) | ||
26143 | +{ | ||
26144 | + unsigned long flags; | ||
26145 | + struct task_struct* ret; | ||
26146 | + /* first we need the write lock for rt_ready_queue */ | ||
26147 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
26148 | + ret = __take_ready(rt); | ||
26149 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
26150 | + return ret; | ||
26151 | +} | ||
26152 | + | ||
26153 | + | ||
26154 | +static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
26155 | +{ | ||
26156 | + unsigned long flags; | ||
26157 | + /* first we need the lock protecting the to-be-released list */ | ||
26158 | + spin_lock_irqsave(&rt->tobe_lock, flags); | ||
26159 | + __add_release(rt, task); | ||
26160 | + spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
26161 | +} | ||
26162 | + | ||
26163 | +static inline int __jobs_pending(rt_domain_t* rt) | ||
26164 | +{ | ||
26165 | + return !bheap_empty(&rt->ready_queue); | ||
26166 | +} | ||
26167 | + | ||
26168 | +static inline int jobs_pending(rt_domain_t* rt) | ||
26169 | +{ | ||
26170 | + unsigned long flags; | ||
26171 | + int ret; | ||
26172 | + /* first we need the write lock for rt_ready_queue */ | ||
26173 | + spin_lock_irqsave(&rt->ready_lock, flags); | ||
26174 | + ret = !bheap_empty(&rt->ready_queue); | ||
26175 | + spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
26176 | + return ret; | ||
26177 | +} | ||
26178 | + | ||
26179 | +#endif | ||
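As a rough sketch of how a plugin might use this interface: initialize the domain with an ordering function and a resched callback, then pull the highest-priority task when scheduling. The bheap_prio_t signature, the NULL default for release_jobs, and all names below are assumptions for illustration only.

#include <litmus/litmus.h>
#include <litmus/rt_domain.h>

static rt_domain_t demo_domain;

/* placeholder EDF order: a beats b if its deadline is earlier; the exact
 * bheap_prio_t prototype is assumed from litmus/bheap.h */
static int demo_edf_order(struct bheap_node *a, struct bheap_node *b)
{
	return earlier_deadline(bheap2task(a), bheap2task(b));
}

static int demo_check_resched(rt_domain_t *rt)
{
	/* return non-zero after kicking a remote CPU; stubbed out here */
	return 0;
}

static void demo_domain_setup(void)
{
	/* passing NULL for release_jobs is assumed to select the default
	 * release behavior implemented in litmus/rt_domain.c */
	rt_domain_init(&demo_domain, demo_edf_order, demo_check_resched, NULL);
}

static struct task_struct *demo_pick_next(void)
{
	/* take_ready() acquires ready_lock internally */
	return take_ready(&demo_domain);
}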
26180 | diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h | ||
26181 | new file mode 100644 | ||
26182 | index 0000000..5b94d1a | ||
26183 | --- /dev/null | ||
26184 | +++ b/include/litmus/rt_param.h | ||
26185 | @@ -0,0 +1,189 @@ | ||
26186 | +/* | ||
26187 | + * Definition of the LITMUS^RT real-time task parameters. | ||
26188 | + * | ||
26189 | + */ | ||
26190 | +#ifndef _LINUX_RT_PARAM_H_ | ||
26191 | +#define _LINUX_RT_PARAM_H_ | ||
26192 | + | ||
26193 | +/* Litmus time type. */ | ||
26194 | +typedef unsigned long long lt_t; | ||
26195 | + | ||
26196 | +static inline int lt_after(lt_t a, lt_t b) | ||
26197 | +{ | ||
26198 | + return ((long long) b) - ((long long) a) < 0; | ||
26199 | +} | ||
26200 | +#define lt_before(a, b) lt_after(b, a) | ||
26201 | + | ||
26202 | +static inline int lt_after_eq(lt_t a, lt_t b) | ||
26203 | +{ | ||
26204 | + return ((long long) a) - ((long long) b) >= 0; | ||
26205 | +} | ||
26206 | +#define lt_before_eq(a, b) lt_after_eq(b, a) | ||
26207 | + | ||
26208 | +/* different types of clients */ | ||
26209 | +typedef enum { | ||
26210 | + RT_CLASS_HARD, | ||
26211 | + RT_CLASS_SOFT, | ||
26212 | + RT_CLASS_BEST_EFFORT | ||
26213 | +} task_class_t; | ||
26214 | + | ||
26215 | +struct rt_task { | ||
26216 | + lt_t exec_cost; | ||
26217 | + lt_t period; | ||
26218 | + lt_t phase; | ||
26219 | + unsigned int cpu; | ||
26220 | + task_class_t cls; | ||
26221 | +}; | ||
26222 | + | ||
26223 | +/* The definition of the data that is shared between the kernel and real-time | ||
26224 | + * tasks via a shared page (see litmus/ctrldev.c). | ||
26225 | + * | ||
26226 | + * WARNING: User space can write to this, so don't trust | ||
26227 | + * the correctness of the fields! | ||
26228 | + * | ||
26229 | + * This serves two purposes: to enable efficient signaling | ||
26230 | + * of non-preemptive sections (user->kernel) and | ||
26231 | + * delayed preemptions (kernel->user), and to export | ||
26232 | + * some real-time relevant statistics such as preemption and | ||
26233 | + * migration data to user space. We can't use a device to export | ||
26234 | + * statistics because we want to avoid system call overhead when | ||
26235 | + * determining preemption/migration overheads. | ||
26236 | + */ | ||
26237 | +struct control_page { | ||
26238 | + /* Is the task currently in a non-preemptive section? */ | ||
26239 | + int np_flag; | ||
26240 | + /* Should the task call into the kernel when it leaves | ||
26241 | + * its non-preemptive section? */ | ||
26242 | + int delayed_preemption; | ||
26243 | + | ||
26244 | + /* to be extended */ | ||
26245 | +}; | ||
26246 | + | ||
26247 | +/* don't export internal data structures to user space (liblitmus) */ | ||
26248 | +#ifdef __KERNEL__ | ||
26249 | + | ||
26250 | +struct _rt_domain; | ||
26251 | +struct bheap_node; | ||
26252 | +struct release_heap; | ||
26253 | + | ||
26254 | +struct rt_job { | ||
26255 | + /* Time instant the job was or will be released. */ | ||
26256 | + lt_t release; | ||
26257 | + /* What is the current deadline? */ | ||
26258 | + lt_t deadline; | ||
26259 | + | ||
26260 | + /* How much service has this job received so far? */ | ||
26261 | + lt_t exec_time; | ||
26262 | + | ||
26263 | + /* Which job is this. This is used to let user space | ||
26264 | + * specify which job to wait for, which is important if jobs | ||
26265 | + * overrun. If we just call sys_sleep_next_period() then we | ||
26266 | + * will unintentionally miss jobs after an overrun. | ||
26267 | + * | ||
26268 | + * Increase this sequence number when a job is released. | ||
26269 | + */ | ||
26270 | + unsigned int job_no; | ||
26271 | +}; | ||
26272 | + | ||
26273 | +struct pfair_param; | ||
26274 | + | ||
26275 | +/* RT task parameters for scheduling extensions. | ||
26276 | + * These parameters are inherited during clone and therefore must | ||
26277 | + * be explicitly set up before the task set is launched. | ||
26278 | + */ | ||
26279 | +struct rt_param { | ||
26280 | + /* is the task sleeping? */ | ||
26281 | + unsigned int flags:8; | ||
26282 | + | ||
26283 | + /* do we need to check for srp blocking? */ | ||
26284 | + unsigned int srp_non_recurse:1; | ||
26285 | + | ||
26286 | + /* is the task present? (true if it can be scheduled) */ | ||
26287 | + unsigned int present:1; | ||
26288 | + | ||
26289 | + /* user controlled parameters */ | ||
26290 | + struct rt_task task_params; | ||
26291 | + | ||
26292 | + /* timing parameters */ | ||
26293 | + struct rt_job job_params; | ||
26294 | + | ||
26295 | + /* task representing the current "inherited" task | ||
26296 | + * priority, assigned by inherit_priority() and | ||
26297 | + * return_priority() in the scheduler plugins. | ||
26298 | + * Could point to self if PI does not result in | ||
26299 | + * an increased task priority. | ||
26300 | + */ | ||
26301 | + struct task_struct* inh_task; | ||
26302 | + | ||
26303 | +#ifdef CONFIG_NP_SECTION | ||
26304 | + /* For the FMLP under PSN-EDF, it is required to make the task | ||
26305 | + * non-preemptive from kernel space. In order not to interfere with | ||
26306 | + * user space, this counter indicates the kernel space np setting. | ||
26307 | + * kernel_np > 0 => task is non-preemptive | ||
26308 | + */ | ||
26309 | + unsigned int kernel_np; | ||
26310 | +#endif | ||
26311 | + | ||
26312 | + /* This field can be used by plugins to store where the task | ||
26313 | + * is currently scheduled. It is the responsibility of the | ||
26314 | + * plugin to avoid race conditions. | ||
26315 | + * | ||
26316 | + * This is used by GSN-EDF and PFAIR. | ||
26317 | + */ | ||
26318 | + volatile int scheduled_on; | ||
26319 | + | ||
26320 | + /* Is the stack of the task currently in use? This is updated by | ||
26321 | + * the LITMUS core. | ||
26322 | + * | ||
26323 | + * Be careful to avoid deadlocks! | ||
26324 | + */ | ||
26325 | + volatile int stack_in_use; | ||
26326 | + | ||
26327 | + /* This field can be used by plugins to store where the task | ||
26328 | + * is currently linked. It is the responsibility of the plugin | ||
26329 | + * to avoid race conditions. | ||
26330 | + * | ||
26331 | + * Used by GSN-EDF. | ||
26332 | + */ | ||
26333 | + volatile int linked_on; | ||
26334 | + | ||
26335 | + /* PFAIR/PD^2 state. Allocated on demand. */ | ||
26336 | + struct pfair_param* pfair; | ||
26337 | + | ||
26338 | + /* Fields saved before BE->RT transition. | ||
26339 | + */ | ||
26340 | + int old_policy; | ||
26341 | + int old_prio; | ||
26342 | + | ||
26343 | + /* ready queue for this task */ | ||
26344 | + struct _rt_domain* domain; | ||
26345 | + | ||
26346 | + /* heap element for this task | ||
26347 | + * | ||
26348 | + * Warning: Don't statically allocate this node. The heap | ||
26349 | + * implementation swaps these between tasks, thus after | ||
26350 | + * dequeuing from a heap you may end up with a different node | ||
26351 | + * than the one you had when enqueuing the task. For the same | ||
26352 | + * reason, don't obtain and store references to this node | ||
26353 | + * other than this pointer (which is updated by the heap | ||
26354 | + * implementation). | ||
26355 | + */ | ||
26356 | + struct bheap_node* heap_node; | ||
26357 | + struct release_heap* rel_heap; | ||
26358 | + | ||
26359 | + /* Used by rt_domain to queue task in release list. | ||
26360 | + */ | ||
26361 | + struct list_head list; | ||
26362 | + | ||
26363 | + /* Pointer to the page shared between userspace and kernel. */ | ||
26364 | + struct control_page * ctrl_page; | ||
26365 | +}; | ||
26366 | + | ||
26367 | +/* Possible RT flags */ | ||
26368 | +#define RT_F_RUNNING 0x00000000 | ||
26369 | +#define RT_F_SLEEP 0x00000001 | ||
26370 | +#define RT_F_EXIT_SEM 0x00000008 | ||
26371 | + | ||
26372 | +#endif | ||
26373 | + | ||
26374 | +#endif | ||
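The control page protocol described above looks roughly as follows from the user-space side. How the page is mapped and how the task finally calls back into the kernel are assumptions here; the header only states that litmus/ctrldev.c provides the shared page.

/* Illustrative user-space view: `ctrl' is assumed to be the task's control
 * page, mapped via the device provided by litmus/ctrldev.c. */
static volatile struct control_page *ctrl;

static void enter_np(void)
{
	ctrl->np_flag = 1;		/* ask the kernel not to preempt us */
}

static void exit_np(void)
{
	ctrl->np_flag = 0;
	if (ctrl->delayed_preemption) {
		/* the kernel postponed a preemption while np_flag was set;
		 * notifying it now (e.g. via sched_yield() or a LITMUS^RT
		 * syscall) is an assumption about the intended protocol */
		ctrl->delayed_preemption = 0;
	}
}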
26375 | diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h | ||
26376 | new file mode 100644 | ||
26377 | index 0000000..2d856d5 | ||
26378 | --- /dev/null | ||
26379 | +++ b/include/litmus/sched_plugin.h | ||
26380 | @@ -0,0 +1,159 @@ | ||
26381 | +/* | ||
26382 | + * Definition of the scheduler plugin interface. | ||
26383 | + * | ||
26384 | + */ | ||
26385 | +#ifndef _LINUX_SCHED_PLUGIN_H_ | ||
26386 | +#define _LINUX_SCHED_PLUGIN_H_ | ||
26387 | + | ||
26388 | +#include <linux/sched.h> | ||
26389 | + | ||
26390 | +/* struct for semaphore with priority inheritance */ | ||
26391 | +struct pi_semaphore { | ||
26392 | + atomic_t count; | ||
26393 | + int sleepers; | ||
26394 | + wait_queue_head_t wait; | ||
26395 | + struct { | ||
26396 | + /* highest-prio holder/waiter */ | ||
26397 | + struct task_struct *task; | ||
26398 | + struct task_struct* cpu_task[NR_CPUS]; | ||
26399 | + } hp; | ||
26400 | + /* current lock holder */ | ||
26401 | + struct task_struct *holder; | ||
26402 | +}; | ||
26403 | + | ||
26404 | +/************************ setup/tear down ********************/ | ||
26405 | + | ||
26406 | +typedef long (*activate_plugin_t) (void); | ||
26407 | +typedef long (*deactivate_plugin_t) (void); | ||
26408 | + | ||
26409 | + | ||
26410 | + | ||
26411 | +/********************* scheduler invocation ******************/ | ||
26412 | + | ||
26413 | +/* Plugin-specific realtime tick handler */ | ||
26414 | +typedef void (*scheduler_tick_t) (struct task_struct *cur); | ||
26415 | +/* Plugin-specific scheduling decision function */ | ||
26416 | +typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | ||
26417 | +/* Clean up after the task switch has occurred. | ||
26418 | + * This function is called after every (even non-rt) task switch. | ||
26419 | + */ | ||
26420 | +typedef void (*finish_switch_t)(struct task_struct *prev); | ||
26421 | + | ||
26422 | + | ||
26423 | +/********************* task state changes ********************/ | ||
26424 | + | ||
26425 | +/* Called to set up a new real-time task. | ||
26426 | + * Release the first job, enqueue, etc. | ||
26427 | + * Task may already be running. | ||
26428 | + */ | ||
26429 | +typedef void (*task_new_t) (struct task_struct *task, | ||
26430 | + int on_rq, | ||
26431 | + int running); | ||
26432 | + | ||
26433 | +/* Called to re-introduce a task after blocking. | ||
26434 | + * Can potentially be called multiple times. | ||
26435 | + */ | ||
26436 | +typedef void (*task_wake_up_t) (struct task_struct *task); | ||
26437 | +/* Called to notify the plugin of a blocking real-time task; | ||
26438 | + * it is only called for real-time tasks and before schedule() is invoked. */ | ||
26439 | +typedef void (*task_block_t) (struct task_struct *task); | ||
26440 | +/* Called when a real-time task exits or changes to a different scheduling | ||
26441 | + * class. | ||
26442 | + * Free any allocated resources. | ||
26443 | + */ | ||
26444 | +typedef void (*task_exit_t) (struct task_struct *); | ||
26445 | + | ||
26446 | +/* Called when the new_owner is released from the wait queue; | ||
26447 | + * it should now inherit the priority from sem, _before_ it gets re-added | ||
26448 | + * to any queue. | ||
26449 | + */ | ||
26450 | +typedef long (*inherit_priority_t) (struct pi_semaphore *sem, | ||
26451 | + struct task_struct *new_owner); | ||
26452 | + | ||
26453 | +/* Called when the current task releases a semaphore from which it might | ||
26454 | + * have inherited a priority. | ||
26455 | + */ | ||
26456 | +typedef long (*return_priority_t) (struct pi_semaphore *sem); | ||
26457 | + | ||
26458 | +/* Called when a task tries to acquire a semaphore and fails. Check if its | ||
26459 | + * priority is higher than that of the current holder. | ||
26460 | + */ | ||
26461 | +typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t); | ||
26462 | + | ||
26463 | + | ||
26464 | + | ||
26465 | + | ||
26466 | +/********************* sys call backends ********************/ | ||
26467 | +/* This function causes the caller to sleep until the next release */ | ||
26468 | +typedef long (*complete_job_t) (void); | ||
26469 | + | ||
26470 | +typedef long (*admit_task_t)(struct task_struct* tsk); | ||
26471 | + | ||
26472 | +typedef void (*release_at_t)(struct task_struct *t, lt_t start); | ||
26473 | + | ||
26474 | +struct sched_plugin { | ||
26475 | + struct list_head list; | ||
26476 | + /* basic info */ | ||
26477 | + char *plugin_name; | ||
26478 | + | ||
26479 | + /* setup */ | ||
26480 | + activate_plugin_t activate_plugin; | ||
26481 | + deactivate_plugin_t deactivate_plugin; | ||
26482 | + | ||
26483 | +#ifdef CONFIG_SRP | ||
26484 | + unsigned int srp_active; | ||
26485 | +#endif | ||
26486 | + | ||
26487 | + /* scheduler invocation */ | ||
26488 | + scheduler_tick_t tick; | ||
26489 | + schedule_t schedule; | ||
26490 | + finish_switch_t finish_switch; | ||
26491 | + | ||
26492 | + /* syscall backend */ | ||
26493 | + complete_job_t complete_job; | ||
26494 | + release_at_t release_at; | ||
26495 | + | ||
26496 | + /* task state changes */ | ||
26497 | + admit_task_t admit_task; | ||
26498 | + | ||
26499 | + task_new_t task_new; | ||
26500 | + task_wake_up_t task_wake_up; | ||
26501 | + task_block_t task_block; | ||
26502 | + task_exit_t task_exit; | ||
26503 | + | ||
26504 | +#ifdef CONFIG_FMLP | ||
26505 | + /* priority inheritance */ | ||
26506 | + unsigned int fmlp_active; | ||
26507 | + inherit_priority_t inherit_priority; | ||
26508 | + return_priority_t return_priority; | ||
26509 | + pi_block_t pi_block; | ||
26510 | +#endif | ||
26511 | +} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
26512 | + | ||
26513 | + | ||
26514 | +extern struct sched_plugin *litmus; | ||
26515 | + | ||
26516 | +int register_sched_plugin(struct sched_plugin* plugin); | ||
26517 | +struct sched_plugin* find_sched_plugin(const char* name); | ||
26518 | +int print_sched_plugins(char* buf, int max); | ||
26519 | + | ||
26520 | +static inline int srp_active(void) | ||
26521 | +{ | ||
26522 | +#ifdef CONFIG_SRP | ||
26523 | + return litmus->srp_active; | ||
26524 | +#else | ||
26525 | + return 0; | ||
26526 | +#endif | ||
26527 | +} | ||
26528 | +static inline int fmlp_active(void) | ||
26529 | +{ | ||
26530 | +#ifdef CONFIG_FMLP | ||
26531 | + return litmus->fmlp_active; | ||
26532 | +#else | ||
26533 | + return 0; | ||
26534 | +#endif | ||
26535 | +} | ||
26536 | + | ||
26537 | +extern struct sched_plugin linux_sched_plugin; | ||
26538 | + | ||
26539 | +#endif | ||
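Registering a plugin amounts to filling in the callback table and calling register_sched_plugin(). The stub below is a sketch only; the plugin name, the assumed meaning of a NULL return from schedule() ("no real-time task to run, fall back to Linux"), and the omission of most callbacks are illustrative choices.

#include <litmus/sched_plugin.h>

static struct task_struct *demo_schedule(struct task_struct *prev)
{
	return NULL;	/* assumed: let the Linux scheduler pick a task */
}

static long demo_admit_task(struct task_struct *tsk)
{
	return 0;	/* accept every task; a real plugin validates rt_task */
}

static struct sched_plugin demo_plugin = {
	.plugin_name = "DEMO",
	.schedule    = demo_schedule,
	.admit_task  = demo_admit_task,
};

static int __init init_demo_plugin(void)
{
	return register_sched_plugin(&demo_plugin);
}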
26540 | diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h | ||
26541 | new file mode 100644 | ||
26542 | index 0000000..e1b0c97 | ||
26543 | --- /dev/null | ||
26544 | +++ b/include/litmus/sched_trace.h | ||
26545 | @@ -0,0 +1,192 @@ | ||
26546 | +/* | ||
26547 | + * sched_trace.h -- record scheduler events to a byte stream for offline analysis. | ||
26548 | + */ | ||
26549 | +#ifndef _LINUX_SCHED_TRACE_H_ | ||
26550 | +#define _LINUX_SCHED_TRACE_H_ | ||
26551 | + | ||
26552 | +/* all times in nanoseconds */ | ||
26553 | + | ||
26554 | +struct st_trace_header { | ||
26555 | + u8 type; /* Of what type is this record? */ | ||
26556 | + u8 cpu; /* On which CPU was it recorded? */ | ||
26557 | + u16 pid; /* PID of the task. */ | ||
26558 | + u32 job; /* The job sequence number. */ | ||
26559 | +}; | ||
26560 | + | ||
26561 | +#define ST_NAME_LEN 16 | ||
26562 | +struct st_name_data { | ||
26563 | + char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ | ||
26564 | +}; | ||
26565 | + | ||
26566 | +struct st_param_data { /* regular params */ | ||
26567 | + u32 wcet; | ||
26568 | + u32 period; | ||
26569 | + u32 phase; | ||
26570 | + u8 partition; | ||
26571 | + u8 __unused[3]; | ||
26572 | +}; | ||
26573 | + | ||
26574 | +struct st_release_data { /* A job was or is about to be released. */ | ||
26575 | + u64 release; /* What's the release time? */ | ||
26576 | + u64 deadline; /* By when must it finish? */ | ||
26577 | +}; | ||
26578 | + | ||
26579 | +struct st_assigned_data { /* A job was assigned to a CPU. */ | ||
26580 | + u64 when; | ||
26581 | + u8 target; /* Where should it execute? */ | ||
26582 | + u8 __unused[3]; | ||
26583 | +}; | ||
26584 | + | ||
26585 | +struct st_switch_to_data { /* A process was switched to on a given CPU. */ | ||
26586 | + u64 when; /* When did this occur? */ | ||
26587 | + u32 exec_time; /* Time the current job has executed. */ | ||
26588 | + | ||
26589 | +}; | ||
26590 | + | ||
26591 | +struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | ||
26592 | + u64 when; | ||
26593 | + u64 exec_time; | ||
26594 | +}; | ||
26595 | + | ||
26596 | +struct st_completion_data { /* A job completed. */ | ||
26597 | + u64 when; | ||
26598 | + u8 forced:1; /* Set to 1 if job overran and kernel advanced to the | ||
26599 | + * next task automatically; set to 0 otherwise. | ||
26600 | + */ | ||
26601 | + u8 __uflags:7; | ||
26602 | + u8 __unused[3]; | ||
26603 | +}; | ||
26604 | + | ||
26605 | +struct st_block_data { /* A task blocks. */ | ||
26606 | + u64 when; | ||
26607 | + u64 __unused; | ||
26608 | +}; | ||
26609 | + | ||
26610 | +struct st_resume_data { /* A task resumes. */ | ||
26611 | + u64 when; | ||
26612 | + u64 __unused; | ||
26613 | +}; | ||
26614 | + | ||
26615 | +struct st_sys_release_data { | ||
26616 | + u64 when; | ||
26617 | + u64 release; | ||
26618 | +}; | ||
26619 | + | ||
26620 | +#define DATA(x) struct st_ ## x ## _data x; | ||
26621 | + | ||
26622 | +typedef enum { | ||
26623 | + ST_NAME = 1, /* Start at one, so that we can spot | ||
26624 | + * uninitialized records. */ | ||
26625 | + ST_PARAM, | ||
26626 | + ST_RELEASE, | ||
26627 | + ST_ASSIGNED, | ||
26628 | + ST_SWITCH_TO, | ||
26629 | + ST_SWITCH_AWAY, | ||
26630 | + ST_COMPLETION, | ||
26631 | + ST_BLOCK, | ||
26632 | + ST_RESUME, | ||
26633 | + ST_SYS_RELEASE, | ||
26634 | +} st_event_record_type_t; | ||
26635 | + | ||
26636 | +struct st_event_record { | ||
26637 | + struct st_trace_header hdr; | ||
26638 | + union { | ||
26639 | + u64 raw[2]; | ||
26640 | + | ||
26641 | + DATA(name); | ||
26642 | + DATA(param); | ||
26643 | + DATA(release); | ||
26644 | + DATA(assigned); | ||
26645 | + DATA(switch_to); | ||
26646 | + DATA(switch_away); | ||
26647 | + DATA(completion); | ||
26648 | + DATA(block); | ||
26649 | + DATA(resume); | ||
26650 | + DATA(sys_release); | ||
26651 | + | ||
26652 | + } data; | ||
26653 | +}; | ||
26654 | + | ||
26655 | +#undef DATA | ||
26656 | + | ||
26657 | +#ifdef __KERNEL__ | ||
26658 | + | ||
26659 | +#include <linux/sched.h> | ||
26660 | +#include <litmus/feather_trace.h> | ||
26661 | + | ||
26662 | +#ifdef CONFIG_SCHED_TASK_TRACE | ||
26663 | + | ||
26664 | +#define SCHED_TRACE(id, callback, task) \ | ||
26665 | + ft_event1(id, callback, task) | ||
26666 | +#define SCHED_TRACE2(id, callback, task, xtra) \ | ||
26667 | + ft_event2(id, callback, task, xtra) | ||
26668 | + | ||
26669 | +/* provide prototypes; needed on sparc64 */ | ||
26670 | +#ifndef NO_TASK_TRACE_DECLS | ||
26671 | +feather_callback void do_sched_trace_task_name(unsigned long id, | ||
26672 | + struct task_struct* task); | ||
26673 | +feather_callback void do_sched_trace_task_param(unsigned long id, | ||
26674 | + struct task_struct* task); | ||
26675 | +feather_callback void do_sched_trace_task_release(unsigned long id, | ||
26676 | + struct task_struct* task); | ||
26677 | +feather_callback void do_sched_trace_task_switch_to(unsigned long id, | ||
26678 | + struct task_struct* task); | ||
26679 | +feather_callback void do_sched_trace_task_switch_away(unsigned long id, | ||
26680 | + struct task_struct* task); | ||
26681 | +feather_callback void do_sched_trace_task_completion(unsigned long id, | ||
26682 | + struct task_struct* task, | ||
26683 | + unsigned long forced); | ||
26684 | +feather_callback void do_sched_trace_task_block(unsigned long id, | ||
26685 | + struct task_struct* task); | ||
26686 | +feather_callback void do_sched_trace_task_resume(unsigned long id, | ||
26687 | + struct task_struct* task); | ||
26688 | +feather_callback void do_sched_trace_sys_release(unsigned long id, | ||
26689 | + lt_t* start); | ||
26690 | +#endif | ||
26691 | + | ||
26692 | +#else | ||
26693 | + | ||
26694 | +#define SCHED_TRACE(id, callback, task) /* no tracing */ | ||
26695 | +#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | ||
26696 | + | ||
26697 | +#endif | ||
26698 | + | ||
26699 | + | ||
26700 | +#define SCHED_TRACE_BASE_ID 500 | ||
26701 | + | ||
26702 | + | ||
26703 | +#define sched_trace_task_name(t) \ | ||
26704 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) | ||
26705 | +#define sched_trace_task_param(t) \ | ||
26706 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) | ||
26707 | +#define sched_trace_task_release(t) \ | ||
26708 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) | ||
26709 | +#define sched_trace_task_switch_to(t) \ | ||
26710 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) | ||
26711 | +#define sched_trace_task_switch_away(t) \ | ||
26712 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) | ||
26713 | +#define sched_trace_task_completion(t, forced) \ | ||
26714 | + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ | ||
26715 | + (unsigned long) forced) | ||
26716 | +#define sched_trace_task_block(t) \ | ||
26717 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) | ||
26718 | +#define sched_trace_task_resume(t) \ | ||
26719 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) | ||
26720 | +/* when is a pointer, so it does not need an explicit cast to unsigned long */ | ||
26721 | +#define sched_trace_sys_release(when) \ | ||
26722 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) | ||
26723 | + | ||
26724 | +#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | ||
26725 | + | ||
26726 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
26727 | +void sched_trace_log_message(const char* fmt, ...); | ||
26728 | +void dump_trace_buffer(int max); | ||
26729 | +#else | ||
26730 | + | ||
26731 | +#define sched_trace_log_message(fmt, ...) | ||
26732 | + | ||
26733 | +#endif | ||
26734 | + | ||
26735 | +#endif /* __KERNEL__ */ | ||
26736 | + | ||
26737 | +#endif | ||
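On the analysis side, the stream is a sequence of fixed-size st_event_record entries, so a decoder can simply switch on hdr.type. The user-space sketch below assumes the fixed-width types used in the header are available, that records have already been read from the trace device, and that only two record types are of interest.

#include <stdio.h>

static void print_record(const struct st_event_record *rec)
{
	switch (rec->hdr.type) {
	case ST_RELEASE:
		printf("pid %u job %u: release=%llu deadline=%llu\n",
		       rec->hdr.pid, rec->hdr.job,
		       (unsigned long long) rec->data.release.release,
		       (unsigned long long) rec->data.release.deadline);
		break;
	case ST_COMPLETION:
		printf("pid %u job %u: completed at %llu (forced=%u)\n",
		       rec->hdr.pid, rec->hdr.job,
		       (unsigned long long) rec->data.completion.when,
		       rec->data.completion.forced);
		break;
	default:
		break;	/* remaining record types elided for brevity */
	}
}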
26738 | diff --git a/include/litmus/trace.h b/include/litmus/trace.h | ||
26739 | new file mode 100644 | ||
26740 | index 0000000..b32c711 | ||
26741 | --- /dev/null | ||
26742 | +++ b/include/litmus/trace.h | ||
26743 | @@ -0,0 +1,113 @@ | ||
26744 | +#ifndef _SYS_TRACE_H_ | ||
26745 | +#define _SYS_TRACE_H_ | ||
26746 | + | ||
26747 | +#ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
26748 | + | ||
26749 | +#include <litmus/feather_trace.h> | ||
26750 | +#include <litmus/feather_buffer.h> | ||
26751 | + | ||
26752 | + | ||
26753 | +/*********************** TIMESTAMPS ************************/ | ||
26754 | + | ||
26755 | +enum task_type_marker { | ||
26756 | + TSK_BE, | ||
26757 | + TSK_RT, | ||
26758 | + TSK_UNKNOWN | ||
26759 | +}; | ||
26760 | + | ||
26761 | +struct timestamp { | ||
26762 | + uint64_t timestamp; | ||
26763 | + uint32_t seq_no; | ||
26764 | + uint8_t cpu; | ||
26765 | + uint8_t event; | ||
26766 | + uint8_t task_type; | ||
26767 | +}; | ||
26768 | + | ||
26769 | +/* tracing callbacks */ | ||
26770 | +feather_callback void save_timestamp(unsigned long event); | ||
26771 | +feather_callback void save_timestamp_def(unsigned long event, unsigned long type); | ||
26772 | +feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); | ||
26773 | +feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); | ||
26774 | + | ||
26775 | + | ||
26776 | +#define TIMESTAMP(id) ft_event0(id, save_timestamp) | ||
26777 | + | ||
26778 | +#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def) | ||
26779 | + | ||
26780 | +#define TTIMESTAMP(id, task) \ | ||
26781 | + ft_event1(id, save_timestamp_task, (unsigned long) task) | ||
26782 | + | ||
26783 | +#define CTIMESTAMP(id, cpu) \ | ||
26784 | + ft_event1(id, save_timestamp_cpu, (unsigned long) cpu) | ||
26785 | + | ||
26786 | +#else /* !CONFIG_SCHED_OVERHEAD_TRACE */ | ||
26787 | + | ||
26788 | +#define TIMESTAMP(id) /* no tracing */ | ||
26789 | + | ||
26790 | +#define DTIMESTAMP(id, def) /* no tracing */ | ||
26791 | + | ||
26792 | +#define TTIMESTAMP(id, task) /* no tracing */ | ||
26793 | + | ||
26794 | +#define CTIMESTAMP(id, cpu) /* no tracing */ | ||
26795 | + | ||
26796 | +#endif | ||
26797 | + | ||
26798 | + | ||
26799 | +/* Convention for timestamps | ||
26800 | + * ========================= | ||
26801 | + * | ||
26802 | + * In order to process the trace files with a common tool, we use the following | ||
26803 | + * convention to measure execution times: The end time id of a code segment is | ||
26804 | + * always the next number after the start time event id. | ||
26805 | + */ | ||
26806 | + | ||
26807 | +#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) /* we only | ||
26808 | + * care | ||
26809 | + * about | ||
26810 | + * next */ | ||
26811 | +#define TS_SCHED_END(t) TTIMESTAMP(101, t) | ||
26812 | +#define TS_SCHED2_START(t) TTIMESTAMP(102, t) | ||
26813 | +#define TS_SCHED2_END(t) TTIMESTAMP(103, t) | ||
26814 | + | ||
26815 | +#define TS_CXS_START(t) TTIMESTAMP(104, t) | ||
26816 | +#define TS_CXS_END(t) TTIMESTAMP(105, t) | ||
26817 | + | ||
26818 | +#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT) | ||
26819 | +#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT) | ||
26820 | + | ||
26821 | +#define TS_TICK_START(t) TTIMESTAMP(110, t) | ||
26822 | +#define TS_TICK_END(t) TTIMESTAMP(111, t) | ||
26823 | + | ||
26824 | + | ||
26825 | +#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */ | ||
26826 | +#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */ | ||
26827 | + | ||
26828 | +#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */ | ||
26829 | +#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */ | ||
26830 | + | ||
26831 | +#define TS_ENTER_NP_START TIMESTAMP(140) | ||
26832 | +#define TS_ENTER_NP_END TIMESTAMP(141) | ||
26833 | + | ||
26834 | +#define TS_EXIT_NP_START TIMESTAMP(150) | ||
26835 | +#define TS_EXIT_NP_END TIMESTAMP(151) | ||
26836 | + | ||
26837 | +#define TS_SRP_UP_START TIMESTAMP(160) | ||
26838 | +#define TS_SRP_UP_END TIMESTAMP(161) | ||
26839 | +#define TS_SRP_DOWN_START TIMESTAMP(162) | ||
26840 | +#define TS_SRP_DOWN_END TIMESTAMP(163) | ||
26841 | + | ||
26842 | +#define TS_PI_UP_START TIMESTAMP(170) | ||
26843 | +#define TS_PI_UP_END TIMESTAMP(171) | ||
26844 | +#define TS_PI_DOWN_START TIMESTAMP(172) | ||
26845 | +#define TS_PI_DOWN_END TIMESTAMP(173) | ||
26846 | + | ||
26847 | +#define TS_FIFO_UP_START TIMESTAMP(180) | ||
26848 | +#define TS_FIFO_UP_END TIMESTAMP(181) | ||
26849 | +#define TS_FIFO_DOWN_START TIMESTAMP(182) | ||
26850 | +#define TS_FIFO_DOWN_END TIMESTAMP(183) | ||
26851 | + | ||
26852 | +#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | ||
26853 | +#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) | ||
26854 | + | ||
26855 | + | ||
26856 | +#endif /* !_SYS_TRACE_H_ */ | ||
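Given the start/end pairing convention above, an off-line tool can match records by event id and subtract timestamps. The helper below is a sketch of that rule only; pair matching, filtering, and the timestamp units (typically a cycle counter) are outside this header and assumed here.

/* An end id is always start id + 1, e.g. (100,101) for SCHED, (104,105) for
 * CXS, (106,107) for RELEASE, and (190,191) for SEND_RESCHED. */
static uint64_t overhead(const struct timestamp *start,
			 const struct timestamp *end)
{
	/* the caller is assumed to have matched the pair: same measured
	 * interval and end->event == start->event + 1 */
	return end->timestamp - start->timestamp;
}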
26857 | diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h | ||
26858 | new file mode 100644 | ||
26859 | index 0000000..dbddc65 | ||
26860 | --- /dev/null | ||
26861 | +++ b/include/litmus/unistd_32.h | ||
26862 | @@ -0,0 +1,23 @@ | ||
26863 | +/* | ||
26864 | + * included from arch/x86/include/asm/unistd_32.h | ||
26865 | + * | ||
26866 | + * LITMUS^RT syscalls with "relative" numbers | ||
26867 | + */ | ||
26868 | +#define __LSC(x) (__NR_LITMUS + x) | ||
26869 | + | ||
26870 | +#define __NR_set_rt_task_param __LSC(0) | ||
26871 | +#define __NR_get_rt_task_param __LSC(1) | ||
26872 | +#define __NR_complete_job __LSC(2) | ||
26873 | +#define __NR_od_open __LSC(3) | ||
26874 | +#define __NR_od_close __LSC(4) | ||
26875 | +#define __NR_fmlp_down __LSC(5) | ||
26876 | +#define __NR_fmlp_up __LSC(6) | ||
26877 | +#define __NR_srp_down __LSC(7) | ||
26878 | +#define __NR_srp_up __LSC(8) | ||
26879 | +#define __NR_query_job_no __LSC(9) | ||
26880 | +#define __NR_wait_for_job_release __LSC(10) | ||
26881 | +#define __NR_wait_for_ts_release __LSC(11) | ||
26882 | +#define __NR_release_ts __LSC(12) | ||
26883 | +#define __NR_null_call __LSC(13) | ||
26884 | + | ||
26885 | +#define NR_litmus_syscalls 14 | ||
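A quick illustration of the relative numbering; the base __NR_LITMUS is defined in arch/x86/include/asm/unistd_32.h and is not shown in this hunk, so the value below is assumed.

/* If __NR_LITMUS were 337, then
 *   __NR_set_rt_task_param == __LSC(0)  == 337
 *   __NR_complete_job      == __LSC(2)  == 339
 *   __NR_null_call         == __LSC(13) == 350
 * i.e. the 14 LITMUS^RT syscalls occupy consecutive numbers after the base.
 */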
26886 | diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h | ||
26887 | new file mode 100644 | ||
26888 | index 0000000..f0618e7 | ||
26889 | --- /dev/null | ||
26890 | +++ b/include/litmus/unistd_64.h | ||
26891 | @@ -0,0 +1,37 @@ | ||
26892 | +/* | ||
26893 | + * included from arch/x86/include/asm/unistd_64.h | ||
26894 | + * | ||
26895 | + * LITMUS^RT syscalls with "relative" numbers | ||
26896 | + */ | ||
26897 | +#define __LSC(x) (__NR_LITMUS + x) | ||
26898 | + | ||
26899 | +#define __NR_set_rt_task_param __LSC(0) | ||
26900 | +__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param) | ||
26901 | +#define __NR_get_rt_task_param __LSC(1) | ||
26902 | +__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param) | ||
26903 | +#define __NR_complete_job __LSC(2) | ||
26904 | +__SYSCALL(__NR_complete_job, sys_complete_job) | ||
26905 | +#define __NR_od_open __LSC(3) | ||
26906 | +__SYSCALL(__NR_od_open, sys_od_open) | ||
26907 | +#define __NR_od_close __LSC(4) | ||
26908 | +__SYSCALL(__NR_od_close, sys_od_close) | ||
26909 | +#define __NR_fmlp_down __LSC(5) | ||
26910 | +__SYSCALL(__NR_fmlp_down, sys_fmlp_down) | ||
26911 | +#define __NR_fmlp_up __LSC(6) | ||
26912 | +__SYSCALL(__NR_fmlp_up, sys_fmlp_up) | ||
26913 | +#define __NR_srp_down __LSC(7) | ||
26914 | +__SYSCALL(__NR_srp_down, sys_srp_down) | ||
26915 | +#define __NR_srp_up __LSC(8) | ||
26916 | +__SYSCALL(__NR_srp_up, sys_srp_up) | ||
26917 | +#define __NR_query_job_no __LSC(9) | ||
26918 | +__SYSCALL(__NR_query_job_no, sys_query_job_no) | ||
26919 | +#define __NR_wait_for_job_release __LSC(10) | ||
26920 | +__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release) | ||
26921 | +#define __NR_wait_for_ts_release __LSC(11) | ||
26922 | +__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | ||
26923 | +#define __NR_release_ts __LSC(12) | ||
26924 | +__SYSCALL(__NR_release_ts, sys_release_ts) | ||
26925 | +#define __NR_null_call __LSC(13) | ||
26926 | +__SYSCALL(__NR_null_call, sys_null_call) | ||
26927 | + | ||
26928 | +#define NR_litmus_syscalls 14 | ||
26929 | diff --git a/include/net/ip.h b/include/net/ip.h | ||
26930 | index 69db943..2f47e54 100644 | ||
26931 | --- a/include/net/ip.h | ||
26932 | +++ b/include/net/ip.h | ||
26933 | @@ -342,7 +342,6 @@ enum ip_defrag_users | ||
26934 | IP_DEFRAG_CALL_RA_CHAIN, | ||
26935 | IP_DEFRAG_CONNTRACK_IN, | ||
26936 | IP_DEFRAG_CONNTRACK_OUT, | ||
26937 | - IP_DEFRAG_CONNTRACK_BRIDGE_IN, | ||
26938 | IP_DEFRAG_VS_IN, | ||
26939 | IP_DEFRAG_VS_OUT, | ||
26940 | IP_DEFRAG_VS_FWD | ||
26941 | diff --git a/include/net/ipv6.h b/include/net/ipv6.h | ||
26942 | index 639bbf0..8c31d8a 100644 | ||
26943 | --- a/include/net/ipv6.h | ||
26944 | +++ b/include/net/ipv6.h | ||
26945 | @@ -354,16 +354,8 @@ static inline int ipv6_prefix_equal(const struct in6_addr *a1, | ||
26946 | |||
26947 | struct inet_frag_queue; | ||
26948 | |||
26949 | -enum ip6_defrag_users { | ||
26950 | - IP6_DEFRAG_LOCAL_DELIVER, | ||
26951 | - IP6_DEFRAG_CONNTRACK_IN, | ||
26952 | - IP6_DEFRAG_CONNTRACK_OUT, | ||
26953 | - IP6_DEFRAG_CONNTRACK_BRIDGE_IN, | ||
26954 | -}; | ||
26955 | - | ||
26956 | struct ip6_create_arg { | ||
26957 | __be32 id; | ||
26958 | - u32 user; | ||
26959 | struct in6_addr *src; | ||
26960 | struct in6_addr *dst; | ||
26961 | }; | ||
26962 | diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h | ||
26963 | index 1ee717e..abc55ad 100644 | ||
26964 | --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h | ||
26965 | +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h | ||
26966 | @@ -9,7 +9,7 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; | ||
26967 | |||
26968 | extern int nf_ct_frag6_init(void); | ||
26969 | extern void nf_ct_frag6_cleanup(void); | ||
26970 | -extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user); | ||
26971 | +extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb); | ||
26972 | extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, | ||
26973 | struct net_device *in, | ||
26974 | struct net_device *out, | ||
26975 | diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h | ||
26976 | index 63d4498..ba1ba0c 100644 | ||
26977 | --- a/include/net/netns/conntrack.h | ||
26978 | +++ b/include/net/netns/conntrack.h | ||
26979 | @@ -11,8 +11,6 @@ struct nf_conntrack_ecache; | ||
26980 | struct netns_ct { | ||
26981 | atomic_t count; | ||
26982 | unsigned int expect_count; | ||
26983 | - unsigned int htable_size; | ||
26984 | - struct kmem_cache *nf_conntrack_cachep; | ||
26985 | struct hlist_nulls_head *hash; | ||
26986 | struct hlist_head *expect_hash; | ||
26987 | struct hlist_nulls_head unconfirmed; | ||
26988 | @@ -30,6 +28,5 @@ struct netns_ct { | ||
26989 | #endif | ||
26990 | int hash_vmalloc; | ||
26991 | int expect_vmalloc; | ||
26992 | - char *slabname; | ||
26993 | }; | ||
26994 | #endif | ||
26995 | diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h | ||
26996 | index 9a4b8b7..2eb3814 100644 | ||
26997 | --- a/include/net/netns/ipv4.h | ||
26998 | +++ b/include/net/netns/ipv4.h | ||
26999 | @@ -40,7 +40,6 @@ struct netns_ipv4 { | ||
27000 | struct xt_table *iptable_security; | ||
27001 | struct xt_table *nat_table; | ||
27002 | struct hlist_head *nat_bysource; | ||
27003 | - unsigned int nat_htable_size; | ||
27004 | int nat_vmalloced; | ||
27005 | #endif | ||
27006 | |||
27007 | diff --git a/include/net/netrom.h b/include/net/netrom.h | ||
27008 | index ab170a6..15696b1 100644 | ||
27009 | --- a/include/net/netrom.h | ||
27010 | +++ b/include/net/netrom.h | ||
27011 | @@ -132,8 +132,6 @@ static __inline__ void nr_node_put(struct nr_node *nr_node) | ||
27012 | static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh) | ||
27013 | { | ||
27014 | if (atomic_dec_and_test(&nr_neigh->refcount)) { | ||
27015 | - if (nr_neigh->ax25) | ||
27016 | - ax25_cb_put(nr_neigh->ax25); | ||
27017 | kfree(nr_neigh->digipeat); | ||
27018 | kfree(nr_neigh); | ||
27019 | } | ||
27020 | diff --git a/include/net/tcp.h b/include/net/tcp.h | ||
27021 | index 842ac4d..03a49c7 100644 | ||
27022 | --- a/include/net/tcp.h | ||
27023 | +++ b/include/net/tcp.h | ||
27024 | @@ -1263,20 +1263,14 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu | ||
27025 | * TCP connection after "boundary" unsuccessful, exponentially backed-off | ||
27026 | * retransmissions with an initial RTO of TCP_RTO_MIN. | ||
27027 | */ | ||
27028 | -static inline bool retransmits_timed_out(struct sock *sk, | ||
27029 | +static inline bool retransmits_timed_out(const struct sock *sk, | ||
27030 | unsigned int boundary) | ||
27031 | { | ||
27032 | unsigned int timeout, linear_backoff_thresh; | ||
27033 | - unsigned int start_ts; | ||
27034 | |||
27035 | if (!inet_csk(sk)->icsk_retransmits) | ||
27036 | return false; | ||
27037 | |||
27038 | - if (unlikely(!tcp_sk(sk)->retrans_stamp)) | ||
27039 | - start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; | ||
27040 | - else | ||
27041 | - start_ts = tcp_sk(sk)->retrans_stamp; | ||
27042 | - | ||
27043 | linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); | ||
27044 | |||
27045 | if (boundary <= linear_backoff_thresh) | ||
27046 | @@ -1285,7 +1279,7 @@ static inline bool retransmits_timed_out(struct sock *sk, | ||
27047 | timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + | ||
27048 | (boundary - linear_backoff_thresh) * TCP_RTO_MAX; | ||
27049 | |||
27050 | - return (tcp_time_stamp - start_ts) >= timeout; | ||
27051 | + return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= timeout; | ||
27052 | } | ||
27053 | |||
27054 | static inline struct sk_buff *tcp_send_head(struct sock *sk) | ||
27055 | diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h | ||
27056 | index 148126d..c35d238 100644 | ||
27057 | --- a/include/scsi/fc_frame.h | ||
27058 | +++ b/include/scsi/fc_frame.h | ||
27059 | @@ -37,9 +37,6 @@ | ||
27060 | #define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */ | ||
27061 | #define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */ | ||
27062 | |||
27063 | -/* Max number of skb frags allowed, reserving one for fcoe_crc_eof page */ | ||
27064 | -#define FC_FRAME_SG_LEN (MAX_SKB_FRAGS - 1) | ||
27065 | - | ||
27066 | #define fp_skb(fp) (&((fp)->skb)) | ||
27067 | #define fr_hdr(fp) ((fp)->skb.data) | ||
27068 | #define fr_len(fp) ((fp)->skb.len) | ||
27069 | diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h | ||
27070 | index 09a124b..65dc9aa 100644 | ||
27071 | --- a/include/scsi/libfc.h | ||
27072 | +++ b/include/scsi/libfc.h | ||
27073 | @@ -145,7 +145,6 @@ enum fc_rport_state { | ||
27074 | RPORT_ST_LOGO, /* port logout sent */ | ||
27075 | RPORT_ST_ADISC, /* Discover Address sent */ | ||
27076 | RPORT_ST_DELETE, /* port being deleted */ | ||
27077 | - RPORT_ST_RESTART, /* remote port being deleted and will restart */ | ||
27078 | }; | ||
27079 | |||
27080 | /** | ||
27081 | diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h | ||
27082 | index 6856612..2cc8e8b 100644 | ||
27083 | --- a/include/scsi/osd_protocol.h | ||
27084 | +++ b/include/scsi/osd_protocol.h | ||
27085 | @@ -17,7 +17,6 @@ | ||
27086 | #define __OSD_PROTOCOL_H__ | ||
27087 | |||
27088 | #include <linux/types.h> | ||
27089 | -#include <linux/kernel.h> | ||
27090 | #include <asm/unaligned.h> | ||
27091 | #include <scsi/scsi.h> | ||
27092 | |||
27093 | diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h | ||
27094 | index 0b4baba..47941fc 100644 | ||
27095 | --- a/include/scsi/scsi_host.h | ||
27096 | +++ b/include/scsi/scsi_host.h | ||
27097 | @@ -677,12 +677,6 @@ struct Scsi_Host { | ||
27098 | void *shost_data; | ||
27099 | |||
27100 | /* | ||
27101 | - * Points to the physical bus device we'd use to do DMA | ||
27102 | - * Needed just in case we have virtual hosts. | ||
27103 | - */ | ||
27104 | - struct device *dma_dev; | ||
27105 | - | ||
27106 | - /* | ||
27107 | * We should ensure that this is aligned, both for better performance | ||
27108 | * and also because some compilers (m68k) don't automatically force | ||
27109 | * alignment to a long boundary. | ||
27110 | @@ -726,9 +720,7 @@ extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); | ||
27111 | extern void scsi_flush_work(struct Scsi_Host *); | ||
27112 | |||
27113 | extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); | ||
27114 | -extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, | ||
27115 | - struct device *, | ||
27116 | - struct device *); | ||
27117 | +extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *); | ||
27118 | extern void scsi_scan_host(struct Scsi_Host *); | ||
27119 | extern void scsi_rescan_device(struct device *); | ||
27120 | extern void scsi_remove_host(struct Scsi_Host *); | ||
27121 | @@ -739,12 +731,6 @@ extern const char *scsi_host_state_name(enum scsi_host_state); | ||
27122 | |||
27123 | extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *); | ||
27124 | |||
27125 | -static inline int __must_check scsi_add_host(struct Scsi_Host *host, | ||
27126 | - struct device *dev) | ||
27127 | -{ | ||
27128 | - return scsi_add_host_with_dma(host, dev, dev); | ||
27129 | -} | ||
27130 | - | ||
27131 | static inline struct device *scsi_get_device(struct Scsi_Host *shost) | ||
27132 | { | ||
27133 | return shost->shost_gendev.parent; | ||
27134 | diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h | ||
27135 | index dacb8ef..cc0d966 100644 | ||
27136 | --- a/include/trace/ftrace.h | ||
27137 | +++ b/include/trace/ftrace.h | ||
27138 | @@ -159,7 +159,7 @@ | ||
27139 | #undef __get_str | ||
27140 | |||
27141 | #undef TP_printk | ||
27142 | -#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args) | ||
27143 | +#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | ||
27144 | |||
27145 | #undef TP_fast_assign | ||
27146 | #define TP_fast_assign(args...) args | ||
27147 | diff --git a/kernel/acct.c b/kernel/acct.c | ||
27148 | index a6605ca..9a4715a 100644 | ||
27149 | --- a/kernel/acct.c | ||
27150 | +++ b/kernel/acct.c | ||
27151 | @@ -536,8 +536,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, | ||
27152 | do_div(elapsed, AHZ); | ||
27153 | ac.ac_btime = get_seconds() - elapsed; | ||
27154 | /* we really need to bite the bullet and change layout */ | ||
27155 | - ac.ac_uid = orig_cred->uid; | ||
27156 | - ac.ac_gid = orig_cred->gid; | ||
27157 | + current_uid_gid(&ac.ac_uid, &ac.ac_gid); | ||
27158 | #if ACCT_VERSION==2 | ||
27159 | ac.ac_ahz = AHZ; | ||
27160 | #endif | ||
27161 | diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c | ||
27162 | index 4b05bd9..2451dc6 100644 | ||
27163 | --- a/kernel/audit_tree.c | ||
27164 | +++ b/kernel/audit_tree.c | ||
27165 | @@ -277,7 +277,7 @@ static void untag_chunk(struct node *p) | ||
27166 | owner->root = NULL; | ||
27167 | } | ||
27168 | |||
27169 | - for (i = j = 0; j <= size; i++, j++) { | ||
27170 | + for (i = j = 0; i < size; i++, j++) { | ||
27171 | struct audit_tree *s; | ||
27172 | if (&chunk->owners[j] == p) { | ||
27173 | list_del_init(&p->list); | ||
27174 | @@ -290,7 +290,7 @@ static void untag_chunk(struct node *p) | ||
27175 | if (!s) /* result of earlier fallback */ | ||
27176 | continue; | ||
27177 | get_tree(s); | ||
27178 | - list_replace_init(&chunk->owners[j].list, &new->owners[i].list); | ||
27179 | + list_replace_init(&chunk->owners[i].list, &new->owners[j].list); | ||
27180 | } | ||
27181 | |||
27182 | list_replace_rcu(&chunk->hash, &new->hash); | ||
27183 | @@ -373,17 +373,15 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) | ||
27184 | for (n = 0; n < old->count; n++) { | ||
27185 | if (old->owners[n].owner == tree) { | ||
27186 | spin_unlock(&hash_lock); | ||
27187 | - put_inotify_watch(&old->watch); | ||
27188 | + put_inotify_watch(watch); | ||
27189 | return 0; | ||
27190 | } | ||
27191 | } | ||
27192 | spin_unlock(&hash_lock); | ||
27193 | |||
27194 | chunk = alloc_chunk(old->count + 1); | ||
27195 | - if (!chunk) { | ||
27196 | - put_inotify_watch(&old->watch); | ||
27197 | + if (!chunk) | ||
27198 | return -ENOMEM; | ||
27199 | - } | ||
27200 | |||
27201 | mutex_lock(&inode->inotify_mutex); | ||
27202 | if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) { | ||
27203 | @@ -427,8 +425,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) | ||
27204 | spin_unlock(&hash_lock); | ||
27205 | inotify_evict_watch(&old->watch); | ||
27206 | mutex_unlock(&inode->inotify_mutex); | ||
27207 | - put_inotify_watch(&old->watch); /* pair to inotify_find_watch */ | ||
27208 | - put_inotify_watch(&old->watch); /* and kill it */ | ||
27209 | + put_inotify_watch(&old->watch); | ||
27210 | return 0; | ||
27211 | } | ||
27212 | |||
27213 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c | ||
27214 | index 1fbcc74..0249f4b 100644 | ||
27215 | --- a/kernel/cgroup.c | ||
27216 | +++ b/kernel/cgroup.c | ||
27217 | @@ -2468,6 +2468,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp, | ||
27218 | /* make sure l doesn't vanish out from under us */ | ||
27219 | down_write(&l->mutex); | ||
27220 | mutex_unlock(&cgrp->pidlist_mutex); | ||
27221 | + l->use_count++; | ||
27222 | return l; | ||
27223 | } | ||
27224 | } | ||
27225 | diff --git a/kernel/cpu.c b/kernel/cpu.c | ||
27226 | index 291ac58..6ba0f1e 100644 | ||
27227 | --- a/kernel/cpu.c | ||
27228 | +++ b/kernel/cpu.c | ||
27229 | @@ -212,8 +212,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | ||
27230 | err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, | ||
27231 | hcpu, -1, &nr_calls); | ||
27232 | if (err == NOTIFY_BAD) { | ||
27233 | - set_cpu_active(cpu, true); | ||
27234 | - | ||
27235 | nr_calls--; | ||
27236 | __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, | ||
27237 | hcpu, nr_calls, NULL); | ||
27238 | @@ -225,11 +223,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | ||
27239 | |||
27240 | /* Ensure that we are not runnable on dying cpu */ | ||
27241 | cpumask_copy(old_allowed, &current->cpus_allowed); | ||
27242 | - set_cpus_allowed_ptr(current, cpu_active_mask); | ||
27243 | + set_cpus_allowed_ptr(current, | ||
27244 | + cpumask_of(cpumask_any_but(cpu_online_mask, cpu))); | ||
27245 | |||
27246 | err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); | ||
27247 | if (err) { | ||
27248 | - set_cpu_active(cpu, true); | ||
27249 | /* CPU didn't die: tell everyone. Can't complain. */ | ||
27250 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, | ||
27251 | hcpu) == NOTIFY_BAD) | ||
27252 | @@ -294,6 +292,9 @@ int __ref cpu_down(unsigned int cpu) | ||
27253 | |||
27254 | err = _cpu_down(cpu, 0); | ||
27255 | |||
27256 | + if (cpu_online(cpu)) | ||
27257 | + set_cpu_active(cpu, true); | ||
27258 | + | ||
27259 | out: | ||
27260 | cpu_maps_update_done(); | ||
27261 | stop_machine_destroy(); | ||
27262 | @@ -386,23 +387,15 @@ int disable_nonboot_cpus(void) | ||
27263 | * with the userspace trying to use the CPU hotplug at the same time | ||
27264 | */ | ||
27265 | cpumask_clear(frozen_cpus); | ||
27266 | - | ||
27267 | - for_each_online_cpu(cpu) { | ||
27268 | - if (cpu == first_cpu) | ||
27269 | - continue; | ||
27270 | - set_cpu_active(cpu, false); | ||
27271 | - } | ||
27272 | - | ||
27273 | - synchronize_sched(); | ||
27274 | - | ||
27275 | printk("Disabling non-boot CPUs ...\n"); | ||
27276 | for_each_online_cpu(cpu) { | ||
27277 | if (cpu == first_cpu) | ||
27278 | continue; | ||
27279 | error = _cpu_down(cpu, 1); | ||
27280 | - if (!error) | ||
27281 | + if (!error) { | ||
27282 | cpumask_set_cpu(cpu, frozen_cpus); | ||
27283 | - else { | ||
27284 | + printk("CPU%d is down\n", cpu); | ||
27285 | + } else { | ||
27286 | printk(KERN_ERR "Error taking CPU%d down: %d\n", | ||
27287 | cpu, error); | ||
27288 | break; | ||
27289 | diff --git a/kernel/cpuset.c b/kernel/cpuset.c | ||
27290 | index 39e5121..b5cb469 100644 | ||
27291 | --- a/kernel/cpuset.c | ||
27292 | +++ b/kernel/cpuset.c | ||
27293 | @@ -873,7 +873,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, | ||
27294 | if (retval < 0) | ||
27295 | return retval; | ||
27296 | |||
27297 | - if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask)) | ||
27298 | + if (!cpumask_subset(trialcs->cpus_allowed, cpu_online_mask)) | ||
27299 | return -EINVAL; | ||
27300 | } | ||
27301 | retval = validate_change(cs, trialcs); | ||
27302 | @@ -2011,7 +2011,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) | ||
27303 | } | ||
27304 | |||
27305 | /* Continue past cpusets with all cpus, mems online */ | ||
27306 | - if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) && | ||
27307 | + if (cpumask_subset(cp->cpus_allowed, cpu_online_mask) && | ||
27308 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) | ||
27309 | continue; | ||
27310 | |||
27311 | @@ -2020,7 +2020,7 @@ static void scan_for_empty_cpusets(struct cpuset *root) | ||
27312 | /* Remove offline cpus and mems from this cpuset. */ | ||
27313 | mutex_lock(&callback_mutex); | ||
27314 | cpumask_and(cp->cpus_allowed, cp->cpus_allowed, | ||
27315 | - cpu_active_mask); | ||
27316 | + cpu_online_mask); | ||
27317 | nodes_and(cp->mems_allowed, cp->mems_allowed, | ||
27318 | node_states[N_HIGH_MEMORY]); | ||
27319 | mutex_unlock(&callback_mutex); | ||
27320 | @@ -2058,10 +2058,8 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | ||
27321 | switch (phase) { | ||
27322 | case CPU_ONLINE: | ||
27323 | case CPU_ONLINE_FROZEN: | ||
27324 | - case CPU_DOWN_PREPARE: | ||
27325 | - case CPU_DOWN_PREPARE_FROZEN: | ||
27326 | - case CPU_DOWN_FAILED: | ||
27327 | - case CPU_DOWN_FAILED_FROZEN: | ||
27328 | + case CPU_DEAD: | ||
27329 | + case CPU_DEAD_FROZEN: | ||
27330 | break; | ||
27331 | |||
27332 | default: | ||
27333 | @@ -2070,7 +2068,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | ||
27334 | |||
27335 | cgroup_lock(); | ||
27336 | mutex_lock(&callback_mutex); | ||
27337 | - cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); | ||
27338 | + cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); | ||
27339 | mutex_unlock(&callback_mutex); | ||
27340 | scan_for_empty_cpusets(&top_cpuset); | ||
27341 | ndoms = generate_sched_domains(&doms, &attr); | ||
27342 | @@ -2117,7 +2115,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self, | ||
27343 | |||
27344 | void __init cpuset_init_smp(void) | ||
27345 | { | ||
27346 | - cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask); | ||
27347 | + cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); | ||
27348 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
27349 | |||
27350 | hotcpu_notifier(cpuset_track_online_cpus, 0); | ||
27351 | diff --git a/kernel/cred.c b/kernel/cred.c | ||
27352 | index 1ed8ca1..dd76cfe 100644 | ||
27353 | --- a/kernel/cred.c | ||
27354 | +++ b/kernel/cred.c | ||
27355 | @@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void) | ||
27356 | #ifdef CONFIG_KEYS | ||
27357 | new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL); | ||
27358 | if (!new->tgcred) { | ||
27359 | - kmem_cache_free(cred_jar, new); | ||
27360 | + kfree(new); | ||
27361 | return NULL; | ||
27362 | } | ||
27363 | atomic_set(&new->tgcred->usage, 1); | ||
27364 | diff --git a/kernel/exit.c b/kernel/exit.c | ||
27365 | index f7864ac..3da0425 100644 | ||
27366 | --- a/kernel/exit.c | ||
27367 | +++ b/kernel/exit.c | ||
27368 | @@ -56,6 +56,8 @@ | ||
27369 | #include <asm/mmu_context.h> | ||
27370 | #include "cred-internals.h" | ||
27371 | |||
27372 | +extern void exit_od_table(struct task_struct *t); | ||
27373 | + | ||
27374 | static void exit_mm(struct task_struct * tsk); | ||
27375 | |||
27376 | static void __unhash_process(struct task_struct *p) | ||
27377 | @@ -954,6 +956,8 @@ NORET_TYPE void do_exit(long code) | ||
27378 | if (unlikely(tsk->audit_context)) | ||
27379 | audit_free(tsk); | ||
27380 | |||
27381 | + exit_od_table(tsk); | ||
27382 | + | ||
27383 | tsk->exit_code = code; | ||
27384 | taskstats_exit(tsk, group_dead); | ||
27385 | |||
27386 | diff --git a/kernel/fork.c b/kernel/fork.c | ||
27387 | index 166b8c4..9fad346 100644 | ||
27388 | --- a/kernel/fork.c | ||
27389 | +++ b/kernel/fork.c | ||
27390 | @@ -74,6 +74,9 @@ | ||
27391 | |||
27392 | #include <trace/events/sched.h> | ||
27393 | |||
27394 | +#include <litmus/litmus.h> | ||
27395 | +#include <litmus/sched_plugin.h> | ||
27396 | + | ||
27397 | /* | ||
27398 | * Protected counters by write_lock_irq(&tasklist_lock) | ||
27399 | */ | ||
27400 | @@ -162,6 +165,7 @@ void __put_task_struct(struct task_struct *tsk) | ||
27401 | WARN_ON(atomic_read(&tsk->usage)); | ||
27402 | WARN_ON(tsk == current); | ||
27403 | |||
27404 | + exit_litmus(tsk); | ||
27405 | exit_creds(tsk); | ||
27406 | delayacct_tsk_free(tsk); | ||
27407 | |||
27408 | @@ -244,6 +248,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | ||
27409 | |||
27410 | tsk->stack = ti; | ||
27411 | |||
27412 | + /* Don't let the new task be a real-time task. */ | ||
27413 | + litmus_fork(tsk); | ||
27414 | + | ||
27415 | err = prop_local_init_single(&tsk->dirties); | ||
27416 | if (err) | ||
27417 | goto out; | ||
27418 | diff --git a/kernel/futex.c b/kernel/futex.c | ||
27419 | index 1ad4fa6..fb65e82 100644 | ||
27420 | --- a/kernel/futex.c | ||
27421 | +++ b/kernel/futex.c | ||
27422 | @@ -203,6 +203,8 @@ static void drop_futex_key_refs(union futex_key *key) | ||
27423 | * @uaddr: virtual address of the futex | ||
27424 | * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED | ||
27425 | * @key: address where result is stored. | ||
27426 | + * @rw: mapping needs to be read/write (values: VERIFY_READ, | ||
27427 | + * VERIFY_WRITE) | ||
27428 | * | ||
27429 | * Returns a negative error code or 0 | ||
27430 | * The key words are stored in *key on success. | ||
27431 | @@ -214,7 +216,7 @@ static void drop_futex_key_refs(union futex_key *key) | ||
27432 | * lock_page() might sleep, the caller should not hold a spinlock. | ||
27433 | */ | ||
27434 | static int | ||
27435 | -get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | ||
27436 | +get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | ||
27437 | { | ||
27438 | unsigned long address = (unsigned long)uaddr; | ||
27439 | struct mm_struct *mm = current->mm; | ||
27440 | @@ -237,7 +239,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | ||
27441 | * but access_ok() should be faster than find_vma() | ||
27442 | */ | ||
27443 | if (!fshared) { | ||
27444 | - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) | ||
27445 | + if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) | ||
27446 | return -EFAULT; | ||
27447 | key->private.mm = mm; | ||
27448 | key->private.address = address; | ||
27449 | @@ -246,7 +248,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) | ||
27450 | } | ||
27451 | |||
27452 | again: | ||
27453 | - err = get_user_pages_fast(address, 1, 1, &page); | ||
27454 | + err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page); | ||
27455 | if (err < 0) | ||
27456 | return err; | ||
27457 | |||
27458 | @@ -302,14 +304,8 @@ void put_futex_key(int fshared, union futex_key *key) | ||
27459 | */ | ||
27460 | static int fault_in_user_writeable(u32 __user *uaddr) | ||
27461 | { | ||
27462 | - struct mm_struct *mm = current->mm; | ||
27463 | - int ret; | ||
27464 | - | ||
27465 | - down_read(&mm->mmap_sem); | ||
27466 | - ret = get_user_pages(current, mm, (unsigned long)uaddr, | ||
27467 | - 1, 1, 0, NULL, NULL); | ||
27468 | - up_read(&mm->mmap_sem); | ||
27469 | - | ||
27470 | + int ret = get_user_pages(current, current->mm, (unsigned long)uaddr, | ||
27471 | + 1, 1, 0, NULL, NULL); | ||
27472 | return ret < 0 ? ret : 0; | ||
27473 | } | ||
27474 | |||
27475 | @@ -530,25 +526,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | ||
27476 | return -EINVAL; | ||
27477 | |||
27478 | WARN_ON(!atomic_read(&pi_state->refcount)); | ||
27479 | - | ||
27480 | - /* | ||
27481 | - * When pi_state->owner is NULL then the owner died | ||
27482 | - * and another waiter is on the fly. pi_state->owner | ||
27483 | - * is fixed up by the task which acquires | ||
27484 | - * pi_state->rt_mutex. | ||
27485 | - * | ||
27486 | - * We do not check for pid == 0 which can happen when | ||
27487 | - * the owner died and robust_list_exit() cleared the | ||
27488 | - * TID. | ||
27489 | - */ | ||
27490 | - if (pid && pi_state->owner) { | ||
27491 | - /* | ||
27492 | - * Bail out if user space manipulated the | ||
27493 | - * futex value. | ||
27494 | - */ | ||
27495 | - if (pid != task_pid_vnr(pi_state->owner)) | ||
27496 | - return -EINVAL; | ||
27497 | - } | ||
27498 | + WARN_ON(pid && pi_state->owner && | ||
27499 | + pi_state->owner->pid != pid); | ||
27500 | |||
27501 | atomic_inc(&pi_state->refcount); | ||
27502 | *ps = pi_state; | ||
27503 | @@ -775,13 +754,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) | ||
27504 | if (!pi_state) | ||
27505 | return -EINVAL; | ||
27506 | |||
27507 | - /* | ||
27508 | - * If current does not own the pi_state then the futex is | ||
27509 | - * inconsistent and user space fiddled with the futex value. | ||
27510 | - */ | ||
27511 | - if (pi_state->owner != current) | ||
27512 | - return -EINVAL; | ||
27513 | - | ||
27514 | spin_lock(&pi_state->pi_mutex.wait_lock); | ||
27515 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); | ||
27516 | |||
27517 | @@ -889,7 +861,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) | ||
27518 | if (!bitset) | ||
27519 | return -EINVAL; | ||
27520 | |||
27521 | - ret = get_futex_key(uaddr, fshared, &key); | ||
27522 | + ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ); | ||
27523 | if (unlikely(ret != 0)) | ||
27524 | goto out; | ||
27525 | |||
27526 | @@ -935,10 +907,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2, | ||
27527 | int ret, op_ret; | ||
27528 | |||
27529 | retry: | ||
27530 | - ret = get_futex_key(uaddr1, fshared, &key1); | ||
27531 | + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); | ||
27532 | if (unlikely(ret != 0)) | ||
27533 | goto out; | ||
27534 | - ret = get_futex_key(uaddr2, fshared, &key2); | ||
27535 | + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); | ||
27536 | if (unlikely(ret != 0)) | ||
27537 | goto out_put_key1; | ||
27538 | |||
27539 | @@ -1197,10 +1169,11 @@ retry: | ||
27540 | pi_state = NULL; | ||
27541 | } | ||
27542 | |||
27543 | - ret = get_futex_key(uaddr1, fshared, &key1); | ||
27544 | + ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ); | ||
27545 | if (unlikely(ret != 0)) | ||
27546 | goto out; | ||
27547 | - ret = get_futex_key(uaddr2, fshared, &key2); | ||
27548 | + ret = get_futex_key(uaddr2, fshared, &key2, | ||
27549 | + requeue_pi ? VERIFY_WRITE : VERIFY_READ); | ||
27550 | if (unlikely(ret != 0)) | ||
27551 | goto out_put_key1; | ||
27552 | |||
27553 | @@ -1759,7 +1732,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared, | ||
27554 | */ | ||
27555 | retry: | ||
27556 | q->key = FUTEX_KEY_INIT; | ||
27557 | - ret = get_futex_key(uaddr, fshared, &q->key); | ||
27558 | + ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ); | ||
27559 | if (unlikely(ret != 0)) | ||
27560 | return ret; | ||
27561 | |||
27562 | @@ -1925,7 +1898,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared, | ||
27563 | q.requeue_pi_key = NULL; | ||
27564 | retry: | ||
27565 | q.key = FUTEX_KEY_INIT; | ||
27566 | - ret = get_futex_key(uaddr, fshared, &q.key); | ||
27567 | + ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); | ||
27568 | if (unlikely(ret != 0)) | ||
27569 | goto out; | ||
27570 | |||
27571 | @@ -1995,7 +1968,7 @@ retry_private: | ||
27572 | /* Unqueue and drop the lock */ | ||
27573 | unqueue_me_pi(&q); | ||
27574 | |||
27575 | - goto out_put_key; | ||
27576 | + goto out; | ||
27577 | |||
27578 | out_unlock_put_key: | ||
27579 | queue_unlock(&q, hb); | ||
27580 | @@ -2044,7 +2017,7 @@ retry: | ||
27581 | if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) | ||
27582 | return -EPERM; | ||
27583 | |||
27584 | - ret = get_futex_key(uaddr, fshared, &key); | ||
27585 | + ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE); | ||
27586 | if (unlikely(ret != 0)) | ||
27587 | goto out; | ||
27588 | |||
27589 | @@ -2236,7 +2209,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, | ||
27590 | rt_waiter.task = NULL; | ||
27591 | |||
27592 | key2 = FUTEX_KEY_INIT; | ||
27593 | - ret = get_futex_key(uaddr2, fshared, &key2); | ||
27594 | + ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); | ||
27595 | if (unlikely(ret != 0)) | ||
27596 | goto out; | ||
27597 | |||
27598 | diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c | ||
27599 | index 3e1c36e..7b19403 100644 | ||
27600 | --- a/kernel/hrtimer.c | ||
27601 | +++ b/kernel/hrtimer.c | ||
27602 | @@ -46,6 +46,8 @@ | ||
27603 | #include <linux/sched.h> | ||
27604 | #include <linux/timer.h> | ||
27605 | |||
27606 | +#include <litmus/litmus.h> | ||
27607 | + | ||
27608 | #include <asm/uaccess.h> | ||
27609 | |||
27610 | #include <trace/events/timer.h> | ||
27611 | @@ -1016,6 +1018,85 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) | ||
27612 | } | ||
27613 | EXPORT_SYMBOL_GPL(hrtimer_start); | ||
27614 | |||
27615 | +/** | ||
27616 | + * hrtimer_pull - PULL_TIMERS_VECTOR callback on remote cpu | ||
27617 | + */ | ||
27618 | +void hrtimer_pull(void) | ||
27619 | +{ | ||
27620 | + struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | ||
27621 | + struct hrtimer_start_on_info *info; | ||
27622 | + struct list_head *pos, *safe, list; | ||
27623 | + | ||
27624 | + spin_lock(&base->lock); | ||
27625 | + list_replace_init(&base->to_pull, &list); | ||
27626 | + spin_unlock(&base->lock); | ||
27627 | + | ||
27628 | + list_for_each_safe(pos, safe, &list) { | ||
27629 | + info = list_entry(pos, struct hrtimer_start_on_info, list); | ||
27630 | + TRACE("pulled timer 0x%x\n", info->timer); | ||
27631 | + list_del(pos); | ||
27632 | + hrtimer_start(info->timer, info->time, info->mode); | ||
27633 | + } | ||
27634 | +} | ||
27635 | + | ||
27636 | +/** | ||
27637 | + * hrtimer_start_on - trigger timer arming on remote cpu | ||
27638 | + * @cpu: remote cpu | ||
27639 | + * @info: save timer information for enqueuing on remote cpu | ||
27640 | + * @timer: timer to be pulled | ||
27641 | + * @time: expire time | ||
27642 | + * @mode: timer mode | ||
27643 | + */ | ||
27644 | +int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info, | ||
27645 | + struct hrtimer *timer, ktime_t time, | ||
27646 | + const enum hrtimer_mode mode) | ||
27647 | +{ | ||
27648 | + unsigned long flags; | ||
27649 | + struct hrtimer_cpu_base* base; | ||
27650 | + int in_use = 0, was_empty; | ||
27651 | + | ||
27652 | + /* serialize access to info through the timer base */ | ||
27653 | + lock_hrtimer_base(timer, &flags); | ||
27654 | + | ||
27655 | + in_use = (atomic_read(&info->state) != HRTIMER_START_ON_INACTIVE); | ||
27656 | + if (!in_use) { | ||
27657 | + INIT_LIST_HEAD(&info->list); | ||
27658 | + info->timer = timer; | ||
27659 | + info->time = time; | ||
27660 | + info->mode = mode; | ||
27661 | + /* mark as in use */ | ||
27662 | + atomic_set(&info->state, HRTIMER_START_ON_QUEUED); | ||
27663 | + } | ||
27664 | + | ||
27665 | + unlock_hrtimer_base(timer, &flags); | ||
27666 | + | ||
27667 | + if (!in_use) { | ||
27668 | + /* initiate pull */ | ||
27669 | + preempt_disable(); | ||
27670 | + if (cpu == smp_processor_id()) { | ||
27671 | + /* start timer locally; we may get called | ||
27672 | + * with rq->lock held, do not wake up anything | ||
27673 | + */ | ||
27674 | + TRACE("hrtimer_start_on: starting on local CPU\n"); | ||
27675 | + __hrtimer_start_range_ns(info->timer, info->time, | ||
27676 | + 0, info->mode, 0); | ||
27677 | + } else { | ||
27678 | + TRACE("hrtimer_start_on: pulling to remote CPU\n"); | ||
27679 | + base = &per_cpu(hrtimer_bases, cpu); | ||
27680 | + spin_lock_irqsave(&base->lock, flags); | ||
27681 | + was_empty = list_empty(&base->to_pull); | ||
27682 | + list_add(&info->list, &base->to_pull); | ||
27683 | + spin_unlock_irqrestore(&base->lock, flags); | ||
27684 | + if (was_empty) | ||
27685 | + /* only send IPI if no one else | ||
27686 | + * has done so already | ||
27687 | + */ | ||
27688 | + smp_send_pull_timers(cpu); | ||
27689 | + } | ||
27690 | + preempt_enable(); | ||
27691 | + } | ||
27692 | + return in_use; | ||
27693 | +} | ||
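Taken together, the two functions added above form a pull-based remote-arming mechanism: hrtimer_start_on() either arms the timer locally or queues the caller's hrtimer_start_on_info on the target CPU's to_pull list and sends an IPI, and hrtimer_pull() drains that list on the target CPU. A minimal caller-side sketch follows, assuming only the API as declared above; the rt_event wrapper, release_fn(), init_release_timer() and arm_release() are hypothetical names used purely for illustration.

/* Hypothetical kernel-side sketch (not part of the patch): arming a
 * release timer on a remote CPU through hrtimer_start_on(). The
 * rt_event struct and helper names below are illustrative only. */
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct rt_event {
	struct hrtimer			timer;
	struct hrtimer_start_on_info	info;
};

static enum hrtimer_restart release_fn(struct hrtimer *timer)
{
	/* plugin-specific release handling would run here */
	return HRTIMER_NORESTART;
}

static void init_release_timer(struct rt_event *ev)
{
	hrtimer_init(&ev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ev->timer.function = release_fn;
	/* mark the info slot as free exactly once, at setup time */
	atomic_set(&ev->info.state, HRTIMER_START_ON_INACTIVE);
}

static void arm_release(struct rt_event *ev, int target_cpu, u64 when_ns)
{
	/* non-zero return: the slot is still queued from an earlier
	 * remote arming that has not been pulled yet, so nothing was armed */
	if (hrtimer_start_on(target_cpu, &ev->info, &ev->timer,
			     ns_to_ktime(when_ns), HRTIMER_MODE_ABS))
		printk(KERN_WARNING "arm_release: start_on_info busy\n");
}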
27694 | |||
27695 | /** | ||
27696 | * hrtimer_try_to_cancel - try to deactivate a timer | ||
27697 | @@ -1597,6 +1678,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | ||
27698 | cpu_base->clock_base[i].cpu_base = cpu_base; | ||
27699 | |||
27700 | hrtimer_init_hres(cpu_base); | ||
27701 | + INIT_LIST_HEAD(&cpu_base->to_pull); | ||
27702 | } | ||
27703 | |||
27704 | #ifdef CONFIG_HOTPLUG_CPU | ||
27705 | diff --git a/kernel/module.c b/kernel/module.c | ||
27706 | index dfa33e8..5842a71 100644 | ||
27707 | --- a/kernel/module.c | ||
27708 | +++ b/kernel/module.c | ||
27709 | @@ -1030,23 +1030,11 @@ static int try_to_force_load(struct module *mod, const char *reason) | ||
27710 | } | ||
27711 | |||
27712 | #ifdef CONFIG_MODVERSIONS | ||
27713 | -/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ | ||
27714 | -static unsigned long maybe_relocated(unsigned long crc, | ||
27715 | - const struct module *crc_owner) | ||
27716 | -{ | ||
27717 | -#ifdef ARCH_RELOCATES_KCRCTAB | ||
27718 | - if (crc_owner == NULL) | ||
27719 | - return crc - (unsigned long)reloc_start; | ||
27720 | -#endif | ||
27721 | - return crc; | ||
27722 | -} | ||
27723 | - | ||
27724 | static int check_version(Elf_Shdr *sechdrs, | ||
27725 | unsigned int versindex, | ||
27726 | const char *symname, | ||
27727 | struct module *mod, | ||
27728 | - const unsigned long *crc, | ||
27729 | - const struct module *crc_owner) | ||
27730 | + const unsigned long *crc) | ||
27731 | { | ||
27732 | unsigned int i, num_versions; | ||
27733 | struct modversion_info *versions; | ||
27734 | @@ -1067,10 +1055,10 @@ static int check_version(Elf_Shdr *sechdrs, | ||
27735 | if (strcmp(versions[i].name, symname) != 0) | ||
27736 | continue; | ||
27737 | |||
27738 | - if (versions[i].crc == maybe_relocated(*crc, crc_owner)) | ||
27739 | + if (versions[i].crc == *crc) | ||
27740 | return 1; | ||
27741 | DEBUGP("Found checksum %lX vs module %lX\n", | ||
27742 | - maybe_relocated(*crc, crc_owner), versions[i].crc); | ||
27743 | + *crc, versions[i].crc); | ||
27744 | goto bad_version; | ||
27745 | } | ||
27746 | |||
27747 | @@ -1093,8 +1081,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, | ||
27748 | if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, | ||
27749 | &crc, true, false)) | ||
27750 | BUG(); | ||
27751 | - return check_version(sechdrs, versindex, "module_layout", mod, crc, | ||
27752 | - NULL); | ||
27753 | + return check_version(sechdrs, versindex, "module_layout", mod, crc); | ||
27754 | } | ||
27755 | |||
27756 | /* First part is kernel version, which we ignore if module has crcs. */ | ||
27757 | @@ -1112,8 +1099,7 @@ static inline int check_version(Elf_Shdr *sechdrs, | ||
27758 | unsigned int versindex, | ||
27759 | const char *symname, | ||
27760 | struct module *mod, | ||
27761 | - const unsigned long *crc, | ||
27762 | - const struct module *crc_owner) | ||
27763 | + const unsigned long *crc) | ||
27764 | { | ||
27765 | return 1; | ||
27766 | } | ||
27767 | @@ -1148,8 +1134,8 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, | ||
27768 | /* use_module can fail due to OOM, | ||
27769 | or module initialization or unloading */ | ||
27770 | if (sym) { | ||
27771 | - if (!check_version(sechdrs, versindex, name, mod, crc, owner) | ||
27772 | - || !use_module(mod, owner)) | ||
27773 | + if (!check_version(sechdrs, versindex, name, mod, crc) || | ||
27774 | + !use_module(mod, owner)) | ||
27775 | sym = NULL; | ||
27776 | } | ||
27777 | return sym; | ||
27778 | @@ -1160,12 +1146,6 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs, | ||
27779 | * J. Corbet <corbet@lwn.net> | ||
27780 | */ | ||
27781 | #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) | ||
27782 | - | ||
27783 | -static inline bool sect_empty(const Elf_Shdr *sect) | ||
27784 | -{ | ||
27785 | - return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; | ||
27786 | -} | ||
27787 | - | ||
27788 | struct module_sect_attr | ||
27789 | { | ||
27790 | struct module_attribute mattr; | ||
27791 | @@ -1207,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, | ||
27792 | |||
27793 | /* Count loaded sections and allocate structures */ | ||
27794 | for (i = 0; i < nsect; i++) | ||
27795 | - if (!sect_empty(&sechdrs[i])) | ||
27796 | + if (sechdrs[i].sh_flags & SHF_ALLOC | ||
27797 | + && sechdrs[i].sh_size) | ||
27798 | nloaded++; | ||
27799 | size[0] = ALIGN(sizeof(*sect_attrs) | ||
27800 | + nloaded * sizeof(sect_attrs->attrs[0]), | ||
27801 | @@ -1225,7 +1206,9 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, | ||
27802 | sattr = &sect_attrs->attrs[0]; | ||
27803 | gattr = &sect_attrs->grp.attrs[0]; | ||
27804 | for (i = 0; i < nsect; i++) { | ||
27805 | - if (sect_empty(&sechdrs[i])) | ||
27806 | + if (! (sechdrs[i].sh_flags & SHF_ALLOC)) | ||
27807 | + continue; | ||
27808 | + if (!sechdrs[i].sh_size) | ||
27809 | continue; | ||
27810 | sattr->address = sechdrs[i].sh_addr; | ||
27811 | sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, | ||
27812 | @@ -1309,7 +1292,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, | ||
27813 | /* Count notes sections and allocate structures. */ | ||
27814 | notes = 0; | ||
27815 | for (i = 0; i < nsect; i++) | ||
27816 | - if (!sect_empty(&sechdrs[i]) && | ||
27817 | + if ((sechdrs[i].sh_flags & SHF_ALLOC) && | ||
27818 | (sechdrs[i].sh_type == SHT_NOTE)) | ||
27819 | ++notes; | ||
27820 | |||
27821 | @@ -1325,7 +1308,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect, | ||
27822 | notes_attrs->notes = notes; | ||
27823 | nattr = &notes_attrs->attrs[0]; | ||
27824 | for (loaded = i = 0; i < nsect; ++i) { | ||
27825 | - if (sect_empty(&sechdrs[i])) | ||
27826 | + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | ||
27827 | continue; | ||
27828 | if (sechdrs[i].sh_type == SHT_NOTE) { | ||
27829 | nattr->attr.name = mod->sect_attrs->attrs[loaded].name; | ||
27830 | diff --git a/kernel/perf_event.c b/kernel/perf_event.c | ||
27831 | index 413d101..7f29643 100644 | ||
27832 | --- a/kernel/perf_event.c | ||
27833 | +++ b/kernel/perf_event.c | ||
27834 | @@ -1359,9 +1359,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | ||
27835 | if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
27836 | continue; | ||
27837 | |||
27838 | - if (event->cpu != -1 && event->cpu != smp_processor_id()) | ||
27839 | - continue; | ||
27840 | - | ||
27841 | hwc = &event->hw; | ||
27842 | |||
27843 | interrupts = hwc->interrupts; | ||
27844 | @@ -1586,7 +1583,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) | ||
27845 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
27846 | return ERR_PTR(-EACCES); | ||
27847 | |||
27848 | - if (cpu < 0 || cpu >= nr_cpumask_bits) | ||
27849 | + if (cpu < 0 || cpu > num_possible_cpus()) | ||
27850 | return ERR_PTR(-EINVAL); | ||
27851 | |||
27852 | /* | ||
27853 | @@ -2177,7 +2174,6 @@ static void perf_mmap_data_free(struct perf_mmap_data *data) | ||
27854 | perf_mmap_free_page((unsigned long)data->user_page); | ||
27855 | for (i = 0; i < data->nr_pages; i++) | ||
27856 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | ||
27857 | - kfree(data); | ||
27858 | } | ||
27859 | |||
27860 | #else | ||
27861 | @@ -2218,7 +2214,6 @@ static void perf_mmap_data_free_work(struct work_struct *work) | ||
27862 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | ||
27863 | |||
27864 | vfree(base); | ||
27865 | - kfree(data); | ||
27866 | } | ||
27867 | |||
27868 | static void perf_mmap_data_free(struct perf_mmap_data *data) | ||
27869 | @@ -2324,6 +2319,7 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | ||
27870 | |||
27871 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | ||
27872 | perf_mmap_data_free(data); | ||
27873 | + kfree(data); | ||
27874 | } | ||
27875 | |||
27876 | static void perf_mmap_data_release(struct perf_event *event) | ||
27877 | @@ -3229,12 +3225,6 @@ static void perf_event_task_output(struct perf_event *event, | ||
27878 | |||
27879 | static int perf_event_task_match(struct perf_event *event) | ||
27880 | { | ||
27881 | - if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
27882 | - return 0; | ||
27883 | - | ||
27884 | - if (event->cpu != -1 && event->cpu != smp_processor_id()) | ||
27885 | - return 0; | ||
27886 | - | ||
27887 | if (event->attr.comm || event->attr.mmap || event->attr.task) | ||
27888 | return 1; | ||
27889 | |||
27890 | @@ -3264,13 +3254,13 @@ static void perf_event_task_event(struct perf_task_event *task_event) | ||
27891 | |||
27892 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
27893 | perf_event_task_ctx(&cpuctx->ctx, task_event); | ||
27894 | + put_cpu_var(perf_cpu_context); | ||
27895 | |||
27896 | rcu_read_lock(); | ||
27897 | if (!ctx) | ||
27898 | ctx = rcu_dereference(task_event->task->perf_event_ctxp); | ||
27899 | if (ctx) | ||
27900 | perf_event_task_ctx(ctx, task_event); | ||
27901 | - put_cpu_var(perf_cpu_context); | ||
27902 | rcu_read_unlock(); | ||
27903 | } | ||
27904 | |||
27905 | @@ -3347,12 +3337,6 @@ static void perf_event_comm_output(struct perf_event *event, | ||
27906 | |||
27907 | static int perf_event_comm_match(struct perf_event *event) | ||
27908 | { | ||
27909 | - if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
27910 | - return 0; | ||
27911 | - | ||
27912 | - if (event->cpu != -1 && event->cpu != smp_processor_id()) | ||
27913 | - return 0; | ||
27914 | - | ||
27915 | if (event->attr.comm) | ||
27916 | return 1; | ||
27917 | |||
27918 | @@ -3393,6 +3377,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | ||
27919 | |||
27920 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
27921 | perf_event_comm_ctx(&cpuctx->ctx, comm_event); | ||
27922 | + put_cpu_var(perf_cpu_context); | ||
27923 | |||
27924 | rcu_read_lock(); | ||
27925 | /* | ||
27926 | @@ -3402,7 +3387,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) | ||
27927 | ctx = rcu_dereference(current->perf_event_ctxp); | ||
27928 | if (ctx) | ||
27929 | perf_event_comm_ctx(ctx, comm_event); | ||
27930 | - put_cpu_var(perf_cpu_context); | ||
27931 | rcu_read_unlock(); | ||
27932 | } | ||
27933 | |||
27934 | @@ -3477,12 +3461,6 @@ static void perf_event_mmap_output(struct perf_event *event, | ||
27935 | static int perf_event_mmap_match(struct perf_event *event, | ||
27936 | struct perf_mmap_event *mmap_event) | ||
27937 | { | ||
27938 | - if (event->state != PERF_EVENT_STATE_ACTIVE) | ||
27939 | - return 0; | ||
27940 | - | ||
27941 | - if (event->cpu != -1 && event->cpu != smp_processor_id()) | ||
27942 | - return 0; | ||
27943 | - | ||
27944 | if (event->attr.mmap) | ||
27945 | return 1; | ||
27946 | |||
27947 | @@ -3560,6 +3538,7 @@ got_name: | ||
27948 | |||
27949 | cpuctx = &get_cpu_var(perf_cpu_context); | ||
27950 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | ||
27951 | + put_cpu_var(perf_cpu_context); | ||
27952 | |||
27953 | rcu_read_lock(); | ||
27954 | /* | ||
27955 | @@ -3569,7 +3548,6 @@ got_name: | ||
27956 | ctx = rcu_dereference(current->perf_event_ctxp); | ||
27957 | if (ctx) | ||
27958 | perf_event_mmap_ctx(ctx, mmap_event); | ||
27959 | - put_cpu_var(perf_cpu_context); | ||
27960 | rcu_read_unlock(); | ||
27961 | |||
27962 | kfree(buf); | ||
27963 | @@ -3832,9 +3810,6 @@ static int perf_swevent_match(struct perf_event *event, | ||
27964 | enum perf_type_id type, | ||
27965 | u32 event_id, struct pt_regs *regs) | ||
27966 | { | ||
27967 | - if (event->cpu != -1 && event->cpu != smp_processor_id()) | ||
27968 | - return 0; | ||
27969 | - | ||
27970 | if (!perf_swevent_is_counting(event)) | ||
27971 | return 0; | ||
27972 | |||
27973 | @@ -3974,7 +3949,6 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | ||
27974 | event->pmu->read(event); | ||
27975 | |||
27976 | data.addr = 0; | ||
27977 | - data.period = event->hw.last_period; | ||
27978 | regs = get_irq_regs(); | ||
27979 | /* | ||
27980 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
27981 | diff --git a/kernel/printk.c b/kernel/printk.c | ||
27982 | index f38b07f..6712a25 100644 | ||
27983 | --- a/kernel/printk.c | ||
27984 | +++ b/kernel/printk.c | ||
27985 | @@ -70,6 +70,13 @@ int console_printk[4] = { | ||
27986 | static int saved_console_loglevel = -1; | ||
27987 | |||
27988 | /* | ||
27989 | + * divert printk() messages when there is a LITMUS^RT debug listener | ||
27990 | + */ | ||
27991 | +#include <litmus/litmus.h> | ||
27992 | +int trace_override = 0; | ||
27993 | +int trace_recurse = 0; | ||
27994 | + | ||
27995 | +/* | ||
27996 | * Low level drivers may need that to know if they can schedule in | ||
27997 | * their unblank() callback or not. So let's export it. | ||
27998 | */ | ||
27999 | @@ -713,6 +720,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) | ||
28000 | /* Emit the output into the temporary buffer */ | ||
28001 | printed_len += vscnprintf(printk_buf + printed_len, | ||
28002 | sizeof(printk_buf) - printed_len, fmt, args); | ||
28003 | + /* if LITMUS^RT tracer is active divert printk() msgs */ | ||
28004 | + if (trace_override && !trace_recurse) | ||
28005 | + TRACE("%s", printk_buf); | ||
28006 | |||
28007 | |||
28008 | p = printk_buf; | ||
28009 | @@ -782,7 +792,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) | ||
28010 | * Try to acquire and then immediately release the | ||
28011 | * console semaphore. The release will do all the | ||
28012 | * actual magic (print out buffers, wake up klogd, | ||
28013 | - * etc). | ||
28014 | + * etc). | ||
28015 | * | ||
28016 | * The acquire_console_semaphore_for_printk() function | ||
28017 | * will release 'logbuf_lock' regardless of whether it | ||
28018 | @@ -1019,7 +1029,7 @@ int printk_needs_cpu(int cpu) | ||
28019 | |||
28020 | void wake_up_klogd(void) | ||
28021 | { | ||
28022 | - if (waitqueue_active(&log_wait)) | ||
28023 | + if (!trace_override && waitqueue_active(&log_wait)) | ||
28024 | __raw_get_cpu_var(printk_pending) = 1; | ||
28025 | } | ||
28026 | |||
28027 | diff --git a/kernel/rcutree.c b/kernel/rcutree.c | ||
28028 | index 683c4f3..f3077c0 100644 | ||
28029 | --- a/kernel/rcutree.c | ||
28030 | +++ b/kernel/rcutree.c | ||
28031 | @@ -176,29 +176,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) | ||
28032 | return &rsp->node[0]; | ||
28033 | } | ||
28034 | |||
28035 | -/* | ||
28036 | - * Record the specified "completed" value, which is later used to validate | ||
28037 | - * dynticks counter manipulations and CPU-offline checks. Specify | ||
28038 | - * "rsp->completed - 1" to unconditionally invalidate any future dynticks | ||
28039 | - * manipulations and CPU-offline checks. Such invalidation is useful at | ||
28040 | - * the beginning of a grace period. | ||
28041 | - */ | ||
28042 | -static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
28043 | -{ | ||
28044 | - rsp->dynticks_completed = comp; | ||
28045 | -} | ||
28046 | - | ||
28047 | #ifdef CONFIG_SMP | ||
28048 | |||
28049 | /* | ||
28050 | - * Recall the previously recorded value of the completion for dynticks. | ||
28051 | - */ | ||
28052 | -static long dyntick_recall_completed(struct rcu_state *rsp) | ||
28053 | -{ | ||
28054 | - return rsp->dynticks_completed; | ||
28055 | -} | ||
28056 | - | ||
28057 | -/* | ||
28058 | * If the specified CPU is offline, tell the caller that it is in | ||
28059 | * a quiescent state. Otherwise, whack it with a reschedule IPI. | ||
28060 | * Grace periods can end up waiting on an offline CPU when that | ||
28061 | @@ -355,9 +335,28 @@ void rcu_irq_exit(void) | ||
28062 | set_need_resched(); | ||
28063 | } | ||
28064 | |||
28065 | +/* | ||
28066 | + * Record the specified "completed" value, which is later used to validate | ||
28067 | + * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
28068 | + * unconditionally invalidate any future dynticks manipulations (which is | ||
28069 | + * useful at the beginning of a grace period). | ||
28070 | + */ | ||
28071 | +static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
28072 | +{ | ||
28073 | + rsp->dynticks_completed = comp; | ||
28074 | +} | ||
28075 | + | ||
28076 | #ifdef CONFIG_SMP | ||
28077 | |||
28078 | /* | ||
28079 | + * Recall the previously recorded value of the completion for dynticks. | ||
28080 | + */ | ||
28081 | +static long dyntick_recall_completed(struct rcu_state *rsp) | ||
28082 | +{ | ||
28083 | + return rsp->dynticks_completed; | ||
28084 | +} | ||
28085 | + | ||
28086 | +/* | ||
28087 | * Snapshot the specified CPU's dynticks counter so that we can later | ||
28088 | * credit them with an implicit quiescent state. Return 1 if this CPU | ||
28089 | * is in dynticks idle mode, which is an extended quiescent state. | ||
28090 | @@ -420,8 +419,24 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | ||
28091 | |||
28092 | #else /* #ifdef CONFIG_NO_HZ */ | ||
28093 | |||
28094 | +static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
28095 | +{ | ||
28096 | +} | ||
28097 | + | ||
28098 | #ifdef CONFIG_SMP | ||
28099 | |||
28100 | +/* | ||
28101 | + * If there are no dynticks, then the only way that a CPU can passively | ||
28102 | + * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
28103 | + * is a point in time during the prior (already finished) grace period, | ||
28104 | + * an offline CPU is always in a quiescent state, and thus can be | ||
28105 | + * unconditionally applied. So just return the current value of completed. | ||
28106 | + */ | ||
28107 | +static long dyntick_recall_completed(struct rcu_state *rsp) | ||
28108 | +{ | ||
28109 | + return rsp->completed; | ||
28110 | +} | ||
28111 | + | ||
28112 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | ||
28113 | { | ||
28114 | return 0; | ||
28115 | @@ -538,33 +553,13 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | ||
28116 | /* | ||
28117 | * Update CPU-local rcu_data state to record the newly noticed grace period. | ||
28118 | * This is used both when we started the grace period and when we notice | ||
28119 | - * that someone else started the grace period. The caller must hold the | ||
28120 | - * ->lock of the leaf rcu_node structure corresponding to the current CPU, | ||
28121 | - * and must have irqs disabled. | ||
28122 | + * that someone else started the grace period. | ||
28123 | */ | ||
28124 | -static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
28125 | -{ | ||
28126 | - if (rdp->gpnum != rnp->gpnum) { | ||
28127 | - rdp->qs_pending = 1; | ||
28128 | - rdp->passed_quiesc = 0; | ||
28129 | - rdp->gpnum = rnp->gpnum; | ||
28130 | - } | ||
28131 | -} | ||
28132 | - | ||
28133 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | ||
28134 | { | ||
28135 | - unsigned long flags; | ||
28136 | - struct rcu_node *rnp; | ||
28137 | - | ||
28138 | - local_irq_save(flags); | ||
28139 | - rnp = rdp->mynode; | ||
28140 | - if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ | ||
28141 | - !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
28142 | - local_irq_restore(flags); | ||
28143 | - return; | ||
28144 | - } | ||
28145 | - __note_new_gpnum(rsp, rnp, rdp); | ||
28146 | - spin_unlock_irqrestore(&rnp->lock, flags); | ||
28147 | + rdp->qs_pending = 1; | ||
28148 | + rdp->passed_quiesc = 0; | ||
28149 | + rdp->gpnum = rsp->gpnum; | ||
28150 | } | ||
28151 | |||
28152 | /* | ||
28153 | @@ -588,79 +583,6 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) | ||
28154 | } | ||
28155 | |||
28156 | /* | ||
28157 | - * Advance this CPU's callbacks, but only if the current grace period | ||
28158 | - * has ended. This may be called only from the CPU to whom the rdp | ||
28159 | - * belongs. In addition, the corresponding leaf rcu_node structure's | ||
28160 | - * ->lock must be held by the caller, with irqs disabled. | ||
28161 | - */ | ||
28162 | -static void | ||
28163 | -__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
28164 | -{ | ||
28165 | - /* Did another grace period end? */ | ||
28166 | - if (rdp->completed != rnp->completed) { | ||
28167 | - | ||
28168 | - /* Advance callbacks. No harm if list empty. */ | ||
28169 | - rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
28170 | - rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
28171 | - rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28172 | - | ||
28173 | - /* Remember that we saw this grace-period completion. */ | ||
28174 | - rdp->completed = rnp->completed; | ||
28175 | - } | ||
28176 | -} | ||
28177 | - | ||
28178 | -/* | ||
28179 | - * Advance this CPU's callbacks, but only if the current grace period | ||
28180 | - * has ended. This may be called only from the CPU to whom the rdp | ||
28181 | - * belongs. | ||
28182 | - */ | ||
28183 | -static void | ||
28184 | -rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
28185 | -{ | ||
28186 | - unsigned long flags; | ||
28187 | - struct rcu_node *rnp; | ||
28188 | - | ||
28189 | - local_irq_save(flags); | ||
28190 | - rnp = rdp->mynode; | ||
28191 | - if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ | ||
28192 | - !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
28193 | - local_irq_restore(flags); | ||
28194 | - return; | ||
28195 | - } | ||
28196 | - __rcu_process_gp_end(rsp, rnp, rdp); | ||
28197 | - spin_unlock_irqrestore(&rnp->lock, flags); | ||
28198 | -} | ||
28199 | - | ||
28200 | -/* | ||
28201 | - * Do per-CPU grace-period initialization for running CPU. The caller | ||
28202 | - * must hold the lock of the leaf rcu_node structure corresponding to | ||
28203 | - * this CPU. | ||
28204 | - */ | ||
28205 | -static void | ||
28206 | -rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
28207 | -{ | ||
28208 | - /* Prior grace period ended, so advance callbacks for current CPU. */ | ||
28209 | - __rcu_process_gp_end(rsp, rnp, rdp); | ||
28210 | - | ||
28211 | - /* | ||
28212 | - * Because this CPU just now started the new grace period, we know | ||
28213 | - * that all of its callbacks will be covered by this upcoming grace | ||
28214 | - * period, even the ones that were registered arbitrarily recently. | ||
28215 | - * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
28216 | - * | ||
28217 | - * Other CPUs cannot be sure exactly when the grace period started. | ||
28218 | - * Therefore, their recently registered callbacks must pass through | ||
28219 | - * an additional RCU_NEXT_READY stage, so that they will be handled | ||
28220 | - * by the next RCU grace period. | ||
28221 | - */ | ||
28222 | - rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28223 | - rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28224 | - | ||
28225 | - /* Set state so that this CPU will detect the next quiescent state. */ | ||
28226 | - __note_new_gpnum(rsp, rnp, rdp); | ||
28227 | -} | ||
28228 | - | ||
28229 | -/* | ||
28230 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | ||
28231 | * in preparation for detecting the next grace period. The caller must hold | ||
28232 | * the root node's ->lock, which is released before return. Hard irqs must | ||
28233 | @@ -685,15 +607,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
28234 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | ||
28235 | record_gp_stall_check_time(rsp); | ||
28236 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
28237 | + note_new_gpnum(rsp, rdp); | ||
28238 | + | ||
28239 | + /* | ||
28240 | + * Because this CPU just now started the new grace period, we know | ||
28241 | + * that all of its callbacks will be covered by this upcoming grace | ||
28242 | + * period, even the ones that were registered arbitrarily recently. | ||
28243 | + * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
28244 | + * | ||
28245 | + * Other CPUs cannot be sure exactly when the grace period started. | ||
28246 | + * Therefore, their recently registered callbacks must pass through | ||
28247 | + * an additional RCU_NEXT_READY stage, so that they will be handled | ||
28248 | + * by the next RCU grace period. | ||
28249 | + */ | ||
28250 | + rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28251 | + rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28252 | |||
28253 | /* Special-case the common single-level case. */ | ||
28254 | if (NUM_RCU_NODES == 1) { | ||
28255 | rcu_preempt_check_blocked_tasks(rnp); | ||
28256 | rnp->qsmask = rnp->qsmaskinit; | ||
28257 | rnp->gpnum = rsp->gpnum; | ||
28258 | - rnp->completed = rsp->completed; | ||
28259 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | ||
28260 | - rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
28261 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
28262 | return; | ||
28263 | } | ||
28264 | @@ -726,9 +661,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
28265 | rcu_preempt_check_blocked_tasks(rnp); | ||
28266 | rnp->qsmask = rnp->qsmaskinit; | ||
28267 | rnp->gpnum = rsp->gpnum; | ||
28268 | - rnp->completed = rsp->completed; | ||
28269 | - if (rnp == rdp->mynode) | ||
28270 | - rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
28271 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
28272 | } | ||
28273 | |||
28274 | @@ -740,6 +672,34 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | ||
28275 | } | ||
28276 | |||
28277 | /* | ||
28278 | + * Advance this CPU's callbacks, but only if the current grace period | ||
28279 | + * has ended. This may be called only from the CPU to whom the rdp | ||
28280 | + * belongs. | ||
28281 | + */ | ||
28282 | +static void | ||
28283 | +rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
28284 | +{ | ||
28285 | + long completed_snap; | ||
28286 | + unsigned long flags; | ||
28287 | + | ||
28288 | + local_irq_save(flags); | ||
28289 | + completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
28290 | + | ||
28291 | + /* Did another grace period end? */ | ||
28292 | + if (rdp->completed != completed_snap) { | ||
28293 | + | ||
28294 | + /* Advance callbacks. No harm if list empty. */ | ||
28295 | + rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
28296 | + rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
28297 | + rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
28298 | + | ||
28299 | + /* Remember that we saw this grace-period completion. */ | ||
28300 | + rdp->completed = completed_snap; | ||
28301 | + } | ||
28302 | + local_irq_restore(flags); | ||
28303 | +} | ||
28304 | + | ||
28305 | +/* | ||
28306 | * Clean up after the prior grace period and let rcu_start_gp() start up | ||
28307 | * the next grace period if one is needed. Note that the caller must | ||
28308 | * hold rnp->lock, as required by rcu_start_gp(), which will release it. | ||
28309 | @@ -750,6 +710,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) | ||
28310 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); | ||
28311 | rsp->completed = rsp->gpnum; | ||
28312 | rsp->signaled = RCU_GP_IDLE; | ||
28313 | + rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
28314 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ | ||
28315 | } | ||
28316 | |||
28317 | @@ -1183,7 +1144,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | ||
28318 | long lastcomp; | ||
28319 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
28320 | u8 signaled; | ||
28321 | - u8 forcenow; | ||
28322 | |||
28323 | if (!rcu_gp_in_progress(rsp)) | ||
28324 | return; /* No grace period in progress, nothing to force. */ | ||
28325 | @@ -1220,23 +1180,16 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | ||
28326 | if (rcu_process_dyntick(rsp, lastcomp, | ||
28327 | dyntick_save_progress_counter)) | ||
28328 | goto unlock_ret; | ||
28329 | - /* fall into next case. */ | ||
28330 | - | ||
28331 | - case RCU_SAVE_COMPLETED: | ||
28332 | |||
28333 | /* Update state, record completion counter. */ | ||
28334 | - forcenow = 0; | ||
28335 | spin_lock(&rnp->lock); | ||
28336 | if (lastcomp == rsp->completed && | ||
28337 | - rsp->signaled == signaled) { | ||
28338 | + rsp->signaled == RCU_SAVE_DYNTICK) { | ||
28339 | rsp->signaled = RCU_FORCE_QS; | ||
28340 | dyntick_record_completed(rsp, lastcomp); | ||
28341 | - forcenow = signaled == RCU_SAVE_COMPLETED; | ||
28342 | } | ||
28343 | spin_unlock(&rnp->lock); | ||
28344 | - if (!forcenow) | ||
28345 | - break; | ||
28346 | - /* fall into next case. */ | ||
28347 | + break; | ||
28348 | |||
28349 | case RCU_FORCE_QS: | ||
28350 | |||
28351 | @@ -1591,16 +1544,21 @@ static void __cpuinit | ||
28352 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | ||
28353 | { | ||
28354 | unsigned long flags; | ||
28355 | + long lastcomp; | ||
28356 | unsigned long mask; | ||
28357 | struct rcu_data *rdp = rsp->rda[cpu]; | ||
28358 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
28359 | |||
28360 | /* Set up local state, ensuring consistent view of global state. */ | ||
28361 | spin_lock_irqsave(&rnp->lock, flags); | ||
28362 | + lastcomp = rsp->completed; | ||
28363 | + rdp->completed = lastcomp; | ||
28364 | + rdp->gpnum = lastcomp; | ||
28365 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | ||
28366 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | ||
28367 | rdp->beenonline = 1; /* We have now been online. */ | ||
28368 | rdp->preemptable = preemptable; | ||
28369 | + rdp->passed_quiesc_completed = lastcomp - 1; | ||
28370 | rdp->qlen_last_fqs_check = 0; | ||
28371 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
28372 | rdp->blimit = blimit; | ||
28373 | @@ -1622,11 +1580,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | ||
28374 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
28375 | rnp->qsmaskinit |= mask; | ||
28376 | mask = rnp->grpmask; | ||
28377 | - if (rnp == rdp->mynode) { | ||
28378 | - rdp->gpnum = rnp->completed; /* if GP in progress... */ | ||
28379 | - rdp->completed = rnp->completed; | ||
28380 | - rdp->passed_quiesc_completed = rnp->completed - 1; | ||
28381 | - } | ||
28382 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | ||
28383 | rnp = rnp->parent; | ||
28384 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | ||
28385 | diff --git a/kernel/rcutree.h b/kernel/rcutree.h | ||
28386 | index ddb79ec..1899023 100644 | ||
28387 | --- a/kernel/rcutree.h | ||
28388 | +++ b/kernel/rcutree.h | ||
28389 | @@ -84,9 +84,6 @@ struct rcu_node { | ||
28390 | long gpnum; /* Current grace period for this node. */ | ||
28391 | /* This will either be equal to or one */ | ||
28392 | /* behind the root rcu_node's gpnum. */ | ||
28393 | - long completed; /* Last grace period completed for this node. */ | ||
28394 | - /* This will either be equal to or one */ | ||
28395 | - /* behind the root rcu_node's gpnum. */ | ||
28396 | unsigned long qsmask; /* CPUs or groups that need to switch in */ | ||
28397 | /* order for current grace period to proceed.*/ | ||
28398 | /* In leaf rcu_node, each bit corresponds to */ | ||
28399 | @@ -207,12 +204,11 @@ struct rcu_data { | ||
28400 | #define RCU_GP_IDLE 0 /* No grace period in progress. */ | ||
28401 | #define RCU_GP_INIT 1 /* Grace period being initialized. */ | ||
28402 | #define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ | ||
28403 | -#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */ | ||
28404 | -#define RCU_FORCE_QS 4 /* Need to force quiescent state. */ | ||
28405 | +#define RCU_FORCE_QS 3 /* Need to force quiescent state. */ | ||
28406 | #ifdef CONFIG_NO_HZ | ||
28407 | #define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK | ||
28408 | #else /* #ifdef CONFIG_NO_HZ */ | ||
28409 | -#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED | ||
28410 | +#define RCU_SIGNAL_INIT RCU_FORCE_QS | ||
28411 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
28412 | |||
28413 | #define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ | ||
28414 | @@ -278,8 +274,9 @@ struct rcu_state { | ||
28415 | unsigned long jiffies_stall; /* Time at which to check */ | ||
28416 | /* for CPU stalls. */ | ||
28417 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
28418 | +#ifdef CONFIG_NO_HZ | ||
28419 | long dynticks_completed; /* Value of completed @ snap. */ | ||
28420 | - /* Protected by fqslock. */ | ||
28421 | +#endif /* #ifdef CONFIG_NO_HZ */ | ||
28422 | }; | ||
28423 | |||
28424 | #ifdef RCU_TREE_NONCORE | ||
28425 | @@ -301,7 +298,7 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); | ||
28426 | #else /* #ifdef RCU_TREE_NONCORE */ | ||
28427 | |||
28428 | /* Forward declarations for rcutree_plugin.h */ | ||
28429 | -static void rcu_bootup_announce(void); | ||
28430 | +static inline void rcu_bootup_announce(void); | ||
28431 | long rcu_batches_completed(void); | ||
28432 | static void rcu_preempt_note_context_switch(int cpu); | ||
28433 | static int rcu_preempted_readers(struct rcu_node *rnp); | ||
28434 | diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h | ||
28435 | index c03edf7..ef2a58c 100644 | ||
28436 | --- a/kernel/rcutree_plugin.h | ||
28437 | +++ b/kernel/rcutree_plugin.h | ||
28438 | @@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | ||
28439 | /* | ||
28440 | * Tell them what RCU they are running. | ||
28441 | */ | ||
28442 | -static void rcu_bootup_announce(void) | ||
28443 | +static inline void rcu_bootup_announce(void) | ||
28444 | { | ||
28445 | printk(KERN_INFO | ||
28446 | "Experimental preemptable hierarchical RCU implementation.\n"); | ||
28447 | @@ -481,7 +481,7 @@ void exit_rcu(void) | ||
28448 | /* | ||
28449 | * Tell them what RCU they are running. | ||
28450 | */ | ||
28451 | -static void rcu_bootup_announce(void) | ||
28452 | +static inline void rcu_bootup_announce(void) | ||
28453 | { | ||
28454 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | ||
28455 | } | ||
28456 | diff --git a/kernel/sched.c b/kernel/sched.c | ||
28457 | index 60d74cc..1701eae 100644 | ||
28458 | --- a/kernel/sched.c | ||
28459 | +++ b/kernel/sched.c | ||
28460 | @@ -77,6 +77,9 @@ | ||
28461 | |||
28462 | #include "sched_cpupri.h" | ||
28463 | |||
28464 | +#include <litmus/sched_trace.h> | ||
28465 | +#include <litmus/trace.h> | ||
28466 | + | ||
28467 | #define CREATE_TRACE_POINTS | ||
28468 | #include <trace/events/sched.h> | ||
28469 | |||
28470 | @@ -482,6 +485,12 @@ struct rt_rq { | ||
28471 | #endif | ||
28472 | }; | ||
28473 | |||
28474 | +/* Litmus related fields in a runqueue */ | ||
28475 | +struct litmus_rq { | ||
28476 | + unsigned long nr_running; | ||
28477 | + struct task_struct *prev; | ||
28478 | +}; | ||
28479 | + | ||
28480 | #ifdef CONFIG_SMP | ||
28481 | |||
28482 | /* | ||
28483 | @@ -546,6 +555,7 @@ struct rq { | ||
28484 | |||
28485 | struct cfs_rq cfs; | ||
28486 | struct rt_rq rt; | ||
28487 | + struct litmus_rq litmus; | ||
28488 | |||
28489 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
28490 | /* list of leaf cfs_rq on this cpu: */ | ||
28491 | @@ -591,8 +601,6 @@ struct rq { | ||
28492 | |||
28493 | u64 rt_avg; | ||
28494 | u64 age_stamp; | ||
28495 | - u64 idle_stamp; | ||
28496 | - u64 avg_idle; | ||
28497 | #endif | ||
28498 | |||
28499 | /* calc_load related fields */ | ||
28500 | @@ -816,7 +824,6 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | ||
28501 | * default: 0.25ms | ||
28502 | */ | ||
28503 | unsigned int sysctl_sched_shares_ratelimit = 250000; | ||
28504 | -unsigned int normalized_sysctl_sched_shares_ratelimit = 250000; | ||
28505 | |||
28506 | /* | ||
28507 | * Inject some fuzzyness into changing the per-cpu group shares | ||
28508 | @@ -1813,17 +1820,17 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares) | ||
28509 | #endif | ||
28510 | |||
28511 | static void calc_load_account_active(struct rq *this_rq); | ||
28512 | -static void update_sysctl(void); | ||
28513 | |||
28514 | #include "sched_stats.h" | ||
28515 | #include "sched_idletask.c" | ||
28516 | #include "sched_fair.c" | ||
28517 | #include "sched_rt.c" | ||
28518 | +#include "../litmus/sched_litmus.c" | ||
28519 | #ifdef CONFIG_SCHED_DEBUG | ||
28520 | # include "sched_debug.c" | ||
28521 | #endif | ||
28522 | |||
28523 | -#define sched_class_highest (&rt_sched_class) | ||
28524 | +#define sched_class_highest (&litmus_sched_class) | ||
28525 | #define for_each_class(class) \ | ||
28526 | for (class = sched_class_highest; class; class = class->next) | ||
28527 | |||
28528 | @@ -2038,9 +2045,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | ||
28529 | { | ||
28530 | s64 delta; | ||
28531 | |||
28532 | - if (p->sched_class != &fair_sched_class) | ||
28533 | - return 0; | ||
28534 | - | ||
28535 | /* | ||
28536 | * Buddy candidates are cache hot: | ||
28537 | */ | ||
28538 | @@ -2049,6 +2053,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) | ||
28539 | &p->se == cfs_rq_of(&p->se)->last)) | ||
28540 | return 1; | ||
28541 | |||
28542 | + if (p->sched_class != &fair_sched_class) | ||
28543 | + return 0; | ||
28544 | + | ||
28545 | if (sysctl_sched_migration_cost == -1) | ||
28546 | return 1; | ||
28547 | if (sysctl_sched_migration_cost == 0) | ||
28548 | @@ -2347,6 +2354,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | ||
28549 | unsigned long flags; | ||
28550 | struct rq *rq, *orig_rq; | ||
28551 | |||
28552 | + if (is_realtime(p)) | ||
28553 | + TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state); | ||
28554 | + | ||
28555 | if (!sched_feat(SYNC_WAKEUPS)) | ||
28556 | wake_flags &= ~WF_SYNC; | ||
28557 | |||
28558 | @@ -2365,7 +2375,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | ||
28559 | orig_cpu = cpu; | ||
28560 | |||
28561 | #ifdef CONFIG_SMP | ||
28562 | - if (unlikely(task_running(rq, p))) | ||
28563 | + if (unlikely(task_running(rq, p)) || is_realtime(p)) | ||
28564 | goto out_activate; | ||
28565 | |||
28566 | /* | ||
28567 | @@ -2444,19 +2454,10 @@ out_running: | ||
28568 | #ifdef CONFIG_SMP | ||
28569 | if (p->sched_class->task_wake_up) | ||
28570 | p->sched_class->task_wake_up(rq, p); | ||
28571 | - | ||
28572 | - if (unlikely(rq->idle_stamp)) { | ||
28573 | - u64 delta = rq->clock - rq->idle_stamp; | ||
28574 | - u64 max = 2*sysctl_sched_migration_cost; | ||
28575 | - | ||
28576 | - if (delta > max) | ||
28577 | - rq->avg_idle = max; | ||
28578 | - else | ||
28579 | - update_avg(&rq->avg_idle, delta); | ||
28580 | - rq->idle_stamp = 0; | ||
28581 | - } | ||
28582 | #endif | ||
28583 | out: | ||
28584 | + if (is_realtime(p)) | ||
28585 | + TRACE_TASK(p, "try_to_wake_up() done state:%d\n", p->state); | ||
28586 | task_rq_unlock(rq, &flags); | ||
28587 | put_cpu(); | ||
28588 | |||
28589 | @@ -2765,6 +2766,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | ||
28590 | */ | ||
28591 | prev_state = prev->state; | ||
28592 | finish_arch_switch(prev); | ||
28593 | + litmus->finish_switch(prev); | ||
28594 | + prev->rt_param.stack_in_use = NO_CPU; | ||
28595 | perf_event_task_sched_in(current, cpu_of(rq)); | ||
28596 | finish_lock_switch(rq, prev); | ||
28597 | |||
28598 | @@ -2788,6 +2791,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev) | ||
28599 | { | ||
28600 | if (prev->sched_class->pre_schedule) | ||
28601 | prev->sched_class->pre_schedule(rq, prev); | ||
28602 | + | ||
28603 | + /* LITMUS^RT hack (not very clean): we need to save the prev task | ||
28604 | + * as our scheduling decisions rely on it (as we drop the rq lock, | ||
28605 | + * something in prev can change...); there is no way to escape | ||
28606 | + * this hack apart from modifying pick_next_task(rq, _prev_) or | ||
28607 | + * falling back on the previous solution of decoupling | ||
28608 | + * scheduling decisions. | ||
28609 | + */ | ||
28610 | + rq->litmus.prev = prev; | ||
28611 | } | ||
28612 | |||
28613 | /* rq->lock is NOT held, but preemption is disabled */ | ||
28614 | @@ -3179,6 +3191,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | ||
28615 | deactivate_task(src_rq, p, 0); | ||
28616 | set_task_cpu(p, this_cpu); | ||
28617 | activate_task(this_rq, p, 0); | ||
28618 | + /* | ||
28619 | + * Note that idle threads have a prio of MAX_PRIO, so this test | ||
28620 | + * is always true for them. | ||
28621 | + */ | ||
28622 | check_preempt_curr(this_rq, p, 0); | ||
28623 | } | ||
28624 | |||
28625 | @@ -4137,7 +4153,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | ||
28626 | unsigned long flags; | ||
28627 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | ||
28628 | |||
28629 | - cpumask_copy(cpus, cpu_active_mask); | ||
28630 | + cpumask_setall(cpus); | ||
28631 | |||
28632 | /* | ||
28633 | * When power savings policy is enabled for the parent domain, idle | ||
28634 | @@ -4300,7 +4316,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) | ||
28635 | int all_pinned = 0; | ||
28636 | struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); | ||
28637 | |||
28638 | - cpumask_copy(cpus, cpu_active_mask); | ||
28639 | + cpumask_setall(cpus); | ||
28640 | |||
28641 | /* | ||
28642 | * When power savings policy is enabled for the parent domain, idle | ||
28643 | @@ -4440,11 +4456,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | ||
28644 | int pulled_task = 0; | ||
28645 | unsigned long next_balance = jiffies + HZ; | ||
28646 | |||
28647 | - this_rq->idle_stamp = this_rq->clock; | ||
28648 | - | ||
28649 | - if (this_rq->avg_idle < sysctl_sched_migration_cost) | ||
28650 | - return; | ||
28651 | - | ||
28652 | for_each_domain(this_cpu, sd) { | ||
28653 | unsigned long interval; | ||
28654 | |||
28655 | @@ -4459,10 +4470,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | ||
28656 | interval = msecs_to_jiffies(sd->balance_interval); | ||
28657 | if (time_after(next_balance, sd->last_balance + interval)) | ||
28658 | next_balance = sd->last_balance + interval; | ||
28659 | - if (pulled_task) { | ||
28660 | - this_rq->idle_stamp = 0; | ||
28661 | + if (pulled_task) | ||
28662 | break; | ||
28663 | - } | ||
28664 | } | ||
28665 | if (pulled_task || time_after(jiffies, this_rq->next_balance)) { | ||
28666 | /* | ||
28667 | @@ -4697,7 +4706,7 @@ int select_nohz_load_balancer(int stop_tick) | ||
28668 | cpumask_set_cpu(cpu, nohz.cpu_mask); | ||
28669 | |||
28670 | /* time for ilb owner also to sleep */ | ||
28671 | - if (cpumask_weight(nohz.cpu_mask) == num_active_cpus()) { | ||
28672 | + if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { | ||
28673 | if (atomic_read(&nohz.load_balancer) == cpu) | ||
28674 | atomic_set(&nohz.load_balancer, -1); | ||
28675 | return 0; | ||
28676 | @@ -5250,18 +5259,26 @@ void scheduler_tick(void) | ||
28677 | |||
28678 | sched_clock_tick(); | ||
28679 | |||
28680 | + TS_TICK_START(current); | ||
28681 | + | ||
28682 | spin_lock(&rq->lock); | ||
28683 | update_rq_clock(rq); | ||
28684 | update_cpu_load(rq); | ||
28685 | curr->sched_class->task_tick(rq, curr, 0); | ||
28686 | + | ||
28687 | + /* litmus_tick may force current to resched */ | ||
28688 | + litmus_tick(rq, curr); | ||
28689 | + | ||
28690 | spin_unlock(&rq->lock); | ||
28691 | |||
28692 | perf_event_task_tick(curr, cpu); | ||
28693 | |||
28694 | #ifdef CONFIG_SMP | ||
28695 | rq->idle_at_tick = idle_cpu(cpu); | ||
28696 | - trigger_load_balance(rq, cpu); | ||
28697 | + if (!is_realtime(current)) | ||
28698 | + trigger_load_balance(rq, cpu); | ||
28699 | #endif | ||
28700 | + TS_TICK_END(current); | ||
28701 | } | ||
28702 | |||
28703 | notrace unsigned long get_parent_ip(unsigned long addr) | ||
28704 | @@ -5404,12 +5421,20 @@ pick_next_task(struct rq *rq) | ||
28705 | /* | ||
28706 | * Optimization: we know that if all tasks are in | ||
28707 | * the fair class we can call that function directly: | ||
28708 | - */ | ||
28709 | - if (likely(rq->nr_running == rq->cfs.nr_running)) { | ||
28710 | + | ||
28711 | + * NOT IN LITMUS^RT! | ||
28712 | + | ||
28713 | + * This breaks many assumptions in the plugins. | ||
28714 | + * Do not uncomment without thinking long and hard | ||
28715 | + * about how this affects global plugins such as GSN-EDF. | ||
28716 | + | ||
28717 | + if (rq->nr_running == rq->cfs.nr_running) { | ||
28718 | + TRACE("taking shortcut in pick_next_task()\n"); | ||
28719 | p = fair_sched_class.pick_next_task(rq); | ||
28720 | if (likely(p)) | ||
28721 | return p; | ||
28722 | } | ||
28723 | + */ | ||
28724 | |||
28725 | class = sched_class_highest; | ||
28726 | for ( ; ; ) { | ||
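With sched_class_highest redefined to &litmus_sched_class, the loop above consults the LITMUS^RT class before the stock rt, fair and idle classes, which is also why the cfs-only fast path had to be commented out. A toy model of the resulting traversal order (a sketch of the 2.6.32-style class chain, not the kernel code itself):

        #include <stddef.h>

        struct rq;                           /* opaque in this sketch */
        struct task_struct;

        struct sched_class {
                const struct sched_class *next;
                struct task_struct *(*pick_next_task)(struct rq *rq);
        };

        /* Chain after the patch: litmus -> rt -> fair -> idle.  The first class
         * that returns a task wins, so LITMUS^RT decisions take precedence. */
        static struct task_struct *pick_next_task_model(struct rq *rq,
                                                        const struct sched_class *highest)
        {
                const struct sched_class *class;
                struct task_struct *p;

                for (class = highest; class; class = class->next) {
                        p = class->pick_next_task(rq);
                        if (p)
                                return p;
                }
                return NULL;  /* not reached: the idle class always returns a task */
        }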
28727 | @@ -5444,6 +5469,8 @@ need_resched: | ||
28728 | |||
28729 | release_kernel_lock(prev); | ||
28730 | need_resched_nonpreemptible: | ||
28731 | + TS_SCHED_START; | ||
28732 | + sched_trace_task_switch_away(prev); | ||
28733 | |||
28734 | schedule_debug(prev); | ||
28735 | |||
28736 | @@ -5478,24 +5505,40 @@ need_resched_nonpreemptible: | ||
28737 | rq->curr = next; | ||
28738 | ++*switch_count; | ||
28739 | |||
28740 | + TS_SCHED_END(next); | ||
28741 | + TS_CXS_START(next); | ||
28742 | context_switch(rq, prev, next); /* unlocks the rq */ | ||
28743 | + TS_CXS_END(current); | ||
28744 | /* | ||
28745 | * the context switch might have flipped the stack from under | ||
28746 | * us, hence refresh the local variables. | ||
28747 | */ | ||
28748 | cpu = smp_processor_id(); | ||
28749 | rq = cpu_rq(cpu); | ||
28750 | - } else | ||
28751 | + } else { | ||
28752 | + TS_SCHED_END(prev); | ||
28753 | spin_unlock_irq(&rq->lock); | ||
28754 | + } | ||
28755 | + | ||
28756 | + TS_SCHED2_START(current); | ||
28757 | + sched_trace_task_switch_to(current); | ||
28758 | |||
28759 | post_schedule(rq); | ||
28760 | |||
28761 | - if (unlikely(reacquire_kernel_lock(current) < 0)) | ||
28762 | + if (unlikely(reacquire_kernel_lock(current) < 0)) { | ||
28763 | + TS_SCHED2_END(current); | ||
28764 | goto need_resched_nonpreemptible; | ||
28765 | + } | ||
28766 | |||
28767 | preempt_enable_no_resched(); | ||
28768 | + | ||
28769 | + TS_SCHED2_END(current); | ||
28770 | + | ||
28771 | if (need_resched()) | ||
28772 | goto need_resched; | ||
28773 | + | ||
28774 | + if (srp_active()) | ||
28775 | + srp_ceiling_block(); | ||
28776 | } | ||
28777 | EXPORT_SYMBOL(schedule); | ||
28778 | |||
28779 | @@ -5772,6 +5815,17 @@ void complete_all(struct completion *x) | ||
28780 | } | ||
28781 | EXPORT_SYMBOL(complete_all); | ||
28782 | |||
28783 | +void complete_n(struct completion *x, int n) | ||
28784 | +{ | ||
28785 | + unsigned long flags; | ||
28786 | + | ||
28787 | + spin_lock_irqsave(&x->wait.lock, flags); | ||
28788 | + x->done += n; | ||
28789 | + __wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL); | ||
28790 | + spin_unlock_irqrestore(&x->wait.lock, flags); | ||
28791 | +} | ||
28792 | +EXPORT_SYMBOL(complete_n); | ||
28793 | + | ||
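complete_n() credits a completion n times and wakes up to n waiters in one call; LITMUS^RT uses this style of primitive for synchronous task-set releases, where every task blocked on the same completion must be released together. A hedged usage sketch, assuming the patch also declares complete_n() in linux/completion.h (that hunk is not shown in this excerpt); release_barrier is a made-up name:

        #include <linux/completion.h>

        static DECLARE_COMPLETION(release_barrier);  /* hypothetical barrier */

        /* Each participating task parks here until the release is triggered. */
        static void wait_for_release(void)
        {
                wait_for_completion(&release_barrier);
        }

        /* Called once to let all n waiting tasks go at the same time. */
        static void trigger_release(int n)
        {
                complete_n(&release_barrier, n);
        }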
28794 | static inline long __sched | ||
28795 | do_wait_for_common(struct completion *x, long timeout, int state) | ||
28796 | { | ||
28797 | @@ -6203,6 +6257,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | ||
28798 | case SCHED_RR: | ||
28799 | p->sched_class = &rt_sched_class; | ||
28800 | break; | ||
28801 | + case SCHED_LITMUS: | ||
28802 | + p->sched_class = &litmus_sched_class; | ||
28803 | + break; | ||
28804 | } | ||
28805 | |||
28806 | p->rt_priority = prio; | ||
28807 | @@ -6250,7 +6307,7 @@ recheck: | ||
28808 | |||
28809 | if (policy != SCHED_FIFO && policy != SCHED_RR && | ||
28810 | policy != SCHED_NORMAL && policy != SCHED_BATCH && | ||
28811 | - policy != SCHED_IDLE) | ||
28812 | + policy != SCHED_IDLE && policy != SCHED_LITMUS) | ||
28813 | return -EINVAL; | ||
28814 | } | ||
28815 | |||
28816 | @@ -6265,6 +6322,8 @@ recheck: | ||
28817 | return -EINVAL; | ||
28818 | if (rt_policy(policy) != (param->sched_priority != 0)) | ||
28819 | return -EINVAL; | ||
28820 | + if (policy == SCHED_LITMUS && policy == p->policy) | ||
28821 | + return -EINVAL; | ||
28822 | |||
28823 | /* | ||
28824 | * Allow unprivileged RT tasks to decrease priority: | ||
28825 | @@ -6319,6 +6378,12 @@ recheck: | ||
28826 | return retval; | ||
28827 | } | ||
28828 | |||
28829 | + if (policy == SCHED_LITMUS) { | ||
28830 | + retval = litmus_admit_task(p); | ||
28831 | + if (retval) | ||
28832 | + return retval; | ||
28833 | + } | ||
28834 | + | ||
28835 | /* | ||
28836 | * make sure no PI-waiters arrive (or leave) while we are | ||
28837 | * changing the priority of the task: | ||
28838 | @@ -6346,9 +6411,18 @@ recheck: | ||
28839 | |||
28840 | p->sched_reset_on_fork = reset_on_fork; | ||
28841 | |||
28842 | + if (p->policy == SCHED_LITMUS) | ||
28843 | + litmus_exit_task(p); | ||
28844 | + | ||
28845 | oldprio = p->prio; | ||
28846 | __setscheduler(rq, p, policy, param->sched_priority); | ||
28847 | |||
28848 | + if (policy == SCHED_LITMUS) { | ||
28849 | + p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU; | ||
28850 | + p->rt_param.present = running; | ||
28851 | + litmus->task_new(p, on_rq, running); | ||
28852 | + } | ||
28853 | + | ||
28854 | if (running) | ||
28855 | p->sched_class->set_curr_task(rq); | ||
28856 | if (on_rq) { | ||
28857 | @@ -6518,10 +6592,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | ||
28858 | read_lock(&tasklist_lock); | ||
28859 | |||
28860 | p = find_process_by_pid(pid); | ||
28861 | - if (!p) { | ||
28862 | + /* Don't set affinity if the task was not found or is a LITMUS task */ | ||
28863 | + if (!p || is_realtime(p)) { | ||
28864 | read_unlock(&tasklist_lock); | ||
28865 | put_online_cpus(); | ||
28866 | - return -ESRCH; | ||
28867 | + return p ? -EPERM : -ESRCH; | ||
28868 | } | ||
28869 | |||
28870 | /* | ||
28871 | @@ -6980,6 +7055,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | ||
28872 | __sched_fork(idle); | ||
28873 | idle->se.exec_start = sched_clock(); | ||
28874 | |||
28875 | + idle->prio = idle->normal_prio = MAX_PRIO; | ||
28876 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); | ||
28877 | __set_task_cpu(idle, cpu); | ||
28878 | |||
28879 | @@ -7020,23 +7096,22 @@ cpumask_var_t nohz_cpu_mask; | ||
28880 | * | ||
28881 | * This idea comes from the SD scheduler of Con Kolivas: | ||
28882 | */ | ||
28883 | -static void update_sysctl(void) | ||
28884 | +static inline void sched_init_granularity(void) | ||
28885 | { | ||
28886 | - unsigned int cpus = min(num_online_cpus(), 8U); | ||
28887 | - unsigned int factor = 1 + ilog2(cpus); | ||
28888 | + unsigned int factor = 1 + ilog2(num_online_cpus()); | ||
28889 | + const unsigned long limit = 200000000; | ||
28890 | |||
28891 | -#define SET_SYSCTL(name) \ | ||
28892 | - (sysctl_##name = (factor) * normalized_sysctl_##name) | ||
28893 | - SET_SYSCTL(sched_min_granularity); | ||
28894 | - SET_SYSCTL(sched_latency); | ||
28895 | - SET_SYSCTL(sched_wakeup_granularity); | ||
28896 | - SET_SYSCTL(sched_shares_ratelimit); | ||
28897 | -#undef SET_SYSCTL | ||
28898 | -} | ||
28899 | + sysctl_sched_min_granularity *= factor; | ||
28900 | + if (sysctl_sched_min_granularity > limit) | ||
28901 | + sysctl_sched_min_granularity = limit; | ||
28902 | |||
28903 | -static inline void sched_init_granularity(void) | ||
28904 | -{ | ||
28905 | - update_sysctl(); | ||
28906 | + sysctl_sched_latency *= factor; | ||
28907 | + if (sysctl_sched_latency > limit) | ||
28908 | + sysctl_sched_latency = limit; | ||
28909 | + | ||
28910 | + sysctl_sched_wakeup_granularity *= factor; | ||
28911 | + | ||
28912 | + sysctl_sched_shares_ratelimit *= factor; | ||
28913 | } | ||
28914 | |||
28915 | #ifdef CONFIG_SMP | ||
28916 | @@ -7073,7 +7148,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | ||
28917 | int ret = 0; | ||
28918 | |||
28919 | rq = task_rq_lock(p, &flags); | ||
28920 | - if (!cpumask_intersects(new_mask, cpu_active_mask)) { | ||
28921 | + if (!cpumask_intersects(new_mask, cpu_online_mask)) { | ||
28922 | ret = -EINVAL; | ||
28923 | goto out; | ||
28924 | } | ||
28925 | @@ -7095,7 +7170,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) | ||
28926 | if (cpumask_test_cpu(task_cpu(p), new_mask)) | ||
28927 | goto out; | ||
28928 | |||
28929 | - if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { | ||
28930 | + if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { | ||
28931 | /* Need help from migration thread: drop lock and wait. */ | ||
28932 | struct task_struct *mt = rq->migration_thread; | ||
28933 | |||
28934 | @@ -7249,19 +7324,19 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | ||
28935 | |||
28936 | again: | ||
28937 | /* Look for allowed, online CPU in same node. */ | ||
28938 | - for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) | ||
28939 | + for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | ||
28940 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
28941 | goto move; | ||
28942 | |||
28943 | /* Any allowed, online CPU? */ | ||
28944 | - dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); | ||
28945 | + dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | ||
28946 | if (dest_cpu < nr_cpu_ids) | ||
28947 | goto move; | ||
28948 | |||
28949 | /* No more Mr. Nice Guy. */ | ||
28950 | if (dest_cpu >= nr_cpu_ids) { | ||
28951 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
28952 | - dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed); | ||
28953 | + dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
28954 | |||
28955 | /* | ||
28956 | * Don't tell them about moving exiting tasks or | ||
28957 | @@ -7290,7 +7365,7 @@ move: | ||
28958 | */ | ||
28959 | static void migrate_nr_uninterruptible(struct rq *rq_src) | ||
28960 | { | ||
28961 | - struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); | ||
28962 | + struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); | ||
28963 | unsigned long flags; | ||
28964 | |||
28965 | local_irq_save(flags); | ||
28966 | @@ -7544,7 +7619,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu) | ||
28967 | static struct ctl_table_header *sd_sysctl_header; | ||
28968 | static void register_sched_domain_sysctl(void) | ||
28969 | { | ||
28970 | - int i, cpu_num = num_possible_cpus(); | ||
28971 | + int i, cpu_num = num_online_cpus(); | ||
28972 | struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); | ||
28973 | char buf[32]; | ||
28974 | |||
28975 | @@ -7554,7 +7629,7 @@ static void register_sched_domain_sysctl(void) | ||
28976 | if (entry == NULL) | ||
28977 | return; | ||
28978 | |||
28979 | - for_each_possible_cpu(i) { | ||
28980 | + for_each_online_cpu(i) { | ||
28981 | snprintf(buf, 32, "cpu%d", i); | ||
28982 | entry->procname = kstrdup(buf, GFP_KERNEL); | ||
28983 | entry->mode = 0555; | ||
28984 | @@ -7684,6 +7759,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
28985 | spin_lock_irq(&rq->lock); | ||
28986 | update_rq_clock(rq); | ||
28987 | deactivate_task(rq, rq->idle, 0); | ||
28988 | + rq->idle->static_prio = MAX_PRIO; | ||
28989 | __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); | ||
28990 | rq->idle->sched_class = &idle_sched_class; | ||
28991 | migrate_dead_tasks(cpu); | ||
28992 | @@ -7922,8 +7998,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | ||
28993 | |||
28994 | static void free_rootdomain(struct root_domain *rd) | ||
28995 | { | ||
28996 | - synchronize_sched(); | ||
28997 | - | ||
28998 | cpupri_cleanup(&rd->cpupri); | ||
28999 | |||
29000 | free_cpumask_var(rd->rto_mask); | ||
29001 | @@ -8064,7 +8138,6 @@ static cpumask_var_t cpu_isolated_map; | ||
29002 | /* Setup the mask of cpus configured for isolated domains */ | ||
29003 | static int __init isolated_cpu_setup(char *str) | ||
29004 | { | ||
29005 | - alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
29006 | cpulist_parse(str, cpu_isolated_map); | ||
29007 | return 1; | ||
29008 | } | ||
29009 | @@ -9042,7 +9115,7 @@ match1: | ||
29010 | if (doms_new == NULL) { | ||
29011 | ndoms_cur = 0; | ||
29012 | doms_new = fallback_doms; | ||
29013 | - cpumask_andnot(&doms_new[0], cpu_active_mask, cpu_isolated_map); | ||
29014 | + cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); | ||
29015 | WARN_ON_ONCE(dattr_new); | ||
29016 | } | ||
29017 | |||
29018 | @@ -9173,10 +9246,8 @@ static int update_sched_domains(struct notifier_block *nfb, | ||
29019 | switch (action) { | ||
29020 | case CPU_ONLINE: | ||
29021 | case CPU_ONLINE_FROZEN: | ||
29022 | - case CPU_DOWN_PREPARE: | ||
29023 | - case CPU_DOWN_PREPARE_FROZEN: | ||
29024 | - case CPU_DOWN_FAILED: | ||
29025 | - case CPU_DOWN_FAILED_FROZEN: | ||
29026 | + case CPU_DEAD: | ||
29027 | + case CPU_DEAD_FROZEN: | ||
29028 | partition_sched_domains(1, NULL, NULL); | ||
29029 | return NOTIFY_OK; | ||
29030 | |||
29031 | @@ -9223,7 +9294,7 @@ void __init sched_init_smp(void) | ||
29032 | #endif | ||
29033 | get_online_cpus(); | ||
29034 | mutex_lock(&sched_domains_mutex); | ||
29035 | - arch_init_sched_domains(cpu_active_mask); | ||
29036 | + arch_init_sched_domains(cpu_online_mask); | ||
29037 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); | ||
29038 | if (cpumask_empty(non_isolated_cpus)) | ||
29039 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); | ||
29040 | @@ -9544,8 +9615,6 @@ void __init sched_init(void) | ||
29041 | rq->cpu = i; | ||
29042 | rq->online = 0; | ||
29043 | rq->migration_thread = NULL; | ||
29044 | - rq->idle_stamp = 0; | ||
29045 | - rq->avg_idle = 2*sysctl_sched_migration_cost; | ||
29046 | INIT_LIST_HEAD(&rq->migration_queue); | ||
29047 | rq_attach_root(rq, &def_root_domain); | ||
29048 | #endif | ||
29049 | @@ -9595,9 +9664,7 @@ void __init sched_init(void) | ||
29050 | zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); | ||
29051 | alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); | ||
29052 | #endif | ||
29053 | - /* May be allocated at isolcpus cmdline parse time */ | ||
29054 | - if (cpu_isolated_map == NULL) | ||
29055 | - zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | ||
29056 | + zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); | ||
29057 | #endif /* SMP */ | ||
29058 | |||
29059 | perf_event_init(); | ||
29060 | diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c | ||
29061 | index 5b49613..479ce56 100644 | ||
29062 | --- a/kernel/sched_clock.c | ||
29063 | +++ b/kernel/sched_clock.c | ||
29064 | @@ -236,18 +236,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) | ||
29065 | } | ||
29066 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); | ||
29067 | |||
29068 | -unsigned long long cpu_clock(int cpu) | ||
29069 | -{ | ||
29070 | - unsigned long long clock; | ||
29071 | - unsigned long flags; | ||
29072 | - | ||
29073 | - local_irq_save(flags); | ||
29074 | - clock = sched_clock_cpu(cpu); | ||
29075 | - local_irq_restore(flags); | ||
29076 | - | ||
29077 | - return clock; | ||
29078 | -} | ||
29079 | - | ||
29080 | #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ | ||
29081 | |||
29082 | void sched_clock_init(void) | ||
29083 | @@ -263,12 +251,17 @@ u64 sched_clock_cpu(int cpu) | ||
29084 | return sched_clock(); | ||
29085 | } | ||
29086 | |||
29087 | +#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ | ||
29088 | |||
29089 | unsigned long long cpu_clock(int cpu) | ||
29090 | { | ||
29091 | - return sched_clock_cpu(cpu); | ||
29092 | -} | ||
29093 | + unsigned long long clock; | ||
29094 | + unsigned long flags; | ||
29095 | |||
29096 | -#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ | ||
29097 | + local_irq_save(flags); | ||
29098 | + clock = sched_clock_cpu(cpu); | ||
29099 | + local_irq_restore(flags); | ||
29100 | |||
29101 | + return clock; | ||
29102 | +} | ||
29103 | EXPORT_SYMBOL_GPL(cpu_clock); | ||
29104 | diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c | ||
29105 | index 6988cf0..efb8440 100644 | ||
29106 | --- a/kernel/sched_debug.c | ||
29107 | +++ b/kernel/sched_debug.c | ||
29108 | @@ -285,16 +285,12 @@ static void print_cpu(struct seq_file *m, int cpu) | ||
29109 | |||
29110 | #ifdef CONFIG_SCHEDSTATS | ||
29111 | #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); | ||
29112 | -#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); | ||
29113 | |||
29114 | P(yld_count); | ||
29115 | |||
29116 | P(sched_switch); | ||
29117 | P(sched_count); | ||
29118 | P(sched_goidle); | ||
29119 | -#ifdef CONFIG_SMP | ||
29120 | - P64(avg_idle); | ||
29121 | -#endif | ||
29122 | |||
29123 | P(ttwu_count); | ||
29124 | P(ttwu_local); | ||
29125 | diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c | ||
29126 | index d80812d..ef43ff9 100644 | ||
29127 | --- a/kernel/sched_fair.c | ||
29128 | +++ b/kernel/sched_fair.c | ||
29129 | @@ -35,14 +35,12 @@ | ||
29130 | * run vmstat and monitor the context-switches (cs) field) | ||
29131 | */ | ||
29132 | unsigned int sysctl_sched_latency = 5000000ULL; | ||
29133 | -unsigned int normalized_sysctl_sched_latency = 5000000ULL; | ||
29134 | |||
29135 | /* | ||
29136 | * Minimal preemption granularity for CPU-bound tasks: | ||
29137 | * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) | ||
29138 | */ | ||
29139 | unsigned int sysctl_sched_min_granularity = 1000000ULL; | ||
29140 | -unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL; | ||
29141 | |||
29142 | /* | ||
29143 | * is kept at sysctl_sched_latency / sysctl_sched_min_granularity | ||
29144 | @@ -72,7 +70,6 @@ unsigned int __read_mostly sysctl_sched_compat_yield; | ||
29145 | * have immediate wakeup/sleep latencies. | ||
29146 | */ | ||
29147 | unsigned int sysctl_sched_wakeup_granularity = 1000000UL; | ||
29148 | -unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL; | ||
29149 | |||
29150 | const_debug unsigned int sysctl_sched_migration_cost = 500000UL; | ||
29151 | |||
29152 | @@ -1377,9 +1374,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag | ||
29153 | |||
29154 | rcu_read_lock(); | ||
29155 | for_each_domain(cpu, tmp) { | ||
29156 | - if (!(tmp->flags & SD_LOAD_BALANCE)) | ||
29157 | - continue; | ||
29158 | - | ||
29159 | /* | ||
29160 | * If power savings logic is enabled for a domain, see if we | ||
29161 | * are not overloaded, if so, don't balance wider. | ||
29162 | @@ -1404,38 +1398,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag | ||
29163 | want_sd = 0; | ||
29164 | } | ||
29165 | |||
29166 | - if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) { | ||
29167 | - int candidate = -1, i; | ||
29168 | - | ||
29169 | - if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) | ||
29170 | - candidate = cpu; | ||
29171 | - | ||
29172 | - /* | ||
29173 | - * Check for an idle shared cache. | ||
29174 | - */ | ||
29175 | - if (tmp->flags & SD_PREFER_SIBLING) { | ||
29176 | - if (candidate == cpu) { | ||
29177 | - if (!cpu_rq(prev_cpu)->cfs.nr_running) | ||
29178 | - candidate = prev_cpu; | ||
29179 | - } | ||
29180 | - | ||
29181 | - if (candidate == -1 || candidate == cpu) { | ||
29182 | - for_each_cpu(i, sched_domain_span(tmp)) { | ||
29183 | - if (!cpumask_test_cpu(i, &p->cpus_allowed)) | ||
29184 | - continue; | ||
29185 | - if (!cpu_rq(i)->cfs.nr_running) { | ||
29186 | - candidate = i; | ||
29187 | - break; | ||
29188 | - } | ||
29189 | - } | ||
29190 | - } | ||
29191 | - } | ||
29192 | + if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && | ||
29193 | + cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { | ||
29194 | |||
29195 | - if (candidate >= 0) { | ||
29196 | - affine_sd = tmp; | ||
29197 | - want_affine = 0; | ||
29198 | - cpu = candidate; | ||
29199 | - } | ||
29200 | + affine_sd = tmp; | ||
29201 | + want_affine = 0; | ||
29202 | } | ||
29203 | |||
29204 | if (!want_sd && !want_affine) | ||
29205 | @@ -1631,7 +1598,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | ||
29206 | |||
29207 | update_curr(cfs_rq); | ||
29208 | |||
29209 | - if (unlikely(rt_prio(p->prio))) { | ||
29210 | + if (unlikely(rt_prio(p->prio)) || p->policy == SCHED_LITMUS) { | ||
29211 | resched_task(curr); | ||
29212 | return; | ||
29213 | } | ||
29214 | @@ -1883,17 +1850,6 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
29215 | |||
29216 | return 0; | ||
29217 | } | ||
29218 | - | ||
29219 | -static void rq_online_fair(struct rq *rq) | ||
29220 | -{ | ||
29221 | - update_sysctl(); | ||
29222 | -} | ||
29223 | - | ||
29224 | -static void rq_offline_fair(struct rq *rq) | ||
29225 | -{ | ||
29226 | - update_sysctl(); | ||
29227 | -} | ||
29228 | - | ||
29229 | #endif /* CONFIG_SMP */ | ||
29230 | |||
29231 | /* | ||
29232 | @@ -2041,8 +1997,6 @@ static const struct sched_class fair_sched_class = { | ||
29233 | |||
29234 | .load_balance = load_balance_fair, | ||
29235 | .move_one_task = move_one_task_fair, | ||
29236 | - .rq_online = rq_online_fair, | ||
29237 | - .rq_offline = rq_offline_fair, | ||
29238 | #endif | ||
29239 | |||
29240 | .set_curr_task = set_curr_task_fair, | ||
29241 | diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c | ||
29242 | index a4d790c..f622880 100644 | ||
29243 | --- a/kernel/sched_rt.c | ||
29244 | +++ b/kernel/sched_rt.c | ||
29245 | @@ -1004,7 +1004,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | ||
29246 | */ | ||
29247 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) | ||
29248 | { | ||
29249 | - if (p->prio < rq->curr->prio) { | ||
29250 | + if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) { | ||
29251 | resched_task(rq->curr); | ||
29252 | return; | ||
29253 | } | ||
29254 | diff --git a/kernel/signal.c b/kernel/signal.c | ||
29255 | index 4d0658d..6705320 100644 | ||
29256 | --- a/kernel/signal.c | ||
29257 | +++ b/kernel/signal.c | ||
29258 | @@ -939,8 +939,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr) | ||
29259 | for (i = 0; i < 16; i++) { | ||
29260 | unsigned char insn; | ||
29261 | |||
29262 | - if (get_user(insn, (unsigned char *)(regs->ip + i))) | ||
29263 | - break; | ||
29264 | + __get_user(insn, (unsigned char *)(regs->ip + i)); | ||
29265 | printk("%02x ", insn); | ||
29266 | } | ||
29267 | } | ||
29268 | diff --git a/kernel/sysctl.c b/kernel/sysctl.c | ||
29269 | index b8bd058..0d949c5 100644 | ||
29270 | --- a/kernel/sysctl.c | ||
29271 | +++ b/kernel/sysctl.c | ||
29272 | @@ -1345,7 +1345,6 @@ static struct ctl_table vm_table[] = { | ||
29273 | .strategy = &sysctl_jiffies, | ||
29274 | }, | ||
29275 | #endif | ||
29276 | -#ifdef CONFIG_MMU | ||
29277 | { | ||
29278 | .ctl_name = CTL_UNNUMBERED, | ||
29279 | .procname = "mmap_min_addr", | ||
29280 | @@ -1354,7 +1353,6 @@ static struct ctl_table vm_table[] = { | ||
29281 | .mode = 0644, | ||
29282 | .proc_handler = &mmap_min_addr_handler, | ||
29283 | }, | ||
29284 | -#endif | ||
29285 | #ifdef CONFIG_NUMA | ||
29286 | { | ||
29287 | .ctl_name = CTL_UNNUMBERED, | ||
29288 | @@ -1607,8 +1605,7 @@ static struct ctl_table debug_table[] = { | ||
29289 | .data = &show_unhandled_signals, | ||
29290 | .maxlen = sizeof(int), | ||
29291 | .mode = 0644, | ||
29292 | - .proc_handler = proc_dointvec_minmax, | ||
29293 | - .extra1 = &zero, | ||
29294 | + .proc_handler = proc_dointvec | ||
29295 | }, | ||
29296 | #endif | ||
29297 | { .ctl_name = 0 } | ||
29298 | diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c | ||
29299 | index 469193c..b6e7aae 100644 | ||
29300 | --- a/kernel/sysctl_check.c | ||
29301 | +++ b/kernel/sysctl_check.c | ||
29302 | @@ -220,7 +220,6 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { | ||
29303 | { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, | ||
29304 | { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, | ||
29305 | { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" }, | ||
29306 | - { NET_IPV4_CONF_SRC_VMARK, "src_valid_mark" }, | ||
29307 | {} | ||
29308 | }; | ||
29309 | |||
29310 | diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c | ||
29311 | index 0d809ae..620b58a 100644 | ||
29312 | --- a/kernel/time/clockevents.c | ||
29313 | +++ b/kernel/time/clockevents.c | ||
29314 | @@ -20,8 +20,6 @@ | ||
29315 | #include <linux/sysdev.h> | ||
29316 | #include <linux/tick.h> | ||
29317 | |||
29318 | -#include "tick-internal.h" | ||
29319 | - | ||
29320 | /* The registered clock event devices */ | ||
29321 | static LIST_HEAD(clockevent_devices); | ||
29322 | static LIST_HEAD(clockevents_released); | ||
29323 | @@ -239,9 +237,8 @@ void clockevents_exchange_device(struct clock_event_device *old, | ||
29324 | */ | ||
29325 | void clockevents_notify(unsigned long reason, void *arg) | ||
29326 | { | ||
29327 | - struct clock_event_device *dev, *tmp; | ||
29328 | + struct list_head *node, *tmp; | ||
29329 | unsigned long flags; | ||
29330 | - int cpu; | ||
29331 | |||
29332 | spin_lock_irqsave(&clockevents_lock, flags); | ||
29333 | clockevents_do_notify(reason, arg); | ||
29334 | @@ -252,20 +249,8 @@ void clockevents_notify(unsigned long reason, void *arg) | ||
29335 | * Unregister the clock event devices which were | ||
29336 | * released from the users in the notify chain. | ||
29337 | */ | ||
29338 | - list_for_each_entry_safe(dev, tmp, &clockevents_released, list) | ||
29339 | - list_del(&dev->list); | ||
29340 | - /* | ||
29341 | - * Now check whether the CPU has left unused per cpu devices | ||
29342 | - */ | ||
29343 | - cpu = *((int *)arg); | ||
29344 | - list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { | ||
29345 | - if (cpumask_test_cpu(cpu, dev->cpumask) && | ||
29346 | - cpumask_weight(dev->cpumask) == 1 && | ||
29347 | - !tick_is_broadcast_device(dev)) { | ||
29348 | - BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | ||
29349 | - list_del(&dev->list); | ||
29350 | - } | ||
29351 | - } | ||
29352 | + list_for_each_safe(node, tmp, &clockevents_released) | ||
29353 | + list_del(node); | ||
29354 | break; | ||
29355 | default: | ||
29356 | break; | ||
29357 | diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c | ||
29358 | index ecc7adb..5e18c6a 100644 | ||
29359 | --- a/kernel/time/clocksource.c | ||
29360 | +++ b/kernel/time/clocksource.c | ||
29361 | @@ -413,47 +413,6 @@ void clocksource_touch_watchdog(void) | ||
29362 | clocksource_resume_watchdog(); | ||
29363 | } | ||
29364 | |||
29365 | -/** | ||
29366 | - * clocksource_max_deferment - Returns max time the clocksource can be deferred | ||
29367 | - * @cs: Pointer to clocksource | ||
29368 | - * | ||
29369 | - */ | ||
29370 | -static u64 clocksource_max_deferment(struct clocksource *cs) | ||
29371 | -{ | ||
29372 | - u64 max_nsecs, max_cycles; | ||
29373 | - | ||
29374 | - /* | ||
29375 | - * Calculate the maximum number of cycles that we can pass to the | ||
29376 | - * cyc2ns function without overflowing a 64-bit signed result. The | ||
29377 | - * maximum number of cycles is equal to ULLONG_MAX/cs->mult which | ||
29378 | - * is equivalent to the below. | ||
29379 | - * max_cycles < (2^63)/cs->mult | ||
29380 | - * max_cycles < 2^(log2((2^63)/cs->mult)) | ||
29381 | - * max_cycles < 2^(log2(2^63) - log2(cs->mult)) | ||
29382 | - * max_cycles < 2^(63 - log2(cs->mult)) | ||
29383 | - * max_cycles < 1 << (63 - log2(cs->mult)) | ||
29384 | - * Please note that we add 1 to the result of the log2 to account for | ||
29385 | - * any rounding errors, ensure the above inequality is satisfied and | ||
29386 | - * no overflow will occur. | ||
29387 | - */ | ||
29388 | - max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1)); | ||
29389 | - | ||
29390 | - /* | ||
29391 | - * The actual maximum number of cycles we can defer the clocksource is | ||
29392 | - * determined by the minimum of max_cycles and cs->mask. | ||
29393 | - */ | ||
29394 | - max_cycles = min_t(u64, max_cycles, (u64) cs->mask); | ||
29395 | - max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift); | ||
29396 | - | ||
29397 | - /* | ||
29398 | - * To ensure that the clocksource does not wrap whilst we are idle, | ||
29399 | - * limit the time the clocksource can be deferred by 12.5%. Please | ||
29400 | - * note a margin of 12.5% is used because this can be computed with | ||
29401 | - * a shift, versus say 10% which would require division. | ||
29402 | - */ | ||
29403 | - return max_nsecs - (max_nsecs >> 5); | ||
29404 | -} | ||
29405 | - | ||
29406 | #ifdef CONFIG_GENERIC_TIME | ||
29407 | |||
29408 | /** | ||
29409 | @@ -552,9 +511,6 @@ static void clocksource_enqueue(struct clocksource *cs) | ||
29410 | */ | ||
29411 | int clocksource_register(struct clocksource *cs) | ||
29412 | { | ||
29413 | - /* calculate max idle time permitted for this clocksource */ | ||
29414 | - cs->max_idle_ns = clocksource_max_deferment(cs); | ||
29415 | - | ||
29416 | mutex_lock(&clocksource_mutex); | ||
29417 | clocksource_enqueue(cs); | ||
29418 | clocksource_select(); | ||
29419 | diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c | ||
29420 | index 44320b1..dcbff75 100644 | ||
29421 | --- a/kernel/time/tick-sched.c | ||
29422 | +++ b/kernel/time/tick-sched.c | ||
29423 | @@ -216,7 +216,6 @@ void tick_nohz_stop_sched_tick(int inidle) | ||
29424 | struct tick_sched *ts; | ||
29425 | ktime_t last_update, expires, now; | ||
29426 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
29427 | - u64 time_delta; | ||
29428 | int cpu; | ||
29429 | |||
29430 | local_irq_save(flags); | ||
29431 | @@ -276,17 +275,6 @@ void tick_nohz_stop_sched_tick(int inidle) | ||
29432 | seq = read_seqbegin(&xtime_lock); | ||
29433 | last_update = last_jiffies_update; | ||
29434 | last_jiffies = jiffies; | ||
29435 | - | ||
29436 | - /* | ||
29437 | - * On SMP we really should only care for the CPU which | ||
29438 | - * has the do_timer duty assigned. All other CPUs can | ||
29439 | - * sleep as long as they want. | ||
29440 | - */ | ||
29441 | - if (cpu == tick_do_timer_cpu || | ||
29442 | - tick_do_timer_cpu == TICK_DO_TIMER_NONE) | ||
29443 | - time_delta = timekeeping_max_deferment(); | ||
29444 | - else | ||
29445 | - time_delta = KTIME_MAX; | ||
29446 | } while (read_seqretry(&xtime_lock, seq)); | ||
29447 | |||
29448 | /* Get the next timer wheel timer */ | ||
29449 | @@ -306,26 +294,11 @@ void tick_nohz_stop_sched_tick(int inidle) | ||
29450 | if ((long)delta_jiffies >= 1) { | ||
29451 | |||
29452 | /* | ||
29453 | - * calculate the expiry time for the next timer wheel | ||
29454 | - * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals | ||
29455 | - * that there is no timer pending or at least extremely | ||
29456 | - * far into the future (12 days for HZ=1000). In this | ||
29457 | - * case we set the expiry to the end of time. | ||
29458 | - */ | ||
29459 | - if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) { | ||
29460 | - /* | ||
29461 | - * Calculate the time delta for the next timer event. | ||
29462 | - * If the time delta exceeds the maximum time delta | ||
29463 | - * permitted by the current clocksource then adjust | ||
29464 | - * the time delta accordingly to ensure the | ||
29465 | - * clocksource does not wrap. | ||
29466 | - */ | ||
29467 | - time_delta = min_t(u64, time_delta, | ||
29468 | - tick_period.tv64 * delta_jiffies); | ||
29469 | - expires = ktime_add_ns(last_update, time_delta); | ||
29470 | - } else { | ||
29471 | - expires.tv64 = KTIME_MAX; | ||
29472 | - } | ||
29473 | + * calculate the expiry time for the next timer wheel | ||
29474 | + * timer | ||
29475 | + */ | ||
29476 | + expires = ktime_add_ns(last_update, tick_period.tv64 * | ||
29477 | + delta_jiffies); | ||
29478 | |||
29479 | /* | ||
29480 | * If this cpu is the one which updates jiffies, then | ||
29481 | @@ -369,19 +342,22 @@ void tick_nohz_stop_sched_tick(int inidle) | ||
29482 | |||
29483 | ts->idle_sleeps++; | ||
29484 | |||
29485 | - /* Mark expires */ | ||
29486 | - ts->idle_expires = expires; | ||
29487 | - | ||
29488 | /* | ||
29489 | - * If the expiration time == KTIME_MAX, then | ||
29490 | - * in this case we simply stop the tick timer. | ||
29491 | + * delta_jiffies >= NEXT_TIMER_MAX_DELTA signals that | ||
29492 | + * there is no timer pending or at least extremely far | ||
29493 | + * into the future (12 days for HZ=1000). In this case | ||
29494 | + * we simply stop the tick timer: | ||
29495 | */ | ||
29496 | - if (unlikely(expires.tv64 == KTIME_MAX)) { | ||
29497 | + if (unlikely(delta_jiffies >= NEXT_TIMER_MAX_DELTA)) { | ||
29498 | + ts->idle_expires.tv64 = KTIME_MAX; | ||
29499 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) | ||
29500 | hrtimer_cancel(&ts->sched_timer); | ||
29501 | goto out; | ||
29502 | } | ||
29503 | |||
29504 | + /* Mark expiries */ | ||
29505 | + ts->idle_expires = expires; | ||
29506 | + | ||
29507 | if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { | ||
29508 | hrtimer_start(&ts->sched_timer, expires, | ||
29509 | HRTIMER_MODE_ABS_PINNED); | ||
29510 | @@ -710,6 +686,46 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | ||
29511 | } | ||
29512 | |||
29513 | /** | ||
29514 | + * tick_set_quanta_type - get the quanta type as a boot option | ||
29515 | + * Default is standard setup with ticks staggered over first | ||
29516 | + * half of tick period. | ||
29517 | + */ | ||
29518 | +int quanta_type = LINUX_DEFAULT_TICKS; | ||
29519 | +static int __init tick_set_quanta_type(char *str) | ||
29520 | +{ | ||
29521 | + if (strcmp("aligned", str) == 0) { | ||
29522 | + quanta_type = LITMUS_ALIGNED_TICKS; | ||
29523 | + printk(KERN_INFO "LITMUS^RT: setting aligned quanta\n"); | ||
29524 | + } | ||
29525 | + else if (strcmp("staggered", str) == 0) { | ||
29526 | + quanta_type = LITMUS_STAGGERED_TICKS; | ||
29527 | + printk(KERN_INFO "LITMUS^RT: setting staggered quanta\n"); | ||
29528 | + } | ||
29529 | + return 1; | ||
29530 | +} | ||
29531 | +__setup("quanta=", tick_set_quanta_type); | ||
29532 | + | ||
29533 | +u64 cpu_stagger_offset(int cpu) | ||
29534 | +{ | ||
29535 | + u64 offset = 0; | ||
29536 | + switch (quanta_type) { | ||
29537 | + case LITMUS_ALIGNED_TICKS: | ||
29538 | + offset = 0; | ||
29539 | + break; | ||
29540 | + case LITMUS_STAGGERED_TICKS: | ||
29541 | + offset = ktime_to_ns(tick_period); | ||
29542 | + do_div(offset, num_possible_cpus()); | ||
29543 | + offset *= cpu; | ||
29544 | + break; | ||
29545 | + default: | ||
29546 | + offset = ktime_to_ns(tick_period) >> 1; | ||
29547 | + do_div(offset, num_possible_cpus()); | ||
29548 | + offset *= cpu; | ||
29549 | + } | ||
29550 | + return offset; | ||
29551 | +} | ||
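The quanta= boot option above is selected on the kernel command line, e.g. quanta=aligned or quanta=staggered. As a worked example of cpu_stagger_offset(), assume HZ=1000 (so tick_period is 1,000,000 ns) and num_possible_cpus() == 4:

        quanta=aligned:   offset = 0 on every CPU, so all CPUs tick simultaneously.
        quanta=staggered: offset = 1,000,000/4 * cpu = 0, 250,000, 500,000, 750,000 ns
                          on CPUs 0-3, spreading ticks evenly over the full period.
        default:          offset = (1,000,000/2)/4 * cpu = 0, 125,000, 250,000, 375,000 ns,
                          i.e. the stock behavior of staggering over the first half period.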
29552 | + | ||
29553 | +/** | ||
29554 | * tick_setup_sched_timer - setup the tick emulation timer | ||
29555 | */ | ||
29556 | void tick_setup_sched_timer(void) | ||
29557 | @@ -726,9 +742,11 @@ void tick_setup_sched_timer(void) | ||
29558 | |||
29559 | /* Get the next period (per cpu) */ | ||
29560 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | ||
29561 | - offset = ktime_to_ns(tick_period) >> 1; | ||
29562 | - do_div(offset, num_possible_cpus()); | ||
29563 | - offset *= smp_processor_id(); | ||
29564 | + | ||
29565 | + /* Offset must be set correctly to achieve desired quanta type. */ | ||
29566 | + offset = cpu_stagger_offset(smp_processor_id()); | ||
29567 | + | ||
29568 | + /* Add the correct offset to expiration time */ | ||
29569 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | ||
29570 | |||
29571 | for (;;) { | ||
29572 | diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c | ||
29573 | index 8b709de..c3a4e29 100644 | ||
29574 | --- a/kernel/time/timekeeping.c | ||
29575 | +++ b/kernel/time/timekeeping.c | ||
29576 | @@ -488,17 +488,6 @@ int timekeeping_valid_for_hres(void) | ||
29577 | } | ||
29578 | |||
29579 | /** | ||
29580 | - * timekeeping_max_deferment - Returns max time the clocksource can be deferred | ||
29581 | - * | ||
29582 | - * Caller must observe xtime_lock via read_seqbegin/read_seqretry to | ||
29583 | - * ensure that the clocksource does not change! | ||
29584 | - */ | ||
29585 | -u64 timekeeping_max_deferment(void) | ||
29586 | -{ | ||
29587 | - return timekeeper.clock->max_idle_ns; | ||
29588 | -} | ||
29589 | - | ||
29590 | -/** | ||
29591 | * read_persistent_clock - Return time from the persistent clock. | ||
29592 | * | ||
29593 | * Weak dummy function for arches that do not yet support it. | ||
29594 | @@ -845,7 +834,6 @@ void getboottime(struct timespec *ts) | ||
29595 | |||
29596 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); | ||
29597 | } | ||
29598 | -EXPORT_SYMBOL_GPL(getboottime); | ||
29599 | |||
29600 | /** | ||
29601 | * monotonic_to_bootbased - Convert the monotonic time to boot based. | ||
29602 | @@ -855,7 +843,6 @@ void monotonic_to_bootbased(struct timespec *ts) | ||
29603 | { | ||
29604 | *ts = timespec_add_safe(*ts, total_sleep_time); | ||
29605 | } | ||
29606 | -EXPORT_SYMBOL_GPL(monotonic_to_bootbased); | ||
29607 | |||
29608 | unsigned long get_seconds(void) | ||
29609 | { | ||
29610 | diff --git a/litmus/Kconfig b/litmus/Kconfig | ||
29611 | new file mode 100644 | ||
29612 | index 0000000..874794f | ||
29613 | --- /dev/null | ||
29614 | +++ b/litmus/Kconfig | ||
29615 | @@ -0,0 +1,85 @@ | ||
29616 | +menu "LITMUS^RT" | ||
29617 | + | ||
29618 | +menu "Real-Time Synchronization" | ||
29619 | + | ||
29620 | +config NP_SECTION | ||
29621 | + bool "Non-preemptive section support" | ||
29622 | + default n | ||
29623 | + help | ||
29624 | + Allow tasks to become non-preemptable. | ||
29625 | + Note that plugins still need to explicitly support non-preemptivity. | ||
29626 | + Currently, only GSN-EDF and PSN-EDF have such support. | ||
29627 | + | ||
29628 | + This is required to support the FMLP. | ||
29629 | + If disabled, all tasks will be considered preemptable at all times. | ||
29630 | + | ||
29631 | +config SRP | ||
29632 | + bool "Stack Resource Policy (SRP)" | ||
29633 | + default n | ||
29634 | + help | ||
29635 | + Include support for Baker's Stack Resource Policy. | ||
29636 | + | ||
29637 | + Say Yes if you want FMLP local long critical section | ||
29638 | + synchronization support. | ||
29639 | + | ||
29640 | +config FMLP | ||
29641 | + bool "FMLP support" | ||
29642 | + depends on NP_SECTION | ||
29643 | + default n | ||
29644 | + help | ||
29645 | + Include support for deterministic multiprocessor real-time | ||
29646 | + synchronization. | ||
29647 | + | ||
29648 | + Say Yes if you want FMLP long critical section | ||
29649 | + synchronization support. | ||
29650 | + | ||
29651 | +endmenu | ||
29652 | + | ||
29653 | +menu "Tracing" | ||
29654 | + | ||
29655 | +config FEATHER_TRACE | ||
29656 | + bool "Feather-Trace Infrastructure" | ||
29657 | + default y | ||
29658 | + help | ||
29659 | + Feather-Trace basic tracing infrastructure. Includes device file | ||
29660 | + driver and instrumentation point support. | ||
29661 | + | ||
29662 | + | ||
29663 | +config SCHED_TASK_TRACE | ||
29664 | + bool "Trace real-time tasks" | ||
29665 | + depends on FEATHER_TRACE | ||
29666 | + default y | ||
29667 | + help | ||
29668 | + Include support for the sched_trace_XXX() tracing functions. This | ||
29669 | + allows the collection of real-time task events such as job | ||
29670 | + completions, job releases, early completions, etc. This results in a | ||
29671 | + small overhead in the scheduling code. Disable if the overhead is not | ||
29672 | + acceptable (e.g., benchmarking). | ||
29673 | + | ||
29674 | + Say Yes for debugging. | ||
29675 | + Say No for overhead tracing. | ||
29676 | + | ||
29677 | +config SCHED_OVERHEAD_TRACE | ||
29678 | + bool "Record timestamps for overhead measurements" | ||
29679 | + depends on FEATHER_TRACE | ||
29680 | + default n | ||
29681 | + help | ||
29682 | + Export event stream for overhead tracing. | ||
29683 | + Say Yes for overhead tracing. | ||
29684 | + | ||
29685 | +config SCHED_DEBUG_TRACE | ||
29686 | + bool "TRACE() debugging" | ||
29687 | + default y | ||
29688 | + help | ||
29689 | + Include support for sched_trace_log_message(), which is used to | ||
29690 | + implement TRACE(). If disabled, no TRACE() messages will be included | ||
29691 | + in the kernel, and no overheads due to debugging statements will be | ||
29692 | + incurred by the scheduler. Disable if the overhead is not acceptable | ||
29693 | + (e.g. benchmarking). | ||
29694 | + | ||
29695 | + Say Yes for debugging. | ||
29696 | + Say No for overhead tracing. | ||
29697 | + | ||
29698 | +endmenu | ||
29699 | + | ||
29700 | +endmenu | ||
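
The options above are compile-time switches: kernel code selects optional features by testing the corresponding CONFIG_* macros. A minimal stand-alone sketch of that gating pattern follows (not taken from the patch; only the CONFIG_NP_SECTION symbol corresponds to the Kconfig entries above).

    /* Sketch: CONFIG_* symbols from Kconfig become preprocessor switches.
     * Build with -DCONFIG_NP_SECTION to emulate enabling the option. */
    #include <stdio.h>

    #ifdef CONFIG_NP_SECTION
    #define NP_SECTION_SUPPORTED 1  /* non-preemptive sections compiled in */
    #else
    #define NP_SECTION_SUPPORTED 0  /* feature compiled out entirely */
    #endif

    int main(void)
    {
        printf("non-preemptive section support: %d\n", NP_SECTION_SUPPORTED);
        return 0;
    }
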
29701 | diff --git a/litmus/Makefile b/litmus/Makefile | ||
29702 | new file mode 100644 | ||
29703 | index 0000000..0cc33e8 | ||
29704 | --- /dev/null | ||
29705 | +++ b/litmus/Makefile | ||
29706 | @@ -0,0 +1,23 @@ | ||
29707 | +# | ||
29708 | +# Makefile for LITMUS^RT | ||
29709 | +# | ||
29710 | + | ||
29711 | +obj-y = sched_plugin.o litmus.o \ | ||
29712 | + jobs.o \ | ||
29713 | + sync.o \ | ||
29714 | + rt_domain.o \ | ||
29715 | + edf_common.o \ | ||
29716 | + fdso.o \ | ||
29717 | + srp.o \ | ||
29718 | + fmlp.o \ | ||
29719 | + bheap.o \ | ||
29720 | + ctrldev.o \ | ||
29721 | + sched_gsn_edf.o \ | ||
29722 | + sched_psn_edf.o \ | ||
29723 | + sched_cedf.o \ | ||
29724 | + sched_pfair.o | ||
29725 | + | ||
29726 | +obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | ||
29727 | +obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | ||
29728 | +obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | ||
29729 | +obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | ||
29730 | diff --git a/litmus/bheap.c b/litmus/bheap.c | ||
29731 | new file mode 100644 | ||
29732 | index 0000000..528af97 | ||
29733 | --- /dev/null | ||
29734 | +++ b/litmus/bheap.c | ||
29735 | @@ -0,0 +1,314 @@ | ||
29736 | +#include <linux/kernel.h> | ||
29737 | +#include <litmus/bheap.h> | ||
29738 | + | ||
29739 | +void bheap_init(struct bheap* heap) | ||
29740 | +{ | ||
29741 | + heap->head = NULL; | ||
29742 | + heap->min = NULL; | ||
29743 | +} | ||
29744 | + | ||
29745 | +void bheap_node_init(struct bheap_node** _h, void* value) | ||
29746 | +{ | ||
29747 | + struct bheap_node* h = *_h; | ||
29748 | + h->parent = NULL; | ||
29749 | + h->next = NULL; | ||
29750 | + h->child = NULL; | ||
29751 | + h->degree = NOT_IN_HEAP; | ||
29752 | + h->value = value; | ||
29753 | + h->ref = _h; | ||
29754 | +} | ||
29755 | + | ||
29756 | + | ||
29757 | +/* make child a subtree of root */ | ||
29758 | +static void __bheap_link(struct bheap_node* root, | ||
29759 | + struct bheap_node* child) | ||
29760 | +{ | ||
29761 | + child->parent = root; | ||
29762 | + child->next = root->child; | ||
29763 | + root->child = child; | ||
29764 | + root->degree++; | ||
29765 | +} | ||
29766 | + | ||
29767 | +/* merge root lists */ | ||
29768 | +static struct bheap_node* __bheap_merge(struct bheap_node* a, | ||
29769 | + struct bheap_node* b) | ||
29770 | +{ | ||
29771 | + struct bheap_node* head = NULL; | ||
29772 | + struct bheap_node** pos = &head; | ||
29773 | + | ||
29774 | + while (a && b) { | ||
29775 | + if (a->degree < b->degree) { | ||
29776 | + *pos = a; | ||
29777 | + a = a->next; | ||
29778 | + } else { | ||
29779 | + *pos = b; | ||
29780 | + b = b->next; | ||
29781 | + } | ||
29782 | + pos = &(*pos)->next; | ||
29783 | + } | ||
29784 | + if (a) | ||
29785 | + *pos = a; | ||
29786 | + else | ||
29787 | + *pos = b; | ||
29788 | + return head; | ||
29789 | +} | ||
29790 | + | ||
29791 | +/* reverse a linked list of nodes. also clears parent pointer */ | ||
29792 | +static struct bheap_node* __bheap_reverse(struct bheap_node* h) | ||
29793 | +{ | ||
29794 | + struct bheap_node* tail = NULL; | ||
29795 | + struct bheap_node* next; | ||
29796 | + | ||
29797 | + if (!h) | ||
29798 | + return h; | ||
29799 | + | ||
29800 | + h->parent = NULL; | ||
29801 | + while (h->next) { | ||
29802 | + next = h->next; | ||
29803 | + h->next = tail; | ||
29804 | + tail = h; | ||
29805 | + h = next; | ||
29806 | + h->parent = NULL; | ||
29807 | + } | ||
29808 | + h->next = tail; | ||
29809 | + return h; | ||
29810 | +} | ||
29811 | + | ||
29812 | +static void __bheap_min(bheap_prio_t higher_prio, struct bheap* heap, | ||
29813 | + struct bheap_node** prev, struct bheap_node** node) | ||
29814 | +{ | ||
29815 | + struct bheap_node *_prev, *cur; | ||
29816 | + *prev = NULL; | ||
29817 | + | ||
29818 | + if (!heap->head) { | ||
29819 | + *node = NULL; | ||
29820 | + return; | ||
29821 | + } | ||
29822 | + | ||
29823 | + *node = heap->head; | ||
29824 | + _prev = heap->head; | ||
29825 | + cur = heap->head->next; | ||
29826 | + while (cur) { | ||
29827 | + if (higher_prio(cur, *node)) { | ||
29828 | + *node = cur; | ||
29829 | + *prev = _prev; | ||
29830 | + } | ||
29831 | + _prev = cur; | ||
29832 | + cur = cur->next; | ||
29833 | + } | ||
29834 | +} | ||
29835 | + | ||
29836 | +static void __bheap_union(bheap_prio_t higher_prio, struct bheap* heap, | ||
29837 | + struct bheap_node* h2) | ||
29838 | +{ | ||
29839 | + struct bheap_node* h1; | ||
29840 | + struct bheap_node *prev, *x, *next; | ||
29841 | + if (!h2) | ||
29842 | + return; | ||
29843 | + h1 = heap->head; | ||
29844 | + if (!h1) { | ||
29845 | + heap->head = h2; | ||
29846 | + return; | ||
29847 | + } | ||
29848 | + h1 = __bheap_merge(h1, h2); | ||
29849 | + prev = NULL; | ||
29850 | + x = h1; | ||
29851 | + next = x->next; | ||
29852 | + while (next) { | ||
29853 | + if (x->degree != next->degree || | ||
29854 | + (next->next && next->next->degree == x->degree)) { | ||
29855 | + /* nothing to do, advance */ | ||
29856 | + prev = x; | ||
29857 | + x = next; | ||
29858 | + } else if (higher_prio(x, next)) { | ||
29859 | + /* x becomes the root of next */ | ||
29860 | + x->next = next->next; | ||
29861 | + __bheap_link(x, next); | ||
29862 | + } else { | ||
29863 | + /* next becomes the root of x */ | ||
29864 | + if (prev) | ||
29865 | + prev->next = next; | ||
29866 | + else | ||
29867 | + h1 = next; | ||
29868 | + __bheap_link(next, x); | ||
29869 | + x = next; | ||
29870 | + } | ||
29871 | + next = x->next; | ||
29872 | + } | ||
29873 | + heap->head = h1; | ||
29874 | +} | ||
29875 | + | ||
29876 | +static struct bheap_node* __bheap_extract_min(bheap_prio_t higher_prio, | ||
29877 | + struct bheap* heap) | ||
29878 | +{ | ||
29879 | + struct bheap_node *prev, *node; | ||
29880 | + __bheap_min(higher_prio, heap, &prev, &node); | ||
29881 | + if (!node) | ||
29882 | + return NULL; | ||
29883 | + if (prev) | ||
29884 | + prev->next = node->next; | ||
29885 | + else | ||
29886 | + heap->head = node->next; | ||
29887 | + __bheap_union(higher_prio, heap, __bheap_reverse(node->child)); | ||
29888 | + return node; | ||
29889 | +} | ||
29890 | + | ||
29891 | +/* insert (and reinitialize) a node into the heap */ | ||
29892 | +void bheap_insert(bheap_prio_t higher_prio, struct bheap* heap, | ||
29893 | + struct bheap_node* node) | ||
29894 | +{ | ||
29895 | + struct bheap_node *min; | ||
29896 | + node->child = NULL; | ||
29897 | + node->parent = NULL; | ||
29898 | + node->next = NULL; | ||
29899 | + node->degree = 0; | ||
29900 | + if (heap->min && higher_prio(node, heap->min)) { | ||
29901 | + /* swap min cache */ | ||
29902 | + min = heap->min; | ||
29903 | + min->child = NULL; | ||
29904 | + min->parent = NULL; | ||
29905 | + min->next = NULL; | ||
29906 | + min->degree = 0; | ||
29907 | + __bheap_union(higher_prio, heap, min); | ||
29908 | + heap->min = node; | ||
29909 | + } else | ||
29910 | + __bheap_union(higher_prio, heap, node); | ||
29911 | +} | ||
29912 | + | ||
29913 | +void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap) | ||
29914 | +{ | ||
29915 | + struct bheap_node* min; | ||
29916 | + if (heap->min) { | ||
29917 | + min = heap->min; | ||
29918 | + heap->min = NULL; | ||
29919 | + bheap_insert(higher_prio, heap, min); | ||
29920 | + } | ||
29921 | +} | ||
29922 | + | ||
29923 | +/* merge addition into target */ | ||
29924 | +void bheap_union(bheap_prio_t higher_prio, | ||
29925 | + struct bheap* target, struct bheap* addition) | ||
29926 | +{ | ||
29927 | + /* first insert any cached minima, if necessary */ | ||
29928 | + bheap_uncache_min(higher_prio, target); | ||
29929 | + bheap_uncache_min(higher_prio, addition); | ||
29930 | + __bheap_union(higher_prio, target, addition->head); | ||
29931 | + /* this is a destructive merge */ | ||
29932 | + addition->head = NULL; | ||
29933 | +} | ||
29934 | + | ||
29935 | +struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
29936 | + struct bheap* heap) | ||
29937 | +{ | ||
29938 | + if (!heap->min) | ||
29939 | + heap->min = __bheap_extract_min(higher_prio, heap); | ||
29940 | + return heap->min; | ||
29941 | +} | ||
29942 | + | ||
29943 | +struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
29944 | + struct bheap* heap) | ||
29945 | +{ | ||
29946 | + struct bheap_node *node; | ||
29947 | + if (!heap->min) | ||
29948 | + heap->min = __bheap_extract_min(higher_prio, heap); | ||
29949 | + node = heap->min; | ||
29950 | + heap->min = NULL; | ||
29951 | + if (node) | ||
29952 | + node->degree = NOT_IN_HEAP; | ||
29953 | + return node; | ||
29954 | +} | ||
29955 | + | ||
29956 | +int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node) | ||
29957 | +{ | ||
29958 | + struct bheap_node *parent; | ||
29959 | + struct bheap_node** tmp_ref; | ||
29960 | + void* tmp; | ||
29961 | + | ||
29962 | + /* bubble up */ | ||
29963 | + parent = node->parent; | ||
29964 | + while (parent && higher_prio(node, parent)) { | ||
29965 | + /* swap parent and node */ | ||
29966 | + tmp = parent->value; | ||
29967 | + parent->value = node->value; | ||
29968 | + node->value = tmp; | ||
29969 | + /* swap references */ | ||
29970 | + *(parent->ref) = node; | ||
29971 | + *(node->ref) = parent; | ||
29972 | + tmp_ref = parent->ref; | ||
29973 | + parent->ref = node->ref; | ||
29974 | + node->ref = tmp_ref; | ||
29975 | + /* step up */ | ||
29976 | + node = parent; | ||
29977 | + parent = node->parent; | ||
29978 | + } | ||
29979 | + | ||
29980 | + return parent != NULL; | ||
29981 | +} | ||
29982 | + | ||
29983 | +void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap, | ||
29984 | + struct bheap_node* node) | ||
29985 | +{ | ||
29986 | + struct bheap_node *parent, *prev, *pos; | ||
29987 | + struct bheap_node** tmp_ref; | ||
29988 | + void* tmp; | ||
29989 | + | ||
29990 | + if (heap->min != node) { | ||
29991 | + /* bubble up */ | ||
29992 | + parent = node->parent; | ||
29993 | + while (parent) { | ||
29994 | + /* swap parent and node */ | ||
29995 | + tmp = parent->value; | ||
29996 | + parent->value = node->value; | ||
29997 | + node->value = tmp; | ||
29998 | + /* swap references */ | ||
29999 | + *(parent->ref) = node; | ||
30000 | + *(node->ref) = parent; | ||
30001 | + tmp_ref = parent->ref; | ||
30002 | + parent->ref = node->ref; | ||
30003 | + node->ref = tmp_ref; | ||
30004 | + /* step up */ | ||
30005 | + node = parent; | ||
30006 | + parent = node->parent; | ||
30007 | + } | ||
30008 | + /* now delete: | ||
30009 | + * first find prev */ | ||
30010 | + prev = NULL; | ||
30011 | + pos = heap->head; | ||
30012 | + while (pos != node) { | ||
30013 | + prev = pos; | ||
30014 | + pos = pos->next; | ||
30015 | + } | ||
30016 | + /* we have prev, now remove node */ | ||
30017 | + if (prev) | ||
30018 | + prev->next = node->next; | ||
30019 | + else | ||
30020 | + heap->head = node->next; | ||
30021 | + __bheap_union(higher_prio, heap, __bheap_reverse(node->child)); | ||
30022 | + } else | ||
30023 | + heap->min = NULL; | ||
30024 | + node->degree = NOT_IN_HEAP; | ||
30025 | +} | ||
30026 | + | ||
30027 | +/* allocate a heap node for value and insert into the heap */ | ||
30028 | +int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
30029 | + void* value, int gfp_flags) | ||
30030 | +{ | ||
30031 | + struct bheap_node* hn = bheap_node_alloc(gfp_flags); | ||
30032 | + if (likely(hn)) { | ||
30033 | + bheap_node_init(&hn, value); | ||
30034 | + bheap_insert(higher_prio, heap, hn); | ||
30035 | + } | ||
30036 | + return hn != NULL; | ||
30037 | +} | ||
30038 | + | ||
30039 | +void* bheap_take_del(bheap_prio_t higher_prio, | ||
30040 | + struct bheap* heap) | ||
30041 | +{ | ||
30042 | + struct bheap_node* hn = bheap_take(higher_prio, heap); | ||
30043 | + void* ret = NULL; | ||
30044 | + if (hn) { | ||
30045 | + ret = hn->value; | ||
30046 | + bheap_node_free(hn); | ||
30047 | + } | ||
30048 | + return ret; | ||
30049 | +} | ||
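
A rough usage sketch of the heap interface implemented above, assuming the declarations in litmus/bheap.h added elsewhere in this patch. The struct item, min_first(), and bheap_example() names are illustrative only; the signatures follow bheap_add() and bheap_take_del() as defined here.

    /* Sketch only: exercise the binomial heap with a caller-supplied
     * priority function, as the EDF code does with edf_ready_order(). */
    #include <linux/slab.h>
    #include <litmus/bheap.h>

    struct item {
        int prio;
    };

    /* "higher priority" means numerically smaller prio in this sketch */
    static int min_first(struct bheap_node *a, struct bheap_node *b)
    {
        struct item *ia = a->value;
        struct item *ib = b->value;
        return ia->prio < ib->prio;
    }

    static void bheap_example(void)
    {
        struct bheap h;
        struct item *got, a = { .prio = 2 }, b = { .prio = 1 };

        bheap_init(&h);

        /* bheap_add() allocates a node (bheap_node_alloc) and inserts it */
        bheap_add(min_first, &h, &a, GFP_ATOMIC);
        bheap_add(min_first, &h, &b, GFP_ATOMIC);

        /* removes the highest-priority value (here: b) and frees its node */
        got = bheap_take_del(min_first, &h);
        (void) got;
    }
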
30050 | diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c | ||
30051 | new file mode 100644 | ||
30052 | index 0000000..6677a67 | ||
30053 | --- /dev/null | ||
30054 | +++ b/litmus/ctrldev.c | ||
30055 | @@ -0,0 +1,150 @@ | ||
30056 | +#include <linux/sched.h> | ||
30057 | +#include <linux/mm.h> | ||
30058 | +#include <linux/fs.h> | ||
30059 | +#include <linux/miscdevice.h> | ||
30060 | +#include <linux/module.h> | ||
30061 | + | ||
30062 | +#include <litmus/litmus.h> | ||
30063 | + | ||
30064 | +/* only one page for now, but we might want to add an RO version at some point */ | ||
30065 | + | ||
30066 | +#define CTRL_NAME "litmus/ctrl" | ||
30067 | + | ||
30068 | +/* allocate t->rt_param.ctrl_page */ | ||
30069 | +static int alloc_ctrl_page(struct task_struct *t) | ||
30070 | +{ | ||
30071 | + int err = 0; | ||
30072 | + | ||
30073 | + /* only allocate if the task doesn't have one yet */ | ||
30074 | + if (!tsk_rt(t)->ctrl_page) { | ||
30075 | + tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL); | ||
30076 | + if (!tsk_rt(t)->ctrl_page) | ||
30077 | + err = -ENOMEM; | ||
30078 | + /* will get de-allocated in task teardown */ | ||
30079 | + TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__, | ||
30080 | + tsk_rt(t)->ctrl_page); | ||
30081 | + } | ||
30082 | + return err; | ||
30083 | +} | ||
30084 | + | ||
30085 | +static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) | ||
30086 | +{ | ||
30087 | + int err; | ||
30088 | + unsigned long pfn; | ||
30089 | + | ||
30090 | + struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); | ||
30091 | + | ||
30092 | + /* Increase ref count. Is decreased when vma is destroyed. */ | ||
30093 | + get_page(ctrl); | ||
30094 | + | ||
30095 | + /* compute page frame number */ | ||
30096 | + pfn = page_to_pfn(ctrl); | ||
30097 | + | ||
30098 | + TRACE_CUR(CTRL_NAME | ||
30099 | + ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", | ||
30100 | + tsk_rt(t)->ctrl_page, pfn, page_to_pfn(ctrl), vma->vm_start, | ||
30101 | + vma->vm_page_prot); | ||
30102 | + | ||
30103 | + /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise | ||
30104 | + * userspace actually gets a copy-on-write page. */ | ||
30105 | + err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); | ||
30106 | + | ||
30107 | + if (err) | ||
30108 | + TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); | ||
30109 | + | ||
30110 | + return err; | ||
30111 | +} | ||
30112 | + | ||
30113 | +static void litmus_ctrl_vm_close(struct vm_area_struct* vma) | ||
30114 | +{ | ||
30115 | + TRACE_CUR("%s flags=0x%x prot=0x%x\n", __FUNCTION__, | ||
30116 | + vma->vm_flags, vma->vm_page_prot); | ||
30117 | + | ||
30118 | + TRACE_CUR(CTRL_NAME | ||
30119 | + ": %p:%p vma:%p vma->vm_private_data:%p closed.\n", | ||
30120 | + (void*) vma->vm_start, (void*) vma->vm_end, vma, | ||
30121 | + vma->vm_private_data, current->comm, | ||
30122 | + current->pid); | ||
30123 | +} | ||
30124 | + | ||
30125 | +static int litmus_ctrl_vm_fault(struct vm_area_struct* vma, | ||
30126 | + struct vm_fault* vmf) | ||
30127 | +{ | ||
30128 | + /* This function should never be called, since | ||
30129 | + * all pages should have been mapped by mmap() | ||
30130 | + * already. */ | ||
30131 | + TRACE_CUR("%s flags=0x%x\n", __FUNCTION__, vma->vm_flags); | ||
30132 | + | ||
30133 | + /* nope, you only get one page */ | ||
30134 | + return VM_FAULT_SIGBUS; | ||
30135 | +} | ||
30136 | + | ||
30137 | +static struct vm_operations_struct litmus_ctrl_vm_ops = { | ||
30138 | + .close = litmus_ctrl_vm_close, | ||
30139 | + .fault = litmus_ctrl_vm_fault, | ||
30140 | +}; | ||
30141 | + | ||
30142 | +static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma) | ||
30143 | +{ | ||
30144 | + int err = 0; | ||
30145 | + | ||
30146 | + /* first make sure mapper knows what he's doing */ | ||
30147 | + | ||
30148 | + /* you can only get one page */ | ||
30149 | + if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||
30150 | + return -EINVAL; | ||
30151 | + | ||
30152 | + /* you can only map the "first" page */ | ||
30153 | + if (vma->vm_pgoff != 0) | ||
30154 | + return -EINVAL; | ||
30155 | + | ||
30156 | + /* you can't share it with anyone */ | ||
30157 | + if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | ||
30158 | + return -EINVAL; | ||
30159 | + | ||
30160 | + vma->vm_ops = &litmus_ctrl_vm_ops; | ||
30161 | + /* this mapping should not be kept across forks, | ||
30162 | + * and cannot be expanded */ | ||
30163 | + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
30164 | + | ||
30165 | + err = alloc_ctrl_page(current); | ||
30166 | + if (!err) | ||
30167 | + err = map_ctrl_page(current, vma); | ||
30168 | + | ||
30169 | + TRACE_CUR("%s flags=0x%x prot=0x%lx\n", | ||
30170 | + __FUNCTION__, vma->vm_flags, vma->vm_page_prot); | ||
30171 | + | ||
30172 | + return err; | ||
30173 | +} | ||
30174 | + | ||
30175 | +static struct file_operations litmus_ctrl_fops = { | ||
30176 | + .owner = THIS_MODULE, | ||
30177 | + .mmap = litmus_ctrl_mmap, | ||
30178 | +}; | ||
30179 | + | ||
30180 | +static struct miscdevice litmus_ctrl_dev = { | ||
30181 | + .name = CTRL_NAME, | ||
30182 | + .minor = MISC_DYNAMIC_MINOR, | ||
30183 | + .fops = &litmus_ctrl_fops, | ||
30184 | +}; | ||
30185 | + | ||
30186 | +static int __init init_litmus_ctrl_dev(void) | ||
30187 | +{ | ||
30188 | + int err; | ||
30189 | + | ||
30190 | + BUILD_BUG_ON(sizeof(struct control_page) > PAGE_SIZE); | ||
30191 | + | ||
30192 | + printk("Initializing LITMUS^RT control device.\n"); | ||
30193 | + err = misc_register(&litmus_ctrl_dev); | ||
30194 | + if (err) | ||
30195 | + printk("Could not allocate %s device (%d).\n", CTRL_NAME, err); | ||
30196 | + return err; | ||
30197 | +} | ||
30198 | + | ||
30199 | +static void __exit exit_litmus_ctrl_dev(void) | ||
30200 | +{ | ||
30201 | + misc_deregister(&litmus_ctrl_dev); | ||
30202 | +} | ||
30203 | + | ||
30204 | +module_init(init_litmus_ctrl_dev); | ||
30205 | +module_exit(exit_litmus_ctrl_dev); | ||
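
From user space, the control page is obtained by opening the misc device and mapping exactly one page at offset zero with a private mapping, matching the checks in litmus_ctrl_mmap(). A hedged sketch follows; the /dev/litmus/ctrl path is an assumption based on CTRL_NAME and the usual misc-device naming, and struct control_page itself is defined elsewhere in the patch.

    /* User-space sketch: map the LITMUS^RT control page.
     * The device path is assumed from CTRL_NAME ("litmus/ctrl"). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        void *page;
        int fd = open("/dev/litmus/ctrl", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Exactly one page, offset 0, private mapping; anything else is
         * rejected by litmus_ctrl_mmap() with -EINVAL. */
        page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        if (page == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        printf("control page mapped at %p\n", page);
        munmap(page, pagesz);
        close(fd);
        return 0;
    }
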
30206 | diff --git a/litmus/edf_common.c b/litmus/edf_common.c | ||
30207 | new file mode 100644 | ||
30208 | index 0000000..06daec6 | ||
30209 | --- /dev/null | ||
30210 | +++ b/litmus/edf_common.c | ||
30211 | @@ -0,0 +1,102 @@ | ||
30212 | +/* | ||
30213 | + * kernel/edf_common.c | ||
30214 | + * | ||
30215 | + * Common functions for EDF based scheduler. | ||
30216 | + */ | ||
30217 | + | ||
30218 | +#include <linux/percpu.h> | ||
30219 | +#include <linux/sched.h> | ||
30220 | +#include <linux/list.h> | ||
30221 | + | ||
30222 | +#include <litmus/litmus.h> | ||
30223 | +#include <litmus/sched_plugin.h> | ||
30224 | +#include <litmus/sched_trace.h> | ||
30225 | + | ||
30226 | +#include <litmus/edf_common.h> | ||
30227 | + | ||
30228 | +/* edf_higher_prio - returns true if first has a higher EDF priority | ||
30229 | + * than second. Deadline ties are broken by PID. | ||
30230 | + * | ||
30231 | + * both first and second may be NULL | ||
30232 | + */ | ||
30233 | +int edf_higher_prio(struct task_struct* first, | ||
30234 | + struct task_struct* second) | ||
30235 | +{ | ||
30236 | + struct task_struct *first_task = first; | ||
30237 | + struct task_struct *second_task = second; | ||
30238 | + | ||
30239 | + /* There is no point in comparing a task to itself. */ | ||
30240 | + if (first && first == second) { | ||
30241 | + TRACE_TASK(first, | ||
30242 | + "WARNING: pointless edf priority comparison.\n"); | ||
30243 | + return 0; | ||
30244 | + } | ||
30245 | + | ||
30246 | + | ||
30247 | + /* Check for inherited priorities. Change task | ||
30248 | + * used for comparison in such a case. | ||
30249 | + */ | ||
30250 | + if (first && first->rt_param.inh_task) | ||
30251 | + first_task = first->rt_param.inh_task; | ||
30252 | + if (second && second->rt_param.inh_task) | ||
30253 | + second_task = second->rt_param.inh_task; | ||
30254 | + | ||
30255 | + return | ||
30256 | + /* it has to exist in order to have higher priority */ | ||
30257 | + first_task && ( | ||
30258 | + /* does the second task exist and is it a real-time task? If | ||
30259 | + * not, the first task (which is a RT task) has higher | ||
30260 | + * priority. | ||
30261 | + */ | ||
30262 | + !second_task || !is_realtime(second_task) || | ||
30263 | + | ||
30264 | + /* is the deadline of the first task earlier? | ||
30265 | + * Then it has higher priority. | ||
30266 | + */ | ||
30267 | + earlier_deadline(first_task, second_task) || | ||
30268 | + | ||
30269 | + /* Do we have a deadline tie? | ||
30270 | + * Then break by PID. | ||
30271 | + */ | ||
30272 | + (get_deadline(first_task) == get_deadline(second_task) && | ||
30273 | + (first_task->pid < second_task->pid || | ||
30274 | + | ||
30275 | + /* If the PIDs are the same then the task with the inherited | ||
30276 | + * priority wins. | ||
30277 | + */ | ||
30278 | + (first_task->pid == second_task->pid && | ||
30279 | + !second->rt_param.inh_task)))); | ||
30280 | +} | ||
30281 | + | ||
30282 | +int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | ||
30283 | +{ | ||
30284 | + return edf_higher_prio(bheap2task(a), bheap2task(b)); | ||
30285 | +} | ||
30286 | + | ||
30287 | +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
30288 | + release_jobs_t release) | ||
30289 | +{ | ||
30290 | + rt_domain_init(rt, edf_ready_order, resched, release); | ||
30291 | +} | ||
30292 | + | ||
30293 | +/* need_to_preempt - check whether the task t needs to be preempted | ||
30294 | + * call only with irqs disabled and with ready_lock acquired | ||
30295 | + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! | ||
30296 | + */ | ||
30297 | +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t) | ||
30298 | +{ | ||
30299 | + /* we need the read lock for edf_ready_queue */ | ||
30300 | + /* no need to preempt if there is nothing pending */ | ||
30301 | + if (!__jobs_pending(rt)) | ||
30302 | + return 0; | ||
30303 | + /* we need to reschedule if t doesn't exist */ | ||
30304 | + if (!t) | ||
30305 | + return 1; | ||
30306 | + | ||
30307 | + /* NOTE: We cannot check for non-preemptibility since we | ||
30308 | + * don't know what address space we're currently in. | ||
30309 | + */ | ||
30310 | + | ||
30311 | + /* make sure to get non-rt stuff out of the way */ | ||
30312 | + return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t); | ||
30313 | +} | ||
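
Stripped of the inheritance handling, edf_higher_prio() orders tasks by absolute deadline and breaks deadline ties by PID. The stand-alone sketch below restates just that ordering over plain values; it is illustrative only and ignores inh_task and the non-real-time case beyond a NULL check.

    /* Sketch: the core EDF ordering of edf_higher_prio(), over plain values. */
    #include <stdio.h>

    struct fake_task {
        long long deadline;   /* absolute deadline */
        int pid;
    };

    static int edf_higher(const struct fake_task *a, const struct fake_task *b)
    {
        if (!b)
            return 1;                          /* anything beats "no task" */
        if (a->deadline != b->deadline)
            return a->deadline < b->deadline;  /* earlier deadline wins */
        return a->pid < b->pid;                /* deadline tie: lower PID wins */
    }

    int main(void)
    {
        struct fake_task t1 = { .deadline = 100, .pid = 42 };
        struct fake_task t2 = { .deadline = 100, .pid = 7 };
        printf("t1 higher than t2? %d\n", edf_higher(&t1, &t2)); /* prints 0 */
        return 0;
    }
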
30314 | diff --git a/litmus/fdso.c b/litmus/fdso.c | ||
30315 | new file mode 100644 | ||
30316 | index 0000000..85be716 | ||
30317 | --- /dev/null | ||
30318 | +++ b/litmus/fdso.c | ||
30319 | @@ -0,0 +1,281 @@ | ||
30320 | +/* fdso.c - file descriptor attached shared objects | ||
30321 | + * | ||
30322 | + * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
30323 | + * | ||
30324 | + * Notes: | ||
30325 | + * - objects descriptor (OD) tables are not cloned during a fork. | ||
30326 | + * - objects are created on-demand, and freed after the last reference | ||
30327 | + * is dropped. | ||
30328 | + * - for now, object types are hard coded. | ||
30329 | + * - As long as we have live objects, we keep a reference to the inode. | ||
30330 | + */ | ||
30331 | + | ||
30332 | +#include <linux/errno.h> | ||
30333 | +#include <linux/sched.h> | ||
30334 | +#include <linux/mutex.h> | ||
30335 | +#include <linux/file.h> | ||
30336 | +#include <asm/uaccess.h> | ||
30337 | + | ||
30338 | +#include <litmus/fdso.h> | ||
30339 | + | ||
30340 | +extern struct fdso_ops fmlp_sem_ops; | ||
30341 | +extern struct fdso_ops srp_sem_ops; | ||
30342 | + | ||
30343 | +static const struct fdso_ops* fdso_ops[] = { | ||
30344 | + &fmlp_sem_ops, | ||
30345 | + &srp_sem_ops, | ||
30346 | +}; | ||
30347 | + | ||
30348 | +static void* fdso_create(obj_type_t type) | ||
30349 | +{ | ||
30350 | + if (fdso_ops[type]->create) | ||
30351 | + return fdso_ops[type]->create(); | ||
30352 | + else | ||
30353 | + return NULL; | ||
30354 | +} | ||
30355 | + | ||
30356 | +static void fdso_destroy(obj_type_t type, void* obj) | ||
30357 | +{ | ||
30358 | + fdso_ops[type]->destroy(obj); | ||
30359 | +} | ||
30360 | + | ||
30361 | +static int fdso_open(struct od_table_entry* entry, void* __user config) | ||
30362 | +{ | ||
30363 | + if (fdso_ops[entry->obj->type]->open) | ||
30364 | + return fdso_ops[entry->obj->type]->open(entry, config); | ||
30365 | + else | ||
30366 | + return 0; | ||
30367 | +} | ||
30368 | + | ||
30369 | +static int fdso_close(struct od_table_entry* entry) | ||
30370 | +{ | ||
30371 | + if (fdso_ops[entry->obj->type]->close) | ||
30372 | + return fdso_ops[entry->obj->type]->close(entry); | ||
30373 | + else | ||
30374 | + return 0; | ||
30375 | +} | ||
30376 | + | ||
30377 | +/* inode must be locked already */ | ||
30378 | +static struct inode_obj_id* alloc_inode_obj(struct inode* inode, | ||
30379 | + obj_type_t type, | ||
30380 | + unsigned int id) | ||
30381 | +{ | ||
30382 | + struct inode_obj_id* obj; | ||
30383 | + void* raw_obj; | ||
30384 | + | ||
30385 | + raw_obj = fdso_create(type); | ||
30386 | + if (!raw_obj) | ||
30387 | + return NULL; | ||
30388 | + | ||
30389 | + obj = kmalloc(sizeof(*obj), GFP_KERNEL); | ||
30390 | + if (!obj) | ||
30391 | + return NULL; | ||
30392 | + INIT_LIST_HEAD(&obj->list); | ||
30393 | + atomic_set(&obj->count, 1); | ||
30394 | + obj->type = type; | ||
30395 | + obj->id = id; | ||
30396 | + obj->obj = raw_obj; | ||
30397 | + obj->inode = inode; | ||
30398 | + | ||
30399 | + list_add(&obj->list, &inode->i_obj_list); | ||
30400 | + atomic_inc(&inode->i_count); | ||
30401 | + | ||
30402 | + printk(KERN_DEBUG "alloc_inode_obj(%p, %d, %d): object created\n", inode, type, id); | ||
30403 | + return obj; | ||
30404 | +} | ||
30405 | + | ||
30406 | +/* inode must be locked already */ | ||
30407 | +static struct inode_obj_id* get_inode_obj(struct inode* inode, | ||
30408 | + obj_type_t type, | ||
30409 | + unsigned int id) | ||
30410 | +{ | ||
30411 | + struct list_head* pos; | ||
30412 | + struct inode_obj_id* obj = NULL; | ||
30413 | + | ||
30414 | + list_for_each(pos, &inode->i_obj_list) { | ||
30415 | + obj = list_entry(pos, struct inode_obj_id, list); | ||
30416 | + if (obj->id == id && obj->type == type) { | ||
30417 | + atomic_inc(&obj->count); | ||
30418 | + return obj; | ||
30419 | + } | ||
30420 | + } | ||
30421 | + printk(KERN_DEBUG "get_inode_obj(%p, %d, %d): couldn't find object\n", inode, type, id); | ||
30422 | + return NULL; | ||
30423 | +} | ||
30424 | + | ||
30425 | + | ||
30426 | +static void put_inode_obj(struct inode_obj_id* obj) | ||
30427 | +{ | ||
30428 | + struct inode* inode; | ||
30429 | + int let_go = 0; | ||
30430 | + | ||
30431 | + inode = obj->inode; | ||
30432 | + if (atomic_dec_and_test(&obj->count)) { | ||
30433 | + | ||
30434 | + mutex_lock(&inode->i_obj_mutex); | ||
30435 | + /* no new references can be obtained */ | ||
30436 | + if (!atomic_read(&obj->count)) { | ||
30437 | + list_del(&obj->list); | ||
30438 | + fdso_destroy(obj->type, obj->obj); | ||
30439 | + kfree(obj); | ||
30440 | + let_go = 1; | ||
30441 | + } | ||
30442 | + mutex_unlock(&inode->i_obj_mutex); | ||
30443 | + if (let_go) | ||
30444 | + iput(inode); | ||
30445 | + } | ||
30446 | +} | ||
30447 | + | ||
30448 | +static struct od_table_entry* get_od_entry(struct task_struct* t) | ||
30449 | +{ | ||
30450 | + struct od_table_entry* table; | ||
30451 | + int i; | ||
30452 | + | ||
30453 | + | ||
30454 | + table = t->od_table; | ||
30455 | + if (!table) { | ||
30456 | + table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS, | ||
30457 | + GFP_KERNEL); | ||
30458 | + t->od_table = table; | ||
30459 | + } | ||
30460 | + | ||
30461 | + for (i = 0; table && i < MAX_OBJECT_DESCRIPTORS; i++) | ||
30462 | + if (!table[i].used) { | ||
30463 | + table[i].used = 1; | ||
30464 | + return table + i; | ||
30465 | + } | ||
30466 | + return NULL; | ||
30467 | +} | ||
30468 | + | ||
30469 | +static int put_od_entry(struct od_table_entry* od) | ||
30470 | +{ | ||
30471 | + put_inode_obj(od->obj); | ||
30472 | + od->used = 0; | ||
30473 | + return 0; | ||
30474 | +} | ||
30475 | + | ||
30476 | +void exit_od_table(struct task_struct* t) | ||
30477 | +{ | ||
30478 | + int i; | ||
30479 | + | ||
30480 | + if (t->od_table) { | ||
30481 | + for (i = 0; i < MAX_OBJECT_DESCRIPTORS; i++) | ||
30482 | + if (t->od_table[i].used) | ||
30483 | + put_od_entry(t->od_table + i); | ||
30484 | + kfree(t->od_table); | ||
30485 | + t->od_table = NULL; | ||
30486 | + } | ||
30487 | +} | ||
30488 | + | ||
30489 | +static int do_sys_od_open(struct file* file, obj_type_t type, int id, | ||
30490 | + void* __user config) | ||
30491 | +{ | ||
30492 | + int idx = 0, err; | ||
30493 | + struct inode* inode; | ||
30494 | + struct inode_obj_id* obj = NULL; | ||
30495 | + struct od_table_entry* entry; | ||
30496 | + | ||
30497 | + inode = file->f_dentry->d_inode; | ||
30498 | + | ||
30499 | + entry = get_od_entry(current); | ||
30500 | + if (!entry) | ||
30501 | + return -ENOMEM; | ||
30502 | + | ||
30503 | + mutex_lock(&inode->i_obj_mutex); | ||
30504 | + obj = get_inode_obj(inode, type, id); | ||
30505 | + if (!obj) | ||
30506 | + obj = alloc_inode_obj(inode, type, id); | ||
30507 | + if (!obj) { | ||
30508 | + idx = -ENOMEM; | ||
30509 | + entry->used = 0; | ||
30510 | + } else { | ||
30511 | + entry->obj = obj; | ||
30512 | + entry->extra = NULL; | ||
30513 | + idx = entry - current->od_table; | ||
30514 | + } | ||
30515 | + | ||
30516 | + mutex_unlock(&inode->i_obj_mutex); | ||
30517 | + | ||
30518 | + err = fdso_open(entry, config); | ||
30519 | + if (err < 0) { | ||
30520 | + /* The class rejected the open call. | ||
30521 | + * We need to clean up and tell user space. | ||
30522 | + */ | ||
30523 | + put_od_entry(entry); | ||
30524 | + idx = err; | ||
30525 | + } | ||
30526 | + | ||
30527 | + return idx; | ||
30528 | +} | ||
30529 | + | ||
30530 | + | ||
30531 | +struct od_table_entry* __od_lookup(int od) | ||
30532 | +{ | ||
30533 | + struct task_struct *t = current; | ||
30534 | + | ||
30535 | + if (!t->od_table) | ||
30536 | + return NULL; | ||
30537 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
30538 | + return NULL; | ||
30539 | + if (!t->od_table[od].used) | ||
30540 | + return NULL; | ||
30541 | + return t->od_table + od; | ||
30542 | +} | ||
30543 | + | ||
30544 | + | ||
30545 | +asmlinkage long sys_od_open(int fd, int type, int obj_id, void* __user config) | ||
30546 | +{ | ||
30547 | + int ret = 0; | ||
30548 | + struct file* file; | ||
30549 | + | ||
30550 | + /* | ||
30551 | + 1) get file from fd, get inode from file | ||
30552 | + 2) lock inode | ||
30553 | + 3) try to lookup object | ||
30554 | + 4) if not present create and enqueue object, inc inode refcnt | ||
30555 | + 5) increment refcnt of object | ||
30556 | + 6) alloc od_table_entry, setup ptrs | ||
30557 | + 7) unlock inode | ||
30558 | + 8) return offset in od_table as OD | ||
30559 | + */ | ||
30560 | + | ||
30561 | + if (type < MIN_OBJ_TYPE || type > MAX_OBJ_TYPE) { | ||
30562 | + ret = -EINVAL; | ||
30563 | + goto out; | ||
30564 | + } | ||
30565 | + | ||
30566 | + file = fget(fd); | ||
30567 | + if (!file) { | ||
30568 | + ret = -EBADF; | ||
30569 | + goto out; | ||
30570 | + } | ||
30571 | + | ||
30572 | + ret = do_sys_od_open(file, type, obj_id, config); | ||
30573 | + | ||
30574 | + fput(file); | ||
30575 | + | ||
30576 | +out: | ||
30577 | + return ret; | ||
30578 | +} | ||
30579 | + | ||
30580 | + | ||
30581 | +asmlinkage long sys_od_close(int od) | ||
30582 | +{ | ||
30583 | + int ret = -EINVAL; | ||
30584 | + struct task_struct *t = current; | ||
30585 | + | ||
30586 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
30587 | + return ret; | ||
30588 | + | ||
30589 | + if (!t->od_table || !t->od_table[od].used) | ||
30590 | + return ret; | ||
30591 | + | ||
30592 | + | ||
30593 | + /* give the class a chance to reject the close | ||
30594 | + */ | ||
30595 | + ret = fdso_close(t->od_table + od); | ||
30596 | + if (ret == 0) | ||
30597 | + ret = put_od_entry(t->od_table + od); | ||
30598 | + | ||
30599 | + return ret; | ||
30600 | +} | ||
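
User space reaches this code through sys_od_open()/sys_od_close(): all tasks that open the same inode with the same (type, id) pair share one kernel object, and the returned value is an index into the per-task od_table. A hedged sketch of the call sequence; the syscall numbers and the FMLP_SEM type constant are placeholders (real programs would use liblitmus wrappers), only the argument order follows sys_od_open() above.

    /* User-space sketch of the od_open()/od_close() pattern.
     * __NR_od_open, __NR_od_close and FMLP_SEM are placeholders. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define __NR_od_open   9999   /* placeholder syscall number */
    #define __NR_od_close  9998   /* placeholder syscall number */
    #define FMLP_SEM       0      /* placeholder object type */

    int main(void)
    {
        long od;
        /* the shared object is attached to an inode: every task that opens
         * this file and asks for the same (type, id) gets the same object */
        int fd = open("/tmp/shared-lock-ns", O_RDWR | O_CREAT, 0666);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        od = syscall(__NR_od_open, fd, FMLP_SEM, /* obj_id */ 0, NULL);
        if (od < 0) {
            perror("od_open");
            return 1;
        }

        /* ... use the object descriptor, e.g. with fmlp_down()/fmlp_up() ... */

        syscall(__NR_od_close, od);
        close(fd);
        return 0;
    }
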
30601 | diff --git a/litmus/fmlp.c b/litmus/fmlp.c | ||
30602 | new file mode 100644 | ||
30603 | index 0000000..03fa735 | ||
30604 | --- /dev/null | ||
30605 | +++ b/litmus/fmlp.c | ||
30606 | @@ -0,0 +1,268 @@ | ||
30607 | +/* | ||
30608 | + * FMLP implementation. | ||
30609 | + * Much of the code here is borrowed from include/asm-i386/semaphore.h | ||
30610 | + */ | ||
30611 | + | ||
30612 | +#include <asm/atomic.h> | ||
30613 | + | ||
30614 | +#include <linux/semaphore.h> | ||
30615 | +#include <linux/sched.h> | ||
30616 | +#include <linux/wait.h> | ||
30617 | +#include <linux/spinlock.h> | ||
30618 | + | ||
30619 | +#include <litmus/litmus.h> | ||
30620 | +#include <litmus/sched_plugin.h> | ||
30621 | +#include <litmus/edf_common.h> | ||
30622 | + | ||
30623 | +#include <litmus/fdso.h> | ||
30624 | + | ||
30625 | +#include <litmus/trace.h> | ||
30626 | + | ||
30627 | +#ifdef CONFIG_FMLP | ||
30628 | + | ||
30629 | +static void* create_fmlp_semaphore(void) | ||
30630 | +{ | ||
30631 | + struct pi_semaphore* sem; | ||
30632 | + int i; | ||
30633 | + | ||
30634 | + sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
30635 | + if (!sem) | ||
30636 | + return NULL; | ||
30637 | + atomic_set(&sem->count, 1); | ||
30638 | + sem->sleepers = 0; | ||
30639 | + init_waitqueue_head(&sem->wait); | ||
30640 | + sem->hp.task = NULL; | ||
30641 | + sem->holder = NULL; | ||
30642 | + for (i = 0; i < NR_CPUS; i++) | ||
30643 | + sem->hp.cpu_task[i] = NULL; | ||
30644 | + return sem; | ||
30645 | +} | ||
30646 | + | ||
30647 | +static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
30648 | +{ | ||
30649 | + if (!fmlp_active()) | ||
30650 | + return -EBUSY; | ||
30651 | + return 0; | ||
30652 | +} | ||
30653 | + | ||
30654 | +static void destroy_fmlp_semaphore(void* sem) | ||
30655 | +{ | ||
30656 | + /* XXX assert invariants */ | ||
30657 | + kfree(sem); | ||
30658 | +} | ||
30659 | + | ||
30660 | +struct fdso_ops fmlp_sem_ops = { | ||
30661 | + .create = create_fmlp_semaphore, | ||
30662 | + .open = open_fmlp_semaphore, | ||
30663 | + .destroy = destroy_fmlp_semaphore | ||
30664 | +}; | ||
30665 | + | ||
30666 | +struct wq_pair { | ||
30667 | + struct task_struct* tsk; | ||
30668 | + struct pi_semaphore* sem; | ||
30669 | +}; | ||
30670 | + | ||
30671 | +static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
30672 | + void *key) | ||
30673 | +{ | ||
30674 | + struct wq_pair* wqp = (struct wq_pair*) wait->private; | ||
30675 | + set_rt_flags(wqp->tsk, RT_F_EXIT_SEM); | ||
30676 | + litmus->inherit_priority(wqp->sem, wqp->tsk); | ||
30677 | + TRACE_TASK(wqp->tsk, | ||
30678 | + "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n"); | ||
30679 | + /* point to task for default_wake_function() */ | ||
30680 | + wait->private = wqp->tsk; | ||
30681 | + default_wake_function(wait, mode, sync, key); | ||
30682 | + | ||
30683 | + /* Always return true since we know that if we encountered a task | ||
30684 | + * that was already running the wake_up raced with the schedule in | ||
30685 | + * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled | ||
30686 | + * immediately and own the lock. We must not wake up another task in | ||
30687 | + * any case. | ||
30688 | + */ | ||
30689 | + return 1; | ||
30690 | +} | ||
30691 | + | ||
30692 | +/* caller is responsible for locking */ | ||
30693 | +int edf_set_hp_task(struct pi_semaphore *sem) | ||
30694 | +{ | ||
30695 | + struct list_head *tmp, *next; | ||
30696 | + struct task_struct *queued; | ||
30697 | + int ret = 0; | ||
30698 | + | ||
30699 | + sem->hp.task = NULL; | ||
30700 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
30701 | + queued = ((struct wq_pair*) | ||
30702 | + list_entry(tmp, wait_queue_t, | ||
30703 | + task_list)->private)->tsk; | ||
30704 | + | ||
30705 | + /* Compare task prios, find high prio task. */ | ||
30706 | + if (edf_higher_prio(queued, sem->hp.task)) { | ||
30707 | + sem->hp.task = queued; | ||
30708 | + ret = 1; | ||
30709 | + } | ||
30710 | + } | ||
30711 | + return ret; | ||
30712 | +} | ||
30713 | + | ||
30714 | +/* caller is responsible for locking */ | ||
30715 | +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu) | ||
30716 | +{ | ||
30717 | + struct list_head *tmp, *next; | ||
30718 | + struct task_struct *queued; | ||
30719 | + int ret = 0; | ||
30720 | + | ||
30721 | + sem->hp.cpu_task[cpu] = NULL; | ||
30722 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
30723 | + queued = ((struct wq_pair*) | ||
30724 | + list_entry(tmp, wait_queue_t, | ||
30725 | + task_list)->private)->tsk; | ||
30726 | + | ||
30727 | + /* Compare task prios, find high prio task. */ | ||
30728 | + if (get_partition(queued) == cpu && | ||
30729 | + edf_higher_prio(queued, sem->hp.cpu_task[cpu])) { | ||
30730 | + sem->hp.cpu_task[cpu] = queued; | ||
30731 | + ret = 1; | ||
30732 | + } | ||
30733 | + } | ||
30734 | + return ret; | ||
30735 | +} | ||
30736 | + | ||
30737 | +static int do_fmlp_down(struct pi_semaphore* sem) | ||
30738 | +{ | ||
30739 | + unsigned long flags; | ||
30740 | + struct task_struct *tsk = current; | ||
30741 | + struct wq_pair pair; | ||
30742 | + int suspended = 1; | ||
30743 | + wait_queue_t wait = { | ||
30744 | + .private = &pair, | ||
30745 | + .func = rt_pi_wake_up, | ||
30746 | + .task_list = {NULL, NULL} | ||
30747 | + }; | ||
30748 | + | ||
30749 | + pair.tsk = tsk; | ||
30750 | + pair.sem = sem; | ||
30751 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
30752 | + | ||
30753 | + if (atomic_dec_return(&sem->count) < 0 || | ||
30754 | + waitqueue_active(&sem->wait)) { | ||
30755 | + /* we need to suspend */ | ||
30756 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
30757 | + add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
30758 | + | ||
30759 | + TRACE_CUR("suspends on PI lock %p\n", sem); | ||
30760 | + litmus->pi_block(sem, tsk); | ||
30761 | + | ||
30762 | + /* release lock before sleeping */ | ||
30763 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
30764 | + | ||
30765 | + TS_PI_DOWN_END; | ||
30766 | + preempt_enable_no_resched(); | ||
30767 | + | ||
30768 | + | ||
30769 | + /* we depend on the FIFO order | ||
30770 | + * Thus, we don't need to recheck when we wake up, we | ||
30771 | + * are guaranteed to have the lock since there is only one | ||
30772 | + * wake up per release | ||
30773 | + */ | ||
30774 | + schedule(); | ||
30775 | + | ||
30776 | + TRACE_CUR("woke up, now owns PI lock %p\n", sem); | ||
30777 | + | ||
30778 | + /* try_to_wake_up() set our state to TASK_RUNNING, | ||
30779 | + * all we need to do is to remove our wait queue entry | ||
30780 | + */ | ||
30781 | + remove_wait_queue(&sem->wait, &wait); | ||
30782 | + } else { | ||
30783 | + /* no priority inheritance necessary, since there are no queued | ||
30784 | + * tasks. | ||
30785 | + */ | ||
30786 | + suspended = 0; | ||
30787 | + TRACE_CUR("acquired PI lock %p, no contention\n", sem); | ||
30788 | + sem->holder = tsk; | ||
30789 | + | ||
30790 | + /* don't know if we're global or partitioned. */ | ||
30791 | + sem->hp.task = tsk; | ||
30792 | + sem->hp.cpu_task[get_partition(tsk)] = tsk; | ||
30793 | + | ||
30794 | + litmus->inherit_priority(sem, tsk); | ||
30795 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
30796 | + } | ||
30797 | + return suspended; | ||
30798 | +} | ||
30799 | + | ||
30800 | +static void do_fmlp_up(struct pi_semaphore* sem) | ||
30801 | +{ | ||
30802 | + unsigned long flags; | ||
30803 | + | ||
30804 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
30805 | + | ||
30806 | + TRACE_CUR("releases PI lock %p\n", sem); | ||
30807 | + litmus->return_priority(sem); | ||
30808 | + sem->holder = NULL; | ||
30809 | + if (atomic_inc_return(&sem->count) < 1) | ||
30810 | + /* there is a task queued */ | ||
30811 | + wake_up_locked(&sem->wait); | ||
30812 | + | ||
30813 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
30814 | +} | ||
30815 | + | ||
30816 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
30817 | +{ | ||
30818 | + long ret = 0; | ||
30819 | + struct pi_semaphore * sem; | ||
30820 | + int suspended = 0; | ||
30821 | + | ||
30822 | + preempt_disable(); | ||
30823 | + TS_PI_DOWN_START; | ||
30824 | + | ||
30825 | + sem = lookup_fmlp_sem(sem_od); | ||
30826 | + if (sem) | ||
30827 | + suspended = do_fmlp_down(sem); | ||
30828 | + else | ||
30829 | + ret = -EINVAL; | ||
30830 | + | ||
30831 | + if (!suspended) { | ||
30832 | + TS_PI_DOWN_END; | ||
30833 | + preempt_enable(); | ||
30834 | + } | ||
30835 | + | ||
30836 | + return ret; | ||
30837 | +} | ||
30838 | + | ||
30839 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
30840 | +{ | ||
30841 | + long ret = 0; | ||
30842 | + struct pi_semaphore * sem; | ||
30843 | + | ||
30844 | + preempt_disable(); | ||
30845 | + TS_PI_UP_START; | ||
30846 | + | ||
30847 | + sem = lookup_fmlp_sem(sem_od); | ||
30848 | + if (sem) | ||
30849 | + do_fmlp_up(sem); | ||
30850 | + else | ||
30851 | + ret = -EINVAL; | ||
30852 | + | ||
30853 | + | ||
30854 | + TS_PI_UP_END; | ||
30855 | + preempt_enable(); | ||
30856 | + | ||
30857 | + return ret; | ||
30858 | +} | ||
30859 | + | ||
30860 | +#else | ||
30861 | + | ||
30862 | +struct fdso_ops fmlp_sem_ops = {}; | ||
30863 | + | ||
30864 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
30865 | +{ | ||
30866 | + return -ENOSYS; | ||
30867 | +} | ||
30868 | + | ||
30869 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
30870 | +{ | ||
30871 | + return -ENOSYS; | ||
30872 | +} | ||
30873 | + | ||
30874 | +#endif | ||
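
Putting the pieces together, an FMLP critical section from user space brackets the shared access with sys_fmlp_down()/sys_fmlp_up() on a semaphore object descriptor obtained via od_open(). A hedged sketch with placeholder syscall numbers again; real code uses the liblitmus wrappers.

    /* Sketch: an FMLP-protected long critical section, given a semaphore
     * object descriptor sem_od. Syscall numbers are placeholders. */
    #include <sys/syscall.h>
    #include <unistd.h>

    #define __NR_fmlp_down 9997   /* placeholder */
    #define __NR_fmlp_up   9996   /* placeholder */

    static void critical_section(int sem_od, volatile int *shared)
    {
        /* Suspends in FIFO order if the lock is contended; while we wait,
         * the holder is boosted via the plugin's inherit_priority(). */
        syscall(__NR_fmlp_down, sem_od);

        (*shared)++;              /* the actual critical section */

        /* Wakes at most one waiter and returns any inherited priority. */
        syscall(__NR_fmlp_up, sem_od);
    }

    int main(void)
    {
        int shared = 0;
        critical_section(/* sem_od */ 0, &shared);
        return 0;
    }
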
30875 | diff --git a/litmus/ft_event.c b/litmus/ft_event.c | ||
30876 | new file mode 100644 | ||
30877 | index 0000000..6084b6d | ||
30878 | --- /dev/null | ||
30879 | +++ b/litmus/ft_event.c | ||
30880 | @@ -0,0 +1,43 @@ | ||
30881 | +#include <linux/types.h> | ||
30882 | + | ||
30883 | +#include <litmus/feather_trace.h> | ||
30884 | + | ||
30885 | +#ifndef __ARCH_HAS_FEATHER_TRACE | ||
30886 | +/* provide dummy implementation */ | ||
30887 | + | ||
30888 | +int ft_events[MAX_EVENTS]; | ||
30889 | + | ||
30890 | +int ft_enable_event(unsigned long id) | ||
30891 | +{ | ||
30892 | + if (id < MAX_EVENTS) { | ||
30893 | + ft_events[id]++; | ||
30894 | + return 1; | ||
30895 | + } else | ||
30896 | + return 0; | ||
30897 | +} | ||
30898 | + | ||
30899 | +int ft_disable_event(unsigned long id) | ||
30900 | +{ | ||
30901 | + if (id < MAX_EVENTS && ft_events[id]) { | ||
30902 | + ft_events[id]--; | ||
30903 | + return 1; | ||
30904 | + } else | ||
30905 | + return 0; | ||
30906 | +} | ||
30907 | + | ||
30908 | +int ft_disable_all_events(void) | ||
30909 | +{ | ||
30910 | + int i; | ||
30911 | + | ||
30912 | + for (i = 0; i < MAX_EVENTS; i++) | ||
30913 | + ft_events[i] = 0; | ||
30914 | + | ||
30915 | + return MAX_EVENTS; | ||
30916 | +} | ||
30917 | + | ||
30918 | +int ft_is_event_enabled(unsigned long id) | ||
30919 | +{ | ||
30920 | + return id < MAX_EVENTS && ft_events[id]; | ||
30921 | +} | ||
30922 | + | ||
30923 | +#endif | ||
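
On architectures without native Feather-Trace support, the fallback above simply counts how often each event ID has been enabled; an event stays active while its counter is non-zero, so nested enables need matching disables. A small stand-alone sketch of that behavior (the MAX_EVENTS value is an assumption; the real one comes from feather_trace.h):

    /* Sketch of the fallback semantics: per-event enable counters. */
    #include <stdio.h>

    #define MAX_EVENTS 1024       /* assumed; defined in feather_trace.h */
    static int events[MAX_EVENTS];

    static int enable(unsigned long id)
    {
        if (id >= MAX_EVENTS)
            return 0;
        events[id]++;             /* nested enables accumulate */
        return 1;
    }

    static int disable(unsigned long id)
    {
        if (id >= MAX_EVENTS || !events[id])
            return 0;
        events[id]--;
        return 1;
    }

    int main(void)
    {
        enable(3);
        enable(3);                /* counter is now 2 */
        disable(3);
        printf("event 3 enabled: %d\n", events[3] != 0);  /* prints 1 */
        return 0;
    }
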
30924 | diff --git a/litmus/ftdev.c b/litmus/ftdev.c | ||
30925 | new file mode 100644 | ||
30926 | index 0000000..8b2d74d | ||
30927 | --- /dev/null | ||
30928 | +++ b/litmus/ftdev.c | ||
30929 | @@ -0,0 +1,359 @@ | ||
30930 | +#include <linux/sched.h> | ||
30931 | +#include <linux/fs.h> | ||
30932 | +#include <linux/cdev.h> | ||
30933 | +#include <asm/uaccess.h> | ||
30934 | +#include <linux/module.h> | ||
30935 | + | ||
30936 | +#include <litmus/litmus.h> | ||
30937 | +#include <litmus/feather_trace.h> | ||
30938 | +#include <litmus/ftdev.h> | ||
30939 | + | ||
30940 | +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size) | ||
30941 | +{ | ||
30942 | + struct ft_buffer* buf; | ||
30943 | + size_t total = (size + 1) * count; | ||
30944 | + char* mem; | ||
30945 | + int order = 0, pages = 1; | ||
30946 | + | ||
30947 | + buf = kmalloc(sizeof(*buf), GFP_KERNEL); | ||
30948 | + if (!buf) | ||
30949 | + return NULL; | ||
30950 | + | ||
30951 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
30952 | + while (pages < total) { | ||
30953 | + order++; | ||
30954 | + pages *= 2; | ||
30955 | + } | ||
30956 | + | ||
30957 | + mem = (char*) __get_free_pages(GFP_KERNEL, order); | ||
30958 | + if (!mem) { | ||
30959 | + kfree(buf); | ||
30960 | + return NULL; | ||
30961 | + } | ||
30962 | + | ||
30963 | + if (!init_ft_buffer(buf, count, size, | ||
30964 | + mem + (count * size), /* markers at the end */ | ||
30965 | + mem)) { /* buffer objects */ | ||
30966 | + free_pages((unsigned long) mem, order); | ||
30967 | + kfree(buf); | ||
30968 | + return NULL; | ||
30969 | + } | ||
30970 | + return buf; | ||
30971 | +} | ||
30972 | + | ||
30973 | +void free_ft_buffer(struct ft_buffer* buf) | ||
30974 | +{ | ||
30975 | + int order = 0, pages = 1; | ||
30976 | + size_t total; | ||
30977 | + | ||
30978 | + if (buf) { | ||
30979 | + total = (buf->slot_size + 1) * buf->slot_count; | ||
30980 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
30981 | + while (pages < total) { | ||
30982 | + order++; | ||
30983 | + pages *= 2; | ||
30984 | + } | ||
30985 | + free_pages((unsigned long) buf->buffer_mem, order); | ||
30986 | + kfree(buf); | ||
30987 | + } | ||
30988 | +} | ||
30989 | + | ||
30990 | +struct ftdev_event { | ||
30991 | + int id; | ||
30992 | + struct ftdev_event* next; | ||
30993 | +}; | ||
30994 | + | ||
30995 | +static int activate(struct ftdev_event** chain, int id) | ||
30996 | +{ | ||
30997 | + struct ftdev_event* ev = kmalloc(sizeof(*ev), GFP_KERNEL); | ||
30998 | + if (ev) { | ||
30999 | + printk(KERN_INFO | ||
31000 | + "Enabling feather-trace event %d.\n", (int) id); | ||
31001 | + ft_enable_event(id); | ||
31002 | + ev->id = id; | ||
31003 | + ev->next = *chain; | ||
31004 | + *chain = ev; | ||
31005 | + } | ||
31006 | + return ev ? 0 : -ENOMEM; | ||
31007 | +} | ||
31008 | + | ||
31009 | +static void deactivate(struct ftdev_event** chain, int id) | ||
31010 | +{ | ||
31011 | + struct ftdev_event **cur = chain; | ||
31012 | + struct ftdev_event *nxt; | ||
31013 | + while (*cur) { | ||
31014 | + if ((*cur)->id == id) { | ||
31015 | + nxt = (*cur)->next; | ||
31016 | + kfree(*cur); | ||
31017 | + *cur = nxt; | ||
31018 | + printk(KERN_INFO | ||
31019 | + "Disabling feather-trace event %d.\n", (int) id); | ||
31020 | + ft_disable_event(id); | ||
31021 | + break; | ||
31022 | + } | ||
31023 | + cur = &(*cur)->next; | ||
31024 | + } | ||
31025 | +} | ||
31026 | + | ||
31027 | +static int ftdev_open(struct inode *in, struct file *filp) | ||
31028 | +{ | ||
31029 | + struct ftdev* ftdev; | ||
31030 | + struct ftdev_minor* ftdm; | ||
31031 | + unsigned int buf_idx = iminor(in); | ||
31032 | + int err = 0; | ||
31033 | + | ||
31034 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
31035 | + | ||
31036 | + if (buf_idx >= ftdev->minor_cnt) { | ||
31037 | + err = -ENODEV; | ||
31038 | + goto out; | ||
31039 | + } | ||
31040 | + if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx))) | ||
31041 | + goto out; | ||
31042 | + | ||
31043 | + ftdm = ftdev->minor + buf_idx; | ||
31044 | + filp->private_data = ftdm; | ||
31045 | + | ||
31046 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
31047 | + err = -ERESTARTSYS; | ||
31048 | + goto out; | ||
31049 | + } | ||
31050 | + | ||
31051 | + if (!ftdm->readers && ftdev->alloc) | ||
31052 | + err = ftdev->alloc(ftdev, buf_idx); | ||
31053 | + if (0 == err) | ||
31054 | + ftdm->readers++; | ||
31055 | + | ||
31056 | + mutex_unlock(&ftdm->lock); | ||
31057 | +out: | ||
31058 | + return err; | ||
31059 | +} | ||
31060 | + | ||
31061 | +static int ftdev_release(struct inode *in, struct file *filp) | ||
31062 | +{ | ||
31063 | + struct ftdev* ftdev; | ||
31064 | + struct ftdev_minor* ftdm; | ||
31065 | + unsigned int buf_idx = iminor(in); | ||
31066 | + int err = 0; | ||
31067 | + | ||
31068 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
31069 | + | ||
31070 | + if (buf_idx >= ftdev->minor_cnt) { | ||
31071 | + err = -ENODEV; | ||
31072 | + goto out; | ||
31073 | + } | ||
31074 | + ftdm = ftdev->minor + buf_idx; | ||
31075 | + | ||
31076 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
31077 | + err = -ERESTARTSYS; | ||
31078 | + goto out; | ||
31079 | + } | ||
31080 | + | ||
31081 | + if (ftdm->readers == 1) { | ||
31082 | + while (ftdm->events) | ||
31083 | + deactivate(&ftdm->events, ftdm->events->id); | ||
31084 | + | ||
31085 | + /* wait for any pending events to complete */ | ||
31086 | + set_current_state(TASK_UNINTERRUPTIBLE); | ||
31087 | + schedule_timeout(HZ); | ||
31088 | + | ||
31089 | + printk(KERN_ALERT "Failed trace writes: %u\n", | ||
31090 | + ftdm->buf->failed_writes); | ||
31091 | + | ||
31092 | + if (ftdev->free) | ||
31093 | + ftdev->free(ftdev, buf_idx); | ||
31094 | + } | ||
31095 | + | ||
31096 | + ftdm->readers--; | ||
31097 | + mutex_unlock(&ftdm->lock); | ||
31098 | +out: | ||
31099 | + return err; | ||
31100 | +} | ||
31101 | + | ||
31102 | +/* based on ft_buffer_read | ||
31103 | + * @returns < 0 : page fault | ||
31104 | + * = 0 : no data available | ||
31105 | + * = 1 : one slot copied | ||
31106 | + */ | ||
31107 | +static int ft_buffer_copy_to_user(struct ft_buffer* buf, char __user *dest) | ||
31108 | +{ | ||
31109 | + unsigned int idx; | ||
31110 | + int err = 0; | ||
31111 | + if (buf->free_count != buf->slot_count) { | ||
31112 | + /* data available */ | ||
31113 | + idx = buf->read_idx % buf->slot_count; | ||
31114 | + if (buf->slots[idx] == SLOT_READY) { | ||
31115 | + err = copy_to_user(dest, ((char*) buf->buffer_mem) + | ||
31116 | + idx * buf->slot_size, | ||
31117 | + buf->slot_size); | ||
31118 | + if (err == 0) { | ||
31119 | + /* copy ok */ | ||
31120 | + buf->slots[idx] = SLOT_FREE; | ||
31121 | + buf->read_idx++; | ||
31122 | + fetch_and_inc(&buf->free_count); | ||
31123 | + err = 1; | ||
31124 | + } | ||
31125 | + } | ||
31126 | + } | ||
31127 | + return err; | ||
31128 | +} | ||
31129 | + | ||
31130 | +static ssize_t ftdev_read(struct file *filp, | ||
31131 | + char __user *to, size_t len, loff_t *f_pos) | ||
31132 | +{ | ||
31133 | + /* we ignore f_pos, this is strictly sequential */ | ||
31134 | + | ||
31135 | + ssize_t err = 0; | ||
31136 | + size_t chunk; | ||
31137 | + int copied; | ||
31138 | + struct ftdev_minor* ftdm = filp->private_data; | ||
31139 | + | ||
31140 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
31141 | + err = -ERESTARTSYS; | ||
31142 | + goto out; | ||
31143 | + } | ||
31144 | + | ||
31145 | + | ||
31146 | + chunk = ftdm->buf->slot_size; | ||
31147 | + while (len >= chunk) { | ||
31148 | + copied = ft_buffer_copy_to_user(ftdm->buf, to); | ||
31149 | + if (copied == 1) { | ||
31150 | + len -= chunk; | ||
31151 | + to += chunk; | ||
31152 | + err += chunk; | ||
31153 | + } else if (err == 0 && copied == 0 && ftdm->events) { | ||
31154 | + /* Only wait if there are any events enabled and only | ||
31155 | + * if we haven't copied some data yet. We cannot wait | ||
31156 | + * here with copied data because that data would get | ||
31157 | + * lost if the task is interrupted (e.g., killed). | ||
31158 | + */ | ||
31159 | + set_current_state(TASK_INTERRUPTIBLE); | ||
31160 | + schedule_timeout(50); | ||
31161 | + if (signal_pending(current)) { | ||
31162 | + if (err == 0) | ||
31163 | + /* nothing read yet, signal problem */ | ||
31164 | + err = -ERESTARTSYS; | ||
31165 | + break; | ||
31166 | + } | ||
31167 | + } else if (copied < 0) { | ||
31168 | + /* page fault */ | ||
31169 | + err = copied; | ||
31170 | + break; | ||
31171 | + } else | ||
31172 | + /* nothing left to get, return to user space */ | ||
31173 | + break; | ||
31174 | + } | ||
31175 | + mutex_unlock(&ftdm->lock); | ||
31176 | +out: | ||
31177 | + return err; | ||
31178 | +} | ||
31179 | + | ||
31180 | +typedef uint32_t cmd_t; | ||
31181 | + | ||
31182 | +static ssize_t ftdev_write(struct file *filp, const char __user *from, | ||
31183 | + size_t len, loff_t *f_pos) | ||
31184 | +{ | ||
31185 | + struct ftdev_minor* ftdm = filp->private_data; | ||
31186 | + ssize_t err = -EINVAL; | ||
31187 | + cmd_t cmd; | ||
31188 | + cmd_t id; | ||
31189 | + | ||
31190 | + if (len % sizeof(cmd) || len < 2 * sizeof(cmd)) | ||
31191 | + goto out; | ||
31192 | + | ||
31193 | + if (copy_from_user(&cmd, from, sizeof(cmd))) { | ||
31194 | + err = -EFAULT; | ||
31195 | + goto out; | ||
31196 | + } | ||
31197 | + len -= sizeof(cmd); | ||
31198 | + from += sizeof(cmd); | ||
31199 | + | ||
31200 | + if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD) | ||
31201 | + goto out; | ||
31202 | + | ||
31203 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
31204 | + err = -ERESTARTSYS; | ||
31205 | + goto out; | ||
31206 | + } | ||
31207 | + | ||
31208 | + err = sizeof(cmd); | ||
31209 | + while (len) { | ||
31210 | + if (copy_from_user(&id, from, sizeof(cmd))) { | ||
31211 | + err = -EFAULT; | ||
31212 | + goto out_unlock; | ||
31213 | + } | ||
31214 | + /* FIXME: check id against list of acceptable events */ | ||
31215 | + len -= sizeof(cmd); | ||
31216 | + from += sizeof(cmd); | ||
31217 | + if (cmd == FTDEV_DISABLE_CMD) | ||
31218 | + deactivate(&ftdm->events, id); | ||
31219 | + else if (activate(&ftdm->events, id) != 0) { | ||
31220 | + err = -ENOMEM; | ||
31221 | + goto out_unlock; | ||
31222 | + } | ||
31223 | + err += sizeof(cmd); | ||
31224 | + } | ||
31225 | + | ||
31226 | +out_unlock: | ||
31227 | + mutex_unlock(&ftdm->lock); | ||
31228 | +out: | ||
31229 | + return err; | ||
31230 | +} | ||
31231 | + | ||
31232 | +struct file_operations ftdev_fops = { | ||
31233 | + .owner = THIS_MODULE, | ||
31234 | + .open = ftdev_open, | ||
31235 | + .release = ftdev_release, | ||
31236 | + .write = ftdev_write, | ||
31237 | + .read = ftdev_read, | ||
31238 | +}; | ||
31239 | + | ||
31240 | + | ||
31241 | +void ftdev_init(struct ftdev* ftdev, struct module* owner) | ||
31242 | +{ | ||
31243 | + int i; | ||
31244 | + cdev_init(&ftdev->cdev, &ftdev_fops); | ||
31245 | + ftdev->cdev.owner = owner; | ||
31246 | + ftdev->cdev.ops = &ftdev_fops; | ||
31247 | + ftdev->minor_cnt = 0; | ||
31248 | + for (i = 0; i < MAX_FTDEV_MINORS; i++) { | ||
31249 | + mutex_init(&ftdev->minor[i].lock); | ||
31250 | + ftdev->minor[i].readers = 0; | ||
31251 | + ftdev->minor[i].buf = NULL; | ||
31252 | + ftdev->minor[i].events = NULL; | ||
31253 | + } | ||
31254 | + ftdev->alloc = NULL; | ||
31255 | + ftdev->free = NULL; | ||
31256 | + ftdev->can_open = NULL; | ||
31257 | +} | ||
31258 | + | ||
31259 | +int register_ftdev(struct ftdev* ftdev, const char* name, int major) | ||
31260 | +{ | ||
31261 | + dev_t trace_dev; | ||
31262 | + int error = 0; | ||
31263 | + | ||
31264 | + if (major) { | ||
31265 | + trace_dev = MKDEV(major, 0); | ||
31266 | + error = register_chrdev_region(trace_dev, ftdev->minor_cnt, | ||
31267 | + name); | ||
31268 | + } else { | ||
31269 | + error = alloc_chrdev_region(&trace_dev, 0, ftdev->minor_cnt, | ||
31270 | + name); | ||
31271 | + major = MAJOR(trace_dev); | ||
31272 | + } | ||
31273 | + if (error) | ||
31274 | + { | ||
31275 | + printk(KERN_WARNING "ftdev(%s): " | ||
31276 | + "Could not register major/minor number %d/%u\n", | ||
31277 | + name, major, ftdev->minor_cnt); | ||
31278 | + return error; | ||
31279 | + } | ||
31280 | + error = cdev_add(&ftdev->cdev, trace_dev, ftdev->minor_cnt); | ||
31281 | + if (error) { | ||
31282 | + printk(KERN_WARNING "ftdev(%s): " | ||
31283 | + "Could not add cdev for major/minor = %d/%u.\n", | ||
31284 | + name, major, ftdev->minor_cnt); | ||
31285 | + return error; | ||
31286 | + } | ||
31287 | + return error; | ||
31288 | +} | ||
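
A trace consumer drives an ftdev minor by writing a 32-bit command word followed by event IDs (to enable or disable instrumentation points) and then reading whole slot-sized records; ftdev_read() only ever returns multiples of the slot size and polls while events are enabled but no data is ready. A hedged user-space sketch; the device path, the FTDEV_ENABLE_CMD value, and the slot size are assumptions, not taken from this hunk.

    /* User-space sketch: enable one event and read one record from an
     * ftdev buffer. DEVICE, FTDEV_ENABLE_CMD and SLOT_SIZE are assumed. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEVICE           "/dev/litmus/ft_trace0"  /* assumed name */
    #define FTDEV_ENABLE_CMD 1u                       /* assumed value */
    #define SLOT_SIZE        16                       /* assumed record size */

    int main(void)
    {
        uint32_t cmd[2] = { FTDEV_ENABLE_CMD, 100 /* some event id */ };
        char record[SLOT_SIZE];
        int fd = open(DEVICE, O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* ftdev_write() insists on at least two command words */
        if (write(fd, cmd, sizeof(cmd)) != (ssize_t) sizeof(cmd))
            perror("write");

        /* ftdev_read() copies one full slot at a time, sleeping in short
         * intervals until data arrives or a signal is pending */
        if (read(fd, record, sizeof(record)) > 0)
            printf("got one %d-byte record\n", SLOT_SIZE);

        close(fd);
        return 0;
    }
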
31289 | diff --git a/litmus/jobs.c b/litmus/jobs.c | ||
31290 | new file mode 100644 | ||
31291 | index 0000000..36e3146 | ||
31292 | --- /dev/null | ||
31293 | +++ b/litmus/jobs.c | ||
31294 | @@ -0,0 +1,43 @@ | ||
31295 | +/* litmus/jobs.c - common job control code | ||
31296 | + */ | ||
31297 | + | ||
31298 | +#include <linux/sched.h> | ||
31299 | + | ||
31300 | +#include <litmus/litmus.h> | ||
31301 | +#include <litmus/jobs.h> | ||
31302 | + | ||
31303 | +void prepare_for_next_period(struct task_struct *t) | ||
31304 | +{ | ||
31305 | + BUG_ON(!t); | ||
31306 | + /* prepare next release */ | ||
31307 | + t->rt_param.job_params.release = t->rt_param.job_params.deadline; | ||
31308 | + t->rt_param.job_params.deadline += get_rt_period(t); | ||
31309 | + t->rt_param.job_params.exec_time = 0; | ||
31310 | + /* update job sequence number */ | ||
31311 | + t->rt_param.job_params.job_no++; | ||
31312 | + | ||
31313 | + /* don't confuse Linux */ | ||
31314 | + t->rt.time_slice = 1; | ||
31315 | +} | ||
31316 | + | ||
31317 | +void release_at(struct task_struct *t, lt_t start) | ||
31318 | +{ | ||
31319 | + t->rt_param.job_params.deadline = start; | ||
31320 | + prepare_for_next_period(t); | ||
31321 | + set_rt_flags(t, RT_F_RUNNING); | ||
31322 | +} | ||
31323 | + | ||
31324 | + | ||
31325 | +/* | ||
31326 | + * Deactivate current task until the beginning of the next period. | ||
31327 | + */ | ||
31328 | +long complete_job(void) | ||
31329 | +{ | ||
31330 | + /* Mark that we do not execute anymore */ | ||
31331 | + set_rt_flags(current, RT_F_SLEEP); | ||
31332 | + /* call schedule; this will return when a new job arrives. | ||
31333 | + * It also takes care of preparing for the next release. | ||
31334 | + */ | ||
31335 | + schedule(); | ||
31336 | + return 0; | ||
31337 | +} | ||
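
complete_job() above is the common helper that scheduler plugins typically wire into their complete-job hook (cf. sys_complete_job() in litmus/litmus.c below), so a periodic task's user-space main loop reduces to "do one job, then sleep until the next release". A hedged sketch of that loop follows; init_litmus() and sleep_next_period() are the conventional liblitmus wrapper names and are assumptions here, and do_one_job() is a placeholder for application work.

    /* Sketch of a periodic LITMUS^RT task (user space, liblitmus-style wrappers assumed). */
    #include <litmus.h>              /* assumed liblitmus header */

    int main(void)
    {
            init_litmus();           /* attach to the LITMUS^RT user-space interface */
            /* ... configure rt_task parameters and enter real-time mode ... */
            for (;;) {
                    do_one_job();           /* application work (placeholder) */
                    sleep_next_period();    /* -> sys_complete_job() -> complete_job() */
            }
            return 0;
    }
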
31338 | diff --git a/litmus/litmus.c b/litmus/litmus.c | ||
31339 | new file mode 100644 | ||
31340 | index 0000000..3cf7cb9 | ||
31341 | --- /dev/null | ||
31342 | +++ b/litmus/litmus.c | ||
31343 | @@ -0,0 +1,699 @@ | ||
31344 | +/* | ||
31345 | + * litmus.c -- Implementation of the LITMUS syscalls, | ||
31346 | + * the LITMUS initialization code, | ||
31347 | + * and the procfs interface. | ||
31348 | + */ | ||
31349 | +#include <asm/uaccess.h> | ||
31350 | +#include <linux/uaccess.h> | ||
31351 | +#include <linux/sysrq.h> | ||
31352 | + | ||
31353 | +#include <linux/module.h> | ||
31354 | +#include <linux/proc_fs.h> | ||
31355 | +#include <linux/slab.h> | ||
31356 | + | ||
31357 | +#include <litmus/litmus.h> | ||
31358 | +#include <linux/sched.h> | ||
31359 | +#include <litmus/sched_plugin.h> | ||
31360 | + | ||
31361 | +#include <litmus/bheap.h> | ||
31362 | + | ||
31363 | +#include <litmus/trace.h> | ||
31364 | + | ||
31365 | +#include <litmus/rt_domain.h> | ||
31366 | + | ||
31367 | +/* Number of RT tasks that exist in the system */ | ||
31368 | +atomic_t rt_task_count = ATOMIC_INIT(0); | ||
31369 | +static DEFINE_SPINLOCK(task_transition_lock); | ||
31370 | + | ||
31371 | +/* Give log messages sequential IDs. */ | ||
31372 | +atomic_t __log_seq_no = ATOMIC_INIT(0); | ||
31373 | + | ||
31374 | +/* current master CPU for handling timer IRQs */ | ||
31375 | +atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU); | ||
31376 | + | ||
31377 | +static struct kmem_cache * bheap_node_cache; | ||
31378 | +extern struct kmem_cache * release_heap_cache; | ||
31379 | + | ||
31380 | +struct bheap_node* bheap_node_alloc(int gfp_flags) | ||
31381 | +{ | ||
31382 | + return kmem_cache_alloc(bheap_node_cache, gfp_flags); | ||
31383 | +} | ||
31384 | + | ||
31385 | +void bheap_node_free(struct bheap_node* hn) | ||
31386 | +{ | ||
31387 | + kmem_cache_free(bheap_node_cache, hn); | ||
31388 | +} | ||
31389 | + | ||
31390 | +struct release_heap* release_heap_alloc(int gfp_flags); | ||
31391 | +void release_heap_free(struct release_heap* rh); | ||
31392 | + | ||
31393 | +/* | ||
31394 | + * sys_set_task_rt_param | ||
31395 | + * @pid: Pid of the task whose scheduling parameters must be changed | ||
31396 | + * @param: New real-time extension parameters such as the execution cost and | ||
31397 | + * period | ||
31398 | + * Syscall for manipulating a task's RT extension params. | ||
31399 | + * Returns EFAULT if copying param from user space fails. | ||
31400 | + * ESRCH if pid does not correspond to a valid task. | ||
31401 | + * EINVAL if param is NULL, pid is negative, | ||
31402 | + * or period or execution cost is <= 0. | ||
31403 | + * EBUSY if pid already refers to a real-time task. | ||
31404 | + * 0 on success. | ||
31405 | + * | ||
31406 | + * Only non-real-time tasks may be configured with this system call | ||
31407 | + * to avoid races with the scheduler. In practice, this means that a | ||
31408 | + * task's parameters must be set _before_ calling sys_prepare_rt_task() | ||
31409 | + * | ||
31410 | + * find_task_by_vpid() assumes that we are in the same namespace as the | ||
31411 | + * target. | ||
31412 | + */ | ||
31413 | +asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
31414 | +{ | ||
31415 | + struct rt_task tp; | ||
31416 | + struct task_struct *target; | ||
31417 | + int retval = -EINVAL; | ||
31418 | + | ||
31419 | + printk(KERN_INFO "Setting up rt task parameters for process %d.\n", pid); | ||
31420 | + | ||
31421 | + if (pid < 0 || param == 0) { | ||
31422 | + goto out; | ||
31423 | + } | ||
31424 | + if (copy_from_user(&tp, param, sizeof(tp))) { | ||
31425 | + retval = -EFAULT; | ||
31426 | + goto out; | ||
31427 | + } | ||
31428 | + | ||
31429 | + /* Task search and manipulation must be protected */ | ||
31430 | + read_lock_irq(&tasklist_lock); | ||
31431 | + if (!(target = find_task_by_vpid(pid))) { | ||
31432 | + retval = -ESRCH; | ||
31433 | + goto out_unlock; | ||
31434 | + } | ||
31435 | + | ||
31436 | + if (is_realtime(target)) { | ||
31437 | + /* The task is already a real-time task. | ||
31438 | + * We cannot allow parameter changes at this point. | ||
31439 | + */ | ||
31440 | + retval = -EBUSY; | ||
31441 | + goto out_unlock; | ||
31442 | + } | ||
31443 | + | ||
31444 | + if (tp.exec_cost <= 0) | ||
31445 | + goto out_unlock; | ||
31446 | + if (tp.period <= 0) | ||
31447 | + goto out_unlock; | ||
31448 | + if (!cpu_online(tp.cpu)) | ||
31449 | + goto out_unlock; | ||
31450 | + if (tp.period < tp.exec_cost) | ||
31451 | + { | ||
31452 | + printk(KERN_INFO "litmus: real-time task %d rejected " | ||
31453 | + "because wcet > period\n", pid); | ||
31454 | + goto out_unlock; | ||
31455 | + } | ||
31456 | + | ||
31457 | + target->rt_param.task_params = tp; | ||
31458 | + | ||
31459 | + retval = 0; | ||
31460 | + out_unlock: | ||
31461 | + read_unlock_irq(&tasklist_lock); | ||
31462 | + out: | ||
31463 | + return retval; | ||
31464 | +} | ||
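
The checks above spell out what a valid parameter block looks like: positive execution cost and period, an execution cost no larger than the period, an online CPU, and a target task that is not yet real-time. A user-space caller would therefore fill in struct rt_task roughly as below before switching the task to real-time mode. Times are lt_t values in nanoseconds; set_rt_task_param() is the conventional liblitmus wrapper name and is an assumption here.

    /* Sketch: parameters that satisfy the admission checks above (user space). */
    #include <unistd.h>
    #include <litmus.h>     /* assumed liblitmus header: struct rt_task, set_rt_task_param() */

    static int configure_self(void)
    {
            struct rt_task tp = {
                    .exec_cost = 10000000ULL,   /* 10 ms worst-case execution time, in ns */
                    .period    = 100000000ULL,  /* 100 ms period (>= exec_cost), in ns    */
                    .cpu       = 0,             /* must be online; used by partitioned/clustered plugins */
            };
            /* must happen while the task is still a best-effort (non-RT) task */
            return set_rt_task_param(getpid(), &tp);
    }
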
31465 | + | ||
31466 | +/* | ||
31467 | + * Getter of a task's RT params. | ||
31468 | + * returns EINVAL if param is NULL or pid is negative | ||
31469 | + * returns ESRCH if pid does not correspond to a valid task | ||
31470 | + * returns EFAULT if copying of parameters has failed. | ||
31471 | + * | ||
31472 | + * find_task_by_vpid() assumes that we are in the same namespace as the | ||
31473 | + * target. | ||
31474 | + */ | ||
31475 | +asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
31476 | +{ | ||
31477 | + int retval = -EINVAL; | ||
31478 | + struct task_struct *source; | ||
31479 | + struct rt_task lp; | ||
31480 | + if (param == 0 || pid < 0) | ||
31481 | + goto out; | ||
31482 | + read_lock(&tasklist_lock); | ||
31483 | + if (!(source = find_task_by_vpid(pid))) { | ||
31484 | + retval = -ESRCH; | ||
31485 | + goto out_unlock; | ||
31486 | + } | ||
31487 | + lp = source->rt_param.task_params; | ||
31488 | + read_unlock(&tasklist_lock); | ||
31489 | + /* Do copying outside the lock */ | ||
31490 | + retval = | ||
31491 | + copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0; | ||
31492 | + return retval; | ||
31493 | + out_unlock: | ||
31494 | + read_unlock(&tasklist_lock); | ||
31495 | + out: | ||
31496 | + return retval; | ||
31497 | + | ||
31498 | +} | ||
31499 | + | ||
31500 | +/* | ||
31501 | + * This is the crucial function for the periodic task implementation. | ||
31502 | + * It checks whether the task is periodic and whether this kind of sleep | ||
31503 | + * is permitted, and then calls the plugin-specific sleep, which puts the | ||
31504 | + * task into a wait queue. | ||
31505 | + * returns 0 on successful wakeup | ||
31506 | + * returns EPERM if current conditions do not permit such sleep | ||
31507 | + * returns EINVAL if current task is not able to go to sleep | ||
31508 | + */ | ||
31509 | +asmlinkage long sys_complete_job(void) | ||
31510 | +{ | ||
31511 | + int retval = -EPERM; | ||
31512 | + if (!is_realtime(current)) { | ||
31513 | + retval = -EINVAL; | ||
31514 | + goto out; | ||
31515 | + } | ||
31516 | + /* Task with negative or zero period cannot sleep */ | ||
31517 | + if (get_rt_period(current) <= 0) { | ||
31518 | + retval = -EINVAL; | ||
31519 | + goto out; | ||
31520 | + } | ||
31521 | + /* The plugin has to put the task into an | ||
31522 | + * appropriate queue and call schedule | ||
31523 | + */ | ||
31524 | + retval = litmus->complete_job(); | ||
31525 | + out: | ||
31526 | + return retval; | ||
31527 | +} | ||
31528 | + | ||
31529 | +/* This is an "improved" version of sys_complete_job that | ||
31530 | + * addresses the problem of unintentionally missing a job after | ||
31531 | + * an overrun. | ||
31532 | + * | ||
31533 | + * returns 0 on successful wakeup | ||
31534 | + * returns EPERM if current conditions do not permit such sleep | ||
31535 | + * returns EINVAL if current task is not able to go to sleep | ||
31536 | + */ | ||
31537 | +asmlinkage long sys_wait_for_job_release(unsigned int job) | ||
31538 | +{ | ||
31539 | + int retval = -EPERM; | ||
31540 | + if (!is_realtime(current)) { | ||
31541 | + retval = -EINVAL; | ||
31542 | + goto out; | ||
31543 | + } | ||
31544 | + | ||
31545 | + /* Task with negative or zero period cannot sleep */ | ||
31546 | + if (get_rt_period(current) <= 0) { | ||
31547 | + retval = -EINVAL; | ||
31548 | + goto out; | ||
31549 | + } | ||
31550 | + | ||
31551 | + retval = 0; | ||
31552 | + | ||
31553 | + /* first wait until we have "reached" the desired job | ||
31554 | + * | ||
31555 | + * This implementation has at least two problems: | ||
31556 | + * | ||
31557 | + * 1) It doesn't gracefully handle the wrap around of | ||
31558 | + * job_no. Since LITMUS is a prototype, this is not much | ||
31559 | + * of a problem right now. | ||
31560 | + * | ||
31561 | + * 2) It is theoretically racy if a job release occurs | ||
31562 | + * between checking job_no and calling sleep_next_period(). | ||
31563 | + * A proper solution would require adding another callback | ||
31564 | + * in the plugin structure and testing the condition with | ||
31565 | + * interrupts disabled. | ||
31566 | + * | ||
31567 | + * FIXME: At least problem 2 should be taken care of eventually. | ||
31568 | + */ | ||
31569 | + while (!retval && job > current->rt_param.job_params.job_no) | ||
31570 | + /* If the last job overran then job <= job_no and we | ||
31571 | + * don't send the task to sleep. | ||
31572 | + */ | ||
31573 | + retval = litmus->complete_job(); | ||
31574 | + out: | ||
31575 | + return retval; | ||
31576 | +} | ||
31577 | + | ||
31578 | +/* This is a helper syscall to query the current job sequence number. | ||
31579 | + * | ||
31580 | + * returns 0 on successful query | ||
31581 | + * returns EPERM if task is not a real-time task. | ||
31582 | + * returns EFAULT if &job is not a valid pointer. | ||
31583 | + */ | ||
31584 | +asmlinkage long sys_query_job_no(unsigned int __user *job) | ||
31585 | +{ | ||
31586 | + int retval = -EPERM; | ||
31587 | + if (is_realtime(current)) | ||
31588 | + retval = put_user(current->rt_param.job_params.job_no, job); | ||
31589 | + | ||
31590 | + return retval; | ||
31591 | +} | ||
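
Taken together, sys_query_job_no() and sys_wait_for_job_release() let a task resynchronize with its job sequence after an overrun instead of sleeping blindly: read the current job number, then wait for a specific future job, which returns immediately if that job has already been released. A short sketch follows, assuming the liblitmus wrappers are called get_job_no() and wait_for_job_release().

    /* Sketch: drift-free waiting using job numbers (user space, liblitmus wrappers assumed). */
    #include <litmus.h>   /* assumed header: get_job_no(), wait_for_job_release() */

    static void wait_for_next_job(void)
    {
            unsigned int job;

            if (get_job_no(&job) == 0)             /* -> sys_query_job_no()          */
                    wait_for_job_release(job + 1); /* -> sys_wait_for_job_release(); */
                                                   /* returns at once if job + 1 has */
                                                   /* already been released          */
    }
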
31592 | + | ||
31593 | +/* sys_null_call() is only used for determining raw system call | ||
31594 | + * overheads (kernel entry, kernel exit). It has no useful side effects. | ||
31595 | + * If ts is non-NULL, then the current Feather-Trace time is recorded. | ||
31596 | + */ | ||
31597 | +asmlinkage long sys_null_call(cycles_t __user *ts) | ||
31598 | +{ | ||
31599 | + long ret = 0; | ||
31600 | + cycles_t now; | ||
31601 | + | ||
31602 | + if (ts) { | ||
31603 | + now = get_cycles(); | ||
31604 | + ret = put_user(now, ts); | ||
31605 | + } | ||
31606 | + | ||
31607 | + return ret; | ||
31608 | +} | ||
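
Because sys_null_call() does nothing except optionally record the kernel's cycle counter into *ts, user space can bracket it with its own timestamp reads and subtract to estimate raw system-call overhead. A rough sketch, assuming a liblitmus wrapper named null_call() and a hypothetical user-space cycle-counter read:

    /* Sketch: estimating raw system-call overhead with null_call() (user space). */
    #include <litmus.h>   /* assumed header: cycles_t, null_call() */

    static cycles_t entry_overhead(void)
    {
            cycles_t before, in_kernel;

            before = read_tsc_userspace();  /* hypothetical user-space cycle-counter read     */
            null_call(&in_kernel);          /* -> sys_null_call(); kernel stores get_cycles() */
            return in_kernel - before;      /* approximate kernel-entry cost                  */
    }
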
31609 | + | ||
31610 | +/* p is a real-time task. Re-init its state as a best-effort task. */ | ||
31611 | +static void reinit_litmus_state(struct task_struct* p, int restore) | ||
31612 | +{ | ||
31613 | + struct rt_task user_config = {}; | ||
31614 | + void* ctrl_page = NULL; | ||
31615 | + | ||
31616 | + if (restore) { | ||
31617 | + /* Save the user-space-provided configuration data | ||
31618 | + * and the allocated control page. */ | ||
31619 | + user_config = p->rt_param.task_params; | ||
31620 | + ctrl_page = p->rt_param.ctrl_page; | ||
31621 | + } | ||
31622 | + | ||
31623 | + /* We probably should not be inheriting any task's priority | ||
31624 | + * at this point in time. | ||
31625 | + */ | ||
31626 | + WARN_ON(p->rt_param.inh_task); | ||
31627 | + | ||
31628 | + /* We need to restore the priority of the task. */ | ||
31629 | +// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); XXX why is this commented? | ||
31630 | + | ||
31631 | + /* Cleanup everything else. */ | ||
31632 | + memset(&p->rt_param, 0, sizeof(p->rt_param)); | ||
31633 | + | ||
31634 | + /* Restore preserved fields. */ | ||
31635 | + if (restore) { | ||
31636 | + p->rt_param.task_params = user_config; | ||
31637 | + p->rt_param.ctrl_page = ctrl_page; | ||
31638 | + } | ||
31639 | +} | ||
31640 | + | ||
31641 | +long litmus_admit_task(struct task_struct* tsk) | ||
31642 | +{ | ||
31643 | + long retval = 0; | ||
31644 | + unsigned long flags; | ||
31645 | + | ||
31646 | + BUG_ON(is_realtime(tsk)); | ||
31647 | + | ||
31648 | + if (get_rt_period(tsk) == 0 || | ||
31649 | + get_exec_cost(tsk) > get_rt_period(tsk)) { | ||
31650 | + TRACE_TASK(tsk, "litmus admit: invalid task parameters " | ||
31651 | + "(%lu, %lu)\n", | ||
31652 | + get_exec_cost(tsk), get_rt_period(tsk)); | ||
31653 | + retval = -EINVAL; | ||
31654 | + goto out; | ||
31655 | + } | ||
31656 | + | ||
31657 | + if (!cpu_online(get_partition(tsk))) { | ||
31658 | + TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n", | ||
31659 | + get_partition(tsk)); | ||
31660 | + retval = -EINVAL; | ||
31661 | + goto out; | ||
31662 | + } | ||
31663 | + | ||
31664 | + INIT_LIST_HEAD(&tsk_rt(tsk)->list); | ||
31665 | + | ||
31666 | + /* avoid scheduler plugin changing underneath us */ | ||
31667 | + spin_lock_irqsave(&task_transition_lock, flags); | ||
31668 | + | ||
31669 | + /* allocate heap node for this task */ | ||
31670 | + tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); | ||
31671 | + tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC); | ||
31672 | + | ||
31673 | + if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) { | ||
31674 | + printk(KERN_WARNING "litmus: no more heap node memory!?\n"); | ||
31675 | + | ||
31676 | + bheap_node_free(tsk_rt(tsk)->heap_node); | ||
31677 | + release_heap_free(tsk_rt(tsk)->rel_heap); | ||
31678 | + | ||
31679 | + retval = -ENOMEM; | ||
31680 | + goto out_unlock; | ||
31681 | + } else { | ||
31682 | + bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | ||
31683 | + } | ||
31684 | + | ||
31685 | + retval = litmus->admit_task(tsk); | ||
31686 | + | ||
31687 | + if (!retval) { | ||
31688 | + sched_trace_task_name(tsk); | ||
31689 | + sched_trace_task_param(tsk); | ||
31690 | + atomic_inc(&rt_task_count); | ||
31691 | + } | ||
31692 | + | ||
31693 | +out_unlock: | ||
31694 | + spin_unlock_irqrestore(&task_transition_lock, flags); | ||
31695 | +out: | ||
31696 | + return retval; | ||
31697 | +} | ||
31698 | + | ||
31699 | +void litmus_exit_task(struct task_struct* tsk) | ||
31700 | +{ | ||
31701 | + if (is_realtime(tsk)) { | ||
31702 | + sched_trace_task_completion(tsk, 1); | ||
31703 | + | ||
31704 | + litmus->task_exit(tsk); | ||
31705 | + | ||
31706 | + BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); | ||
31707 | + bheap_node_free(tsk_rt(tsk)->heap_node); | ||
31708 | + release_heap_free(tsk_rt(tsk)->rel_heap); | ||
31709 | + | ||
31710 | + atomic_dec(&rt_task_count); | ||
31711 | + reinit_litmus_state(tsk, 1); | ||
31712 | + } | ||
31713 | +} | ||
31714 | + | ||
31715 | +/* Switching a plugin in use is tricky. | ||
31716 | + * We must watch out that no real-time tasks exist | ||
31717 | + * (and that none are created in parallel) and that the plugin is not | ||
31718 | + * currently in use on any processor (in theory). | ||
31719 | + * | ||
31720 | + * For now, we don't enforce the second part since it is unlikely to cause | ||
31721 | + * any trouble by itself as long as we don't unload modules. | ||
31722 | + */ | ||
31723 | +int switch_sched_plugin(struct sched_plugin* plugin) | ||
31724 | +{ | ||
31725 | + unsigned long flags; | ||
31726 | + int ret = 0; | ||
31727 | + | ||
31728 | + BUG_ON(!plugin); | ||
31729 | + | ||
31730 | + /* stop task transitions */ | ||
31731 | + spin_lock_irqsave(&task_transition_lock, flags); | ||
31732 | + | ||
31733 | + /* don't switch if there are active real-time tasks */ | ||
31734 | + if (atomic_read(&rt_task_count) == 0) { | ||
31735 | + ret = litmus->deactivate_plugin(); | ||
31736 | + if (0 != ret) | ||
31737 | + goto out; | ||
31738 | + ret = plugin->activate_plugin(); | ||
31739 | + if (0 != ret) { | ||
31740 | + printk(KERN_INFO "Can't activate %s (%d).\n", | ||
31741 | + plugin->plugin_name, ret); | ||
31742 | + plugin = &linux_sched_plugin; | ||
31743 | + } | ||
31744 | + printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name); | ||
31745 | + litmus = plugin; | ||
31746 | + } else | ||
31747 | + ret = -EBUSY; | ||
31748 | +out: | ||
31749 | + spin_unlock_irqrestore(&task_transition_lock, flags); | ||
31750 | + return ret; | ||
31751 | +} | ||
31752 | + | ||
31753 | +/* Called upon fork. | ||
31754 | + * p is the newly forked task. | ||
31755 | + */ | ||
31756 | +void litmus_fork(struct task_struct* p) | ||
31757 | +{ | ||
31758 | + if (is_realtime(p)) | ||
31759 | + /* clean out any litmus related state, don't preserve anything */ | ||
31760 | + reinit_litmus_state(p, 0); | ||
31761 | + else | ||
31762 | + /* non-rt tasks might have ctrl_page set */ | ||
31763 | + tsk_rt(p)->ctrl_page = NULL; | ||
31764 | + | ||
31765 | + /* od tables are never inherited across a fork */ | ||
31766 | + p->od_table = NULL; | ||
31767 | +} | ||
31768 | + | ||
31769 | +/* Called upon execve(). | ||
31770 | + * current is doing the exec. | ||
31771 | + * Don't let address space specific stuff leak. | ||
31772 | + */ | ||
31773 | +void litmus_exec(void) | ||
31774 | +{ | ||
31775 | + struct task_struct* p = current; | ||
31776 | + | ||
31777 | + if (is_realtime(p)) { | ||
31778 | + WARN_ON(p->rt_param.inh_task); | ||
31779 | + if (tsk_rt(p)->ctrl_page) { | ||
31780 | + free_page((unsigned long) tsk_rt(p)->ctrl_page); | ||
31781 | + tsk_rt(p)->ctrl_page = NULL; | ||
31782 | + } | ||
31783 | + } | ||
31784 | +} | ||
31785 | + | ||
31786 | +void exit_litmus(struct task_struct *dead_tsk) | ||
31787 | +{ | ||
31788 | + /* We also allow non-RT tasks to | ||
31789 | + * allocate control pages so that | ||
31790 | + * measurements with non-RT tasks are possible. | ||
31791 | + * Hence, check in any case whether the | ||
31792 | + * control page needs to be freed. | ||
31793 | + */ | ||
31794 | + if (tsk_rt(dead_tsk)->ctrl_page) { | ||
31795 | + TRACE_TASK(dead_tsk, | ||
31796 | + "freeing ctrl_page %p\n", | ||
31797 | + tsk_rt(dead_tsk)->ctrl_page); | ||
31798 | + free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); | ||
31799 | + } | ||
31800 | + | ||
31801 | + /* main cleanup only for RT tasks */ | ||
31802 | + if (is_realtime(dead_tsk)) | ||
31803 | + litmus_exit_task(dead_tsk); | ||
31804 | +} | ||
31805 | + | ||
31806 | + | ||
31807 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
31808 | +int sys_kill(int pid, int sig); | ||
31809 | + | ||
31810 | +static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty) | ||
31811 | +{ | ||
31812 | + struct task_struct *t; | ||
31813 | + read_lock(&tasklist_lock); | ||
31814 | + for_each_process(t) { | ||
31815 | + if (is_realtime(t)) { | ||
31816 | + sys_kill(t->pid, SIGKILL); | ||
31817 | + } | ||
31818 | + } | ||
31819 | + read_unlock(&tasklist_lock); | ||
31820 | +} | ||
31821 | + | ||
31822 | +static struct sysrq_key_op sysrq_kill_rt_tasks_op = { | ||
31823 | + .handler = sysrq_handle_kill_rt_tasks, | ||
31824 | + .help_msg = "quit-rt-tasks(X)", | ||
31825 | + .action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks", | ||
31826 | +}; | ||
31827 | +#endif | ||
31828 | + | ||
31829 | +/* in litmus/sync.c */ | ||
31830 | +int count_tasks_waiting_for_release(void); | ||
31831 | + | ||
31832 | +static int proc_read_stats(char *page, char **start, | ||
31833 | + off_t off, int count, | ||
31834 | + int *eof, void *data) | ||
31835 | +{ | ||
31836 | + int len; | ||
31837 | + | ||
31838 | + len = snprintf(page, PAGE_SIZE, | ||
31839 | + "real-time tasks = %d\n" | ||
31840 | + "ready for release = %d\n", | ||
31841 | + atomic_read(&rt_task_count), | ||
31842 | + count_tasks_waiting_for_release()); | ||
31843 | + return len; | ||
31844 | +} | ||
31845 | + | ||
31846 | +static int proc_read_plugins(char *page, char **start, | ||
31847 | + off_t off, int count, | ||
31848 | + int *eof, void *data) | ||
31849 | +{ | ||
31850 | + int len; | ||
31851 | + | ||
31852 | + len = print_sched_plugins(page, PAGE_SIZE); | ||
31853 | + return len; | ||
31854 | +} | ||
31855 | + | ||
31856 | +static int proc_read_curr(char *page, char **start, | ||
31857 | + off_t off, int count, | ||
31858 | + int *eof, void *data) | ||
31859 | +{ | ||
31860 | + int len; | ||
31861 | + | ||
31862 | + len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name); | ||
31863 | + return len; | ||
31864 | +} | ||
31865 | + | ||
31866 | +static int proc_write_curr(struct file *file, | ||
31867 | + const char *buffer, | ||
31868 | + unsigned long count, | ||
31869 | + void *data) | ||
31870 | +{ | ||
31871 | + int len, ret; | ||
31872 | + char name[65]; | ||
31873 | + struct sched_plugin* found; | ||
31874 | + | ||
31875 | + if(count > 64) | ||
31876 | + len = 64; | ||
31877 | + else | ||
31878 | + len = count; | ||
31879 | + | ||
31880 | + if(copy_from_user(name, buffer, len)) | ||
31881 | + return -EFAULT; | ||
31882 | + | ||
31883 | + name[len] = '\0'; | ||
31884 | + /* chomp name */ | ||
31885 | + if (len > 1 && name[len - 1] == '\n') | ||
31886 | + name[len - 1] = '\0'; | ||
31887 | + | ||
31888 | + found = find_sched_plugin(name); | ||
31889 | + | ||
31890 | + if (found) { | ||
31891 | + ret = switch_sched_plugin(found); | ||
31892 | + if (ret != 0) | ||
31893 | + printk(KERN_INFO "Could not switch plugin: %d\n", ret); | ||
31894 | + } else | ||
31895 | + printk(KERN_INFO "Plugin '%s' is unknown.\n", name); | ||
31896 | + | ||
31897 | + return len; | ||
31898 | +} | ||
31899 | + | ||
31900 | + | ||
31901 | +static int proc_read_release_master(char *page, char **start, | ||
31902 | + off_t off, int count, | ||
31903 | + int *eof, void *data) | ||
31904 | +{ | ||
31905 | + int len, master; | ||
31906 | + master = atomic_read(&release_master_cpu); | ||
31907 | + if (master == NO_CPU) | ||
31908 | + len = snprintf(page, PAGE_SIZE, "NO_CPU\n"); | ||
31909 | + else | ||
31910 | + len = snprintf(page, PAGE_SIZE, "%d\n", master); | ||
31911 | + return len; | ||
31912 | +} | ||
31913 | + | ||
31914 | +static int proc_write_release_master(struct file *file, | ||
31915 | + const char *buffer, | ||
31916 | + unsigned long count, | ||
31917 | + void *data) | ||
31918 | +{ | ||
31919 | + int cpu, err, online = 0; | ||
31920 | + char msg[64]; | ||
31921 | + | ||
31922 | + if (count > 63) | ||
31923 | + return -EINVAL; | ||
31924 | + | ||
31925 | + if (copy_from_user(msg, buffer, count)) | ||
31926 | + return -EFAULT; | ||
31927 | + | ||
31928 | + /* terminate */ | ||
31929 | + msg[count] = '\0'; | ||
31930 | + /* chomp */ | ||
31931 | + if (count > 1 && msg[count - 1] == '\n') | ||
31932 | + msg[count - 1] = '\0'; | ||
31933 | + | ||
31934 | + if (strcmp(msg, "NO_CPU") == 0) { | ||
31935 | + atomic_set(&release_master_cpu, NO_CPU); | ||
31936 | + return count; | ||
31937 | + } else { | ||
31938 | + err = sscanf(msg, "%d", &cpu); | ||
31939 | + if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) { | ||
31940 | + atomic_set(&release_master_cpu, cpu); | ||
31941 | + return count; | ||
31942 | + } else { | ||
31943 | + TRACE("invalid release master: '%s' " | ||
31944 | + "(err:%d cpu:%d online:%d)\n", | ||
31945 | + msg, err, cpu, online); | ||
31946 | + return -EINVAL; | ||
31947 | + } | ||
31948 | + } | ||
31949 | +} | ||
31950 | + | ||
31951 | +static struct proc_dir_entry *litmus_dir = NULL, | ||
31952 | + *curr_file = NULL, | ||
31953 | + *stat_file = NULL, | ||
31954 | + *plugs_file = NULL, | ||
31955 | + *release_master_file = NULL; | ||
31956 | + | ||
31957 | +static int __init init_litmus_proc(void) | ||
31958 | +{ | ||
31959 | + litmus_dir = proc_mkdir("litmus", NULL); | ||
31960 | + if (!litmus_dir) { | ||
31961 | + printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n"); | ||
31962 | + return -ENOMEM; | ||
31963 | + } | ||
31964 | + | ||
31965 | + curr_file = create_proc_entry("active_plugin", | ||
31966 | + 0644, litmus_dir); | ||
31967 | + if (!curr_file) { | ||
31968 | + printk(KERN_ERR "Could not allocate active_plugin " | ||
31969 | + "procfs entry.\n"); | ||
31970 | + return -ENOMEM; | ||
31971 | + } | ||
31972 | + curr_file->read_proc = proc_read_curr; | ||
31973 | + curr_file->write_proc = proc_write_curr; | ||
31974 | + | ||
31975 | + release_master_file = create_proc_entry("release_master", | ||
31976 | + 0644, litmus_dir); | ||
31977 | + if (!release_master_file) { | ||
31978 | + printk(KERN_ERR "Could not allocate release_master " | ||
31979 | + "procfs entry.\n"); | ||
31980 | + return -ENOMEM; | ||
31981 | + } | ||
31982 | + release_master_file->read_proc = proc_read_release_master; | ||
31983 | + release_master_file->write_proc = proc_write_release_master; | ||
31984 | + | ||
31985 | + stat_file = create_proc_read_entry("stats", 0444, litmus_dir, | ||
31986 | + proc_read_stats, NULL); | ||
31987 | + | ||
31988 | + plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir, | ||
31989 | + proc_read_plugins, NULL); | ||
31990 | + | ||
31991 | + return 0; | ||
31992 | +} | ||
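
The resulting layout is /proc/litmus/{active_plugin, release_master, stats, plugins}. Since proc_write_curr() and proc_write_release_master() both strip a trailing newline, the files can be driven with ordinary writes; the sketch below does so in plain C. The plugin name must match an entry listed by /proc/litmus/plugins (for example the plugin registered as linux_sched_plugin in _init_litmus() below, conventionally listed as "Linux"); treat the exact names as something to look up rather than as given here.

    /* Sketch: selecting the active plugin and the release-master CPU (user space). */
    #include <stdio.h>

    static int write_proc(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);     /* trailing '\n' is chomped by the kernel */
            return fclose(f);
    }

    int main(void)
    {
            write_proc("/proc/litmus/active_plugin", "Linux");   /* or a name from /proc/litmus/plugins */
            write_proc("/proc/litmus/release_master", "NO_CPU"); /* or a CPU number such as "0"         */
            return 0;
    }
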
31993 | + | ||
31994 | +static void exit_litmus_proc(void) | ||
31995 | +{ | ||
31996 | + if (plugs_file) | ||
31997 | + remove_proc_entry("plugins", litmus_dir); | ||
31998 | + if (stat_file) | ||
31999 | + remove_proc_entry("stats", litmus_dir); | ||
32000 | + if (curr_file) | ||
32001 | + remove_proc_entry("active_plugin", litmus_dir); | ||
32002 | + if (litmus_dir) | ||
32003 | + remove_proc_entry("litmus", NULL); | ||
32004 | +} | ||
32005 | + | ||
32006 | +extern struct sched_plugin linux_sched_plugin; | ||
32007 | + | ||
32008 | +static int __init _init_litmus(void) | ||
32009 | +{ | ||
32010 | + /* Common initializers. | ||
32011 | + * The mode change lock is used to enforce a single mode change | ||
32012 | + * operation. | ||
32013 | + */ | ||
32014 | + printk("Starting LITMUS^RT kernel\n"); | ||
32015 | + | ||
32016 | + register_sched_plugin(&linux_sched_plugin); | ||
32017 | + | ||
32018 | + bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); | ||
32019 | + release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); | ||
32020 | + | ||
32021 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
32022 | + /* offer some debugging help */ | ||
32023 | + if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op)) | ||
32024 | + printk("Registered kill rt tasks magic sysrq.\n"); | ||
32025 | + else | ||
32026 | + printk("Could not register kill rt tasks magic sysrq.\n"); | ||
32027 | +#endif | ||
32028 | + | ||
32029 | + init_litmus_proc(); | ||
32030 | + | ||
32031 | + return 0; | ||
32032 | +} | ||
32033 | + | ||
32034 | +static void _exit_litmus(void) | ||
32035 | +{ | ||
32036 | + exit_litmus_proc(); | ||
32037 | + kmem_cache_destroy(bheap_node_cache); | ||
32038 | + kmem_cache_destroy(release_heap_cache); | ||
32039 | +} | ||
32040 | + | ||
32041 | +module_init(_init_litmus); | ||
32042 | +module_exit(_exit_litmus); | ||
32043 | diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c | ||
32044 | new file mode 100644 | ||
32045 | index 0000000..0ed6d5c | ||
32046 | --- /dev/null | ||
32047 | +++ b/litmus/rt_domain.c | ||
32048 | @@ -0,0 +1,306 @@ | ||
32049 | +/* | ||
32050 | + * litmus/rt_domain.c | ||
32051 | + * | ||
32052 | + * LITMUS real-time infrastructure. This file contains the | ||
32053 | + * functions that manipulate RT domains. RT domains are an abstraction | ||
32054 | + * of a ready queue and a release queue. | ||
32055 | + */ | ||
32056 | + | ||
32057 | +#include <linux/percpu.h> | ||
32058 | +#include <linux/sched.h> | ||
32059 | +#include <linux/list.h> | ||
32060 | +#include <linux/slab.h> | ||
32061 | + | ||
32062 | +#include <litmus/litmus.h> | ||
32063 | +#include <litmus/sched_plugin.h> | ||
32064 | +#include <litmus/sched_trace.h> | ||
32065 | + | ||
32066 | +#include <litmus/rt_domain.h> | ||
32067 | + | ||
32068 | +#include <litmus/trace.h> | ||
32069 | + | ||
32070 | +#include <litmus/bheap.h> | ||
32071 | + | ||
32072 | +static int dummy_resched(rt_domain_t *rt) | ||
32073 | +{ | ||
32074 | + return 0; | ||
32075 | +} | ||
32076 | + | ||
32077 | +static int dummy_order(struct bheap_node* a, struct bheap_node* b) | ||
32078 | +{ | ||
32079 | + return 0; | ||
32080 | +} | ||
32081 | + | ||
32082 | +/* default implementation: use default lock */ | ||
32083 | +static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
32084 | +{ | ||
32085 | + merge_ready(rt, tasks); | ||
32086 | +} | ||
32087 | + | ||
32088 | +static unsigned int time2slot(lt_t time) | ||
32089 | +{ | ||
32090 | + return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; | ||
32091 | +} | ||
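
time2slot() hashes an absolute release time into one of RELEASE_QUEUE_SLOTS buckets so that get_release_heap() below only has to scan releases whose quanta collide in the same slot. A worked example with illustrative values (the real quantum length and slot count come from the kernel configuration and rt_domain.h):

    /* Assume a 1 ms quantum and, say, 127 slots:
     *   release at 130 ms -> quantum 130 -> slot 130 % 127 = 3
     *   release at 131 ms -> quantum 131 -> slot 131 % 127 = 4
     * Releases that hash to the same slot share a list; they share a heap
     * (and hence a single hrtimer) only if their release times are exactly
     * equal, which is the comparison get_release_heap() performs. */
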
32092 | + | ||
32093 | +static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | ||
32094 | +{ | ||
32095 | + unsigned long flags; | ||
32096 | + struct release_heap* rh; | ||
32097 | + | ||
32098 | + TRACE("on_release_timer(0x%p) starts.\n", timer); | ||
32099 | + | ||
32100 | + TS_RELEASE_START; | ||
32101 | + | ||
32102 | + rh = container_of(timer, struct release_heap, timer); | ||
32103 | + | ||
32104 | + spin_lock_irqsave(&rh->dom->release_lock, flags); | ||
32105 | + TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); | ||
32106 | + /* remove from release queue */ | ||
32107 | + list_del(&rh->list); | ||
32108 | + spin_unlock_irqrestore(&rh->dom->release_lock, flags); | ||
32109 | + TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); | ||
32110 | + | ||
32111 | + /* call release callback */ | ||
32112 | + rh->dom->release_jobs(rh->dom, &rh->heap); | ||
32113 | + /* WARNING: rh can be referenced from other CPUs from now on. */ | ||
32114 | + | ||
32115 | + TS_RELEASE_END; | ||
32116 | + | ||
32117 | + TRACE("on_release_timer(0x%p) ends.\n", timer); | ||
32118 | + | ||
32119 | + return HRTIMER_NORESTART; | ||
32120 | +} | ||
32121 | + | ||
32122 | +/* allocated in litmus.c */ | ||
32123 | +struct kmem_cache * release_heap_cache; | ||
32124 | + | ||
32125 | +struct release_heap* release_heap_alloc(int gfp_flags) | ||
32126 | +{ | ||
32127 | + struct release_heap* rh; | ||
32128 | + rh = kmem_cache_alloc(release_heap_cache, gfp_flags); | ||
32129 | + if (rh) { | ||
32130 | + /* initialize timer */ | ||
32131 | + hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
32132 | + rh->timer.function = on_release_timer; | ||
32133 | + } | ||
32134 | + return rh; | ||
32135 | +} | ||
32136 | + | ||
32137 | +void release_heap_free(struct release_heap* rh) | ||
32138 | +{ | ||
32139 | + /* make sure timer is no longer in use */ | ||
32140 | + hrtimer_cancel(&rh->timer); | ||
32141 | + kmem_cache_free(release_heap_cache, rh); | ||
32142 | +} | ||
32143 | + | ||
32144 | +/* Caller must hold release lock. | ||
32145 | + * Will return the heap for the given time. If no such heap exists prior to | ||
32146 | + * the invocation it will be created. | ||
32147 | + */ | ||
32148 | +static struct release_heap* get_release_heap(rt_domain_t *rt, | ||
32149 | + struct task_struct* t, | ||
32150 | + int use_task_heap) | ||
32151 | +{ | ||
32152 | + struct list_head* pos; | ||
32153 | + struct release_heap* heap = NULL; | ||
32154 | + struct release_heap* rh; | ||
32155 | + lt_t release_time = get_release(t); | ||
32156 | + unsigned int slot = time2slot(release_time); | ||
32157 | + | ||
32158 | + /* initialize pos for the case that the list is empty */ | ||
32159 | + pos = rt->release_queue.slot[slot].next; | ||
32160 | + list_for_each(pos, &rt->release_queue.slot[slot]) { | ||
32161 | + rh = list_entry(pos, struct release_heap, list); | ||
32162 | + if (release_time == rh->release_time) { | ||
32163 | + /* perfect match -- this happens on hyperperiod | ||
32164 | + * boundaries | ||
32165 | + */ | ||
32166 | + heap = rh; | ||
32167 | + break; | ||
32168 | + } else if (lt_before(release_time, rh->release_time)) { | ||
32169 | + /* we need to insert a new node since rh is | ||
32170 | + * already in the future | ||
32171 | + */ | ||
32172 | + break; | ||
32173 | + } | ||
32174 | + } | ||
32175 | + if (!heap && use_task_heap) { | ||
32176 | + /* use pre-allocated release heap */ | ||
32177 | + rh = tsk_rt(t)->rel_heap; | ||
32178 | + | ||
32179 | + rh->dom = rt; | ||
32180 | + rh->release_time = release_time; | ||
32181 | + | ||
32182 | + /* add to release queue */ | ||
32183 | + list_add(&rh->list, pos->prev); | ||
32184 | + heap = rh; | ||
32185 | + } | ||
32186 | + return heap; | ||
32187 | +} | ||
32188 | + | ||
32189 | +static void reinit_release_heap(struct task_struct* t) | ||
32190 | +{ | ||
32191 | + struct release_heap* rh; | ||
32192 | + | ||
32193 | + /* use pre-allocated release heap */ | ||
32194 | + rh = tsk_rt(t)->rel_heap; | ||
32195 | + | ||
32196 | + /* Make sure it is safe to use. The timer callback could still | ||
32197 | + * be executing on another CPU; hrtimer_cancel() will wait | ||
32198 | + * until the timer callback has completed. However, under no | ||
32199 | + * circumstances should the timer be active (= yet to be | ||
32200 | + * triggered). | ||
32201 | + * | ||
32202 | + * WARNING: If the CPU still holds the release_lock at this point, | ||
32203 | + * deadlock may occur! | ||
32204 | + */ | ||
32205 | + BUG_ON(hrtimer_cancel(&rh->timer)); | ||
32206 | + | ||
32207 | + /* initialize */ | ||
32208 | + bheap_init(&rh->heap); | ||
32209 | + atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | ||
32210 | +} | ||
32211 | +/* arm_release_timer() - start local release timer or trigger | ||
32212 | + * remote timer (pull timer) | ||
32213 | + * | ||
32214 | + * Called by add_release() with: | ||
32215 | + * - tobe_lock taken | ||
32216 | + * - IRQ disabled | ||
32217 | + */ | ||
32218 | +static void arm_release_timer(rt_domain_t *_rt) | ||
32219 | +{ | ||
32220 | + rt_domain_t *rt = _rt; | ||
32221 | + struct list_head list; | ||
32222 | + struct list_head *pos, *safe; | ||
32223 | + struct task_struct* t; | ||
32224 | + struct release_heap* rh; | ||
32225 | + | ||
32226 | + TRACE("arm_release_timer() at %llu\n", litmus_clock()); | ||
32227 | + list_replace_init(&rt->tobe_released, &list); | ||
32228 | + | ||
32229 | + list_for_each_safe(pos, safe, &list) { | ||
32230 | + /* pick task off the work list */ | ||
32231 | + t = list_entry(pos, struct task_struct, rt_param.list); | ||
32232 | + sched_trace_task_release(t); | ||
32233 | + list_del(pos); | ||
32234 | + | ||
32235 | + /* put into release heap while holding release_lock */ | ||
32236 | + spin_lock(&rt->release_lock); | ||
32237 | + TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); | ||
32238 | + | ||
32239 | + rh = get_release_heap(rt, t, 0); | ||
32240 | + if (!rh) { | ||
32241 | + /* need to use our own, but drop lock first */ | ||
32242 | + spin_unlock(&rt->release_lock); | ||
32243 | + TRACE_TASK(t, "Dropped release_lock 0x%p\n", | ||
32244 | + &rt->release_lock); | ||
32245 | + | ||
32246 | + reinit_release_heap(t); | ||
32247 | + TRACE_TASK(t, "release_heap ready\n"); | ||
32248 | + | ||
32249 | + spin_lock(&rt->release_lock); | ||
32250 | + TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", | ||
32251 | + &rt->release_lock); | ||
32252 | + | ||
32253 | + rh = get_release_heap(rt, t, 1); | ||
32254 | + } | ||
32255 | + bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); | ||
32256 | + TRACE_TASK(t, "arm_release_timer(): added to release heap\n"); | ||
32257 | + | ||
32258 | + spin_unlock(&rt->release_lock); | ||
32259 | + TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); | ||
32260 | + | ||
32261 | + /* To avoid arming the timer multiple times, we only let the | ||
32262 | + * owner do the arming (which is the "first" task to reference | ||
32263 | + * this release_heap anyway). | ||
32264 | + */ | ||
32265 | + if (rh == tsk_rt(t)->rel_heap) { | ||
32266 | + TRACE_TASK(t, "arming timer 0x%p\n", &rh->timer); | ||
32267 | + /* we cannot arm the timer using hrtimer_start() | ||
32268 | + * as it may deadlock on rq->lock | ||
32269 | + * | ||
32270 | + * PINNED mode is ok on both local and remote CPU | ||
32271 | + */ | ||
32272 | + if (rt->release_master == NO_CPU) | ||
32273 | + __hrtimer_start_range_ns(&rh->timer, | ||
32274 | + ns_to_ktime(rh->release_time), | ||
32275 | + 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
32276 | + else | ||
32277 | + hrtimer_start_on(rt->release_master, | ||
32278 | + &rh->info, &rh->timer, | ||
32279 | + ns_to_ktime(rh->release_time), | ||
32280 | + HRTIMER_MODE_ABS_PINNED); | ||
32281 | + } else | ||
32282 | + TRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); | ||
32283 | + } | ||
32284 | +} | ||
32285 | + | ||
32286 | +void rt_domain_init(rt_domain_t *rt, | ||
32287 | + bheap_prio_t order, | ||
32288 | + check_resched_needed_t check, | ||
32289 | + release_jobs_t release | ||
32290 | + ) | ||
32291 | +{ | ||
32292 | + int i; | ||
32293 | + | ||
32294 | + BUG_ON(!rt); | ||
32295 | + if (!check) | ||
32296 | + check = dummy_resched; | ||
32297 | + if (!release) | ||
32298 | + release = default_release_jobs; | ||
32299 | + if (!order) | ||
32300 | + order = dummy_order; | ||
32301 | + | ||
32302 | + rt->release_master = NO_CPU; | ||
32303 | + | ||
32304 | + bheap_init(&rt->ready_queue); | ||
32305 | + INIT_LIST_HEAD(&rt->tobe_released); | ||
32306 | + for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) | ||
32307 | + INIT_LIST_HEAD(&rt->release_queue.slot[i]); | ||
32308 | + | ||
32309 | + spin_lock_init(&rt->ready_lock); | ||
32310 | + spin_lock_init(&rt->release_lock); | ||
32311 | + spin_lock_init(&rt->tobe_lock); | ||
32312 | + | ||
32313 | + rt->check_resched = check; | ||
32314 | + rt->release_jobs = release; | ||
32315 | + rt->order = order; | ||
32316 | +} | ||
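
A scheduler plugin wires its own priority order and release hook into rt_domain_init(); passing NULL for any callback falls back to the dummy/default handlers defined at the top of this file. A minimal sketch follows; the callback bodies are placeholders (C-EDF's real versions, cedf_release_jobs() and its preemption check, appear in sched_cedf.c below).

    /* Sketch: how a plugin might initialize its rt_domain. */
    #include <litmus/rt_domain.h>
    #include <litmus/bheap.h>

    static rt_domain_t my_domain;

    static int my_check_resched(rt_domain_t *rt)
    {
            /* placeholder: trigger a reschedule if newly ready work preempts someone */
            return 0;
    }

    static void my_release_jobs(rt_domain_t *rt, struct bheap *tasks)
    {
            /* placeholder: take the plugin's lock, merge the released jobs into
             * the ready queue (cf. __merge_ready()), and check for preemptions */
    }

    static void my_domain_setup(void)
    {
            /* NULL for the order callback selects dummy_order(); a real plugin
             * passes its priority comparator here. */
            rt_domain_init(&my_domain, NULL, my_check_resched, my_release_jobs);
    }
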
32317 | + | ||
32318 | +/* add_ready - add a real-time task to the rt ready queue. It must be runnable. | ||
32319 | + * @new: the newly released task | ||
32320 | + */ | ||
32321 | +void __add_ready(rt_domain_t* rt, struct task_struct *new) | ||
32322 | +{ | ||
32323 | + TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n", | ||
32324 | + new->comm, new->pid, get_exec_cost(new), get_rt_period(new), | ||
32325 | + get_release(new), litmus_clock()); | ||
32326 | + | ||
32327 | + BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); | ||
32328 | + | ||
32329 | + bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node); | ||
32330 | + rt->check_resched(rt); | ||
32331 | +} | ||
32332 | + | ||
32333 | +/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable. | ||
32334 | + * @tasks - the newly released tasks | ||
32335 | + */ | ||
32336 | +void __merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
32337 | +{ | ||
32338 | + bheap_union(rt->order, &rt->ready_queue, tasks); | ||
32339 | + rt->check_resched(rt); | ||
32340 | +} | ||
32341 | + | ||
32342 | +/* add_release - add a real-time task to the rt release queue. | ||
32343 | + * @task: the sleeping task | ||
32344 | + */ | ||
32345 | +void __add_release(rt_domain_t* rt, struct task_struct *task) | ||
32346 | +{ | ||
32347 | + TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); | ||
32348 | + list_add(&tsk_rt(task)->list, &rt->tobe_released); | ||
32349 | + task->rt_param.domain = rt; | ||
32350 | + | ||
32351 | + /* start release timer */ | ||
32352 | + arm_release_timer(rt); | ||
32353 | +} | ||
32354 | + | ||
32355 | diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c | ||
32356 | new file mode 100644 | ||
32357 | index 0000000..d0767ce | ||
32358 | --- /dev/null | ||
32359 | +++ b/litmus/sched_cedf.c | ||
32360 | @@ -0,0 +1,727 @@ | ||
32361 | +/* | ||
32362 | + * kernel/sched_cedf.c | ||
32363 | + * | ||
32364 | + * Implementation of the Clustered EDF (C-EDF) scheduling algorithm. | ||
32365 | + * Linking is included so that support for synchronization (e.g., through | ||
32366 | + * the implementation of a "CSN-EDF" algorithm) can be added later if desired. | ||
32367 | + * | ||
32368 | + * This version uses the simple approach and serializes all scheduling | ||
32369 | + * decisions by the use of a queue lock. This is probably not the | ||
32370 | + * best way to do it, but it should suffice for now. | ||
32371 | + */ | ||
32372 | + | ||
32373 | +#include <linux/spinlock.h> | ||
32374 | +#include <linux/percpu.h> | ||
32375 | +#include <linux/sched.h> | ||
32376 | +#include <linux/list.h> | ||
32377 | + | ||
32378 | +#include <litmus/litmus.h> | ||
32379 | +#include <litmus/jobs.h> | ||
32380 | +#include <litmus/sched_plugin.h> | ||
32381 | +#include <litmus/edf_common.h> | ||
32382 | +#include <litmus/sched_trace.h> | ||
32383 | +#include <litmus/bheap.h> | ||
32384 | + | ||
32385 | +#include <linux/module.h> | ||
32386 | + | ||
32387 | +/* Overview of C-EDF operations. | ||
32388 | + * | ||
32389 | + * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
32390 | + * structure (NOT the actually scheduled | ||
32391 | + * task). If there is another linked task To | ||
32392 | + * already it will set To->linked_on = NO_CPU | ||
32393 | + * (thereby removing its association with this | ||
32394 | + * CPU). However, it will not requeue the | ||
32395 | + * previously linked task (if any). It will set | ||
32396 | + * T's state to RT_F_RUNNING and check whether | ||
32397 | + * it is already running somewhere else. If T | ||
32398 | + * is scheduled somewhere else it will link | ||
32399 | + * it to that CPU instead (and pull the linked | ||
32400 | + * task to cpu). T may be NULL. | ||
32401 | + * | ||
32402 | + * unlink(T) - Unlink removes T from all scheduler data | ||
32403 | + * structures. If it is linked to some CPU it | ||
32404 | + * will link NULL to that CPU. If it is | ||
32405 | + * currently queued in the cedf queue for | ||
32406 | + * a partition, it will be removed from | ||
32407 | + * the rt_domain. It is safe to call | ||
32408 | + * unlink(T) if T is not linked. T may not | ||
32409 | + * be NULL. | ||
32410 | + * | ||
32411 | + * requeue(T) - Requeue will insert T into the appropriate | ||
32412 | + * queue. If T has already been released | ||
32413 | + * (its release time has passed), it will | ||
32414 | + * go into the ready queue. If T's release | ||
32415 | + * time is still in the future, it will go | ||
32416 | + * into the release queue instead. That | ||
32417 | + * means that T's release time/job no/etc. | ||
32418 | + * have to be updated before requeue(T) is | ||
32419 | + * called. It is not safe to call requeue(T) | ||
32420 | + * when T is already queued. T may not be | ||
32421 | + * NULL. | ||
32422 | + * | ||
32423 | + * cedf_job_arrival(T) - This is the catch-all function when T enters | ||
32424 | + * the system after either a suspension or at a | ||
32425 | + * job release. It will queue T (which means it | ||
32426 | + * is not safe to call cedf_job_arrival(T) if | ||
32427 | + * T is already queued) and then check whether a | ||
32428 | + * preemption is necessary. If a preemption is | ||
32429 | + * necessary it will update the linkage | ||
32430 | + * accordingly and cause scheduled to be called | ||
32431 | + * (either with an IPI or need_resched). It is | ||
32432 | + * safe to call cedf_job_arrival(T) if T's | ||
32433 | + * next job has not been actually released yet | ||
32434 | + * (release time in the future). T will be put | ||
32435 | + * on the release queue in that case. | ||
32436 | + * | ||
32437 | + * job_completion(T) - Take care of everything that needs to be done | ||
32438 | + * to prepare T for its next release and place | ||
32439 | + * it in the right queue with | ||
32440 | + * cedf_job_arrival(). | ||
32441 | + * | ||
32442 | + * | ||
32443 | + * When we know that T is linked to CPU then link_task_to_cpu(NULL, CPU) is | ||
32444 | + * equivalent to unlink(T). Note that if you unlink a task from a CPU none of | ||
32445 | + * the functions will automatically propagate a pending task from the ready | ||
32446 | + * queue to the now-unlinked CPU. This is the job of the calling function (by | ||
32447 | + * means of __take_ready). | ||
32448 | + */ | ||
32449 | + | ||
32450 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
32451 | + */ | ||
32452 | +typedef struct { | ||
32453 | + int cpu; | ||
32454 | + struct task_struct* linked; /* only RT tasks */ | ||
32455 | + struct task_struct* scheduled; /* only RT tasks */ | ||
32456 | + struct list_head list; | ||
32457 | + atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
32458 | +} cpu_entry_t; | ||
32459 | +DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | ||
32460 | + | ||
32461 | +cpu_entry_t* *cedf_cpu_entries_array; | ||
32462 | + | ||
32463 | +#define set_will_schedule() \ | ||
32464 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) | ||
32465 | +#define clear_will_schedule() \ | ||
32466 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0)) | ||
32467 | +#define test_will_schedule(cpu) \ | ||
32468 | + (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | ||
32469 | + | ||
32470 | +/* Cluster size -- currently four. This is a variable to allow for | ||
32471 | + * the possibility of changing the cluster size online in the future. | ||
32472 | + */ | ||
32473 | +int cluster_size = 4; | ||
32474 | + | ||
32475 | +int do_cleanup = 1; | ||
32476 | + | ||
32477 | +typedef struct { | ||
32478 | + rt_domain_t domain; | ||
32479 | + int first_cpu; | ||
32480 | + int last_cpu; | ||
32481 | + | ||
32482 | + /* the cpus queue themselves according to priority in here */ | ||
32483 | + struct list_head cedf_cpu_queue; | ||
32484 | + | ||
32485 | + /* per-partition spinlock: protects the domain and | ||
32486 | + * serializes scheduling decisions | ||
32487 | + */ | ||
32488 | +#define slock domain.ready_lock | ||
32489 | +} cedf_domain_t; | ||
32490 | + | ||
32491 | +DEFINE_PER_CPU(cedf_domain_t*, cedf_domains) = NULL; | ||
32492 | + | ||
32493 | +cedf_domain_t* *cedf_domains_array; | ||
32494 | + | ||
32495 | + | ||
32496 | +/* These are defined similarly to partitioning, except that a | ||
32497 | + * task's partition is any cpu of the cluster to which it | ||
32498 | + * is assigned, typically the lowest-numbered cpu. | ||
32499 | + */ | ||
32500 | +#define local_edf (&__get_cpu_var(cedf_domains)->domain) | ||
32501 | +#define local_cedf __get_cpu_var(cedf_domains) | ||
32502 | +#define remote_edf(cpu) (&per_cpu(cedf_domains, cpu)->domain) | ||
32503 | +#define remote_cedf(cpu) per_cpu(cedf_domains, cpu) | ||
32504 | +#define task_edf(task) remote_edf(get_partition(task)) | ||
32505 | +#define task_cedf(task) remote_cedf(get_partition(task)) | ||
32506 | + | ||
32507 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
32508 | + * order in the cpu queue. Caller must hold cedf lock. | ||
32509 | + * | ||
32510 | + * This really should be a heap. | ||
32511 | + */ | ||
32512 | +static void update_cpu_position(cpu_entry_t *entry) | ||
32513 | +{ | ||
32514 | + cpu_entry_t *other; | ||
32515 | + struct list_head *cedf_cpu_queue = | ||
32516 | + &(remote_cedf(entry->cpu))->cedf_cpu_queue; | ||
32517 | + struct list_head *pos; | ||
32518 | + | ||
32519 | + BUG_ON(!cedf_cpu_queue); | ||
32520 | + | ||
32521 | + if (likely(in_list(&entry->list))) | ||
32522 | + list_del(&entry->list); | ||
32523 | + /* if we do not execute real-time jobs we just move | ||
32524 | + * to the end of the queue | ||
32525 | + */ | ||
32526 | + if (entry->linked) { | ||
32527 | + list_for_each(pos, cedf_cpu_queue) { | ||
32528 | + other = list_entry(pos, cpu_entry_t, list); | ||
32529 | + if (edf_higher_prio(entry->linked, other->linked)) { | ||
32530 | + __list_add(&entry->list, pos->prev, pos); | ||
32531 | + return; | ||
32532 | + } | ||
32533 | + } | ||
32534 | + } | ||
32535 | + /* if we get this far we have the lowest priority job */ | ||
32536 | + list_add_tail(&entry->list, cedf_cpu_queue); | ||
32537 | +} | ||
32538 | + | ||
32539 | +/* link_task_to_cpu - Update the link of a CPU. | ||
32540 | + * Handles the case where the to-be-linked task is already | ||
32541 | + * scheduled on a different CPU. | ||
32542 | + */ | ||
32543 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
32544 | + cpu_entry_t *entry) | ||
32545 | +{ | ||
32546 | + cpu_entry_t *sched; | ||
32547 | + struct task_struct* tmp; | ||
32548 | + int on_cpu; | ||
32549 | + | ||
32550 | + BUG_ON(linked && !is_realtime(linked)); | ||
32551 | + | ||
32552 | + /* Cannot link task to a CPU that doesn't belong to its partition... */ | ||
32553 | + BUG_ON(linked && remote_cedf(entry->cpu) != task_cedf(linked)); | ||
32554 | + | ||
32555 | + /* Currently linked task is set to be unlinked. */ | ||
32556 | + if (entry->linked) { | ||
32557 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
32558 | + } | ||
32559 | + | ||
32560 | + /* Link new task to CPU. */ | ||
32561 | + if (linked) { | ||
32562 | + set_rt_flags(linked, RT_F_RUNNING); | ||
32563 | + /* handle the case that the task is already scheduled somewhere! */ | ||
32564 | + on_cpu = linked->rt_param.scheduled_on; | ||
32565 | + if (on_cpu != NO_CPU) { | ||
32566 | + sched = &per_cpu(cedf_cpu_entries, on_cpu); | ||
32567 | + /* this should only happen if not linked already */ | ||
32568 | + BUG_ON(sched->linked == linked); | ||
32569 | + | ||
32570 | + /* If we are already scheduled on the CPU to which we | ||
32571 | + * wanted to link, we don't need to do the swap -- | ||
32572 | + * we just link ourselves to the CPU and depend on | ||
32573 | + * the caller to get things right. | ||
32574 | + */ | ||
32575 | + if (entry != sched) { | ||
32576 | + tmp = sched->linked; | ||
32577 | + linked->rt_param.linked_on = sched->cpu; | ||
32578 | + sched->linked = linked; | ||
32579 | + update_cpu_position(sched); | ||
32580 | + linked = tmp; | ||
32581 | + } | ||
32582 | + } | ||
32583 | + if (linked) /* might be NULL due to swap */ | ||
32584 | + linked->rt_param.linked_on = entry->cpu; | ||
32585 | + } | ||
32586 | + entry->linked = linked; | ||
32587 | + | ||
32588 | + if (entry->linked) | ||
32589 | + TRACE_TASK(entry->linked, "linked to CPU %d, state:%d\n", | ||
32590 | + entry->cpu, entry->linked->state); | ||
32591 | + else | ||
32592 | + TRACE("NULL linked to CPU %d\n", entry->cpu); | ||
32593 | + | ||
32594 | + update_cpu_position(entry); | ||
32595 | +} | ||
32596 | + | ||
32597 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
32598 | + * where it was linked before. Must hold cedf_lock. | ||
32599 | + */ | ||
32600 | +static noinline void unlink(struct task_struct* t) | ||
32601 | +{ | ||
32602 | + cpu_entry_t *entry; | ||
32603 | + | ||
32604 | + if (unlikely(!t)) { | ||
32605 | + TRACE_BUG_ON(!t); | ||
32606 | + return; | ||
32607 | + } | ||
32608 | + | ||
32609 | + if (t->rt_param.linked_on != NO_CPU) { | ||
32610 | + /* unlink */ | ||
32611 | + entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | ||
32612 | + t->rt_param.linked_on = NO_CPU; | ||
32613 | + link_task_to_cpu(NULL, entry); | ||
32614 | + } else if (is_queued(t)) { | ||
32615 | + /* This is an interesting situation: t is scheduled, | ||
32616 | + * but was just recently unlinked. It cannot be | ||
32617 | + * linked anywhere else (because then it would have | ||
32618 | + * been relinked to this CPU), thus it must be in some | ||
32619 | + * queue. We must remove it from the list in this | ||
32620 | + * case. | ||
32621 | + */ | ||
32622 | + remove(task_edf(t), t); | ||
32623 | + } | ||
32624 | +} | ||
32625 | + | ||
32626 | + | ||
32627 | +/* preempt - force a CPU to reschedule | ||
32628 | + */ | ||
32629 | +static noinline void preempt(cpu_entry_t *entry) | ||
32630 | +{ | ||
32631 | + preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
32632 | +} | ||
32633 | + | ||
32634 | +/* requeue - Put an unlinked task into the c-edf domain. | ||
32635 | + * Caller must hold cedf_lock. | ||
32636 | + */ | ||
32637 | +static noinline void requeue(struct task_struct* task) | ||
32638 | +{ | ||
32639 | + cedf_domain_t* cedf; | ||
32640 | + rt_domain_t* edf; | ||
32641 | + | ||
32642 | + BUG_ON(!task); | ||
32643 | + /* sanity check rt_list before insertion */ | ||
32644 | + BUG_ON(is_queued(task)); | ||
32645 | + | ||
32646 | + /* Get correct real-time domain. */ | ||
32647 | + cedf = task_cedf(task); | ||
32648 | + edf = &cedf->domain; | ||
32649 | + | ||
32650 | + if (is_released(task, litmus_clock())) | ||
32651 | + __add_ready(edf, task); | ||
32652 | + else { | ||
32653 | + /* it has got to wait */ | ||
32654 | + add_release(edf, task); | ||
32655 | + } | ||
32656 | +} | ||
32657 | + | ||
32658 | +static void check_for_preemptions(cedf_domain_t* cedf) | ||
32659 | +{ | ||
32660 | + cpu_entry_t *last; | ||
32661 | + struct task_struct *task; | ||
32662 | + struct list_head *cedf_cpu_queue; | ||
32663 | + cedf_cpu_queue = &cedf->cedf_cpu_queue; | ||
32664 | + | ||
32665 | + for(last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list); | ||
32666 | + edf_preemption_needed(&cedf->domain, last->linked); | ||
32667 | + last = list_entry(cedf_cpu_queue->prev, cpu_entry_t, list)) { | ||
32668 | + /* preemption necessary */ | ||
32669 | + task = __take_ready(&cedf->domain); | ||
32670 | + TRACE("check_for_preemptions: task %d linked to %d, state:%d\n", | ||
32671 | + task->pid, last->cpu, task->state); | ||
32672 | + if (last->linked) | ||
32673 | + requeue(last->linked); | ||
32674 | + link_task_to_cpu(task, last); | ||
32675 | + preempt(last); | ||
32676 | + } | ||
32677 | + | ||
32678 | +} | ||
32679 | + | ||
32680 | +/* cedf_job_arrival: task is either resumed or released */ | ||
32681 | +static noinline void cedf_job_arrival(struct task_struct* task) | ||
32682 | +{ | ||
32683 | + cedf_domain_t* cedf; | ||
32684 | + rt_domain_t* edf; | ||
32685 | + | ||
32686 | + BUG_ON(!task); | ||
32687 | + | ||
32688 | + /* Get correct real-time domain. */ | ||
32689 | + cedf = task_cedf(task); | ||
32690 | + edf = &cedf->domain; | ||
32691 | + | ||
32692 | + /* first queue arriving job */ | ||
32693 | + requeue(task); | ||
32694 | + | ||
32695 | + /* then check for any necessary preemptions */ | ||
32696 | + check_for_preemptions(cedf); | ||
32697 | +} | ||
32698 | + | ||
32699 | +/* check for current job releases */ | ||
32700 | +static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
32701 | +{ | ||
32702 | + cedf_domain_t* cedf = container_of(rt, cedf_domain_t, domain); | ||
32703 | + unsigned long flags; | ||
32704 | + | ||
32705 | + spin_lock_irqsave(&cedf->slock, flags); | ||
32706 | + | ||
32707 | + __merge_ready(&cedf->domain, tasks); | ||
32708 | + check_for_preemptions(cedf); | ||
32709 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
32710 | +} | ||
32711 | + | ||
32712 | +/* cedf_tick - this function is called for every local timer | ||
32713 | + * interrupt. | ||
32714 | + * | ||
32715 | + * checks whether the current task has expired and checks | ||
32716 | + * whether we need to preempt it if it has not expired | ||
32717 | + */ | ||
32718 | +static void cedf_tick(struct task_struct* t) | ||
32719 | +{ | ||
32720 | + BUG_ON(!t); | ||
32721 | + | ||
32722 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
32723 | + if (!is_np(t)) { | ||
32724 | + /* np tasks will be preempted when they become | ||
32725 | + * preemptable again | ||
32726 | + */ | ||
32727 | + set_tsk_need_resched(t); | ||
32728 | + set_will_schedule(); | ||
32729 | + TRACE("cedf_scheduler_tick: " | ||
32730 | + "%d is preemptable (state:%d) " | ||
32731 | + " => FORCE_RESCHED\n", t->pid, t->state); | ||
32732 | + } else if(is_user_np(t)) { | ||
32733 | + TRACE("cedf_scheduler_tick: " | ||
32734 | + "%d is non-preemptable (state:%d), " | ||
32735 | + "preemption delayed.\n", t->pid, t->state); | ||
32736 | + request_exit_np(t); | ||
32737 | + } | ||
32738 | + } | ||
32739 | +} | ||
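[Editor's note] cedf_tick() above only enforces the budget of a task that is currently preemptable; for a task inside a user-signalled non-preemptable section the preemption is merely requested and deferred until the section is exited. A minimal user-space sketch of that decision, using plain integers in place of the LITMUS task state (all names below are illustrative, not from the patch):

#include <stdio.h>

struct job { int exec_time; int exec_cost; int nonpreemptive; };

/* Mirrors the structure of cedf_tick(): act only on exhausted budgets,
 * and defer the preemption while the task is non-preemptable. */
static const char *tick_action(const struct job *j)
{
    if (j->exec_time < j->exec_cost)
        return "nothing to do";
    if (!j->nonpreemptive)
        return "force reschedule";
    return "request exit from np-section, delay preemption";
}

int main(void)
{
    struct job ok   = { 3, 10, 0 };
    struct job over = { 12, 10, 0 };
    struct job np   = { 12, 10, 1 };

    printf("%s\n%s\n%s\n",
           tick_action(&ok), tick_action(&over), tick_action(&np));
    return 0;
}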
32740 | + | ||
32741 | +/* caller holds cedf_lock */ | ||
32742 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
32743 | +{ | ||
32744 | + BUG_ON(!t); | ||
32745 | + | ||
32746 | + sched_trace_task_completion(t, forced); | ||
32747 | + | ||
32748 | + TRACE_TASK(t, "job_completion(). [state:%d]\n", t->state); | ||
32749 | + | ||
32750 | + /* set flags */ | ||
32751 | + set_rt_flags(t, RT_F_SLEEP); | ||
32752 | + /* prepare for next period */ | ||
32753 | + prepare_for_next_period(t); | ||
32754 | + /* unlink */ | ||
32755 | + unlink(t); | ||
32756 | + /* requeue | ||
32757 | + * But don't requeue a blocking task. */ | ||
32758 | + if (is_running(t)) | ||
32759 | + cedf_job_arrival(t); | ||
32760 | +} | ||
32761 | + | ||
32762 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
32763 | + * assumptions on the state of the current task since it may be called for a | ||
32764 | + * number of reasons. The reasons include that a scheduler_tick() determined it | ||
32765 | + * was necessary, that sys_exit_np() was called, that some Linux | ||
32766 | + * subsystem determined so, or even (in the worst case) that there is a bug | ||
32767 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
32768 | + * current state is. | ||
32769 | + * | ||
32770 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
32771 | + * | ||
32772 | + * The following assertions for the scheduled task could hold: | ||
32773 | + * | ||
32774 | + * - !is_running(scheduled) // the job blocks | ||
32775 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
32776 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
32777 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
32778 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
32779 | + * sys_exit_np must be requested | ||
32780 | + * | ||
32781 | + * Any of these can occur together. | ||
32782 | + */ | ||
32783 | +static struct task_struct* cedf_schedule(struct task_struct * prev) | ||
32784 | +{ | ||
32785 | + cedf_domain_t* cedf = local_cedf; | ||
32786 | + rt_domain_t* edf = &cedf->domain; | ||
32787 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
32788 | + int out_of_time, sleep, preempt, np, | ||
32789 | + exists, blocks; | ||
32790 | + struct task_struct* next = NULL; | ||
32791 | + | ||
32792 | + BUG_ON(!prev); | ||
32793 | + BUG_ON(!cedf); | ||
32794 | + BUG_ON(!edf); | ||
32795 | + BUG_ON(!entry); | ||
32796 | + BUG_ON(cedf != remote_cedf(entry->cpu)); | ||
32797 | + BUG_ON(is_realtime(prev) && cedf != task_cedf(prev)); | ||
32798 | + | ||
32799 | + /* Will be released in finish_switch. */ | ||
32800 | + spin_lock(&cedf->slock); | ||
32801 | + clear_will_schedule(); | ||
32802 | + | ||
32803 | + /* sanity checking */ | ||
32804 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
32805 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
32806 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
32807 | + | ||
32808 | + /* (0) Determine state */ | ||
32809 | + exists = entry->scheduled != NULL; | ||
32810 | + blocks = exists && !is_running(entry->scheduled); | ||
32811 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
32812 | + np = exists && is_np(entry->scheduled); | ||
32813 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
32814 | + preempt = entry->scheduled != entry->linked; | ||
32815 | + | ||
32816 | + /* If a task blocks we have no choice but to reschedule. | ||
32817 | + */ | ||
32818 | + if (blocks) | ||
32819 | + unlink(entry->scheduled); | ||
32820 | + | ||
32821 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
32822 | + * We need to make sure to update the link structure anyway in case | ||
32823 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
32824 | + * hurt. | ||
32825 | + */ | ||
32826 | + if (np && (out_of_time || preempt || sleep)) { | ||
32827 | + unlink(entry->scheduled); | ||
32828 | + request_exit_np(entry->scheduled); | ||
32829 | + } | ||
32830 | + | ||
32831 | + /* Any task that is preemptable and either exhausts its execution | ||
32832 | + * budget or wants to sleep completes. We may have to reschedule after | ||
32833 | + * this. Don't do a job completion if it blocks (can't have timers | ||
32834 | + * running for blocked jobs). Preemptions go first for the same reason. | ||
32835 | + */ | ||
32836 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
32837 | + job_completion(entry->scheduled, !sleep); | ||
32838 | + | ||
32839 | + /* Link pending task if we became unlinked. | ||
32840 | + */ | ||
32841 | + if (!entry->linked) | ||
32842 | + link_task_to_cpu(__take_ready(edf), entry); | ||
32843 | + | ||
32844 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
32845 | + * If linked is different from scheduled, then select linked as next. | ||
32846 | + */ | ||
32847 | + if ((!np || blocks) && | ||
32848 | + entry->linked != entry->scheduled) { | ||
32849 | + /* Schedule a linked job? */ | ||
32850 | + if (entry->linked) { | ||
32851 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
32852 | + next = entry->linked; | ||
32853 | + } | ||
32854 | + if (entry->scheduled) { | ||
32855 | + /* not gonna be scheduled soon */ | ||
32856 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
32857 | + TRACE_TASK(entry->scheduled, "cedf_schedule: scheduled_on = NO_CPU\n"); | ||
32858 | + } | ||
32859 | + } else | ||
32860 | + /* Only override Linux scheduler if we have a real-time task | ||
32861 | + * scheduled that needs to continue. | ||
32862 | + */ | ||
32863 | + if (exists) | ||
32864 | + next = prev; | ||
32865 | + | ||
32866 | + spin_unlock(&cedf->slock); | ||
32867 | + | ||
32868 | + return next; | ||
32869 | +} | ||
32870 | + | ||
32871 | +/* _finish_switch - we just finished the switch away from prev | ||
32872 | + */ | ||
32873 | +static void cedf_finish_switch(struct task_struct *prev) | ||
32874 | +{ | ||
32875 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
32876 | + | ||
32877 | + BUG_ON(!prev); | ||
32878 | + BUG_ON(!entry); | ||
32879 | + | ||
32880 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
32881 | +} | ||
32882 | + | ||
32883 | +/* Prepare a task for running in RT mode | ||
32884 | + */ | ||
32885 | +static void cedf_task_new(struct task_struct *t, int on_rq, int running) | ||
32886 | +{ | ||
32887 | + unsigned long flags; | ||
32888 | + cedf_domain_t* cedf = task_cedf(t); | ||
32889 | + cpu_entry_t* entry; | ||
32890 | + | ||
32891 | + BUG_ON(!cedf); | ||
32892 | + | ||
32893 | + spin_lock_irqsave(&cedf->slock, flags); | ||
32894 | + if (running) { | ||
32895 | + entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); | ||
32896 | + BUG_ON(!entry); | ||
32897 | + BUG_ON(entry->scheduled); | ||
32898 | + entry->scheduled = t; | ||
32899 | + t->rt_param.scheduled_on = task_cpu(t); | ||
32900 | + } else | ||
32901 | + t->rt_param.scheduled_on = NO_CPU; | ||
32902 | + t->rt_param.linked_on = NO_CPU; | ||
32903 | + | ||
32904 | + /* setup job params */ | ||
32905 | + release_at(t, litmus_clock()); | ||
32906 | + | ||
32907 | + cedf_job_arrival(t); | ||
32908 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
32909 | +} | ||
32910 | + | ||
32911 | + | ||
32912 | +static void cedf_task_wake_up(struct task_struct *task) | ||
32913 | +{ | ||
32914 | + unsigned long flags; | ||
32915 | + cedf_domain_t* cedf; | ||
32916 | + lt_t now; | ||
32917 | + | ||
32918 | + BUG_ON(!task); | ||
32919 | + | ||
32920 | + cedf = task_cedf(task); | ||
32921 | + BUG_ON(!cedf); | ||
32922 | + | ||
32923 | + spin_lock_irqsave(&cedf->slock, flags); | ||
32924 | + /* We need to take suspensions because of semaphores into | ||
32925 | + * account! If a job resumes after being suspended due to acquiring | ||
32926 | + * a semaphore, it should never be treated as a new job release. | ||
32927 | + */ | ||
32928 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
32929 | + set_rt_flags(task, RT_F_RUNNING); | ||
32930 | + } else { | ||
32931 | + now = litmus_clock(); | ||
32932 | + if (is_tardy(task, now)) { | ||
32933 | + /* new sporadic release */ | ||
32934 | + release_at(task, now); | ||
32935 | + sched_trace_task_release(task); | ||
32936 | + } | ||
32937 | + else if (task->rt.time_slice) | ||
32938 | + /* came back in time before deadline | ||
32939 | + */ | ||
32940 | + set_rt_flags(task, RT_F_RUNNING); | ||
32941 | + } | ||
32942 | + cedf_job_arrival(task); | ||
32943 | + spin_unlock_irqrestore(&cedf->slock, flags); | ||
32944 | +} | ||
32945 | + | ||
32946 | + | ||
32947 | +static void cedf_task_block(struct task_struct *t) | ||
32948 | +{ | ||
32949 | + unsigned long flags; | ||
32950 | + | ||
32951 | + BUG_ON(!t); | ||
32952 | + | ||
32953 | + /* unlink if necessary */ | ||
32954 | + spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
32955 | + | ||
32956 | + t->rt_param.scheduled_on = NO_CPU; | ||
32957 | + unlink(t); | ||
32958 | + | ||
32959 | + spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
32960 | + | ||
32961 | + BUG_ON(!is_realtime(t)); | ||
32962 | +} | ||
32963 | + | ||
32964 | +static void cedf_task_exit(struct task_struct * t) | ||
32965 | +{ | ||
32966 | + unsigned long flags; | ||
32967 | + | ||
32968 | + BUG_ON(!t); | ||
32969 | + | ||
32970 | + /* unlink if necessary */ | ||
32971 | + spin_lock_irqsave(&task_cedf(t)->slock, flags); | ||
32972 | + unlink(t); | ||
32973 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
32974 | + cedf_cpu_entries_array[tsk_rt(t)->scheduled_on]-> | ||
32975 | + scheduled = NULL; | ||
32976 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
32977 | + } | ||
32978 | + spin_unlock_irqrestore(&task_cedf(t)->slock, flags); | ||
32979 | + | ||
32980 | + BUG_ON(!is_realtime(t)); | ||
32981 | + TRACE_TASK(t, "RIP\n"); | ||
32982 | +} | ||
32983 | + | ||
32984 | +static long cedf_admit_task(struct task_struct* tsk) | ||
32985 | +{ | ||
32986 | + return (task_cpu(tsk) >= task_cedf(tsk)->first_cpu && | ||
32987 | + task_cpu(tsk) <= task_cedf(tsk)->last_cpu) ? 0 : -EINVAL; | ||
32988 | +} | ||
32989 | + | ||
32990 | + | ||
32991 | +/* Plugin object */ | ||
32992 | +static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | ||
32993 | + .plugin_name = "C-EDF", | ||
32994 | + .finish_switch = cedf_finish_switch, | ||
32995 | + .tick = cedf_tick, | ||
32996 | + .task_new = cedf_task_new, | ||
32997 | + .complete_job = complete_job, | ||
32998 | + .task_exit = cedf_task_exit, | ||
32999 | + .schedule = cedf_schedule, | ||
33000 | + .task_wake_up = cedf_task_wake_up, | ||
33001 | + .task_block = cedf_task_block, | ||
33002 | + .admit_task = cedf_admit_task | ||
33003 | +}; | ||
33004 | + | ||
33005 | +static void cedf_domain_init(int first_cpu, int last_cpu) | ||
33006 | +{ | ||
33007 | + int cpu; | ||
33008 | + | ||
33009 | + /* Create new domain for this cluster. */ | ||
33010 | + cedf_domain_t *new_cedf_domain = kmalloc(sizeof(*new_cedf_domain), | ||
33011 | + GFP_KERNEL); | ||
33012 | + | ||
33013 | + /* Initialize cluster domain. */ | ||
33014 | + edf_domain_init(&new_cedf_domain->domain, NULL, | ||
33015 | + cedf_release_jobs); | ||
33016 | + new_cedf_domain->first_cpu = first_cpu; | ||
33017 | + new_cedf_domain->last_cpu = last_cpu; | ||
33018 | + INIT_LIST_HEAD(&new_cedf_domain->cedf_cpu_queue); | ||
33019 | + | ||
33020 | + /* Assign all cpus in cluster to point to this domain. */ | ||
33021 | + for (cpu = first_cpu; cpu <= last_cpu; cpu++) { | ||
33022 | + remote_cedf(cpu) = new_cedf_domain; | ||
33023 | + cedf_domains_array[cpu] = new_cedf_domain; | ||
33024 | + } | ||
33025 | +} | ||
33026 | + | ||
33027 | +static int __init init_cedf(void) | ||
33028 | +{ | ||
33029 | + int cpu; | ||
33030 | + cpu_entry_t *entry; | ||
33031 | + | ||
33032 | + /* num_online_cpus() should have been set already | ||
33033 | + * if the number of available cpus is less than the cluster | ||
33034 | + * size (currently 4) then it is pointless trying to use | ||
33035 | + * CEDF, so we disable this plugin | ||
33036 | + */ | ||
33037 | + if(num_online_cpus() < cluster_size) { | ||
33038 | + printk(KERN_INFO "Not registering C-EDF plugin: " | ||
33039 | + "Num Online Cpus (%d) < Min Cluster Size (%d)\n", | ||
33040 | + num_online_cpus(), cluster_size); | ||
33041 | + do_cleanup = 0; | ||
33042 | + return 0; | ||
33043 | + } | ||
33044 | + | ||
33045 | + /* | ||
33046 | + * initialize short_cut for per-cpu cedf state; | ||
33047 | + * there may be a problem here if someone removes a cpu | ||
33048 | + * while we are doing this initialization... and if cpus | ||
33049 | + * are added / removed later... is it a _real_ problem for cedf? | ||
33050 | + */ | ||
33051 | + cedf_cpu_entries_array = kmalloc( | ||
33052 | + sizeof(cpu_entry_t *) * num_online_cpus(), | ||
33053 | + GFP_KERNEL); | ||
33054 | + | ||
33055 | + cedf_domains_array = kmalloc( | ||
33056 | + sizeof(cedf_domain_t *) * num_online_cpus(), | ||
33057 | + GFP_KERNEL); | ||
33058 | + | ||
33059 | + /* initialize CPU state */ | ||
33060 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
33061 | + entry = &per_cpu(cedf_cpu_entries, cpu); | ||
33062 | + cedf_cpu_entries_array[cpu] = entry; | ||
33063 | + atomic_set(&entry->will_schedule, 0); | ||
33064 | + entry->linked = NULL; | ||
33065 | + entry->scheduled = NULL; | ||
33066 | + entry->cpu = cpu; | ||
33067 | + INIT_LIST_HEAD(&entry->list); | ||
33068 | + } | ||
33069 | + | ||
33070 | + /* initialize all cluster domains */ | ||
33071 | + for (cpu = 0; cpu < num_online_cpus(); cpu += cluster_size) | ||
33072 | + cedf_domain_init(cpu, cpu+cluster_size-1); | ||
33073 | + | ||
33074 | + return register_sched_plugin(&cedf_plugin); | ||
33075 | +} | ||
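[Editor's note] init_cedf() above partitions the online CPUs into consecutive clusters of cluster_size CPUs, each backed by one cedf_domain_t, and cedf_admit_task() only admits a task whose CPU lies inside its cluster's [first_cpu, last_cpu] range. A standalone sketch of the same partitioning arithmetic (the values and names below are assumptions for illustration only, not the kernel code):

#include <stdio.h>

/* Illustration only: mimic how init_cedf() assigns consecutive CPU
 * ranges [first_cpu, last_cpu] to clusters of cluster_size CPUs. */
int main(void)
{
    int num_online_cpus = 8;   /* assumed machine size */
    int cluster_size = 4;      /* the patch's default cluster size */
    int cpu;

    for (cpu = 0; cpu < num_online_cpus; cpu += cluster_size) {
        int first_cpu = cpu;
        int last_cpu = cpu + cluster_size - 1;
        printf("cluster %d: CPUs %d..%d share one cedf_domain_t\n",
               cpu / cluster_size, first_cpu, last_cpu);
    }
    return 0;
}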
33076 | + | ||
33077 | +static void clean_cedf(void) | ||
33078 | +{ | ||
33079 | + if(do_cleanup) { | ||
33080 | + kfree(cedf_cpu_entries_array); | ||
33081 | + kfree(cedf_domains_array); | ||
33082 | + } | ||
33083 | +} | ||
33084 | + | ||
33085 | +module_init(init_cedf); | ||
33086 | +module_exit(clean_cedf); | ||
33087 | + | ||
33088 | diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c | ||
33089 | new file mode 100644 | ||
33090 | index 0000000..b9310dd | ||
33091 | --- /dev/null | ||
33092 | +++ b/litmus/sched_gsn_edf.c | ||
33093 | @@ -0,0 +1,828 @@ | ||
33094 | +/* | ||
33095 | + * litmus/sched_gsn_edf.c | ||
33096 | + * | ||
33097 | + * Implementation of the GSN-EDF scheduling algorithm. | ||
33098 | + * | ||
33099 | + * This version uses the simple approach and serializes all scheduling | ||
33100 | + * decisions by the use of a queue lock. This is probably not the | ||
33101 | + * best way to do it, but it should suffice for now. | ||
33102 | + */ | ||
33103 | + | ||
33104 | +#include <linux/spinlock.h> | ||
33105 | +#include <linux/percpu.h> | ||
33106 | +#include <linux/sched.h> | ||
33107 | + | ||
33108 | +#include <litmus/litmus.h> | ||
33109 | +#include <litmus/jobs.h> | ||
33110 | +#include <litmus/sched_plugin.h> | ||
33111 | +#include <litmus/edf_common.h> | ||
33112 | +#include <litmus/sched_trace.h> | ||
33113 | + | ||
33114 | +#include <litmus/bheap.h> | ||
33115 | + | ||
33116 | +#include <linux/module.h> | ||
33117 | + | ||
33118 | +/* Overview of GSN-EDF operations. | ||
33119 | + * | ||
33120 | + * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | ||
33121 | + * description only covers how the individual operations are implemented in | ||
33122 | + * LITMUS. | ||
33123 | + * | ||
33124 | + * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
33125 | + * structure (NOT the actually scheduled | ||
33126 | + * task). If there is another linked task To | ||
33127 | + * already it will set To->linked_on = NO_CPU | ||
33128 | + * (thereby removing its association with this | ||
33129 | + * CPU). However, it will not requeue the | ||
33130 | + * previously linked task (if any). It will set | ||
33131 | + * T's state to RT_F_RUNNING and check whether | ||
33132 | + * it is already running somewhere else. If T | ||
33133 | + * is scheduled somewhere else it will link | ||
33134 | + * it to that CPU instead (and pull the linked | ||
33135 | + * task to cpu). T may be NULL. | ||
33136 | + * | ||
33137 | + * unlink(T) - Unlink removes T from all scheduler data | ||
33138 | + * structures. If it is linked to some CPU it | ||
33139 | + * will link NULL to that CPU. If it is | ||
33140 | + * currently queued in the gsnedf queue it will | ||
33141 | + * be removed from the rt_domain. It is safe to | ||
33142 | + * call unlink(T) if T is not linked. T may not | ||
33143 | + * be NULL. | ||
33144 | + * | ||
33145 | + * requeue(T) - Requeue will insert T into the appropriate | ||
33146 | + * queue. If the system is in real-time mode and | ||
33147 | + * T is released already, it will go into the | ||
33148 | + * ready queue. If the system is not in | ||
33149 | + * real-time mode, then T will go into the | ||
33150 | + * release queue. If T's release time is in the | ||
33151 | + * future, it will go into the release | ||
33152 | + * queue. That means that T's release time/job | ||
33153 | + * no/etc. has to be updated before requeue(T) is | ||
33154 | + * called. It is not safe to call requeue(T) | ||
33155 | + * when T is already queued. T may not be NULL. | ||
33156 | + * | ||
33157 | + * gsnedf_job_arrival(T) - This is the catch all function when T enters | ||
33158 | + * the system after either a suspension or at a | ||
33159 | + * job release. It will queue T (which means it | ||
33160 | + * is not safe to call gsnedf_job_arrival(T) if | ||
33161 | + * T is already queued) and then check whether a | ||
33162 | + * preemption is necessary. If a preemption is | ||
33163 | + * necessary it will update the linkage | ||
33164 | + * accordingly and cause scheduled to be called | ||
33165 | + * (either with an IPI or need_resched). It is | ||
33166 | + * safe to call gsnedf_job_arrival(T) if T's | ||
33167 | + * next job has not been actually released yet | ||
33168 | + * (release time in the future). T will be put | ||
33169 | + * on the release queue in that case. | ||
33170 | + * | ||
33171 | + * job_completion(T) - Take care of everything that needs to be done | ||
33172 | + * to prepare T for its next release and place | ||
33173 | + * it in the right queue with | ||
33174 | + * gsnedf_job_arrival(). | ||
33175 | + * | ||
33176 | + * | ||
33177 | + * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is | ||
33178 | + * equivalent to unlink(T). Note that if you unlink a task from a CPU none of | ||
33179 | + * the functions will automatically propagate a pending task from the ready queue | ||
33180 | + * to a linked task. This is the job of the calling function (by means of | ||
33181 | + * __take_ready). | ||
33182 | + */ | ||
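[Editor's note] The distinction drawn above between a task being linked to a CPU and being scheduled on it is the core of the design: linking only records the scheduler's intent, and reality catches up when schedule() next runs on that CPU. A toy model of that split (user-space C, hypothetical names, deliberately ignoring locking and IPIs; not part of the patch):

#include <stdio.h>

#define NCPUS 2

/* Toy model of the linked/scheduled split: linking only records intent;
 * the task actually runs once the scheduler fires on that CPU. */
static const char *linked[NCPUS];
static const char *scheduled[NCPUS];

static void link_task_to_cpu(const char *t, int cpu)
{
    linked[cpu] = t;                 /* update intent (real code also IPIs) */
}

static void run_schedule(int cpu)
{
    scheduled[cpu] = linked[cpu];    /* reality catches up with intent */
}

int main(void)
{
    link_task_to_cpu("T1", 0);
    printf("cpu0: linked=%s scheduled=%s\n",
           linked[0], scheduled[0] ? scheduled[0] : "(idle)");
    run_schedule(0);
    printf("cpu0: linked=%s scheduled=%s\n", linked[0], scheduled[0]);
    return 0;
}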
33183 | + | ||
33184 | + | ||
33185 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
33186 | + */ | ||
33187 | +typedef struct { | ||
33188 | + int cpu; | ||
33189 | + struct task_struct* linked; /* only RT tasks */ | ||
33190 | + struct task_struct* scheduled; /* only RT tasks */ | ||
33191 | + atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
33192 | + struct bheap_node* hn; | ||
33193 | +} cpu_entry_t; | ||
33194 | +DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | ||
33195 | + | ||
33196 | +cpu_entry_t* gsnedf_cpus[NR_CPUS]; | ||
33197 | + | ||
33198 | +#define set_will_schedule() \ | ||
33199 | + (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1)) | ||
33200 | +#define clear_will_schedule() \ | ||
33201 | + (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0)) | ||
33202 | +#define test_will_schedule(cpu) \ | ||
33203 | + (atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule)) | ||
33204 | + | ||
33205 | + | ||
33206 | +/* the cpus queue themselves according to priority in here */ | ||
33207 | +static struct bheap_node gsnedf_heap_node[NR_CPUS]; | ||
33208 | +static struct bheap gsnedf_cpu_heap; | ||
33209 | + | ||
33210 | +static rt_domain_t gsnedf; | ||
33211 | +#define gsnedf_lock (gsnedf.ready_lock) | ||
33212 | + | ||
33213 | + | ||
33214 | +/* Uncomment this if you want to see all scheduling decisions in the | ||
33215 | + * TRACE() log. | ||
33216 | +#define WANT_ALL_SCHED_EVENTS | ||
33217 | + */ | ||
33218 | + | ||
33219 | +static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
33220 | +{ | ||
33221 | + cpu_entry_t *a, *b; | ||
33222 | + a = _a->value; | ||
33223 | + b = _b->value; | ||
33224 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
33225 | + * the top of the heap. | ||
33226 | + */ | ||
33227 | + return edf_higher_prio(b->linked, a->linked); | ||
33228 | +} | ||
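[Editor's note] Because the comparator above swaps its arguments, bheap_peek() in lowest_prio_cpu() yields the CPU whose linked task has the lowest EDF priority, with idle CPUs winning over any busy one. The following self-contained sketch reproduces that ordering with bare deadlines and a linear scan standing in for the real heap and task structures (all names are illustrative assumptions):

#include <stdio.h>

/* Simplified model: a "linked task" is just an absolute deadline;
 * a negative value means the CPU is idle (no linked task). */
struct cpu { int id; long long deadline; };

/* EDF: earlier deadline = higher priority; an idle slot never wins. */
static int higher_prio(long long a, long long b)
{
    if (a < 0) return 0;
    if (b < 0) return 1;
    return a < b;
}

/* Mirrors cpu_lower_prio(): arguments are inverted so that the CPU whose
 * linked task has the LOWEST priority floats to the top. */
static int cpu_lower_prio(const struct cpu *a, const struct cpu *b)
{
    return higher_prio(b->deadline, a->deadline);
}

int main(void)
{
    struct cpu cpus[3] = { {0, 100}, {1, -1}, {2, 50} };
    int i, top = 0;

    /* linear scan standing in for bheap_peek() on the CPU heap */
    for (i = 1; i < 3; i++)
        if (cpu_lower_prio(&cpus[i], &cpus[top]))
            top = i;
    printf("lowest-priority CPU: %d\n", cpus[top].id);  /* prints 1 (idle) */
    return 0;
}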
33229 | + | ||
33230 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
33231 | + * order in the cpu queue. Caller must hold gsnedf lock. | ||
33232 | + */ | ||
33233 | +static void update_cpu_position(cpu_entry_t *entry) | ||
33234 | +{ | ||
33235 | + if (likely(bheap_node_in_heap(entry->hn))) | ||
33236 | + bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
33237 | + bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
33238 | +} | ||
33239 | + | ||
33240 | +/* caller must hold gsnedf lock */ | ||
33241 | +static cpu_entry_t* lowest_prio_cpu(void) | ||
33242 | +{ | ||
33243 | + struct bheap_node* hn; | ||
33244 | + hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
33245 | + return hn->value; | ||
33246 | +} | ||
33247 | + | ||
33248 | + | ||
33249 | +/* link_task_to_cpu - Update the link of a CPU. | ||
33250 | + * Handles the case where the to-be-linked task is already | ||
33251 | + * scheduled on a different CPU. | ||
33252 | + */ | ||
33253 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
33254 | + cpu_entry_t *entry) | ||
33255 | +{ | ||
33256 | + cpu_entry_t *sched; | ||
33257 | + struct task_struct* tmp; | ||
33258 | + int on_cpu; | ||
33259 | + | ||
33260 | + BUG_ON(linked && !is_realtime(linked)); | ||
33261 | + | ||
33262 | + /* Currently linked task is set to be unlinked. */ | ||
33263 | + if (entry->linked) { | ||
33264 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
33265 | + } | ||
33266 | + | ||
33267 | + /* Link new task to CPU. */ | ||
33268 | + if (linked) { | ||
33269 | + set_rt_flags(linked, RT_F_RUNNING); | ||
33270 | + /* handle the case where the task is already scheduled somewhere! */ | ||
33271 | + on_cpu = linked->rt_param.scheduled_on; | ||
33272 | + if (on_cpu != NO_CPU) { | ||
33273 | + sched = &per_cpu(gsnedf_cpu_entries, on_cpu); | ||
33274 | + /* this should only happen if not linked already */ | ||
33275 | + BUG_ON(sched->linked == linked); | ||
33276 | + | ||
33277 | + /* If we are already scheduled on the CPU to which we | ||
33278 | + * wanted to link, we don't need to do the swap -- | ||
33279 | + * we just link ourselves to the CPU and depend on | ||
33280 | + * the caller to get things right. | ||
33281 | + */ | ||
33282 | + if (entry != sched) { | ||
33283 | + TRACE_TASK(linked, | ||
33284 | + "already scheduled on %d, updating link.\n", | ||
33285 | + sched->cpu); | ||
33286 | + tmp = sched->linked; | ||
33287 | + linked->rt_param.linked_on = sched->cpu; | ||
33288 | + sched->linked = linked; | ||
33289 | + update_cpu_position(sched); | ||
33290 | + linked = tmp; | ||
33291 | + } | ||
33292 | + } | ||
33293 | + if (linked) /* might be NULL due to swap */ | ||
33294 | + linked->rt_param.linked_on = entry->cpu; | ||
33295 | + } | ||
33296 | + entry->linked = linked; | ||
33297 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
33298 | + if (linked) | ||
33299 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
33300 | + else | ||
33301 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
33302 | +#endif | ||
33303 | + update_cpu_position(entry); | ||
33304 | +} | ||
33305 | + | ||
33306 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
33307 | + * where it was linked before. Must hold gsnedf_lock. | ||
33308 | + */ | ||
33309 | +static noinline void unlink(struct task_struct* t) | ||
33310 | +{ | ||
33311 | + cpu_entry_t *entry; | ||
33312 | + | ||
33313 | + if (unlikely(!t)) { | ||
33314 | + TRACE_BUG_ON(!t); | ||
33315 | + return; | ||
33316 | + } | ||
33317 | + | ||
33318 | + if (t->rt_param.linked_on != NO_CPU) { | ||
33319 | + /* unlink */ | ||
33320 | + entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | ||
33321 | + t->rt_param.linked_on = NO_CPU; | ||
33322 | + link_task_to_cpu(NULL, entry); | ||
33323 | + } else if (is_queued(t)) { | ||
33324 | + /* This is an interesting situation: t is scheduled, | ||
33325 | + * but was just recently unlinked. It cannot be | ||
33326 | + * linked anywhere else (because then it would have | ||
33327 | + * been relinked to this CPU), thus it must be in some | ||
33328 | + * queue. We must remove it from the list in this | ||
33329 | + * case. | ||
33330 | + */ | ||
33331 | + remove(&gsnedf, t); | ||
33332 | + } | ||
33333 | +} | ||
33334 | + | ||
33335 | + | ||
33336 | +/* preempt - force a CPU to reschedule | ||
33337 | + */ | ||
33338 | +static void preempt(cpu_entry_t *entry) | ||
33339 | +{ | ||
33340 | + preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
33341 | +} | ||
33342 | + | ||
33343 | +/* requeue - Put an unlinked task into gsn-edf domain. | ||
33344 | + * Caller must hold gsnedf_lock. | ||
33345 | + */ | ||
33346 | +static noinline void requeue(struct task_struct* task) | ||
33347 | +{ | ||
33348 | + BUG_ON(!task); | ||
33349 | + /* sanity check before insertion */ | ||
33350 | + BUG_ON(is_queued(task)); | ||
33351 | + | ||
33352 | + if (is_released(task, litmus_clock())) | ||
33353 | + __add_ready(&gsnedf, task); | ||
33354 | + else { | ||
33355 | + /* it has got to wait */ | ||
33356 | + add_release(&gsnedf, task); | ||
33357 | + } | ||
33358 | +} | ||
33359 | + | ||
33360 | +/* check for any necessary preemptions */ | ||
33361 | +static void check_for_preemptions(void) | ||
33362 | +{ | ||
33363 | + struct task_struct *task; | ||
33364 | + cpu_entry_t* last; | ||
33365 | + | ||
33366 | + for(last = lowest_prio_cpu(); | ||
33367 | + edf_preemption_needed(&gsnedf, last->linked); | ||
33368 | + last = lowest_prio_cpu()) { | ||
33369 | + /* preemption necessary */ | ||
33370 | + task = __take_ready(&gsnedf); | ||
33371 | + TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
33372 | + task->pid, last->cpu); | ||
33373 | + if (last->linked) | ||
33374 | + requeue(last->linked); | ||
33375 | + link_task_to_cpu(task, last); | ||
33376 | + preempt(last); | ||
33377 | + } | ||
33378 | +} | ||
33379 | + | ||
33380 | +/* gsnedf_job_arrival: task is either resumed or released */ | ||
33381 | +static noinline void gsnedf_job_arrival(struct task_struct* task) | ||
33382 | +{ | ||
33383 | + BUG_ON(!task); | ||
33384 | + | ||
33385 | + requeue(task); | ||
33386 | + check_for_preemptions(); | ||
33387 | +} | ||
33388 | + | ||
33389 | +static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
33390 | +{ | ||
33391 | + unsigned long flags; | ||
33392 | + | ||
33393 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
33394 | + | ||
33395 | + __merge_ready(rt, tasks); | ||
33396 | + check_for_preemptions(); | ||
33397 | + | ||
33398 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
33399 | +} | ||
33400 | + | ||
33401 | +/* caller holds gsnedf_lock */ | ||
33402 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
33403 | +{ | ||
33404 | + BUG_ON(!t); | ||
33405 | + | ||
33406 | + sched_trace_task_completion(t, forced); | ||
33407 | + | ||
33408 | + TRACE_TASK(t, "job_completion().\n"); | ||
33409 | + | ||
33410 | + /* set flags */ | ||
33411 | + set_rt_flags(t, RT_F_SLEEP); | ||
33412 | + /* prepare for next period */ | ||
33413 | + prepare_for_next_period(t); | ||
33414 | + if (is_released(t, litmus_clock())) | ||
33415 | + sched_trace_task_release(t); | ||
33416 | + /* unlink */ | ||
33417 | + unlink(t); | ||
33418 | + /* requeue | ||
33419 | + * But don't requeue a blocking task. */ | ||
33420 | + if (is_running(t)) | ||
33421 | + gsnedf_job_arrival(t); | ||
33422 | +} | ||
33423 | + | ||
33424 | +/* gsnedf_tick - this function is called for every local timer | ||
33425 | + * interrupt. | ||
33426 | + * | ||
33427 | + * checks whether the current task's budget has been exhausted and, if so, | ||
33428 | + * whether it must be preempted now or only once it becomes preemptable again | ||
33429 | + */ | ||
33430 | +static void gsnedf_tick(struct task_struct* t) | ||
33431 | +{ | ||
33432 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
33433 | + if (!is_np(t)) { | ||
33434 | + /* np tasks will be preempted when they become | ||
33435 | + * preemptable again | ||
33436 | + */ | ||
33437 | + set_tsk_need_resched(t); | ||
33438 | + set_will_schedule(); | ||
33439 | + TRACE("gsnedf_scheduler_tick: " | ||
33440 | + "%d is preemptable " | ||
33441 | + " => FORCE_RESCHED\n", t->pid); | ||
33442 | + } else if (is_user_np(t)) { | ||
33443 | + TRACE("gsnedf_scheduler_tick: " | ||
33444 | + "%d is non-preemptable, " | ||
33445 | + "preemption delayed.\n", t->pid); | ||
33446 | + request_exit_np(t); | ||
33447 | + } | ||
33448 | + } | ||
33449 | +} | ||
33450 | + | ||
33451 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
33452 | + * assumptions on the state of the current task since it may be called for a | ||
33453 | + * number of reasons. The reasons include that a scheduler_tick() determined it | ||
33454 | + * was necessary, that sys_exit_np() was called, that some Linux | ||
33455 | + * subsystem determined so, or even (in the worst case) that there is a bug | ||
33456 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
33457 | + * current state is. | ||
33458 | + * | ||
33459 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
33460 | + * | ||
33461 | + * The following assertions for the scheduled task could hold: | ||
33462 | + * | ||
33463 | + * - !is_running(scheduled) // the job blocks | ||
33464 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
33465 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
33466 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
33467 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
33468 | + * sys_exit_np must be requested | ||
33469 | + * | ||
33470 | + * Any of these can occur together. | ||
33471 | + */ | ||
33472 | +static struct task_struct* gsnedf_schedule(struct task_struct * prev) | ||
33473 | +{ | ||
33474 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
33475 | + int out_of_time, sleep, preempt, np, exists, blocks; | ||
33476 | + struct task_struct* next = NULL; | ||
33477 | + | ||
33478 | + /* Bail out early if we are the release master. | ||
33479 | + * The release master never schedules any real-time tasks. | ||
33480 | + */ | ||
33481 | + if (gsnedf.release_master == entry->cpu) | ||
33482 | + return NULL; | ||
33483 | + | ||
33484 | + spin_lock(&gsnedf_lock); | ||
33485 | + clear_will_schedule(); | ||
33486 | + | ||
33487 | + /* sanity checking */ | ||
33488 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
33489 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
33490 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
33491 | + | ||
33492 | + /* (0) Determine state */ | ||
33493 | + exists = entry->scheduled != NULL; | ||
33494 | + blocks = exists && !is_running(entry->scheduled); | ||
33495 | + out_of_time = exists && budget_exhausted(entry->scheduled); | ||
33496 | + np = exists && is_np(entry->scheduled); | ||
33497 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
33498 | + preempt = entry->scheduled != entry->linked; | ||
33499 | + | ||
33500 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
33501 | + TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | ||
33502 | +#endif | ||
33503 | + | ||
33504 | + if (exists) | ||
33505 | + TRACE_TASK(prev, | ||
33506 | + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
33507 | + "state:%d sig:%d\n", | ||
33508 | + blocks, out_of_time, np, sleep, preempt, | ||
33509 | + prev->state, signal_pending(prev)); | ||
33510 | + if (entry->linked && preempt) | ||
33511 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
33512 | + entry->linked->comm, entry->linked->pid); | ||
33513 | + | ||
33514 | + | ||
33515 | + /* If a task blocks we have no choice but to reschedule. | ||
33516 | + */ | ||
33517 | + if (blocks) | ||
33518 | + unlink(entry->scheduled); | ||
33519 | + | ||
33520 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
33521 | + * We need to make sure to update the link structure anyway in case | ||
33522 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
33523 | + * hurt. | ||
33524 | + */ | ||
33525 | + if (np && (out_of_time || preempt || sleep)) { | ||
33526 | + unlink(entry->scheduled); | ||
33527 | + request_exit_np(entry->scheduled); | ||
33528 | + } | ||
33529 | + | ||
33530 | + /* Any task that is preemptable and either exhausts its execution | ||
33531 | + * budget or wants to sleep completes. We may have to reschedule after | ||
33532 | + * this. Don't do a job completion if we block (can't have timers running | ||
33533 | + * for blocked jobs). Preemption go first for the same reason. | ||
33534 | + */ | ||
33535 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
33536 | + job_completion(entry->scheduled, !sleep); | ||
33537 | + | ||
33538 | + /* Link pending task if we became unlinked. | ||
33539 | + */ | ||
33540 | + if (!entry->linked) | ||
33541 | + link_task_to_cpu(__take_ready(&gsnedf), entry); | ||
33542 | + | ||
33543 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
33544 | + * If linked is different from scheduled, then select linked as next. | ||
33545 | + */ | ||
33546 | + if ((!np || blocks) && | ||
33547 | + entry->linked != entry->scheduled) { | ||
33548 | + /* Schedule a linked job? */ | ||
33549 | + if (entry->linked) { | ||
33550 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
33551 | + next = entry->linked; | ||
33552 | + } | ||
33553 | + if (entry->scheduled) { | ||
33554 | + /* not gonna be scheduled soon */ | ||
33555 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
33556 | + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
33557 | + } | ||
33558 | + } else | ||
33559 | + /* Only override Linux scheduler if we have a real-time task | ||
33560 | + * scheduled that needs to continue. | ||
33561 | + */ | ||
33562 | + if (exists) | ||
33563 | + next = prev; | ||
33564 | + | ||
33565 | + spin_unlock(&gsnedf_lock); | ||
33566 | + | ||
33567 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
33568 | + TRACE("gsnedf_lock released, next=0x%p\n", next); | ||
33569 | + | ||
33570 | + if (next) | ||
33571 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
33572 | + else if (exists && !next) | ||
33573 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
33574 | +#endif | ||
33575 | + | ||
33576 | + | ||
33577 | + return next; | ||
33578 | +} | ||
33579 | + | ||
33580 | + | ||
33581 | +/* _finish_switch - we just finished the switch away from prev | ||
33582 | + */ | ||
33583 | +static void gsnedf_finish_switch(struct task_struct *prev) | ||
33584 | +{ | ||
33585 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
33586 | + | ||
33587 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
33588 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
33589 | + TRACE_TASK(prev, "switched away from\n"); | ||
33590 | +#endif | ||
33591 | +} | ||
33592 | + | ||
33593 | + | ||
33594 | +/* Prepare a task for running in RT mode | ||
33595 | + */ | ||
33596 | +static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
33597 | +{ | ||
33598 | + unsigned long flags; | ||
33599 | + cpu_entry_t* entry; | ||
33600 | + | ||
33601 | + TRACE("gsn edf: task new %d\n", t->pid); | ||
33602 | + | ||
33603 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
33604 | + | ||
33605 | + /* setup job params */ | ||
33606 | + release_at(t, litmus_clock()); | ||
33607 | + | ||
33608 | + if (running) { | ||
33609 | + entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); | ||
33610 | + BUG_ON(entry->scheduled); | ||
33611 | + | ||
33612 | + if (entry->cpu != gsnedf.release_master) { | ||
33613 | + entry->scheduled = t; | ||
33614 | + tsk_rt(t)->scheduled_on = task_cpu(t); | ||
33615 | + } else { | ||
33616 | + /* do not schedule on release master */ | ||
33617 | + preempt(entry); /* force resched */ | ||
33618 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
33619 | + } | ||
33620 | + } else { | ||
33621 | + t->rt_param.scheduled_on = NO_CPU; | ||
33622 | + } | ||
33623 | + t->rt_param.linked_on = NO_CPU; | ||
33624 | + | ||
33625 | + gsnedf_job_arrival(t); | ||
33626 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
33627 | +} | ||
33628 | + | ||
33629 | +static void gsnedf_task_wake_up(struct task_struct *task) | ||
33630 | +{ | ||
33631 | + unsigned long flags; | ||
33632 | + lt_t now; | ||
33633 | + | ||
33634 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
33635 | + | ||
33636 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
33637 | + /* We need to take suspensions because of semaphores into | ||
33638 | + * account! If a job resumes after being suspended due to acquiring | ||
33639 | + * a semaphore, it should never be treated as a new job release. | ||
33640 | + */ | ||
33641 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
33642 | + set_rt_flags(task, RT_F_RUNNING); | ||
33643 | + } else { | ||
33644 | + now = litmus_clock(); | ||
33645 | + if (is_tardy(task, now)) { | ||
33646 | + /* new sporadic release */ | ||
33647 | + release_at(task, now); | ||
33648 | + sched_trace_task_release(task); | ||
33649 | + } | ||
33650 | + else { | ||
33651 | + if (task->rt.time_slice) { | ||
33652 | + /* came back in time before deadline | ||
33653 | + */ | ||
33654 | + set_rt_flags(task, RT_F_RUNNING); | ||
33655 | + } | ||
33656 | + } | ||
33657 | + } | ||
33658 | + gsnedf_job_arrival(task); | ||
33659 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
33660 | +} | ||
33661 | + | ||
33662 | +static void gsnedf_task_block(struct task_struct *t) | ||
33663 | +{ | ||
33664 | + unsigned long flags; | ||
33665 | + | ||
33666 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
33667 | + | ||
33668 | + /* unlink if necessary */ | ||
33669 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
33670 | + unlink(t); | ||
33671 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
33672 | + | ||
33673 | + BUG_ON(!is_realtime(t)); | ||
33674 | +} | ||
33675 | + | ||
33676 | + | ||
33677 | +static void gsnedf_task_exit(struct task_struct * t) | ||
33678 | +{ | ||
33679 | + unsigned long flags; | ||
33680 | + | ||
33681 | + /* unlink if necessary */ | ||
33682 | + spin_lock_irqsave(&gsnedf_lock, flags); | ||
33683 | + unlink(t); | ||
33684 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
33685 | + gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
33686 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
33687 | + } | ||
33688 | + spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
33689 | + | ||
33690 | + BUG_ON(!is_realtime(t)); | ||
33691 | + TRACE_TASK(t, "RIP\n"); | ||
33692 | +} | ||
33693 | + | ||
33694 | +#ifdef CONFIG_FMLP | ||
33695 | + | ||
33696 | +/* Update the queue position of a task that got its priority boosted via | ||
33697 | + * priority inheritance. */ | ||
33698 | +static void update_queue_position(struct task_struct *holder) | ||
33699 | +{ | ||
33700 | + /* We don't know whether holder is in the ready queue. It should be, but | ||
33701 | + * on a budget overrun it may already be in a release queue. Hence, | ||
33702 | + * calling unlink() is not possible since it assumes that the task is | ||
33703 | + * not in a release queue. However, we can safely check whether | ||
33704 | + * sem->holder is currently in a queue or scheduled after locking both | ||
33705 | + * the release and the ready queue lock. */ | ||
33706 | + | ||
33707 | + /* Assumption: caller holds gsnedf_lock */ | ||
33708 | + | ||
33709 | + int check_preempt = 0; | ||
33710 | + | ||
33711 | + if (tsk_rt(holder)->linked_on != NO_CPU) { | ||
33712 | + TRACE_TASK(holder, "%s: linked on %d\n", | ||
33713 | + __FUNCTION__, tsk_rt(holder)->linked_on); | ||
33714 | + /* Holder is scheduled; need to re-order CPUs. | ||
33715 | + * We can't use heap_decrease() here since | ||
33716 | + * the cpu_heap is ordered in reverse direction, so | ||
33717 | + * it is actually an increase. */ | ||
33718 | + bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | ||
33719 | + gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | ||
33720 | + bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | ||
33721 | + gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | ||
33722 | + } else { | ||
33723 | + /* holder may be queued: first stop queue changes */ | ||
33724 | + spin_lock(&gsnedf.release_lock); | ||
33725 | + if (is_queued(holder)) { | ||
33726 | + TRACE_TASK(holder, "%s: is queued\n", | ||
33727 | + __FUNCTION__); | ||
33728 | + /* We need to update the position | ||
33729 | + * of holder in some heap. Note that this | ||
33730 | + * may be a release heap. */ | ||
33731 | + check_preempt = | ||
33732 | + !bheap_decrease(edf_ready_order, | ||
33733 | + tsk_rt(holder)->heap_node); | ||
33734 | + } else { | ||
33735 | + /* Nothing to do: if it is not queued and not linked | ||
33736 | + * then it is currently being moved by other code | ||
33737 | + * (e.g., a timer interrupt handler) that will use the | ||
33738 | + * correct priority when enqueuing the task. */ | ||
33739 | + TRACE_TASK(holder, "%s: is NOT queued => Done.\n", | ||
33740 | + __FUNCTION__); | ||
33741 | + } | ||
33742 | + spin_unlock(&gsnedf.release_lock); | ||
33743 | + | ||
33744 | + /* If holder was enqueued in a release heap, then the following | ||
33745 | + * preemption check is pointless, but we can't easily detect | ||
33746 | + * that case. If you want to fix this, then consider that | ||
33747 | + * simply adding a state flag requires O(n) time to update when | ||
33748 | + * releasing n tasks, which conflicts with the goal to have | ||
33749 | + * O(log n) merges. */ | ||
33750 | + if (check_preempt) { | ||
33751 | + /* heap_decrease() hit the top level of the heap: make | ||
33752 | + * sure preemption checks get the right task, not the | ||
33753 | + * potentially stale cache. */ | ||
33754 | + bheap_uncache_min(edf_ready_order, | ||
33755 | + &gsnedf.ready_queue); | ||
33756 | + check_for_preemptions(); | ||
33757 | + } | ||
33758 | + } | ||
33759 | +} | ||
33760 | + | ||
33761 | +static long gsnedf_pi_block(struct pi_semaphore *sem, | ||
33762 | + struct task_struct *new_waiter) | ||
33763 | +{ | ||
33764 | + /* This callback has to handle the situation where a new waiter is | ||
33765 | + * added to the wait queue of the semaphore. | ||
33766 | + * | ||
33767 | + * We must check if it has a higher priority than the currently | ||
33768 | + * highest-priority task, and then potentially reschedule. | ||
33769 | + */ | ||
33770 | + | ||
33771 | + BUG_ON(!new_waiter); | ||
33772 | + | ||
33773 | + if (edf_higher_prio(new_waiter, sem->hp.task)) { | ||
33774 | + TRACE_TASK(new_waiter, " boosts priority via %p\n", sem); | ||
33775 | + /* called with IRQs disabled */ | ||
33776 | + spin_lock(&gsnedf_lock); | ||
33777 | + /* store new highest-priority task */ | ||
33778 | + sem->hp.task = new_waiter; | ||
33779 | + if (sem->holder) { | ||
33780 | + TRACE_TASK(sem->holder, | ||
33781 | + " holds %p and will inherit from %s/%d\n", | ||
33782 | + sem, | ||
33783 | + new_waiter->comm, new_waiter->pid); | ||
33784 | + /* let holder inherit */ | ||
33785 | + sem->holder->rt_param.inh_task = new_waiter; | ||
33786 | + update_queue_position(sem->holder); | ||
33787 | + } | ||
33788 | + spin_unlock(&gsnedf_lock); | ||
33789 | + } | ||
33790 | + | ||
33791 | + return 0; | ||
33792 | +} | ||
33793 | + | ||
33794 | +static long gsnedf_inherit_priority(struct pi_semaphore *sem, | ||
33795 | + struct task_struct *new_owner) | ||
33796 | +{ | ||
33797 | + /* We don't need to acquire the gsnedf_lock since at the time of this | ||
33798 | + * call new_owner isn't actually scheduled yet (it's still sleeping) | ||
33799 | + * and since the calling function already holds sem->wait.lock, which | ||
33800 | + * prevents concurrent sem->hp.task changes. | ||
33801 | + */ | ||
33802 | + | ||
33803 | + if (sem->hp.task && sem->hp.task != new_owner) { | ||
33804 | + new_owner->rt_param.inh_task = sem->hp.task; | ||
33805 | + TRACE_TASK(new_owner, "inherited priority from %s/%d\n", | ||
33806 | + sem->hp.task->comm, sem->hp.task->pid); | ||
33807 | + } else | ||
33808 | + TRACE_TASK(new_owner, | ||
33809 | + "cannot inherit priority, " | ||
33810 | + "no higher priority job waits.\n"); | ||
33811 | + return 0; | ||
33812 | +} | ||
33813 | + | ||
33814 | +/* This function is called on a semaphore release, and assumes that | ||
33815 | + * the current task is also the semaphore holder. | ||
33816 | + */ | ||
33817 | +static long gsnedf_return_priority(struct pi_semaphore *sem) | ||
33818 | +{ | ||
33819 | + struct task_struct* t = current; | ||
33820 | + int ret = 0; | ||
33821 | + | ||
33822 | + /* Find new highest-priority semaphore task | ||
33823 | + * if holder task is the current hp.task. | ||
33824 | + * | ||
33825 | + * Calling function holds sem->wait.lock. | ||
33826 | + */ | ||
33827 | + if (t == sem->hp.task) | ||
33828 | + edf_set_hp_task(sem); | ||
33829 | + | ||
33830 | + TRACE_CUR("gsnedf_return_priority for lock %p\n", sem); | ||
33831 | + | ||
33832 | + if (t->rt_param.inh_task) { | ||
33833 | + /* interrupts already disabled by PI code */ | ||
33834 | + spin_lock(&gsnedf_lock); | ||
33835 | + | ||
33836 | + /* Reset inh_task to NULL. */ | ||
33837 | + t->rt_param.inh_task = NULL; | ||
33838 | + | ||
33839 | + /* Check if rescheduling is necessary */ | ||
33840 | + unlink(t); | ||
33841 | + gsnedf_job_arrival(t); | ||
33842 | + spin_unlock(&gsnedf_lock); | ||
33843 | + } | ||
33844 | + | ||
33845 | + return ret; | ||
33846 | +} | ||
33847 | + | ||
33848 | +#endif | ||
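[Editor's note] Taken together, the FMLP callbacks above implement straightforward priority inheritance: gsnedf_pi_block() boosts the lock holder to the priority of the highest-priority waiter, and gsnedf_return_priority() drops the boost on release. A tiny standalone model of the resulting effective priority, using deadlines only (the names and the effective_deadline() helper are assumptions for illustration, not LITMUS API):

#include <stdio.h>

/* Effective EDF priority under inheritance: the earlier of the task's own
 * deadline and any deadline inherited from a blocked waiter (0 = none). */
struct task { long long own_deadline; long long inherited_deadline; };

static long long effective_deadline(const struct task *t)
{
    if (t->inherited_deadline && t->inherited_deadline < t->own_deadline)
        return t->inherited_deadline;
    return t->own_deadline;
}

int main(void)
{
    struct task holder = { 1000, 0 };

    printf("no waiter:        %lld\n", effective_deadline(&holder));
    holder.inherited_deadline = 200;  /* pi_block(): urgent waiter boosts holder */
    printf("holding the lock: %lld\n", effective_deadline(&holder));
    holder.inherited_deadline = 0;    /* return_priority(): boost is dropped */
    printf("after release:    %lld\n", effective_deadline(&holder));
    return 0;
}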
33849 | + | ||
33850 | +static long gsnedf_admit_task(struct task_struct* tsk) | ||
33851 | +{ | ||
33852 | + return 0; | ||
33853 | +} | ||
33854 | + | ||
33855 | +static long gsnedf_activate_plugin(void) | ||
33856 | +{ | ||
33857 | + int cpu; | ||
33858 | + cpu_entry_t *entry; | ||
33859 | + | ||
33860 | + bheap_init(&gsnedf_cpu_heap); | ||
33861 | + gsnedf.release_master = atomic_read(&release_master_cpu); | ||
33862 | + | ||
33863 | + for_each_online_cpu(cpu) { | ||
33864 | + entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
33865 | + bheap_node_init(&entry->hn, entry); | ||
33866 | + atomic_set(&entry->will_schedule, 0); | ||
33867 | + entry->linked = NULL; | ||
33868 | + entry->scheduled = NULL; | ||
33869 | + if (cpu != gsnedf.release_master) { | ||
33870 | + TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu); | ||
33871 | + update_cpu_position(entry); | ||
33872 | + } else { | ||
33873 | + TRACE("GSN-EDF: CPU %d is release master.\n", cpu); | ||
33874 | + } | ||
33875 | + } | ||
33876 | + return 0; | ||
33877 | +} | ||
33878 | + | ||
33879 | +/* Plugin object */ | ||
33880 | +static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | ||
33881 | + .plugin_name = "GSN-EDF", | ||
33882 | + .finish_switch = gsnedf_finish_switch, | ||
33883 | + .tick = gsnedf_tick, | ||
33884 | + .task_new = gsnedf_task_new, | ||
33885 | + .complete_job = complete_job, | ||
33886 | + .task_exit = gsnedf_task_exit, | ||
33887 | + .schedule = gsnedf_schedule, | ||
33888 | + .task_wake_up = gsnedf_task_wake_up, | ||
33889 | + .task_block = gsnedf_task_block, | ||
33890 | +#ifdef CONFIG_FMLP | ||
33891 | + .fmlp_active = 1, | ||
33892 | + .pi_block = gsnedf_pi_block, | ||
33893 | + .inherit_priority = gsnedf_inherit_priority, | ||
33894 | + .return_priority = gsnedf_return_priority, | ||
33895 | +#endif | ||
33896 | + .admit_task = gsnedf_admit_task, | ||
33897 | + .activate_plugin = gsnedf_activate_plugin, | ||
33898 | +}; | ||
33899 | + | ||
33900 | + | ||
33901 | +static int __init init_gsn_edf(void) | ||
33902 | +{ | ||
33903 | + int cpu; | ||
33904 | + cpu_entry_t *entry; | ||
33905 | + | ||
33906 | + bheap_init(&gsnedf_cpu_heap); | ||
33907 | + /* initialize CPU state */ | ||
33908 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
33909 | + entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
33910 | + gsnedf_cpus[cpu] = entry; | ||
33911 | + atomic_set(&entry->will_schedule, 0); | ||
33912 | + entry->cpu = cpu; | ||
33913 | + entry->hn = &gsnedf_heap_node[cpu]; | ||
33914 | + bheap_node_init(&entry->hn, entry); | ||
33915 | + } | ||
33916 | + edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | ||
33917 | + return register_sched_plugin(&gsn_edf_plugin); | ||
33918 | +} | ||
33919 | + | ||
33920 | + | ||
33921 | +module_init(init_gsn_edf); | ||
33922 | diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c | ||
33923 | new file mode 100644 | ||
33924 | index 0000000..c1fc774 | ||
33925 | --- /dev/null | ||
33926 | +++ b/litmus/sched_litmus.c | ||
33927 | @@ -0,0 +1,318 @@ | ||
33928 | +/* This file is included from kernel/sched.c */ | ||
33929 | + | ||
33930 | +#include <litmus/litmus.h> | ||
33931 | +#include <litmus/sched_plugin.h> | ||
33932 | + | ||
33933 | +static void update_time_litmus(struct rq *rq, struct task_struct *p) | ||
33934 | +{ | ||
33935 | + u64 delta = rq->clock - p->se.exec_start; | ||
33936 | + if (unlikely((s64)delta < 0)) | ||
33937 | + delta = 0; | ||
33938 | + /* per job counter */ | ||
33939 | + p->rt_param.job_params.exec_time += delta; | ||
33940 | + /* task counter */ | ||
33941 | + p->se.sum_exec_runtime += delta; | ||
33942 | + /* sched_clock() */ | ||
33943 | + p->se.exec_start = rq->clock; | ||
33944 | + cpuacct_charge(p, delta); | ||
33945 | +} | ||
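[Editor's note] update_time_litmus() charges the time elapsed since the task was last dispatched to both its per-job execution counter and its lifetime runtime, clamping negative deltas that can arise from per-CPU clock skew. A rough user-space analogue of the same bookkeeping, with CLOCK_MONOTONIC standing in for rq->clock (illustrative sketch only, hypothetical names):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct acct {
    uint64_t exec_start;        /* ns timestamp of last dispatch */
    uint64_t job_exec_time;     /* per-job counter (reset at each release) */
    uint64_t sum_exec_runtime;  /* lifetime counter */
};

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Same pattern as update_time_litmus(): charge the delta, restart the clock. */
static void charge(struct acct *a)
{
    uint64_t now = now_ns();
    int64_t delta = (int64_t)(now - a->exec_start);
    if (delta < 0)
        delta = 0;              /* guard against the clock going "backwards" */
    a->job_exec_time += (uint64_t)delta;
    a->sum_exec_runtime += (uint64_t)delta;
    a->exec_start = now;
}

int main(void)
{
    struct acct a = { now_ns(), 0, 0 };
    for (volatile long i = 0; i < 10000000; i++)
        ;                       /* burn some CPU */
    charge(&a);
    printf("job: %llu ns, total: %llu ns\n",
           (unsigned long long)a.job_exec_time,
           (unsigned long long)a.sum_exec_runtime);
    return 0;
}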
33946 | + | ||
33947 | +static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
33948 | +static void double_rq_unlock(struct rq *rq1, struct rq *rq2); | ||
33949 | + | ||
33950 | +/* | ||
33951 | + * litmus_tick gets called by scheduler_tick() with HZ freq | ||
33952 | + * Interrupts are disabled | ||
33953 | + */ | ||
33954 | +static void litmus_tick(struct rq *rq, struct task_struct *p) | ||
33955 | +{ | ||
33956 | + TS_PLUGIN_TICK_START; | ||
33957 | + | ||
33958 | + if (is_realtime(p)) | ||
33959 | + update_time_litmus(rq, p); | ||
33960 | + | ||
33961 | + /* plugin tick */ | ||
33962 | + litmus->tick(p); | ||
33963 | + | ||
33964 | + return; | ||
33965 | +} | ||
33966 | + | ||
33967 | +static struct task_struct * | ||
33968 | +litmus_schedule(struct rq *rq, struct task_struct *prev) | ||
33969 | +{ | ||
33970 | + struct rq* other_rq; | ||
33971 | + struct task_struct *next; | ||
33972 | + | ||
33973 | + long was_running; | ||
33974 | + lt_t _maybe_deadlock = 0; | ||
33975 | + | ||
33976 | + /* let the plugin schedule */ | ||
33977 | + next = litmus->schedule(prev); | ||
33978 | + | ||
33979 | + /* check if a global plugin pulled a task from a different RQ */ | ||
33980 | + if (next && task_rq(next) != rq) { | ||
33981 | + /* we need to migrate the task */ | ||
33982 | + other_rq = task_rq(next); | ||
33983 | + TRACE_TASK(next, "migrate from %d\n", other_rq->cpu); | ||
33984 | + | ||
33985 | + /* while we drop the lock, the prev task could change its | ||
33986 | + * state | ||
33987 | + */ | ||
33988 | + was_running = is_running(prev); | ||
33989 | + mb(); | ||
33990 | + spin_unlock(&rq->lock); | ||
33991 | + | ||
33992 | + /* Don't race with a concurrent switch. This could deadlock in | ||
33993 | + * the case of cross or circular migrations. It's the job of | ||
33994 | + * the plugin to make sure that doesn't happen. | ||
33995 | + */ | ||
33996 | + TRACE_TASK(next, "stack_in_use=%d\n", | ||
33997 | + next->rt_param.stack_in_use); | ||
33998 | + if (next->rt_param.stack_in_use != NO_CPU) { | ||
33999 | + TRACE_TASK(next, "waiting to deschedule\n"); | ||
34000 | + _maybe_deadlock = litmus_clock(); | ||
34001 | + } | ||
34002 | + while (next->rt_param.stack_in_use != NO_CPU) { | ||
34003 | + cpu_relax(); | ||
34004 | + mb(); | ||
34005 | + if (next->rt_param.stack_in_use == NO_CPU) | ||
34006 | + TRACE_TASK(next,"descheduled. Proceeding.\n"); | ||
34007 | + | ||
34008 | + if (lt_before(_maybe_deadlock + 10000000, | ||
34009 | + litmus_clock())) { | ||
34010 | + /* We've been spinning for 10ms. | ||
34011 | + * Something can't be right! | ||
34012 | + * Let's abandon the task and bail out; at least | ||
34013 | + * we will have debug info instead of a hard | ||
34014 | + * deadlock. | ||
34015 | + */ | ||
34016 | + TRACE_TASK(next,"stack too long in use. " | ||
34017 | + "Deadlock?\n"); | ||
34018 | + next = NULL; | ||
34019 | + | ||
34020 | + /* bail out */ | ||
34021 | + spin_lock(&rq->lock); | ||
34022 | + return next; | ||
34023 | + } | ||
34024 | + } | ||
34025 | +#ifdef __ARCH_WANT_UNLOCKED_CTXSW | ||
34026 | + if (next->oncpu) | ||
34027 | + TRACE_TASK(next, "waiting for !oncpu"); | ||
34028 | + while (next->oncpu) { | ||
34029 | + cpu_relax(); | ||
34030 | + mb(); | ||
34031 | + } | ||
34032 | +#endif | ||
34033 | + double_rq_lock(rq, other_rq); | ||
34034 | + mb(); | ||
34035 | + if (is_realtime(prev) && is_running(prev) != was_running) { | ||
34036 | + TRACE_TASK(prev, | ||
34037 | + "state changed while we dropped" | ||
34038 | + " the lock: is_running=%d, was_running=%d\n", | ||
34039 | + is_running(prev), was_running); | ||
34040 | + if (is_running(prev) && !was_running) { | ||
34041 | + /* prev task became unblocked | ||
34042 | + * we need to simulate normal sequence of events | ||
34043 | + * to scheduler plugins. | ||
34044 | + */ | ||
34045 | + litmus->task_block(prev); | ||
34046 | + litmus->task_wake_up(prev); | ||
34047 | + } | ||
34048 | + } | ||
34049 | + | ||
34050 | + set_task_cpu(next, smp_processor_id()); | ||
34051 | + | ||
34052 | + /* DEBUG: now that we have the lock we need to make sure a | ||
34053 | + * couple of things still hold: | ||
34054 | + * - it is still a real-time task | ||
34055 | + * - it is still runnable (could have been stopped) | ||
34056 | + * If either is violated, then the active plugin is | ||
34057 | + * doing something wrong. | ||
34058 | + */ | ||
34059 | + if (!is_realtime(next) || !is_running(next)) { | ||
34060 | + /* BAD BAD BAD */ | ||
34061 | + TRACE_TASK(next,"BAD: migration invariant FAILED: " | ||
34062 | + "rt=%d running=%d\n", | ||
34063 | + is_realtime(next), | ||
34064 | + is_running(next)); | ||
34065 | + /* drop the task */ | ||
34066 | + next = NULL; | ||
34067 | + } | ||
34068 | + /* release the other CPU's runqueue, but keep ours */ | ||
34069 | + spin_unlock(&other_rq->lock); | ||
34070 | + } | ||
34071 | + if (next) { | ||
34072 | + next->rt_param.stack_in_use = rq->cpu; | ||
34073 | + next->se.exec_start = rq->clock; | ||
34074 | + } | ||
34075 | + | ||
34076 | + return next; | ||
34077 | +} | ||
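[Editor's note] The migration path in litmus_schedule() spins until the incoming task's stack is free on its previous CPU, but abandons the task after roughly 10 ms (the 10000000 ns check) so that a plugin bug produces trace output instead of a hard deadlock. The same spin-with-timeout pattern, reduced to a standalone sketch with an atomic flag in place of stack_in_use and CLOCK_MONOTONIC in place of litmus_clock() (hypothetical names, not the kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>
#include <time.h>

static uint64_t clock_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Spin until *busy clears, but bail out after a 10 ms budget, mirroring the
 * "stack too long in use" watchdog. Returns 1 on success, 0 if we gave up. */
static int wait_for_stack(atomic_int *busy)
{
    uint64_t deadline = clock_ns() + 10000000ull;   /* 10 ms */

    while (atomic_load(busy)) {
        if (clock_ns() > deadline)
            return 0;           /* abandon the task, avoid a hard deadlock */
    }
    return 1;
}

int main(void)
{
    atomic_int busy = 1;        /* never cleared: force the timeout path */
    printf("wait_for_stack: %s\n",
           wait_for_stack(&busy) ? "descheduled" : "gave up (possible deadlock)");
    return 0;
}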
34078 | + | ||
34079 | +static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | ||
34080 | + int wakeup) | ||
34081 | +{ | ||
34082 | + if (wakeup) { | ||
34083 | + sched_trace_task_resume(p); | ||
34084 | + tsk_rt(p)->present = 1; | ||
34085 | + litmus->task_wake_up(p); | ||
34086 | + | ||
34087 | + rq->litmus.nr_running++; | ||
34088 | + } else | ||
34089 | + TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); | ||
34090 | +} | ||
34091 | + | ||
34092 | +static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep) | ||
34093 | +{ | ||
34094 | + if (sleep) { | ||
34095 | + litmus->task_block(p); | ||
34096 | + tsk_rt(p)->present = 0; | ||
34097 | + sched_trace_task_block(p); | ||
34098 | + | ||
34099 | + rq->litmus.nr_running--; | ||
34100 | + } else | ||
34101 | + TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); | ||
34102 | +} | ||
34103 | + | ||
34104 | +static void yield_task_litmus(struct rq *rq) | ||
34105 | +{ | ||
34106 | + BUG_ON(rq->curr != current); | ||
34107 | + /* sched_yield() is called to trigger delayed preemptions. | ||
34108 | + * Thus, mark the current task as needing to be rescheduled. | ||
34109 | + * This will cause the scheduler plugin to be invoked, which can | ||
34110 | + * then determine if a preemption is still required. | ||
34111 | + */ | ||
34112 | + clear_exit_np(current); | ||
34113 | + set_tsk_need_resched(current); | ||
34114 | +} | ||
34115 | + | ||
34116 | +/* Plugins are responsible for this. | ||
34117 | + */ | ||
34118 | +static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags) | ||
34119 | +{ | ||
34120 | +} | ||
34121 | + | ||
34122 | +static void put_prev_task_litmus(struct rq *rq, struct task_struct *p) | ||
34123 | +{ | ||
34124 | +} | ||
34125 | + | ||
34126 | +static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev) | ||
34127 | +{ | ||
34128 | + update_time_litmus(rq, prev); | ||
34129 | + if (!is_running(prev)) | ||
34130 | + tsk_rt(prev)->present = 0; | ||
34131 | +} | ||
34132 | + | ||
34133 | +/* pick_next_task_litmus() - the sched_class hook that invokes litmus_schedule() | ||
34134 | + * | ||
34135 | + * Returns the next task to be scheduled. | ||
34136 | + */ | ||
34137 | +static struct task_struct *pick_next_task_litmus(struct rq *rq) | ||
34138 | +{ | ||
34139 | + /* get the to-be-switched-out task (prev) */ | ||
34140 | + struct task_struct *prev = rq->litmus.prev; | ||
34141 | + struct task_struct *next; | ||
34142 | + | ||
34143 | + /* if not called from schedule() but from somewhere | ||
34144 | + * else (e.g., migration), return now! | ||
34145 | + */ | ||
34146 | + if (!rq->litmus.prev) | ||
34147 | + return NULL; | ||
34148 | + | ||
34149 | + rq->litmus.prev = NULL; | ||
34150 | + | ||
34151 | + TS_PLUGIN_SCHED_START; | ||
34152 | + next = litmus_schedule(rq, prev); | ||
34153 | + TS_PLUGIN_SCHED_END; | ||
34154 | + | ||
34155 | + return next; | ||
34156 | +} | ||
34157 | + | ||
34158 | +static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued) | ||
34159 | +{ | ||
34160 | + /* nothing to do; tick related tasks are done by litmus_tick() */ | ||
34161 | + return; | ||
34162 | +} | ||
34163 | + | ||
34164 | +static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running) | ||
34165 | +{ | ||
34166 | +} | ||
34167 | + | ||
34168 | +static void prio_changed_litmus(struct rq *rq, struct task_struct *p, | ||
34169 | + int oldprio, int running) | ||
34170 | +{ | ||
34171 | +} | ||
34172 | + | ||
34173 | +unsigned int get_rr_interval_litmus(struct task_struct *p) | ||
34174 | +{ | ||
34175 | + /* return infinity */ | ||
34176 | + return 0; | ||
34177 | +} | ||
34178 | + | ||
34179 | +/* This is called when a task becomes a real-time task, either due to a SCHED_* | ||
34180 | + * class transition or due to PI mutex inheritance. We don't handle Linux PI | ||
34181 | + * mutex inheritance yet (and probably never will). Use LITMUS-provided | ||
34182 | + * synchronization primitives instead. | ||
34183 | + */ | ||
34184 | +static void set_curr_task_litmus(struct rq *rq) | ||
34185 | +{ | ||
34186 | + rq->curr->se.exec_start = rq->clock; | ||
34187 | +} | ||
34188 | + | ||
34189 | + | ||
34190 | +#ifdef CONFIG_SMP | ||
34191 | +/* execve tries to rebalance the task in this scheduling domain */ | ||
34192 | +static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags) | ||
34193 | +{ | ||
34194 | + /* Preemption is already disabled. | ||
34195 | + * We don't want to change the CPU here. | ||
34196 | + */ | ||
34197 | + return smp_processor_id(); | ||
34198 | +} | ||
34199 | + | ||
34200 | +/* we don't repartition at runtime */ | ||
34201 | + | ||
34202 | +static unsigned long | ||
34203 | +load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
34204 | + unsigned long max_load_move, | ||
34205 | + struct sched_domain *sd, enum cpu_idle_type idle, | ||
34206 | + int *all_pinned, int *this_best_prio) | ||
34207 | +{ | ||
34208 | + return 0; | ||
34209 | +} | ||
34210 | + | ||
34211 | +static int | ||
34212 | +move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest, | ||
34213 | + struct sched_domain *sd, enum cpu_idle_type idle) | ||
34214 | +{ | ||
34215 | + return 0; | ||
34216 | +} | ||
34217 | +#endif | ||
34218 | + | ||
34219 | +const struct sched_class litmus_sched_class = { | ||
34220 | + .next = &rt_sched_class, | ||
34221 | + .enqueue_task = enqueue_task_litmus, | ||
34222 | + .dequeue_task = dequeue_task_litmus, | ||
34223 | + .yield_task = yield_task_litmus, | ||
34224 | + | ||
34225 | + .check_preempt_curr = check_preempt_curr_litmus, | ||
34226 | + | ||
34227 | + .pick_next_task = pick_next_task_litmus, | ||
34228 | + .put_prev_task = put_prev_task_litmus, | ||
34229 | + | ||
34230 | +#ifdef CONFIG_SMP | ||
34231 | + .select_task_rq = select_task_rq_litmus, | ||
34232 | + | ||
34233 | + .load_balance = load_balance_litmus, | ||
34234 | + .move_one_task = move_one_task_litmus, | ||
34235 | + .pre_schedule = pre_schedule_litmus, | ||
34236 | +#endif | ||
34237 | + | ||
34238 | + .set_curr_task = set_curr_task_litmus, | ||
34239 | + .task_tick = task_tick_litmus, | ||
34240 | + | ||
34241 | + .get_rr_interval = get_rr_interval_litmus, | ||
34242 | + | ||
34243 | + .prio_changed = prio_changed_litmus, | ||
34244 | + .switched_to = switched_to_litmus, | ||
34245 | +}; | ||
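/*
 * Editor's sketch -- not part of the patch. Assuming the stock 2.6.32 core
 * scheduler, this is roughly how the class list that litmus_sched_class is
 * linked into gets walked: the core starts at the highest-priority class and
 * takes the first runnable task it finds. Because .next points at
 * rt_sched_class, LITMUS tasks are considered before SCHED_FIFO/RR and
 * SCHED_NORMAL tasks. The function name is hypothetical; the real loop lives
 * in kernel/sched.c.
 */
static struct task_struct *pick_next_task_sketch(struct rq *rq)
{
	const struct sched_class *class = &litmus_sched_class;
	struct task_struct *p;

	for (; class; class = class->next) {
		p = class->pick_next_task(rq);
		if (p)
			return p;	/* first class with work wins */
	}
	return NULL;	/* no runnable task at all */
}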
34246 | diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c | ||
34247 | new file mode 100644 | ||
34248 | index 0000000..2ea3922 | ||
34249 | --- /dev/null | ||
34250 | +++ b/litmus/sched_pfair.c | ||
34251 | @@ -0,0 +1,896 @@ | ||
34252 | +/* | ||
34253 | + * kernel/sched_pfair.c | ||
34254 | + * | ||
34255 | + * Implementation of the (global) Pfair scheduling algorithm. | ||
34256 | + * | ||
34257 | + */ | ||
34258 | + | ||
34259 | +#include <asm/div64.h> | ||
34260 | +#include <linux/delay.h> | ||
34261 | +#include <linux/module.h> | ||
34262 | +#include <linux/spinlock.h> | ||
34263 | +#include <linux/percpu.h> | ||
34264 | +#include <linux/sched.h> | ||
34265 | +#include <linux/list.h> | ||
34266 | + | ||
34267 | +#include <litmus/litmus.h> | ||
34268 | +#include <litmus/jobs.h> | ||
34269 | +#include <litmus/rt_domain.h> | ||
34270 | +#include <litmus/sched_plugin.h> | ||
34271 | +#include <litmus/sched_trace.h> | ||
34272 | + | ||
34273 | +#include <litmus/bheap.h> | ||
34274 | + | ||
34275 | +struct subtask { | ||
34276 | + /* measured in quanta relative to job release */ | ||
34277 | + quanta_t release; | ||
34278 | + quanta_t deadline; | ||
34279 | + quanta_t overlap; /* called "b bit" by PD^2 */ | ||
34280 | + quanta_t group_deadline; | ||
34281 | +}; | ||
34282 | + | ||
34283 | +struct pfair_param { | ||
34284 | + quanta_t quanta; /* number of subtasks */ | ||
34285 | + quanta_t cur; /* index of current subtask */ | ||
34286 | + | ||
34287 | + quanta_t release; /* in quanta */ | ||
34288 | + quanta_t period; /* in quanta */ | ||
34289 | + | ||
34290 | + quanta_t last_quantum; /* when scheduled last */ | ||
34291 | + int last_cpu; /* where scheduled last */ | ||
34292 | + | ||
34293 | + unsigned int sporadic_release; /* On wakeup, new sporadic release? */ | ||
34294 | + | ||
34295 | + struct subtask subtasks[0]; /* allocate together with pfair_param */ | ||
34296 | +}; | ||
34297 | + | ||
34298 | +#define tsk_pfair(tsk) ((tsk)->rt_param.pfair) | ||
34299 | + | ||
34300 | +struct pfair_state { | ||
34301 | + int cpu; | ||
34302 | + volatile quanta_t cur_tick; /* updated by the CPU that is advancing | ||
34303 | + * the time */ | ||
34304 | + volatile quanta_t local_tick; /* What tick is the local CPU currently | ||
34305 | + * executing? Updated only by the local | ||
34306 | + * CPU. In QEMU, this may lag behind the | ||
34307 | + * current tick. In a real system, with | ||
34308 | + * proper timers and aligned quanta, | ||
34309 | + * that should only be the | ||
34310 | + * case for a very short time after the | ||
34311 | + * time advanced. With staggered quanta, | ||
34312 | + * it will lag for the duration of the | ||
34313 | + * offset. | ||
34314 | + */ | ||
34315 | + | ||
34316 | + struct task_struct* linked; /* the task that should be executing */ | ||
34317 | + struct task_struct* local; /* the local copy of linked */ | ||
34318 | + struct task_struct* scheduled; /* what is actually scheduled */ | ||
34319 | + | ||
34320 | + unsigned long missed_quanta; | ||
34321 | + lt_t offset; /* stagger offset */ | ||
34322 | +}; | ||
34323 | + | ||
34324 | +/* Currently, we limit the maximum period of any task to 2000 quanta. | ||
34325 | + * The reason is that it makes the implementation easier since we do not | ||
34326 | + * need to reallocate the release wheel on task arrivals. | ||
34327 | + * In the future, the release wheel could be sized dynamically to lift this limit. | ||
34328 | + */ | ||
34329 | +#define PFAIR_MAX_PERIOD 2000 | ||
34330 | + | ||
34331 | +/* This is the release queue wheel. It is indexed by pfair_time % | ||
34332 | + * PFAIR_MAX_PERIOD. Each heap is ordered by PFAIR priority, so that it can be | ||
34333 | + * merged with the ready queue. | ||
34334 | + */ | ||
34335 | +static struct bheap release_queue[PFAIR_MAX_PERIOD]; | ||
34336 | + | ||
34337 | +DEFINE_PER_CPU(struct pfair_state, pfair_state); | ||
34338 | +struct pfair_state **pstate; /* shortcut */ | ||
34339 | + | ||
34340 | +static quanta_t pfair_time = 0; /* the "official" PFAIR clock */ | ||
34341 | +static quanta_t merge_time = 0; /* Updated after the release queue has been | ||
34342 | + * merged. Used by drop_all_references(). | ||
34343 | + */ | ||
34344 | + | ||
34345 | +static rt_domain_t pfair; | ||
34346 | + | ||
34347 | +/* The pfair_lock is used to serialize all scheduling events. | ||
34348 | + */ | ||
34349 | +#define pfair_lock pfair.ready_lock | ||
34350 | + | ||
34351 | +/* Enable for lots of trace info. | ||
34352 | + * #define PFAIR_DEBUG | ||
34353 | + */ | ||
34354 | + | ||
34355 | +#ifdef PFAIR_DEBUG | ||
34356 | +#define PTRACE_TASK(t, f, args...) TRACE_TASK(t, f, ## args) | ||
34357 | +#define PTRACE(f, args...) TRACE(f, ## args) | ||
34358 | +#else | ||
34359 | +#define PTRACE_TASK(t, f, args...) | ||
34360 | +#define PTRACE(f, args...) | ||
34361 | +#endif | ||
34362 | + | ||
34363 | +/* gcc will inline all of these accessor functions... */ | ||
34364 | +static struct subtask* cur_subtask(struct task_struct* t) | ||
34365 | +{ | ||
34366 | + return tsk_pfair(t)->subtasks + tsk_pfair(t)->cur; | ||
34367 | +} | ||
34368 | + | ||
34369 | +static quanta_t cur_deadline(struct task_struct* t) | ||
34370 | +{ | ||
34371 | + return cur_subtask(t)->deadline + tsk_pfair(t)->release; | ||
34372 | +} | ||
34373 | + | ||
34374 | + | ||
34375 | +static quanta_t cur_sub_release(struct task_struct* t) | ||
34376 | +{ | ||
34377 | + return cur_subtask(t)->release + tsk_pfair(t)->release; | ||
34378 | +} | ||
34379 | + | ||
34380 | +static quanta_t cur_release(struct task_struct* t) | ||
34381 | +{ | ||
34382 | +#ifdef EARLY_RELEASE | ||
34383 | + /* only the release of the first subtask counts when we early | ||
34384 | + * release */ | ||
34385 | + return tsk_pfair(t)->release; | ||
34386 | +#else | ||
34387 | + return cur_sub_release(t); | ||
34388 | +#endif | ||
34389 | +} | ||
34390 | + | ||
34391 | +static quanta_t cur_overlap(struct task_struct* t) | ||
34392 | +{ | ||
34393 | + return cur_subtask(t)->overlap; | ||
34394 | +} | ||
34395 | + | ||
34396 | +static quanta_t cur_group_deadline(struct task_struct* t) | ||
34397 | +{ | ||
34398 | + quanta_t gdl = cur_subtask(t)->group_deadline; | ||
34399 | + if (gdl) | ||
34400 | + return gdl + tsk_pfair(t)->release; | ||
34401 | + else | ||
34402 | + return gdl; | ||
34403 | +} | ||
34404 | + | ||
34405 | + | ||
34406 | +static int pfair_higher_prio(struct task_struct* first, | ||
34407 | + struct task_struct* second) | ||
34408 | +{ | ||
34409 | + return /* first task must exist */ | ||
34410 | + first && ( | ||
34411 | + /* Does the second task exist and is it a real-time task? If | ||
34412 | + * not, the first task (which is a RT task) has higher | ||
34413 | + * priority. | ||
34414 | + */ | ||
34415 | + !second || !is_realtime(second) || | ||
34416 | + | ||
34417 | + /* Is the (subtask) deadline of the first task earlier? | ||
34418 | + * Then it has higher priority. | ||
34419 | + */ | ||
34420 | + time_before(cur_deadline(first), cur_deadline(second)) || | ||
34421 | + | ||
34422 | + /* Do we have a deadline tie? | ||
34423 | + * Then break by B-bit. | ||
34424 | + */ | ||
34425 | + (cur_deadline(first) == cur_deadline(second) && | ||
34426 | + (cur_overlap(first) > cur_overlap(second) || | ||
34427 | + | ||
34428 | + /* Do we have a B-bit tie? | ||
34429 | + * Then break by group deadline. | ||
34430 | + */ | ||
34431 | + (cur_overlap(first) == cur_overlap(second) && | ||
34432 | + (time_after(cur_group_deadline(first), | ||
34433 | + cur_group_deadline(second)) || | ||
34434 | + | ||
34435 | + /* Do we have a group deadline tie? | ||
34436 | + * Then break by PID, which is unique. | ||
34437 | + */ | ||
34438 | + (cur_group_deadline(first) == | ||
34439 | + cur_group_deadline(second) && | ||
34440 | + first->pid < second->pid)))))); | ||
34441 | +} | ||
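/*
 * Editor's sketch -- not part of the patch. The same PD^2 priority test as
 * pfair_higher_prio(), unrolled into explicit steps for readability. It
 * assumes both arguments are real-time tasks; the parameter names are
 * hypothetical, whereas the real code reads the values through the
 * cur_deadline()/cur_overlap()/cur_group_deadline() accessors above.
 */
static int pd2_higher_prio_sketch(quanta_t dl1, quanta_t b1, quanta_t gdl1,
				  pid_t pid1,
				  quanta_t dl2, quanta_t b2, quanta_t gdl2,
				  pid_t pid2)
{
	if (dl1 != dl2)
		return time_before(dl1, dl2);	/* earlier subtask deadline wins */
	if (b1 != b2)
		return b1 > b2;			/* deadline tie: set b-bit wins */
	if (gdl1 != gdl2)
		return time_after(gdl1, gdl2);	/* b-bit tie: later group deadline wins */
	return pid1 < pid2;			/* final tie: lower PID wins */
}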
34442 | + | ||
34443 | +int pfair_ready_order(struct bheap_node* a, struct bheap_node* b) | ||
34444 | +{ | ||
34445 | + return pfair_higher_prio(bheap2task(a), bheap2task(b)); | ||
34446 | +} | ||
34447 | + | ||
34448 | +/* return the proper release queue for time t */ | ||
34449 | +static struct bheap* relq(quanta_t t) | ||
34450 | +{ | ||
34451 | + struct bheap* rq = &release_queue[t % PFAIR_MAX_PERIOD]; | ||
34452 | + return rq; | ||
34453 | +} | ||
34454 | + | ||
34455 | +static void prepare_release(struct task_struct* t, quanta_t at) | ||
34456 | +{ | ||
34457 | + tsk_pfair(t)->release = at; | ||
34458 | + tsk_pfair(t)->cur = 0; | ||
34459 | +} | ||
34460 | + | ||
34461 | +static void __pfair_add_release(struct task_struct* t, struct bheap* queue) | ||
34462 | +{ | ||
34463 | + bheap_insert(pfair_ready_order, queue, | ||
34464 | + tsk_rt(t)->heap_node); | ||
34465 | +} | ||
34466 | + | ||
34467 | +static void pfair_add_release(struct task_struct* t) | ||
34468 | +{ | ||
34469 | + BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node)); | ||
34470 | + __pfair_add_release(t, relq(cur_release(t))); | ||
34471 | +} | ||
34472 | + | ||
34473 | +/* pull released tasks from the release queue */ | ||
34474 | +static void poll_releases(quanta_t time) | ||
34475 | +{ | ||
34476 | + __merge_ready(&pfair, relq(time)); | ||
34477 | + merge_time = time; | ||
34478 | +} | ||
34479 | + | ||
34480 | +static void check_preempt(struct task_struct* t) | ||
34481 | +{ | ||
34482 | + int cpu = NO_CPU; | ||
34483 | + if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on && | ||
34484 | + tsk_rt(t)->present) { | ||
34485 | + /* the task can be scheduled and | ||
34486 | + * is not scheduled where it ought to be scheduled | ||
34487 | + */ | ||
34488 | + cpu = tsk_rt(t)->linked_on != NO_CPU ? | ||
34489 | + tsk_rt(t)->linked_on : | ||
34490 | + tsk_rt(t)->scheduled_on; | ||
34491 | + PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n", | ||
34492 | + tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on); | ||
34493 | + /* preempt */ | ||
34494 | + if (cpu == smp_processor_id()) | ||
34495 | + set_tsk_need_resched(current); | ||
34496 | + else { | ||
34497 | + smp_send_reschedule(cpu); | ||
34498 | + } | ||
34499 | + } | ||
34500 | +} | ||
34501 | + | ||
34502 | +/* caller must hold pfair_lock */ | ||
34503 | +static void drop_all_references(struct task_struct *t) | ||
34504 | +{ | ||
34505 | + int cpu; | ||
34506 | + struct pfair_state* s; | ||
34507 | + struct bheap* q; | ||
34508 | + if (bheap_node_in_heap(tsk_rt(t)->heap_node)) { | ||
34509 | + /* figure out what queue the node is in */ | ||
34510 | + if (time_before_eq(cur_release(t), merge_time)) | ||
34511 | + q = &pfair.ready_queue; | ||
34512 | + else | ||
34513 | + q = relq(cur_release(t)); | ||
34514 | + bheap_delete(pfair_ready_order, q, | ||
34515 | + tsk_rt(t)->heap_node); | ||
34516 | + } | ||
34517 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
34518 | + s = &per_cpu(pfair_state, cpu); | ||
34519 | + if (s->linked == t) | ||
34520 | + s->linked = NULL; | ||
34521 | + if (s->local == t) | ||
34522 | + s->local = NULL; | ||
34523 | + if (s->scheduled == t) | ||
34524 | + s->scheduled = NULL; | ||
34525 | + } | ||
34526 | +} | ||
34527 | + | ||
34528 | +/* returns 1 if the task needs to go to the release queue */ | ||
34529 | +static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) | ||
34530 | +{ | ||
34531 | + struct pfair_param* p = tsk_pfair(t); | ||
34532 | + int to_relq; | ||
34533 | + p->cur = (p->cur + 1) % p->quanta; | ||
34534 | + if (!p->cur) { | ||
34535 | + sched_trace_task_completion(t, 1); | ||
34536 | + if (tsk_rt(t)->present) { | ||
34537 | + /* we start a new job */ | ||
34538 | + prepare_for_next_period(t); | ||
34539 | + sched_trace_task_release(t); | ||
34540 | + get_rt_flags(t) = RT_F_RUNNING; | ||
34541 | + p->release += p->period; | ||
34542 | + } else { | ||
34543 | + /* remove task from system until it wakes */ | ||
34544 | + drop_all_references(t); | ||
34545 | + tsk_pfair(t)->sporadic_release = 1; | ||
34546 | + TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n", | ||
34547 | + cpu, p->cur); | ||
34548 | + return 0; | ||
34549 | + } | ||
34550 | + } | ||
34551 | + to_relq = time_after(cur_release(t), time); | ||
34552 | + TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n", | ||
34553 | + cpu, p->cur, to_relq); | ||
34554 | + return to_relq; | ||
34555 | +} | ||
34556 | + | ||
34557 | +static void advance_subtasks(quanta_t time) | ||
34558 | +{ | ||
34559 | + int cpu, missed; | ||
34560 | + struct task_struct* l; | ||
34561 | + struct pfair_param* p; | ||
34562 | + | ||
34563 | + for_each_online_cpu(cpu) { | ||
34564 | + l = pstate[cpu]->linked; | ||
34565 | + missed = pstate[cpu]->linked != pstate[cpu]->local; | ||
34566 | + if (l) { | ||
34567 | + p = tsk_pfair(l); | ||
34568 | + p->last_quantum = time; | ||
34569 | + p->last_cpu = cpu; | ||
34570 | + if (advance_subtask(time, l, cpu)) { | ||
34571 | + pstate[cpu]->linked = NULL; | ||
34572 | + pfair_add_release(l); | ||
34573 | + } | ||
34574 | + } | ||
34575 | + } | ||
34576 | +} | ||
34577 | + | ||
34578 | +static int target_cpu(quanta_t time, struct task_struct* t, int default_cpu) | ||
34579 | +{ | ||
34580 | + int cpu; | ||
34581 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
34582 | + /* always observe scheduled_on linkage */ | ||
34583 | + default_cpu = tsk_rt(t)->scheduled_on; | ||
34584 | + } else if (tsk_pfair(t)->last_quantum == time - 1) { | ||
34585 | + /* back2back quanta */ | ||
34586 | + /* Only observe last_quantum if no scheduled_on is in the way. | ||
34587 | + * This should only kick in if a CPU missed quanta, and that | ||
34588 | + * *should* only happen in QEMU. | ||
34589 | + */ | ||
34590 | + cpu = tsk_pfair(t)->last_cpu; | ||
34591 | + if (!pstate[cpu]->linked || | ||
34592 | + tsk_rt(pstate[cpu]->linked)->scheduled_on != cpu) { | ||
34593 | + default_cpu = cpu; | ||
34594 | + } | ||
34595 | + } | ||
34596 | + return default_cpu; | ||
34597 | +} | ||
34598 | + | ||
34599 | +/* returns one if linking was redirected */ | ||
34600 | +static int pfair_link(quanta_t time, int cpu, | ||
34601 | + struct task_struct* t) | ||
34602 | +{ | ||
34603 | + int target = target_cpu(time, t, cpu); | ||
34604 | + struct task_struct* prev = pstate[cpu]->linked; | ||
34605 | + struct task_struct* other; | ||
34606 | + | ||
34607 | + if (target != cpu) { | ||
34608 | + other = pstate[target]->linked; | ||
34609 | + pstate[target]->linked = t; | ||
34610 | + tsk_rt(t)->linked_on = target; | ||
34611 | + if (!other) | ||
34612 | + /* linked ok, but reschedule this CPU */ | ||
34613 | + return 1; | ||
34614 | + if (target < cpu) { | ||
34615 | + /* link other to cpu instead */ | ||
34616 | + tsk_rt(other)->linked_on = cpu; | ||
34617 | + pstate[cpu]->linked = other; | ||
34618 | + if (prev) { | ||
34619 | + /* prev got pushed back into the ready queue */ | ||
34620 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
34621 | + __add_ready(&pfair, prev); | ||
34622 | + } | ||
34623 | + /* we are done with this cpu */ | ||
34624 | + return 0; | ||
34625 | + } else { | ||
34626 | + /* re-add other, its original CPU was not considered yet */ | ||
34627 | + tsk_rt(other)->linked_on = NO_CPU; | ||
34628 | + __add_ready(&pfair, other); | ||
34629 | + /* reschedule this CPU */ | ||
34630 | + return 1; | ||
34631 | + } | ||
34632 | + } else { | ||
34633 | + pstate[cpu]->linked = t; | ||
34634 | + tsk_rt(t)->linked_on = cpu; | ||
34635 | + if (prev) { | ||
34636 | + /* prev got pushed back into the ready queue */ | ||
34637 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
34638 | + __add_ready(&pfair, prev); | ||
34639 | + } | ||
34640 | + /* we are done with this CPU */ | ||
34641 | + return 0; | ||
34642 | + } | ||
34643 | +} | ||
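/*
 * Editor's note (illustration, not part of the patch): suppose the scan in
 * schedule_subtasks() is currently filling CPU 3, another task is already
 * linked at CPU 1, and target_cpu() redirects task t to CPU 1 (target < cpu,
 * i.e., CPU 1 was already processed this quantum). Then t is linked to CPU 1,
 * the task previously linked there is linked to CPU 3 instead, and whatever
 * had been linked to CPU 3 goes back into the ready queue; the scan moves on
 * (return 0). If instead t is redirected to CPU 5 (target > cpu), the task
 * displaced from CPU 5 is put back into the ready queue -- CPU 5 has not been
 * considered yet -- and CPU 3 retries with the next ready task (return 1).
 */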
34644 | + | ||
34645 | +static void schedule_subtasks(quanta_t time) | ||
34646 | +{ | ||
34647 | + int cpu, retry; | ||
34648 | + | ||
34649 | + for_each_online_cpu(cpu) { | ||
34650 | + retry = 1; | ||
34651 | + while (retry) { | ||
34652 | + if (pfair_higher_prio(__peek_ready(&pfair), | ||
34653 | + pstate[cpu]->linked)) | ||
34654 | + retry = pfair_link(time, cpu, | ||
34655 | + __take_ready(&pfair)); | ||
34656 | + else | ||
34657 | + retry = 0; | ||
34658 | + } | ||
34659 | + } | ||
34660 | +} | ||
34661 | + | ||
34662 | +static void schedule_next_quantum(quanta_t time) | ||
34663 | +{ | ||
34664 | + int cpu; | ||
34665 | + | ||
34666 | + /* called with interrupts disabled */ | ||
34667 | + PTRACE("--- Q %lu at %llu PRE-SPIN\n", | ||
34668 | + time, litmus_clock()); | ||
34669 | + spin_lock(&pfair_lock); | ||
34670 | + PTRACE("<<< Q %lu at %llu\n", | ||
34671 | + time, litmus_clock()); | ||
34672 | + | ||
34673 | + sched_trace_quantum_boundary(); | ||
34674 | + | ||
34675 | + advance_subtasks(time); | ||
34676 | + poll_releases(time); | ||
34677 | + schedule_subtasks(time); | ||
34678 | + | ||
34679 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) | ||
34680 | + if (pstate[cpu]->linked) | ||
34681 | + PTRACE_TASK(pstate[cpu]->linked, | ||
34682 | + " linked on %d.\n", cpu); | ||
34683 | + else | ||
34684 | + PTRACE("(null) linked on %d.\n", cpu); | ||
34685 | + | ||
34686 | + /* We are done. Advance time. */ | ||
34687 | + mb(); | ||
34688 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
34689 | + if (pstate[cpu]->local_tick != pstate[cpu]->cur_tick) { | ||
34690 | + TRACE("BAD Quantum not acked on %d " | ||
34691 | + "(l:%lu c:%lu p:%lu)\n", | ||
34692 | + cpu, | ||
34693 | + pstate[cpu]->local_tick, | ||
34694 | + pstate[cpu]->cur_tick, | ||
34695 | + pfair_time); | ||
34696 | + pstate[cpu]->missed_quanta++; | ||
34697 | + } | ||
34698 | + pstate[cpu]->cur_tick = time; | ||
34699 | + } | ||
34700 | + PTRACE(">>> Q %lu at %llu\n", | ||
34701 | + time, litmus_clock()); | ||
34702 | + spin_unlock(&pfair_lock); | ||
34703 | +} | ||
34704 | + | ||
34705 | +static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state) | ||
34706 | +{ | ||
34707 | + quanta_t loc; | ||
34708 | + | ||
34709 | + goto first; /* skip mb() on first iteration */ | ||
34710 | + do { | ||
34711 | + cpu_relax(); | ||
34712 | + mb(); | ||
34713 | + first: loc = state->cur_tick; | ||
34714 | + /* FIXME: what if loc > cur? */ | ||
34715 | + } while (time_before(loc, q)); | ||
34716 | + PTRACE("observed cur_tick:%lu >= q:%lu\n", | ||
34717 | + loc, q); | ||
34718 | +} | ||
34719 | + | ||
34720 | +static quanta_t current_quantum(struct pfair_state* state) | ||
34721 | +{ | ||
34722 | + lt_t t = litmus_clock() - state->offset; | ||
34723 | + return time2quanta(t, FLOOR); | ||
34724 | +} | ||
34725 | + | ||
34726 | +static void catchup_quanta(quanta_t from, quanta_t target, | ||
34727 | + struct pfair_state* state) | ||
34728 | +{ | ||
34729 | + quanta_t cur = from, time; | ||
34730 | + TRACE("+++< BAD catching up quanta from %lu to %lu\n", | ||
34731 | + from, target); | ||
34732 | + while (time_before(cur, target)) { | ||
34733 | + wait_for_quantum(cur, state); | ||
34734 | + cur++; | ||
34735 | + time = cmpxchg(&pfair_time, | ||
34736 | + cur - 1, /* expected */ | ||
34737 | + cur /* next */ | ||
34738 | + ); | ||
34739 | + if (time == cur - 1) | ||
34740 | + schedule_next_quantum(cur); | ||
34741 | + } | ||
34742 | + TRACE("+++> catching up done\n"); | ||
34743 | +} | ||
34744 | + | ||
34745 | +/* pfair_tick - this function is called for every local timer | ||
34746 | + * interrupt. | ||
34747 | + */ | ||
34748 | +static void pfair_tick(struct task_struct* t) | ||
34749 | +{ | ||
34750 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
34751 | + quanta_t time, cur; | ||
34752 | + int retry = 10; | ||
34753 | + | ||
34754 | + do { | ||
34755 | + cur = current_quantum(state); | ||
34756 | + PTRACE("q %lu at %llu\n", cur, litmus_clock()); | ||
34757 | + | ||
34758 | + /* Attempt to advance time. First CPU to get here | ||
34759 | + * will prepare the next quantum. | ||
34760 | + */ | ||
34761 | + time = cmpxchg(&pfair_time, | ||
34762 | + cur - 1, /* expected */ | ||
34763 | + cur /* next */ | ||
34764 | + ); | ||
34765 | + if (time == cur - 1) { | ||
34766 | + /* exchange succeeded */ | ||
34767 | + wait_for_quantum(cur - 1, state); | ||
34768 | + schedule_next_quantum(cur); | ||
34769 | + retry = 0; | ||
34770 | + } else if (time_before(time, cur - 1)) { | ||
34771 | + /* the whole system missed a tick !? */ | ||
34772 | + catchup_quanta(time, cur, state); | ||
34773 | + retry--; | ||
34774 | + } else if (time_after(time, cur)) { | ||
34775 | + /* our timer is lagging behind!? */ | ||
34776 | + TRACE("BAD pfair_time:%lu > cur:%lu\n", time, cur); | ||
34777 | + retry--; | ||
34778 | + } else { | ||
34779 | + /* Some other CPU already started scheduling | ||
34780 | + * this quantum. Let it do its job and then update. | ||
34781 | + */ | ||
34782 | + retry = 0; | ||
34783 | + } | ||
34784 | + } while (retry); | ||
34785 | + | ||
34786 | + /* Spin locally until time advances. */ | ||
34787 | + wait_for_quantum(cur, state); | ||
34788 | + | ||
34789 | + /* copy assignment */ | ||
34790 | + /* FIXME: what if we race with a future update? Corrupted state? */ | ||
34791 | + state->local = state->linked; | ||
34792 | + /* signal that we are done */ | ||
34793 | + mb(); | ||
34794 | + state->local_tick = state->cur_tick; | ||
34795 | + | ||
34796 | + if (state->local != current | ||
34797 | + && (is_realtime(current) || is_present(state->local))) | ||
34798 | + set_tsk_need_resched(current); | ||
34799 | +} | ||
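/*
 * Editor's sketch -- not part of the patch. The lock-free "first CPU to
 * notice a new quantum prepares it" pattern used in pfair_tick(), shown in
 * isolation (the real code additionally handles missed ticks and lagging
 * timers). pfair_time only moves forward, and cmpxchg() guarantees exactly
 * one winner per quantum; every other CPU simply waits until cur_tick has
 * been advanced for it. The function name is hypothetical.
 */
static void advance_quantum_sketch(quanta_t cur, struct pfair_state *state)
{
	quanta_t seen = cmpxchg(&pfair_time, cur - 1, cur);

	if (seen == cur - 1)
		schedule_next_quantum(cur);	/* we won the race */
	else
		wait_for_quantum(cur, state);	/* somebody else prepares it */
}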
34800 | + | ||
34801 | +static int safe_to_schedule(struct task_struct* t, int cpu) | ||
34802 | +{ | ||
34803 | + int where = tsk_rt(t)->scheduled_on; | ||
34804 | + if (where != NO_CPU && where != cpu) { | ||
34805 | + TRACE_TASK(t, "BAD: can't be scheduled on %d, " | ||
34806 | + "scheduled already on %d.\n", cpu, where); | ||
34807 | + return 0; | ||
34808 | + } else | ||
34809 | + return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING; | ||
34810 | +} | ||
34811 | + | ||
34812 | +static struct task_struct* pfair_schedule(struct task_struct * prev) | ||
34813 | +{ | ||
34814 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
34815 | + int blocks; | ||
34816 | + struct task_struct* next = NULL; | ||
34817 | + | ||
34818 | + spin_lock(&pfair_lock); | ||
34819 | + | ||
34820 | + blocks = is_realtime(prev) && !is_running(prev); | ||
34821 | + | ||
34822 | + if (state->local && safe_to_schedule(state->local, state->cpu)) | ||
34823 | + next = state->local; | ||
34824 | + | ||
34825 | + if (prev != next) { | ||
34826 | + tsk_rt(prev)->scheduled_on = NO_CPU; | ||
34827 | + if (next) | ||
34828 | + tsk_rt(next)->scheduled_on = state->cpu; | ||
34829 | + } | ||
34830 | + | ||
34831 | + spin_unlock(&pfair_lock); | ||
34832 | + | ||
34833 | + if (next) | ||
34834 | + TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n", | ||
34835 | + tsk_pfair(next)->release, pfair_time, litmus_clock()); | ||
34836 | + else if (is_realtime(prev)) | ||
34837 | + TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock()); | ||
34838 | + | ||
34839 | + return next; | ||
34840 | +} | ||
34841 | + | ||
34842 | +static void pfair_task_new(struct task_struct * t, int on_rq, int running) | ||
34843 | +{ | ||
34844 | + unsigned long flags; | ||
34845 | + | ||
34846 | + TRACE("pfair: task new %d state:%d\n", t->pid, t->state); | ||
34847 | + | ||
34848 | + spin_lock_irqsave(&pfair_lock, flags); | ||
34849 | + if (running) | ||
34850 | + t->rt_param.scheduled_on = task_cpu(t); | ||
34851 | + else | ||
34852 | + t->rt_param.scheduled_on = NO_CPU; | ||
34853 | + | ||
34854 | + prepare_release(t, pfair_time + 1); | ||
34855 | + tsk_pfair(t)->sporadic_release = 0; | ||
34856 | + pfair_add_release(t); | ||
34857 | + check_preempt(t); | ||
34858 | + | ||
34859 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
34860 | +} | ||
34861 | + | ||
34862 | +static void pfair_task_wake_up(struct task_struct *t) | ||
34863 | +{ | ||
34864 | + unsigned long flags; | ||
34865 | + lt_t now; | ||
34866 | + | ||
34867 | + TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n", | ||
34868 | + litmus_clock(), cur_release(t), pfair_time); | ||
34869 | + | ||
34870 | + spin_lock_irqsave(&pfair_lock, flags); | ||
34871 | + | ||
34872 | + /* It is a little unclear how to deal with Pfair | ||
34873 | + * tasks that block for a while and then wake. For now, | ||
34874 | + * if a task blocks and wakes before its next job release, | ||
34875 | + * then it may resume if it is currently linked somewhere | ||
34876 | + * (as if it never blocked at all). Otherwise, we have a | ||
34877 | + * new sporadic job release. | ||
34878 | + */ | ||
34879 | + if (tsk_pfair(t)->sporadic_release) { | ||
34880 | + now = litmus_clock(); | ||
34881 | + release_at(t, now); | ||
34882 | + prepare_release(t, time2quanta(now, CEIL)); | ||
34883 | + sched_trace_task_release(t); | ||
34884 | + /* FIXME: race with pfair_time advancing */ | ||
34885 | + pfair_add_release(t); | ||
34886 | + tsk_pfair(t)->sporadic_release = 0; | ||
34887 | + } | ||
34888 | + | ||
34889 | + check_preempt(t); | ||
34890 | + | ||
34891 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
34892 | + TRACE_TASK(t, "wake up done at %llu\n", litmus_clock()); | ||
34893 | +} | ||
34894 | + | ||
34895 | +static void pfair_task_block(struct task_struct *t) | ||
34896 | +{ | ||
34897 | + BUG_ON(!is_realtime(t)); | ||
34898 | + TRACE_TASK(t, "blocks at %llu, state:%d\n", | ||
34899 | + litmus_clock(), t->state); | ||
34900 | +} | ||
34901 | + | ||
34902 | +static void pfair_task_exit(struct task_struct * t) | ||
34903 | +{ | ||
34904 | + unsigned long flags; | ||
34905 | + | ||
34906 | + BUG_ON(!is_realtime(t)); | ||
34907 | + | ||
34908 | + /* Remove the task from the release or ready queue, and ensure | ||
34909 | + * that it is not the scheduled task for ANY CPU. We | ||
34910 | + * do this blanket check because occasionally, when | ||
34911 | + * tasks exit while blocked, the task_cpu of the task | ||
34912 | + * might not be the same as the CPU that the PFAIR scheduler | ||
34913 | + * has chosen for it. | ||
34914 | + */ | ||
34915 | + spin_lock_irqsave(&pfair_lock, flags); | ||
34916 | + | ||
34917 | + TRACE_TASK(t, "RIP, state:%d\n", t->state); | ||
34918 | + drop_all_references(t); | ||
34919 | + | ||
34920 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
34921 | + | ||
34922 | + kfree(t->rt_param.pfair); | ||
34923 | + t->rt_param.pfair = NULL; | ||
34924 | +} | ||
34925 | + | ||
34926 | + | ||
34927 | +static void pfair_release_at(struct task_struct* task, lt_t start) | ||
34928 | +{ | ||
34929 | + unsigned long flags; | ||
34930 | + quanta_t release; | ||
34931 | + | ||
34932 | + BUG_ON(!is_realtime(task)); | ||
34933 | + | ||
34934 | + spin_lock_irqsave(&pfair_lock, flags); | ||
34935 | + release_at(task, start); | ||
34936 | + release = time2quanta(start, CEIL); | ||
34937 | + | ||
34938 | + if (release - pfair_time >= PFAIR_MAX_PERIOD) | ||
34939 | + release = pfair_time + PFAIR_MAX_PERIOD; | ||
34940 | + | ||
34941 | + TRACE_TASK(task, "sys release at %lu\n", release); | ||
34942 | + | ||
34943 | + drop_all_references(task); | ||
34944 | + prepare_release(task, release); | ||
34945 | + pfair_add_release(task); | ||
34946 | + | ||
34947 | + /* Clear sporadic release flag, since this release subsumes any | ||
34948 | + * sporadic release on wake. | ||
34949 | + */ | ||
34950 | + tsk_pfair(task)->sporadic_release = 0; | ||
34951 | + | ||
34952 | + spin_unlock_irqrestore(&pfair_lock, flags); | ||
34953 | +} | ||
34954 | + | ||
34955 | +static void init_subtask(struct subtask* sub, unsigned long i, | ||
34956 | + lt_t quanta, lt_t period) | ||
34957 | +{ | ||
34958 | + /* since i is zero-based, the formulas are shifted by one */ | ||
34959 | + lt_t tmp; | ||
34960 | + | ||
34961 | + /* release */ | ||
34962 | + tmp = period * i; | ||
34963 | + do_div(tmp, quanta); /* floor */ | ||
34964 | + sub->release = (quanta_t) tmp; | ||
34965 | + | ||
34966 | + /* deadline */ | ||
34967 | + tmp = period * (i + 1); | ||
34968 | + if (do_div(tmp, quanta)) /* ceil */ | ||
34969 | + tmp++; | ||
34970 | + sub->deadline = (quanta_t) tmp; | ||
34971 | + | ||
34972 | + /* next release */ | ||
34973 | + tmp = period * (i + 1); | ||
34974 | + do_div(tmp, quanta); /* floor */ | ||
34975 | + sub->overlap = sub->deadline - (quanta_t) tmp; | ||
34976 | + | ||
34977 | + /* Group deadline. | ||
34978 | + * Based on the formula given in Uma's thesis. | ||
34979 | + */ | ||
34980 | + if (2 * quanta >= period) { | ||
34981 | + /* heavy */ | ||
34982 | + tmp = (sub->deadline - (i + 1)) * period; | ||
34983 | + if (period > quanta && | ||
34984 | + do_div(tmp, (period - quanta))) /* ceil */ | ||
34985 | + tmp++; | ||
34986 | + sub->group_deadline = (quanta_t) tmp; | ||
34987 | + } else | ||
34988 | + sub->group_deadline = 0; | ||
34989 | +} | ||
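/*
 * Editor's note (worked example, not part of the patch): for a task with
 * quanta = 3 and period = 8 (Pfair weight 3/8), the formulas above yield,
 * in quanta relative to the job release:
 *
 *   subtask i | release = floor(8i/3) | deadline = ceil(8(i+1)/3) | b-bit | group deadline
 *        0    |           0           |             3             |   1   |       0
 *        1    |           2           |             6             |   1   |       0
 *        2    |           5           |             8             |   0   |       0
 *
 * The b-bit is deadline(i) minus the release of subtask i+1; the group
 * deadline is 0 throughout because 2 * 3 < 8, i.e., the task is not "heavy"
 * (its weight is below 1/2), so the else-branch above applies.
 */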
34990 | + | ||
34991 | +static void dump_subtasks(struct task_struct* t) | ||
34992 | +{ | ||
34993 | + unsigned long i; | ||
34994 | + for (i = 0; i < t->rt_param.pfair->quanta; i++) | ||
34995 | + TRACE_TASK(t, "SUBTASK %lu: rel=%lu dl=%lu bbit:%lu gdl:%lu\n", | ||
34996 | + i + 1, | ||
34997 | + t->rt_param.pfair->subtasks[i].release, | ||
34998 | + t->rt_param.pfair->subtasks[i].deadline, | ||
34999 | + t->rt_param.pfair->subtasks[i].overlap, | ||
35000 | + t->rt_param.pfair->subtasks[i].group_deadline); | ||
35001 | +} | ||
35002 | + | ||
35003 | +static long pfair_admit_task(struct task_struct* t) | ||
35004 | +{ | ||
35005 | + lt_t quanta; | ||
35006 | + lt_t period; | ||
35007 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
35008 | + struct pfair_param* param; | ||
35009 | + unsigned long i; | ||
35010 | + | ||
35011 | + /* Pfair is a tick-based method, so the time | ||
35012 | + * of interest is jiffies. Calculate tick-based | ||
35013 | + * times for everything. | ||
35014 | + * (Ceiling of exec cost, floor of period.) | ||
35015 | + */ | ||
35016 | + | ||
35017 | + quanta = get_exec_cost(t); | ||
35018 | + period = get_rt_period(t); | ||
35019 | + | ||
35020 | + quanta = time2quanta(get_exec_cost(t), CEIL); | ||
35021 | + | ||
35022 | + if (do_div(period, quantum_length)) | ||
35023 | + printk(KERN_WARNING | ||
35024 | + "The period of %s/%d is not a multiple of %llu.\n", | ||
35025 | + t->comm, t->pid, (unsigned long long) quantum_length); | ||
35026 | + | ||
35027 | + if (period >= PFAIR_MAX_PERIOD) { | ||
35028 | + printk(KERN_WARNING | ||
35029 | + "PFAIR: Rejecting task %s/%d; its period is too long.\n", | ||
35030 | + t->comm, t->pid); | ||
35031 | + return -EINVAL; | ||
35032 | + } | ||
35033 | + | ||
35034 | + if (quanta == period) { | ||
35035 | + /* special case: task has weight 1.0 */ | ||
35036 | + printk(KERN_INFO | ||
35037 | + "Admitting weight 1.0 task. (%s/%d, %llu, %llu).\n", | ||
35038 | + t->comm, t->pid, quanta, period); | ||
35039 | + quanta = 1; | ||
35040 | + period = 1; | ||
35041 | + } | ||
35042 | + | ||
35043 | + param = kmalloc(sizeof(*param) + | ||
35044 | + quanta * sizeof(struct subtask), GFP_ATOMIC); | ||
35045 | + | ||
35046 | + if (!param) | ||
35047 | + return -ENOMEM; | ||
35048 | + | ||
35049 | + param->quanta = quanta; | ||
35050 | + param->cur = 0; | ||
35051 | + param->release = 0; | ||
35052 | + param->period = period; | ||
35053 | + | ||
35054 | + for (i = 0; i < quanta; i++) | ||
35055 | + init_subtask(param->subtasks + i, i, quanta, period); | ||
35056 | + | ||
35057 | + if (t->rt_param.pfair) | ||
35058 | + /* get rid of stale allocation */ | ||
35059 | + kfree(t->rt_param.pfair); | ||
35060 | + | ||
35061 | + t->rt_param.pfair = param; | ||
35062 | + | ||
35063 | + /* spew out some debug info */ | ||
35064 | + dump_subtasks(t); | ||
35065 | + | ||
35066 | + return 0; | ||
35067 | +} | ||
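/*
 * Editor's note (worked example, not part of the patch): assuming a 1 ms
 * scheduling quantum (a value chosen purely for illustration), a task with
 * an execution cost of 3 ms and a period of 8 ms is admitted with
 * quanta = 3 and period = 8, i.e., Pfair weight 3/8 -- the same parameters
 * used in the worked example after init_subtask() above. A period of 8.5 ms
 * would trigger the "not a multiple" warning, and a period of
 * PFAIR_MAX_PERIOD (2000) quanta or more would be rejected with -EINVAL.
 */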
35068 | + | ||
35069 | +static long pfair_activate_plugin(void) | ||
35070 | +{ | ||
35071 | + int cpu; | ||
35072 | + struct pfair_state* state; | ||
35073 | + | ||
35074 | + state = &__get_cpu_var(pfair_state); | ||
35075 | + pfair_time = current_quantum(state); | ||
35076 | + | ||
35077 | + TRACE("Activating PFAIR at q=%lu\n", pfair_time); | ||
35078 | + | ||
35079 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
35080 | + state = &per_cpu(pfair_state, cpu); | ||
35081 | + state->cur_tick = pfair_time; | ||
35082 | + state->local_tick = pfair_time; | ||
35083 | + state->missed_quanta = 0; | ||
35084 | + state->offset = cpu_stagger_offset(cpu); | ||
35085 | + } | ||
35086 | + | ||
35087 | + return 0; | ||
35088 | +} | ||
35089 | + | ||
35090 | +/* Plugin object */ | ||
35091 | +static struct sched_plugin pfair_plugin __cacheline_aligned_in_smp = { | ||
35092 | + .plugin_name = "PFAIR", | ||
35093 | + .tick = pfair_tick, | ||
35094 | + .task_new = pfair_task_new, | ||
35095 | + .task_exit = pfair_task_exit, | ||
35096 | + .schedule = pfair_schedule, | ||
35097 | + .task_wake_up = pfair_task_wake_up, | ||
35098 | + .task_block = pfair_task_block, | ||
35099 | + .admit_task = pfair_admit_task, | ||
35100 | + .release_at = pfair_release_at, | ||
35101 | + .complete_job = complete_job, | ||
35102 | + .activate_plugin = pfair_activate_plugin, | ||
35103 | +}; | ||
35104 | + | ||
35105 | +static int __init init_pfair(void) | ||
35106 | +{ | ||
35107 | + int cpu, i; | ||
35108 | + struct pfair_state *state; | ||
35109 | + | ||
35110 | + | ||
35111 | + /* | ||
35112 | + * initialize the shortcut array for per-CPU pfair state; | ||
35113 | + * note that CPU hotplug is not handled: CPUs removed or added | ||
35114 | + * during or after this initialization would leave the | ||
35115 | + * shortcut array stale. | ||
35116 | + */ | ||
35117 | + pstate = kmalloc(sizeof(struct pfair_state*) * num_online_cpus(), GFP_KERNEL); | ||
35118 | + | ||
35119 | + /* initialize release queue */ | ||
35120 | + for (i = 0; i < PFAIR_MAX_PERIOD; i++) | ||
35121 | + bheap_init(&release_queue[i]); | ||
35122 | + | ||
35123 | + /* initialize CPU state */ | ||
35124 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
35125 | + state = &per_cpu(pfair_state, cpu); | ||
35126 | + state->cpu = cpu; | ||
35127 | + state->cur_tick = 0; | ||
35128 | + state->local_tick = 0; | ||
35129 | + state->linked = NULL; | ||
35130 | + state->local = NULL; | ||
35131 | + state->scheduled = NULL; | ||
35132 | + state->missed_quanta = 0; | ||
35133 | + state->offset = cpu_stagger_offset(cpu); | ||
35134 | + pstate[cpu] = state; | ||
35135 | + } | ||
35136 | + | ||
35137 | + rt_domain_init(&pfair, pfair_ready_order, NULL, NULL); | ||
35138 | + return register_sched_plugin(&pfair_plugin); | ||
35139 | +} | ||
35140 | + | ||
35141 | +static void __exit clean_pfair(void) | ||
35142 | +{ | ||
35143 | + kfree(pstate); | ||
35144 | +} | ||
35145 | + | ||
35146 | +module_init(init_pfair); | ||
35147 | +module_exit(clean_pfair); | ||
35148 | diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c | ||
35149 | new file mode 100644 | ||
35150 | index 0000000..bc7c0e9 | ||
35151 | --- /dev/null | ||
35152 | +++ b/litmus/sched_plugin.c | ||
35153 | @@ -0,0 +1,257 @@ | ||
35154 | +/* sched_plugin.c -- core infrastructure for the scheduler plugin system | ||
35155 | + * | ||
35156 | + * This file includes the initialization of the plugin system, the no-op Linux | ||
35157 | + * scheduler plugin, some dummy functions, and some helper functions. | ||
35158 | + */ | ||
35159 | + | ||
35160 | +#include <linux/list.h> | ||
35161 | +#include <linux/spinlock.h> | ||
35162 | + | ||
35163 | +#include <litmus/litmus.h> | ||
35164 | +#include <litmus/sched_plugin.h> | ||
35165 | + | ||
35166 | +#include <litmus/jobs.h> | ||
35167 | + | ||
35168 | +/* | ||
35169 | + * Generic function to trigger preemption on either local or remote cpu | ||
35170 | + * from scheduler plugins. The key feature is that this function is | ||
35171 | + * non-preemptive section aware and does not invoke the scheduler / send | ||
35172 | + * IPIs if the to-be-preempted task is actually non-preemptive. | ||
35173 | + */ | ||
35174 | +void preempt_if_preemptable(struct task_struct* t, int on_cpu) | ||
35175 | +{ | ||
35176 | + /* t is the real-time task executing on CPU on_cpu. If t is NULL, then | ||
35177 | + * on_cpu is currently scheduling background work. | ||
35178 | + */ | ||
35179 | + | ||
35180 | + int send_ipi; | ||
35181 | + | ||
35182 | + if (smp_processor_id() == on_cpu) { | ||
35183 | + /* local CPU case */ | ||
35184 | + if (t) { | ||
35185 | + /* check if we need to poke userspace */ | ||
35186 | + if (is_user_np(t)) | ||
35187 | + /* yes, poke it */ | ||
35188 | + request_exit_np(t); | ||
35189 | + else | ||
35190 | + /* no, see if we are allowed to preempt the | ||
35191 | + * currently-executing task */ | ||
35192 | + if (!is_kernel_np(t)) | ||
35193 | + set_tsk_need_resched(t); | ||
35194 | + } else | ||
35195 | + /* move non-real-time task out of the way */ | ||
35196 | + set_tsk_need_resched(current); | ||
35197 | + } else { | ||
35198 | + /* remote CPU case */ | ||
35199 | + if (!t) | ||
35200 | + /* currently schedules non-real-time work */ | ||
35201 | + send_ipi = 1; | ||
35202 | + else { | ||
35203 | + /* currently schedules real-time work */ | ||
35204 | + if (is_user_np(t)) { | ||
35205 | + /* need to notify user space of delayed | ||
35206 | + * preemption */ | ||
35207 | + | ||
35208 | + /* to avoid a race, set the flag, then test | ||
35209 | + * again */ | ||
35210 | + request_exit_np(t); | ||
35211 | + /* make sure it got written */ | ||
35212 | + mb(); | ||
35213 | + } | ||
35214 | + /* Only send an IPI if the remote task might have raced our | ||
35215 | + * request, i.e., send an IPI to make sure that it has exited | ||
35216 | + * its critical section. | ||
35217 | + */ | ||
35218 | + send_ipi = !is_np(t) && !is_kernel_np(t); | ||
35219 | + } | ||
35220 | + if (likely(send_ipi)) | ||
35221 | + smp_send_reschedule(on_cpu); | ||
35222 | + } | ||
35223 | +} | ||
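/*
 * Editor's note (usage illustration, not part of the patch): a plugin that
 * decides CPU cpu must reschedule simply calls
 *
 *	preempt_if_preemptable(scheduled_task_on_cpu, cpu);
 *
 * where scheduled_task_on_cpu is the real-time task the plugin believes is
 * running there, or NULL if that CPU is running background work. See the
 * preempt() helper in litmus/sched_psn_edf.c further below for a concrete
 * caller.
 */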
35224 | + | ||
35225 | + | ||
35226 | +/************************************************************* | ||
35227 | + * Dummy plugin functions * | ||
35228 | + *************************************************************/ | ||
35229 | + | ||
35230 | +static void litmus_dummy_finish_switch(struct task_struct * prev) | ||
35231 | +{ | ||
35232 | +} | ||
35233 | + | ||
35234 | +static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) | ||
35235 | +{ | ||
35236 | + return NULL; | ||
35237 | +} | ||
35238 | + | ||
35239 | +static void litmus_dummy_tick(struct task_struct* tsk) | ||
35240 | +{ | ||
35241 | +} | ||
35242 | + | ||
35243 | +static long litmus_dummy_admit_task(struct task_struct* tsk) | ||
35244 | +{ | ||
35245 | + printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", | ||
35246 | + tsk->comm, tsk->pid); | ||
35247 | + return -EINVAL; | ||
35248 | +} | ||
35249 | + | ||
35250 | +static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running) | ||
35251 | +{ | ||
35252 | +} | ||
35253 | + | ||
35254 | +static void litmus_dummy_task_wake_up(struct task_struct *task) | ||
35255 | +{ | ||
35256 | +} | ||
35257 | + | ||
35258 | +static void litmus_dummy_task_block(struct task_struct *task) | ||
35259 | +{ | ||
35260 | +} | ||
35261 | + | ||
35262 | +static void litmus_dummy_task_exit(struct task_struct *task) | ||
35263 | +{ | ||
35264 | +} | ||
35265 | + | ||
35266 | +static long litmus_dummy_complete_job(void) | ||
35267 | +{ | ||
35268 | + return -ENOSYS; | ||
35269 | +} | ||
35270 | + | ||
35271 | +static long litmus_dummy_activate_plugin(void) | ||
35272 | +{ | ||
35273 | + return 0; | ||
35274 | +} | ||
35275 | + | ||
35276 | +static long litmus_dummy_deactivate_plugin(void) | ||
35277 | +{ | ||
35278 | + return 0; | ||
35279 | +} | ||
35280 | + | ||
35281 | +#ifdef CONFIG_FMLP | ||
35282 | + | ||
35283 | +static long litmus_dummy_inherit_priority(struct pi_semaphore *sem, | ||
35284 | + struct task_struct *new_owner) | ||
35285 | +{ | ||
35286 | + return -ENOSYS; | ||
35287 | +} | ||
35288 | + | ||
35289 | +static long litmus_dummy_return_priority(struct pi_semaphore *sem) | ||
35290 | +{ | ||
35291 | + return -ENOSYS; | ||
35292 | +} | ||
35293 | + | ||
35294 | +static long litmus_dummy_pi_block(struct pi_semaphore *sem, | ||
35295 | + struct task_struct *new_waiter) | ||
35296 | +{ | ||
35297 | + return -ENOSYS; | ||
35298 | +} | ||
35299 | + | ||
35300 | +#endif | ||
35301 | + | ||
35302 | + | ||
35303 | +/* The default scheduler plugin. It doesn't do anything and lets Linux do its | ||
35304 | + * job. | ||
35305 | + */ | ||
35306 | +struct sched_plugin linux_sched_plugin = { | ||
35307 | + .plugin_name = "Linux", | ||
35308 | + .tick = litmus_dummy_tick, | ||
35309 | + .task_new = litmus_dummy_task_new, | ||
35310 | + .task_exit = litmus_dummy_task_exit, | ||
35311 | + .task_wake_up = litmus_dummy_task_wake_up, | ||
35312 | + .task_block = litmus_dummy_task_block, | ||
35313 | + .complete_job = litmus_dummy_complete_job, | ||
35314 | + .schedule = litmus_dummy_schedule, | ||
35315 | + .finish_switch = litmus_dummy_finish_switch, | ||
35316 | + .activate_plugin = litmus_dummy_activate_plugin, | ||
35317 | + .deactivate_plugin = litmus_dummy_deactivate_plugin, | ||
35318 | +#ifdef CONFIG_FMLP | ||
35319 | + .inherit_priority = litmus_dummy_inherit_priority, | ||
35320 | + .return_priority = litmus_dummy_return_priority, | ||
35321 | + .pi_block = litmus_dummy_pi_block, | ||
35322 | +#endif | ||
35323 | + .admit_task = litmus_dummy_admit_task | ||
35324 | +}; | ||
35325 | + | ||
35326 | +/* | ||
35327 | + * The reference to the current plugin that is used to schedule tasks within | ||
35328 | + * the system. It stores references to the actual function implementations. | ||
35329 | + * It should be initialized by calling "init_***_plugin()". | ||
35330 | + */ | ||
35331 | +struct sched_plugin *litmus = &linux_sched_plugin; | ||
35332 | + | ||
35333 | +/* the list of registered scheduling plugins */ | ||
35334 | +static LIST_HEAD(sched_plugins); | ||
35335 | +static DEFINE_SPINLOCK(sched_plugins_lock); | ||
35336 | + | ||
35337 | +#define CHECK(func) {\ | ||
35338 | + if (!plugin->func) \ | ||
35339 | + plugin->func = litmus_dummy_ ## func;} | ||
35340 | + | ||
35341 | +/* FIXME: get reference to module */ | ||
35342 | +int register_sched_plugin(struct sched_plugin* plugin) | ||
35343 | +{ | ||
35344 | + printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n", | ||
35345 | + plugin->plugin_name); | ||
35346 | + | ||
35347 | + /* make sure we don't trip over null pointers later */ | ||
35348 | + CHECK(finish_switch); | ||
35349 | + CHECK(schedule); | ||
35350 | + CHECK(tick); | ||
35351 | + CHECK(task_wake_up); | ||
35352 | + CHECK(task_exit); | ||
35353 | + CHECK(task_block); | ||
35354 | + CHECK(task_new); | ||
35355 | + CHECK(complete_job); | ||
35356 | + CHECK(activate_plugin); | ||
35357 | + CHECK(deactivate_plugin); | ||
35358 | +#ifdef CONFIG_FMLP | ||
35359 | + CHECK(inherit_priority); | ||
35360 | + CHECK(return_priority); | ||
35361 | + CHECK(pi_block); | ||
35362 | +#endif | ||
35363 | + CHECK(admit_task); | ||
35364 | + | ||
35365 | + if (!plugin->release_at) | ||
35366 | + plugin->release_at = release_at; | ||
35367 | + | ||
35368 | + spin_lock(&sched_plugins_lock); | ||
35369 | + list_add(&plugin->list, &sched_plugins); | ||
35370 | + spin_unlock(&sched_plugins_lock); | ||
35371 | + | ||
35372 | + return 0; | ||
35373 | +} | ||
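/*
 * Editor's sketch -- not part of the patch. The minimum a new plugin must
 * provide to register itself: register_sched_plugin() substitutes a
 * litmus_dummy_* implementation for every missing callback via CHECK(), and
 * release_at defaults to the generic release_at(). Note that the dummy
 * admit_task rejects every task, so a useful plugin must at least supply
 * .admit_task and .schedule. The name "DEMO" and init_demo() are
 * hypothetical.
 */
static struct sched_plugin demo_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "DEMO",
};

static int __init init_demo(void)
{
	return register_sched_plugin(&demo_plugin);
}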
35374 | + | ||
35375 | + | ||
35376 | +/* FIXME: reference counting, etc. */ | ||
35377 | +struct sched_plugin* find_sched_plugin(const char* name) | ||
35378 | +{ | ||
35379 | + struct list_head *pos; | ||
35380 | + struct sched_plugin *plugin; | ||
35381 | + | ||
35382 | + spin_lock(&sched_plugins_lock); | ||
35383 | + list_for_each(pos, &sched_plugins) { | ||
35384 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
35385 | + if (!strcmp(plugin->plugin_name, name)) | ||
35386 | + goto out_unlock; | ||
35387 | + } | ||
35388 | + plugin = NULL; | ||
35389 | + | ||
35390 | +out_unlock: | ||
35391 | + spin_unlock(&sched_plugins_lock); | ||
35392 | + return plugin; | ||
35393 | +} | ||
35394 | + | ||
35395 | +int print_sched_plugins(char* buf, int max) | ||
35396 | +{ | ||
35397 | + int count = 0; | ||
35398 | + struct list_head *pos; | ||
35399 | + struct sched_plugin *plugin; | ||
35400 | + | ||
35401 | + spin_lock(&sched_plugins_lock); | ||
35402 | + list_for_each(pos, &sched_plugins) { | ||
35403 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
35404 | + count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name); | ||
35405 | + if (max - count <= 0) | ||
35406 | + break; | ||
35407 | + } | ||
35408 | + spin_unlock(&sched_plugins_lock); | ||
35409 | + return count; | ||
35410 | +} | ||
35411 | diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c | ||
35412 | new file mode 100644 | ||
35413 | index 0000000..7f71ecf | ||
35414 | --- /dev/null | ||
35415 | +++ b/litmus/sched_psn_edf.c | ||
35416 | @@ -0,0 +1,478 @@ | ||
35417 | +/* | ||
35418 | + * kernel/sched_psn_edf.c | ||
35419 | + * | ||
35420 | + * Implementation of the PSN-EDF scheduler plugin. | ||
35421 | + * Based on kern/sched_part_edf.c and kern/sched_gsn_edf.c. | ||
35422 | + * | ||
35423 | + * Suspensions and non-preemptable sections are supported. | ||
35424 | + * Priority inheritance is not supported. | ||
35425 | + */ | ||
35426 | + | ||
35427 | +#include <linux/percpu.h> | ||
35428 | +#include <linux/sched.h> | ||
35429 | +#include <linux/list.h> | ||
35430 | +#include <linux/spinlock.h> | ||
35431 | + | ||
35432 | +#include <linux/module.h> | ||
35433 | + | ||
35434 | +#include <litmus/litmus.h> | ||
35435 | +#include <litmus/jobs.h> | ||
35436 | +#include <litmus/sched_plugin.h> | ||
35437 | +#include <litmus/edf_common.h> | ||
35438 | + | ||
35439 | + | ||
35440 | +typedef struct { | ||
35441 | + rt_domain_t domain; | ||
35442 | + int cpu; | ||
35443 | + struct task_struct* scheduled; /* only RT tasks */ | ||
35444 | +/* | ||
35445 | + * scheduling lock slock | ||
35446 | + * protects the domain and serializes scheduling decisions | ||
35447 | + */ | ||
35448 | +#define slock domain.ready_lock | ||
35449 | + | ||
35450 | +} psnedf_domain_t; | ||
35451 | + | ||
35452 | +DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); | ||
35453 | + | ||
35454 | +#define local_edf (&__get_cpu_var(psnedf_domains).domain) | ||
35455 | +#define local_pedf (&__get_cpu_var(psnedf_domains)) | ||
35456 | +#define remote_edf(cpu) (&per_cpu(psnedf_domains, cpu).domain) | ||
35457 | +#define remote_pedf(cpu) (&per_cpu(psnedf_domains, cpu)) | ||
35458 | +#define task_edf(task) remote_edf(get_partition(task)) | ||
35459 | +#define task_pedf(task) remote_pedf(get_partition(task)) | ||
35460 | + | ||
35461 | + | ||
35462 | +static void psnedf_domain_init(psnedf_domain_t* pedf, | ||
35463 | + check_resched_needed_t check, | ||
35464 | + release_jobs_t release, | ||
35465 | + int cpu) | ||
35466 | +{ | ||
35467 | + edf_domain_init(&pedf->domain, check, release); | ||
35468 | + pedf->cpu = cpu; | ||
35469 | + pedf->scheduled = NULL; | ||
35470 | +} | ||
35471 | + | ||
35472 | +static void requeue(struct task_struct* t, rt_domain_t *edf) | ||
35473 | +{ | ||
35474 | + if (t->state != TASK_RUNNING) | ||
35475 | + TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | ||
35476 | + | ||
35477 | + set_rt_flags(t, RT_F_RUNNING); | ||
35478 | + if (is_released(t, litmus_clock())) | ||
35479 | + __add_ready(edf, t); | ||
35480 | + else | ||
35481 | + add_release(edf, t); /* it has got to wait */ | ||
35482 | +} | ||
35483 | + | ||
35484 | +/* we assume the lock is being held */ | ||
35485 | +static void preempt(psnedf_domain_t *pedf) | ||
35486 | +{ | ||
35487 | + preempt_if_preemptable(pedf->scheduled, pedf->cpu); | ||
35488 | +} | ||
35489 | + | ||
35490 | +/* This check is trivial in partitioned systems as we only have to consider | ||
35491 | + * the CPU of the partition. | ||
35492 | + */ | ||
35493 | +static int psnedf_check_resched(rt_domain_t *edf) | ||
35494 | +{ | ||
35495 | + psnedf_domain_t *pedf = container_of(edf, psnedf_domain_t, domain); | ||
35496 | + | ||
35497 | + /* because this is a callback from rt_domain_t we already hold | ||
35498 | + * the necessary lock for the ready queue | ||
35499 | + */ | ||
35500 | + if (edf_preemption_needed(edf, pedf->scheduled)) { | ||
35501 | + preempt(pedf); | ||
35502 | + return 1; | ||
35503 | + } else | ||
35504 | + return 0; | ||
35505 | +} | ||
35506 | + | ||
35507 | +static void job_completion(struct task_struct* t, int forced) | ||
35508 | +{ | ||
35509 | + sched_trace_task_completion(t,forced); | ||
35510 | + TRACE_TASK(t, "job_completion().\n"); | ||
35511 | + | ||
35512 | + set_rt_flags(t, RT_F_SLEEP); | ||
35513 | + prepare_for_next_period(t); | ||
35514 | +} | ||
35515 | + | ||
35516 | +static void psnedf_tick(struct task_struct *t) | ||
35517 | +{ | ||
35518 | + psnedf_domain_t *pedf = local_pedf; | ||
35519 | + | ||
35520 | + /* Check for inconsistency. We don't need the lock for this since | ||
35521 | + * ->scheduled is only changed in schedule, which obviously is not | ||
35522 | + * executing in parallel on this CPU | ||
35523 | + */ | ||
35524 | + BUG_ON(is_realtime(t) && t != pedf->scheduled); | ||
35525 | + | ||
35526 | + if (is_realtime(t) && budget_exhausted(t)) { | ||
35527 | + if (!is_np(t)) { | ||
35528 | + set_tsk_need_resched(t); | ||
35529 | + TRACE("psnedf_scheduler_tick: " | ||
35530 | + "%d is preemptable " | ||
35531 | + " => FORCE_RESCHED\n", t->pid); | ||
35532 | + } else if (is_user_np(t)) { | ||
35533 | + TRACE("psnedf_scheduler_tick: " | ||
35534 | + "%d is non-preemptable, " | ||
35535 | + "preemption delayed.\n", t->pid); | ||
35536 | + request_exit_np(t); | ||
35537 | + } | ||
35538 | + } | ||
35539 | +} | ||
35540 | + | ||
35541 | +static struct task_struct* psnedf_schedule(struct task_struct * prev) | ||
35542 | +{ | ||
35543 | + psnedf_domain_t* pedf = local_pedf; | ||
35544 | + rt_domain_t* edf = &pedf->domain; | ||
35545 | + struct task_struct* next; | ||
35546 | + | ||
35547 | + int out_of_time, sleep, preempt, | ||
35548 | + np, exists, blocks, resched; | ||
35549 | + | ||
35550 | + spin_lock(&pedf->slock); | ||
35551 | + | ||
35552 | + /* sanity checking: | ||
35553 | + * unlike under GSN-EDF, when a task exits (is dead), | ||
35554 | + * pedf->scheduled may be NULL while prev _is_ real-time | ||
35555 | + */ | ||
35556 | + BUG_ON(pedf->scheduled && pedf->scheduled != prev); | ||
35557 | + BUG_ON(pedf->scheduled && !is_realtime(prev)); | ||
35558 | + | ||
35559 | + /* (0) Determine state */ | ||
35560 | + exists = pedf->scheduled != NULL; | ||
35561 | + blocks = exists && !is_running(pedf->scheduled); | ||
35562 | + out_of_time = exists && budget_exhausted(pedf->scheduled); | ||
35563 | + np = exists && is_np(pedf->scheduled); | ||
35564 | + sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP; | ||
35565 | + preempt = edf_preemption_needed(edf, prev); | ||
35566 | + | ||
35567 | + /* If we need to preempt, do so. | ||
35568 | + * The following checks set resched to 1 in case of special | ||
35569 | + * circumstances. | ||
35570 | + */ | ||
35571 | + resched = preempt; | ||
35572 | + | ||
35573 | + /* If a task blocks we have no choice but to reschedule. | ||
35574 | + */ | ||
35575 | + if (blocks) | ||
35576 | + resched = 1; | ||
35577 | + | ||
35578 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
35579 | + * Multiple calls to request_exit_np() don't hurt. | ||
35580 | + */ | ||
35581 | + if (np && (out_of_time || preempt || sleep)) | ||
35582 | + request_exit_np(pedf->scheduled); | ||
35583 | + | ||
35584 | + /* Any task that is preemptable and either exhausts its execution | ||
35585 | + * budget or wants to sleep completes. We may have to reschedule after | ||
35586 | + * this. | ||
35587 | + */ | ||
35588 | + if (!np && (out_of_time || sleep) && !blocks) { | ||
35589 | + job_completion(pedf->scheduled, !sleep); | ||
35590 | + resched = 1; | ||
35591 | + } | ||
35592 | + | ||
35593 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
35594 | + * Switch if we are in RT mode and have no task or if we need to | ||
35595 | + * resched. | ||
35596 | + */ | ||
35597 | + next = NULL; | ||
35598 | + if ((!np || blocks) && (resched || !exists)) { | ||
35599 | + /* Take care of a previously scheduled | ||
35600 | + * job by taking it out of the Linux runqueue. | ||
35601 | + */ | ||
35602 | + if (pedf->scheduled && !blocks) | ||
35603 | + requeue(pedf->scheduled, edf); | ||
35604 | + next = __take_ready(edf); | ||
35605 | + } else | ||
35606 | + /* Only override Linux scheduler if we have a real-time task | ||
35607 | + * scheduled that needs to continue. | ||
35608 | + */ | ||
35609 | + if (exists) | ||
35610 | + next = prev; | ||
35611 | + | ||
35612 | + if (next) { | ||
35613 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
35614 | + set_rt_flags(next, RT_F_RUNNING); | ||
35615 | + } else { | ||
35616 | + TRACE("becoming idle at %llu\n", litmus_clock()); | ||
35617 | + } | ||
35618 | + | ||
35619 | + pedf->scheduled = next; | ||
35620 | + spin_unlock(&pedf->slock); | ||
35621 | + | ||
35622 | + return next; | ||
35623 | +} | ||
35624 | + | ||
35625 | + | ||
35626 | +/* Prepare a task for running in RT mode | ||
35627 | + */ | ||
35628 | +static void psnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
35629 | +{ | ||
35630 | + rt_domain_t* edf = task_edf(t); | ||
35631 | + psnedf_domain_t* pedf = task_pedf(t); | ||
35632 | + unsigned long flags; | ||
35633 | + | ||
35634 | + TRACE_TASK(t, "psn edf: task new, cpu = %d\n", | ||
35635 | + t->rt_param.task_params.cpu); | ||
35636 | + | ||
35637 | + /* setup job parameters */ | ||
35638 | + release_at(t, litmus_clock()); | ||
35639 | + | ||
35640 | +	/* The task should either be running or sit in the queue; otherwise | ||
35641 | +	 * the signal code will try to wake it up, with fatal consequences. | ||
35642 | +	 */ | ||
35643 | + spin_lock_irqsave(&pedf->slock, flags); | ||
35644 | + if (running) { | ||
35645 | + /* there shouldn't be anything else running at the time */ | ||
35646 | + BUG_ON(pedf->scheduled); | ||
35647 | + pedf->scheduled = t; | ||
35648 | + } else { | ||
35649 | + requeue(t, edf); | ||
35650 | + /* maybe we have to reschedule */ | ||
35651 | + preempt(pedf); | ||
35652 | + } | ||
35653 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
35654 | +} | ||
35655 | + | ||
35656 | +static void psnedf_task_wake_up(struct task_struct *task) | ||
35657 | +{ | ||
35658 | + unsigned long flags; | ||
35659 | + psnedf_domain_t* pedf = task_pedf(task); | ||
35660 | + rt_domain_t* edf = task_edf(task); | ||
35661 | + lt_t now; | ||
35662 | + | ||
35663 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
35664 | + spin_lock_irqsave(&pedf->slock, flags); | ||
35665 | + BUG_ON(is_queued(task)); | ||
35666 | +	/* We need to take semaphore-induced suspensions into account: | ||
35667 | +	 * if a job resumes after having been suspended while waiting for | ||
35668 | +	 * a semaphore, it must never be treated as a new job release. | ||
35669 | + * | ||
35670 | + * FIXME: This should be done in some more predictable and userspace-controlled way. | ||
35671 | + */ | ||
35672 | + now = litmus_clock(); | ||
35673 | + if (is_tardy(task, now) && | ||
35674 | + get_rt_flags(task) != RT_F_EXIT_SEM) { | ||
35675 | + /* new sporadic release */ | ||
35676 | + release_at(task, now); | ||
35677 | + sched_trace_task_release(task); | ||
35678 | + } | ||
35679 | + | ||
35680 | + /* Only add to ready queue if it is not the currently-scheduled | ||
35681 | + * task. This could be the case if a task was woken up concurrently | ||
35682 | + * on a remote CPU before the executing CPU got around to actually | ||
35683 | + * de-scheduling the task, i.e., wake_up() raced with schedule() | ||
35684 | + * and won. | ||
35685 | + */ | ||
35686 | + if (pedf->scheduled != task) | ||
35687 | + requeue(task, edf); | ||
35688 | + | ||
35689 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
35690 | + TRACE_TASK(task, "wake up done\n"); | ||
35691 | +} | ||
35692 | + | ||
35693 | +static void psnedf_task_block(struct task_struct *t) | ||
35694 | +{ | ||
35695 | + /* only running tasks can block, thus t is in no queue */ | ||
35696 | + TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state); | ||
35697 | + | ||
35698 | + BUG_ON(!is_realtime(t)); | ||
35699 | + BUG_ON(is_queued(t)); | ||
35700 | +} | ||
35701 | + | ||
35702 | +static void psnedf_task_exit(struct task_struct * t) | ||
35703 | +{ | ||
35704 | + unsigned long flags; | ||
35705 | + psnedf_domain_t* pedf = task_pedf(t); | ||
35706 | + rt_domain_t* edf; | ||
35707 | + | ||
35708 | + spin_lock_irqsave(&pedf->slock, flags); | ||
35709 | + if (is_queued(t)) { | ||
35710 | + /* dequeue */ | ||
35711 | + edf = task_edf(t); | ||
35712 | + remove(edf, t); | ||
35713 | + } | ||
35714 | + if (pedf->scheduled == t) | ||
35715 | + pedf->scheduled = NULL; | ||
35716 | + | ||
35717 | + TRACE_TASK(t, "RIP, now reschedule\n"); | ||
35718 | + | ||
35719 | + preempt(pedf); | ||
35720 | + spin_unlock_irqrestore(&pedf->slock, flags); | ||
35721 | +} | ||
35722 | + | ||
35723 | +#ifdef CONFIG_FMLP | ||
35724 | +static long psnedf_pi_block(struct pi_semaphore *sem, | ||
35725 | + struct task_struct *new_waiter) | ||
35726 | +{ | ||
35727 | + psnedf_domain_t* pedf; | ||
35728 | + rt_domain_t* edf; | ||
35729 | + struct task_struct* t; | ||
35730 | + int cpu = get_partition(new_waiter); | ||
35731 | + | ||
35732 | + BUG_ON(!new_waiter); | ||
35733 | + | ||
35734 | + if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) { | ||
35735 | + TRACE_TASK(new_waiter, " boosts priority\n"); | ||
35736 | + pedf = task_pedf(new_waiter); | ||
35737 | + edf = task_edf(new_waiter); | ||
35738 | + | ||
35739 | + /* interrupts already disabled */ | ||
35740 | + spin_lock(&pedf->slock); | ||
35741 | + | ||
35742 | + /* store new highest-priority task */ | ||
35743 | + sem->hp.cpu_task[cpu] = new_waiter; | ||
35744 | + if (sem->holder && | ||
35745 | + get_partition(sem->holder) == get_partition(new_waiter)) { | ||
35746 | + /* let holder inherit */ | ||
35747 | + sem->holder->rt_param.inh_task = new_waiter; | ||
35748 | + t = sem->holder; | ||
35749 | + if (is_queued(t)) { | ||
35750 | +			/* queued in domain */ | ||
35751 | +			remove(edf, t); | ||
35752 | +			/* re-add so that the priority change takes effect */ | ||
35753 | +			/* FIXME: this looks outdated */ | ||
35754 | + if (is_released(t, litmus_clock())) | ||
35755 | + __add_ready(edf, t); | ||
35756 | + else | ||
35757 | + add_release(edf, t); | ||
35758 | + } | ||
35759 | + } | ||
35760 | + | ||
35761 | + /* check if we need to reschedule */ | ||
35762 | + if (edf_preemption_needed(edf, current)) | ||
35763 | + preempt(pedf); | ||
35764 | + | ||
35765 | + spin_unlock(&pedf->slock); | ||
35766 | + } | ||
35767 | + | ||
35768 | + return 0; | ||
35769 | +} | ||
35770 | + | ||
35771 | +static long psnedf_inherit_priority(struct pi_semaphore *sem, | ||
35772 | + struct task_struct *new_owner) | ||
35773 | +{ | ||
35774 | + int cpu = get_partition(new_owner); | ||
35775 | + | ||
35776 | + new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu]; | ||
35777 | + if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) { | ||
35778 | + TRACE_TASK(new_owner, | ||
35779 | + "inherited priority from %s/%d\n", | ||
35780 | + sem->hp.cpu_task[cpu]->comm, | ||
35781 | + sem->hp.cpu_task[cpu]->pid); | ||
35782 | + } else | ||
35783 | + TRACE_TASK(new_owner, | ||
35784 | + "cannot inherit priority: " | ||
35785 | + "no higher priority job waits on this CPU!\n"); | ||
35786 | + /* make new owner non-preemptable as required by FMLP under | ||
35787 | + * PSN-EDF. | ||
35788 | + */ | ||
35789 | + make_np(new_owner); | ||
35790 | + return 0; | ||
35791 | +} | ||
35792 | + | ||
35793 | + | ||
35794 | +/* This function is called on a semaphore release, and assumes that | ||
35795 | + * the current task is also the semaphore holder. | ||
35796 | + */ | ||
35797 | +static long psnedf_return_priority(struct pi_semaphore *sem) | ||
35798 | +{ | ||
35799 | + struct task_struct* t = current; | ||
35800 | + psnedf_domain_t* pedf = task_pedf(t); | ||
35801 | + rt_domain_t* edf = task_edf(t); | ||
35802 | + int ret = 0; | ||
35803 | + int cpu = get_partition(current); | ||
35804 | + int still_np; | ||
35805 | + | ||
35806 | + | ||
35807 | + /* Find new highest-priority semaphore task | ||
35808 | + * if holder task is the current hp.cpu_task[cpu]. | ||
35809 | + * | ||
35810 | + * Calling function holds sem->wait.lock. | ||
35811 | + */ | ||
35812 | + if (t == sem->hp.cpu_task[cpu]) | ||
35813 | + edf_set_hp_cpu_task(sem, cpu); | ||
35814 | + | ||
35815 | + still_np = take_np(current); | ||
35816 | + | ||
35817 | + /* Since we don't nest resources, this | ||
35818 | + * should always be zero */ | ||
35819 | + BUG_ON(still_np); | ||
35820 | + | ||
35821 | + if (current->rt_param.inh_task) { | ||
35822 | + TRACE_CUR("return priority of %s/%d\n", | ||
35823 | + current->rt_param.inh_task->comm, | ||
35824 | + current->rt_param.inh_task->pid); | ||
35825 | + } else | ||
35826 | + TRACE_CUR(" no priority to return %p\n", sem); | ||
35827 | + | ||
35828 | + | ||
35829 | + /* Always check for delayed preemptions that might have become | ||
35830 | + * necessary due to non-preemptive execution. | ||
35831 | + */ | ||
35832 | + spin_lock(&pedf->slock); | ||
35833 | + | ||
35834 | + /* Reset inh_task to NULL. */ | ||
35835 | + current->rt_param.inh_task = NULL; | ||
35836 | + | ||
35837 | + /* check if we need to reschedule */ | ||
35838 | + if (edf_preemption_needed(edf, current)) | ||
35839 | + preempt(pedf); | ||
35840 | + | ||
35841 | + spin_unlock(&pedf->slock); | ||
35842 | + | ||
35843 | + | ||
35844 | + return ret; | ||
35845 | +} | ||
35846 | + | ||
35847 | +#endif | ||
35848 | + | ||
35849 | +static long psnedf_admit_task(struct task_struct* tsk) | ||
35850 | +{ | ||
35851 | + return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | ||
35852 | +} | ||
35853 | + | ||
35854 | +/* Plugin object */ | ||
35855 | +static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = { | ||
35856 | + .plugin_name = "PSN-EDF", | ||
35857 | +#ifdef CONFIG_SRP | ||
35858 | + .srp_active = 1, | ||
35859 | +#endif | ||
35860 | + .tick = psnedf_tick, | ||
35861 | + .task_new = psnedf_task_new, | ||
35862 | + .complete_job = complete_job, | ||
35863 | + .task_exit = psnedf_task_exit, | ||
35864 | + .schedule = psnedf_schedule, | ||
35865 | + .task_wake_up = psnedf_task_wake_up, | ||
35866 | + .task_block = psnedf_task_block, | ||
35867 | +#ifdef CONFIG_FMLP | ||
35868 | + .fmlp_active = 1, | ||
35869 | + .pi_block = psnedf_pi_block, | ||
35870 | + .inherit_priority = psnedf_inherit_priority, | ||
35871 | + .return_priority = psnedf_return_priority, | ||
35872 | +#endif | ||
35873 | + .admit_task = psnedf_admit_task | ||
35874 | +}; | ||
35875 | + | ||
35876 | + | ||
35877 | +static int __init init_psn_edf(void) | ||
35878 | +{ | ||
35879 | + int i; | ||
35880 | + | ||
35881 | +	/* We do not really want to support CPU hotplug, do we? ;) | ||
35882 | +	 * However, if we were ever crazy enough to do so, | ||
35883 | +	 * we could not use num_online_cpus() here. | ||
35884 | +	 */ | ||
35885 | + for (i = 0; i < num_online_cpus(); i++) { | ||
35886 | + psnedf_domain_init(remote_pedf(i), | ||
35887 | + psnedf_check_resched, | ||
35888 | + NULL, i); | ||
35889 | + } | ||
35890 | + return register_sched_plugin(&psn_edf_plugin); | ||
35891 | +} | ||
35892 | + | ||
35893 | +module_init(init_psn_edf); | ||
35894 | + | ||
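The heart of psnedf_schedule() above is the case analysis over the state flags it derives (exists, blocks, out_of_time, np, sleep, preempt). The following standalone C sketch, with purely illustrative names and not part of the patch, reduces that analysis to a pure function so the decision table can be checked in userspace:

	/* Standalone sketch -- not part of the patch; all names are illustrative. */
	#include <stdio.h>

	struct sched_state {
		int exists;      /* a real-time task is currently scheduled   */
		int blocks;      /* that task is no longer runnable           */
		int out_of_time; /* its budget is exhausted                   */
		int np;          /* it is inside a non-preemptive section     */
		int sleep;       /* it signalled job completion (RT_F_SLEEP)  */
		int preempt;     /* a higher-priority job is ready            */
	};

	/* Returns 1 iff the plugin must pick a new task from the ready queue. */
	static int must_pick_new_task(const struct sched_state *s)
	{
		int resched = s->preempt;

		if (s->blocks)
			resched = 1;
		if (!s->np && (s->out_of_time || s->sleep) && !s->blocks)
			resched = 1; /* the job completes; its successor is queued */

		/* Mirrors "(!np || blocks) && (resched || !exists)" above. */
		return (!s->np || s->blocks) && (resched || !s->exists);
	}

	int main(void)
	{
		struct sched_state s = { .exists = 1, .np = 1, .preempt = 1 };

		printf("np section, preemption pending -> pick new task: %d\n",
		       must_pick_new_task(&s));
		s.np = 0;
		printf("np section left                -> pick new task: %d\n",
		       must_pick_new_task(&s));
		return 0;
	}

Note in particular that a job inside a non-preemptive section keeps the CPU even when preempt is set; the pending preemption only takes effect once sys_exit_np() ends the section.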
35895 | diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c | ||
35896 | new file mode 100644 | ||
35897 | index 0000000..39a543e | ||
35898 | --- /dev/null | ||
35899 | +++ b/litmus/sched_task_trace.c | ||
35900 | @@ -0,0 +1,204 @@ | ||
35901 | +/* | ||
35902 | + * sched_task_trace.c -- record scheduling events to a byte stream | ||
35903 | + */ | ||
35904 | + | ||
35905 | +#define NO_TASK_TRACE_DECLS | ||
35906 | + | ||
35907 | +#include <linux/module.h> | ||
35908 | +#include <linux/sched.h> | ||
35909 | +#include <linux/percpu.h> | ||
35910 | + | ||
35911 | +#include <litmus/ftdev.h> | ||
35912 | +#include <litmus/litmus.h> | ||
35913 | + | ||
35914 | +#include <litmus/sched_trace.h> | ||
35915 | +#include <litmus/feather_trace.h> | ||
35916 | +#include <litmus/ftdev.h> | ||
35917 | + | ||
35918 | + | ||
35919 | +/* set MAJOR to 0 to have it dynamically assigned */ | ||
35920 | +#define FT_TASK_TRACE_MAJOR 253 | ||
35921 | +#define NO_EVENTS 4096 /* this is a buffer of 12 4k pages per CPU */ | ||
35922 | + | ||
35923 | +#define now() litmus_clock() | ||
35924 | + | ||
35925 | +struct local_buffer { | ||
35926 | + struct st_event_record record[NO_EVENTS]; | ||
35927 | + char flag[NO_EVENTS]; | ||
35928 | + struct ft_buffer ftbuf; | ||
35929 | +}; | ||
35930 | + | ||
35931 | +DEFINE_PER_CPU(struct local_buffer, st_event_buffer); | ||
35932 | + | ||
35933 | +static struct ftdev st_dev; | ||
35934 | + | ||
35935 | +static int st_dev_can_open(struct ftdev *dev, unsigned int cpu) | ||
35936 | +{ | ||
35937 | + return cpu_online(cpu) ? 0 : -ENODEV; | ||
35938 | +} | ||
35939 | + | ||
35940 | +static int __init init_sched_task_trace(void) | ||
35941 | +{ | ||
35942 | + struct local_buffer* buf; | ||
35943 | + int i, ok = 0; | ||
35944 | + ftdev_init(&st_dev, THIS_MODULE); | ||
35945 | + for (i = 0; i < NR_CPUS; i++) { | ||
35946 | + buf = &per_cpu(st_event_buffer, i); | ||
35947 | + ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, | ||
35948 | + sizeof(struct st_event_record), | ||
35949 | + buf->flag, | ||
35950 | + buf->record); | ||
35951 | + st_dev.minor[i].buf = &buf->ftbuf; | ||
35952 | + } | ||
35953 | + if (ok == NR_CPUS) { | ||
35954 | + st_dev.minor_cnt = NR_CPUS; | ||
35955 | + st_dev.can_open = st_dev_can_open; | ||
35956 | + return register_ftdev(&st_dev, "sched_trace", FT_TASK_TRACE_MAJOR); | ||
35957 | + } else { | ||
35958 | + return -EINVAL; | ||
35959 | + } | ||
35960 | +} | ||
35961 | + | ||
35962 | +module_init(init_sched_task_trace); | ||
35963 | + | ||
35964 | + | ||
35965 | +static inline struct st_event_record* get_record(u8 type, struct task_struct* t) | ||
35966 | +{ | ||
35967 | + struct st_event_record* rec = NULL; | ||
35968 | + struct local_buffer* buf; | ||
35969 | + | ||
35970 | + buf = &get_cpu_var(st_event_buffer); | ||
35971 | + if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) { | ||
35972 | + rec->hdr.type = type; | ||
35973 | + rec->hdr.cpu = smp_processor_id(); | ||
35974 | + rec->hdr.pid = t ? t->pid : 0; | ||
35975 | + rec->hdr.job = t ? t->rt_param.job_params.job_no : 0; | ||
35976 | + } else { | ||
35977 | + put_cpu_var(st_event_buffer); | ||
35978 | + } | ||
35979 | + /* rec will be NULL if it failed */ | ||
35980 | + return rec; | ||
35981 | +} | ||
35982 | + | ||
35983 | +static inline void put_record(struct st_event_record* rec) | ||
35984 | +{ | ||
35985 | + struct local_buffer* buf; | ||
35986 | + buf = &__get_cpu_var(st_event_buffer); | ||
35987 | + ft_buffer_finish_write(&buf->ftbuf, rec); | ||
35988 | + put_cpu_var(st_event_buffer); | ||
35989 | +} | ||
35990 | + | ||
35991 | +feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task) | ||
35992 | +{ | ||
35993 | + struct task_struct *t = (struct task_struct*) _task; | ||
35994 | + struct st_event_record* rec = get_record(ST_NAME, t); | ||
35995 | + int i; | ||
35996 | + if (rec) { | ||
35997 | + for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++) | ||
35998 | + rec->data.name.cmd[i] = t->comm[i]; | ||
35999 | + put_record(rec); | ||
36000 | + } | ||
36001 | +} | ||
36002 | + | ||
36003 | +feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task) | ||
36004 | +{ | ||
36005 | + struct task_struct *t = (struct task_struct*) _task; | ||
36006 | + struct st_event_record* rec = get_record(ST_PARAM, t); | ||
36007 | + if (rec) { | ||
36008 | + rec->data.param.wcet = get_exec_cost(t); | ||
36009 | + rec->data.param.period = get_rt_period(t); | ||
36010 | + rec->data.param.phase = get_rt_phase(t); | ||
36011 | + rec->data.param.partition = get_partition(t); | ||
36012 | + put_record(rec); | ||
36013 | + } | ||
36014 | +} | ||
36015 | + | ||
36016 | +feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task) | ||
36017 | +{ | ||
36018 | + struct task_struct *t = (struct task_struct*) _task; | ||
36019 | + struct st_event_record* rec = get_record(ST_RELEASE, t); | ||
36020 | + if (rec) { | ||
36021 | + rec->data.release.release = get_release(t); | ||
36022 | + rec->data.release.deadline = get_deadline(t); | ||
36023 | + put_record(rec); | ||
36024 | + } | ||
36025 | +} | ||
36026 | + | ||
36027 | +/* skipped: st_assigned_data, we don't use it atm */ | ||
36028 | + | ||
36029 | +feather_callback void do_sched_trace_task_switch_to(unsigned long id, | ||
36030 | + unsigned long _task) | ||
36031 | +{ | ||
36032 | + struct task_struct *t = (struct task_struct*) _task; | ||
36033 | + struct st_event_record* rec; | ||
36034 | + if (is_realtime(t)) { | ||
36035 | + rec = get_record(ST_SWITCH_TO, t); | ||
36036 | + if (rec) { | ||
36037 | + rec->data.switch_to.when = now(); | ||
36038 | + rec->data.switch_to.exec_time = get_exec_time(t); | ||
36039 | + put_record(rec); | ||
36040 | + } | ||
36041 | + } | ||
36042 | +} | ||
36043 | + | ||
36044 | +feather_callback void do_sched_trace_task_switch_away(unsigned long id, | ||
36045 | + unsigned long _task) | ||
36046 | +{ | ||
36047 | + struct task_struct *t = (struct task_struct*) _task; | ||
36048 | + struct st_event_record* rec; | ||
36049 | + if (is_realtime(t)) { | ||
36050 | + rec = get_record(ST_SWITCH_AWAY, t); | ||
36051 | + if (rec) { | ||
36052 | + rec->data.switch_away.when = now(); | ||
36053 | + rec->data.switch_away.exec_time = get_exec_time(t); | ||
36054 | + put_record(rec); | ||
36055 | + } | ||
36056 | + } | ||
36057 | +} | ||
36058 | + | ||
36059 | +feather_callback void do_sched_trace_task_completion(unsigned long id, | ||
36060 | + unsigned long _task, | ||
36061 | + unsigned long forced) | ||
36062 | +{ | ||
36063 | + struct task_struct *t = (struct task_struct*) _task; | ||
36064 | + struct st_event_record* rec = get_record(ST_COMPLETION, t); | ||
36065 | + if (rec) { | ||
36066 | + rec->data.completion.when = now(); | ||
36067 | + rec->data.completion.forced = forced; | ||
36068 | + put_record(rec); | ||
36069 | + } | ||
36070 | +} | ||
36071 | + | ||
36072 | +feather_callback void do_sched_trace_task_block(unsigned long id, | ||
36073 | + unsigned long _task) | ||
36074 | +{ | ||
36075 | + struct task_struct *t = (struct task_struct*) _task; | ||
36076 | + struct st_event_record* rec = get_record(ST_BLOCK, t); | ||
36077 | + if (rec) { | ||
36078 | + rec->data.block.when = now(); | ||
36079 | + put_record(rec); | ||
36080 | + } | ||
36081 | +} | ||
36082 | + | ||
36083 | +feather_callback void do_sched_trace_task_resume(unsigned long id, | ||
36084 | + unsigned long _task) | ||
36085 | +{ | ||
36086 | + struct task_struct *t = (struct task_struct*) _task; | ||
36087 | + struct st_event_record* rec = get_record(ST_RESUME, t); | ||
36088 | + if (rec) { | ||
36089 | + rec->data.resume.when = now(); | ||
36090 | + put_record(rec); | ||
36091 | + } | ||
36092 | +} | ||
36093 | + | ||
36094 | +feather_callback void do_sched_trace_sys_release(unsigned long id, | ||
36095 | + unsigned long _start) | ||
36096 | +{ | ||
36097 | + lt_t *start = (lt_t*) _start; | ||
36098 | + struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL); | ||
36099 | + if (rec) { | ||
36100 | + rec->data.sys_release.when = now(); | ||
36101 | + rec->data.sys_release.release = *start; | ||
36102 | + put_record(rec); | ||
36103 | + } | ||
36104 | +} | ||
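Every tracepoint above follows the same claim/fill/commit discipline: get_record() reserves a slot in the per-CPU Feather-Trace buffer (pinning the CPU), the caller fills in the payload, and put_record() publishes the slot; if no slot is free, the event is silently dropped rather than blocking the scheduling path. A toy single-producer buffer in userspace, with illustrative names only, shows the same discipline:

	/* Toy userspace sketch of the claim/fill/commit pattern; illustrative only. */
	#include <stdio.h>

	#define SLOTS 8

	struct record { unsigned type; unsigned long when; };

	static struct record slots[SLOTS];
	static unsigned head;  /* next slot the producer will claim */
	static unsigned tail;  /* next slot a consumer would read   */

	/* Claim a slot; returns NULL (event is dropped) if the buffer is full. */
	static struct record *start_write(void)
	{
		if (head - tail == SLOTS)
			return NULL;
		return &slots[head % SLOTS];
	}

	/* Publish the previously claimed slot. */
	static void finish_write(void)
	{
		head++;
	}

	int main(void)
	{
		unsigned dropped = 0, i;

		for (i = 0; i < 10; i++) {       /* ten events, eight slots */
			struct record *rec = start_write();
			if (!rec) {
				dropped++;       /* full: drop, never block */
				continue;
			}
			rec->type = 1;           /* e.g. a completion event */
			rec->when = i;
			finish_write();
		}
		printf("%u records stored, %u dropped\n", head - tail, dropped);
		return 0;
	}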
36105 | diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c | ||
36106 | new file mode 100644 | ||
36107 | index 0000000..ad0b138 | ||
36108 | --- /dev/null | ||
36109 | +++ b/litmus/sched_trace.c | ||
36110 | @@ -0,0 +1,378 @@ | ||
36111 | +/* | ||
36112 | + * sched_trace.c -- record scheduling events to a byte stream. | ||
36113 | + */ | ||
36114 | +#include <linux/spinlock.h> | ||
36115 | +#include <linux/semaphore.h> | ||
36116 | + | ||
36117 | +#include <linux/fs.h> | ||
36118 | +#include <linux/miscdevice.h> | ||
36119 | +#include <asm/uaccess.h> | ||
36120 | +#include <linux/module.h> | ||
36121 | +#include <linux/sysrq.h> | ||
36122 | + | ||
36123 | +#include <linux/kfifo.h> | ||
36124 | + | ||
36125 | +#include <litmus/sched_trace.h> | ||
36126 | +#include <litmus/litmus.h> | ||
36127 | + | ||
36128 | +#define SCHED_TRACE_NAME "litmus/log" | ||
36129 | + | ||
36130 | +/* Allocate a buffer of about 32k per CPU */ | ||
36131 | +#define LITMUS_TRACE_BUF_PAGES 8 | ||
36132 | +#define LITMUS_TRACE_BUF_SIZE (PAGE_SIZE * LITMUS_TRACE_BUF_PAGES * NR_CPUS) | ||
36133 | + | ||
36134 | +/* Max length of one read from the buffer */ | ||
36135 | +#define MAX_READ_LEN (64 * 1024) | ||
36136 | + | ||
36137 | +/* Max length for one write --- from kernel --- to the buffer */ | ||
36138 | +#define MSG_SIZE 255 | ||
36139 | + | ||
36140 | +/* Inner ring buffer structure */ | ||
36141 | +typedef struct { | ||
36142 | + rwlock_t del_lock; | ||
36143 | + | ||
36144 | + /* the buffer */ | ||
36145 | + struct kfifo *kfifo; | ||
36146 | +} ring_buffer_t; | ||
36147 | + | ||
36148 | +/* Main buffer structure */ | ||
36149 | +typedef struct { | ||
36150 | + ring_buffer_t buf; | ||
36151 | + atomic_t reader_cnt; | ||
36152 | + struct semaphore reader_mutex; | ||
36153 | +} trace_buffer_t; | ||
36154 | + | ||
36155 | + | ||
36156 | +/* | ||
36157 | + * Inner buffer management functions | ||
36158 | + */ | ||
36159 | +void rb_init(ring_buffer_t* buf) | ||
36160 | +{ | ||
36161 | + rwlock_init(&buf->del_lock); | ||
36162 | + buf->kfifo = NULL; | ||
36163 | +} | ||
36164 | + | ||
36165 | +int rb_alloc_buf(ring_buffer_t* buf, unsigned int size) | ||
36166 | +{ | ||
36167 | + unsigned long flags; | ||
36168 | + | ||
36169 | + write_lock_irqsave(&buf->del_lock, flags); | ||
36170 | + | ||
36171 | + buf->kfifo = kfifo_alloc(size, GFP_ATOMIC, NULL); | ||
36172 | + | ||
36173 | + write_unlock_irqrestore(&buf->del_lock, flags); | ||
36174 | + | ||
36175 | + if(IS_ERR(buf->kfifo)) { | ||
36176 | + printk(KERN_ERR "kfifo_alloc failed\n"); | ||
36177 | + return PTR_ERR(buf->kfifo); | ||
36178 | + } | ||
36179 | + | ||
36180 | + return 0; | ||
36181 | +} | ||
36182 | + | ||
36183 | +int rb_free_buf(ring_buffer_t* buf) | ||
36184 | +{ | ||
36185 | + unsigned long flags; | ||
36186 | + | ||
36187 | + write_lock_irqsave(&buf->del_lock, flags); | ||
36188 | + | ||
36189 | + BUG_ON(!buf->kfifo); | ||
36190 | + kfifo_free(buf->kfifo); | ||
36191 | + | ||
36192 | + buf->kfifo = NULL; | ||
36193 | + | ||
36194 | + write_unlock_irqrestore(&buf->del_lock, flags); | ||
36195 | + | ||
36196 | + return 0; | ||
36197 | +} | ||
36198 | + | ||
36199 | +/* | ||
36200 | + * Assumption: concurrent writes are serialized externally | ||
36201 | + * | ||
36202 | + * Will only succeed if there is enough space for all len bytes. | ||
36203 | + */ | ||
36204 | +int rb_put(ring_buffer_t* buf, char* mem, size_t len) | ||
36205 | +{ | ||
36206 | + unsigned long flags; | ||
36207 | + int error = 0; | ||
36208 | + | ||
36209 | + read_lock_irqsave(&buf->del_lock, flags); | ||
36210 | + | ||
36211 | + if (!buf->kfifo) { | ||
36212 | + error = -ENODEV; | ||
36213 | + goto out; | ||
36214 | + } | ||
36215 | + | ||
36216 | + if((__kfifo_put(buf->kfifo, mem, len)) < len) { | ||
36217 | + error = -ENOMEM; | ||
36218 | + goto out; | ||
36219 | + } | ||
36220 | + | ||
36221 | + out: | ||
36222 | + read_unlock_irqrestore(&buf->del_lock, flags); | ||
36223 | + return error; | ||
36224 | +} | ||
36225 | + | ||
36226 | +/* Assumption: concurrent reads are serialized externally */ | ||
36227 | +int rb_get(ring_buffer_t* buf, char* mem, size_t len) | ||
36228 | +{ | ||
36229 | + unsigned long flags; | ||
36230 | + int error = 0; | ||
36231 | + | ||
36232 | + read_lock_irqsave(&buf->del_lock, flags); | ||
36233 | + if (!buf->kfifo) { | ||
36234 | + error = -ENODEV; | ||
36235 | + goto out; | ||
36236 | + } | ||
36237 | + | ||
36238 | + error = __kfifo_get(buf->kfifo, (unsigned char*)mem, len); | ||
36239 | + | ||
36240 | + out: | ||
36241 | + read_unlock_irqrestore(&buf->del_lock, flags); | ||
36242 | + return error; | ||
36243 | +} | ||
36244 | + | ||
36245 | +/* | ||
36246 | + * Device Driver management | ||
36247 | + */ | ||
36248 | +static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED; | ||
36249 | +static trace_buffer_t log_buffer; | ||
36250 | + | ||
36251 | +static void init_log_buffer(void) | ||
36252 | +{ | ||
36253 | + rb_init(&log_buffer.buf); | ||
36254 | + atomic_set(&log_buffer.reader_cnt,0); | ||
36255 | + init_MUTEX(&log_buffer.reader_mutex); | ||
36256 | +} | ||
36257 | + | ||
36258 | +static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); | ||
36259 | + | ||
36260 | +/* | ||
36261 | + * sched_trace_log_message - Write to the trace buffer (log_buffer) | ||
36262 | + * | ||
36263 | + * This is the only function accessing the log_buffer from inside the | ||
36264 | + * kernel for writing. | ||
36265 | + * Concurrent calls are serialized via log_buffer_lock. | ||
36266 | + * A formatted message is at most MSG_SIZE bytes long, including the | ||
36267 | + * terminating null byte (which is not copied into the buffer). | ||
36268 | + */ | ||
36269 | +void sched_trace_log_message(const char* fmt, ...) | ||
36270 | +{ | ||
36271 | + unsigned long flags; | ||
36272 | + va_list args; | ||
36273 | + size_t len; | ||
36274 | + char* buf; | ||
36275 | + | ||
36276 | + va_start(args, fmt); | ||
36277 | + local_irq_save(flags); | ||
36278 | + | ||
36279 | + /* format message */ | ||
36280 | + buf = __get_cpu_var(fmt_buffer); | ||
36281 | + len = vscnprintf(buf, MSG_SIZE, fmt, args); | ||
36282 | + | ||
36283 | + spin_lock(&log_buffer_lock); | ||
36284 | +	/* Don't copy the trailing null byte; we don't want null bytes | ||
36285 | +	 * in a text file. | ||
36286 | + */ | ||
36287 | + rb_put(&log_buffer.buf, buf, len); | ||
36288 | + spin_unlock(&log_buffer_lock); | ||
36289 | + | ||
36290 | + local_irq_restore(flags); | ||
36291 | + va_end(args); | ||
36292 | +} | ||
36293 | + | ||
36294 | +/* | ||
36295 | + * log_read - Read the trace buffer | ||
36296 | + * | ||
36297 | + * This function is called as a file operation from userspace. | ||
36298 | + * Readers can sleep. Access is serialized through reader_mutex | ||
36299 | + */ | ||
36300 | +static ssize_t log_read(struct file *filp, char __user *to, size_t len, | ||
36301 | + loff_t *f_pos) | ||
36302 | +{ | ||
36303 | + /* we ignore f_pos, this is strictly sequential */ | ||
36304 | + | ||
36305 | + ssize_t error = -EINVAL; | ||
36306 | + char* mem; | ||
36307 | + trace_buffer_t *tbuf = filp->private_data; | ||
36308 | + | ||
36309 | + if (down_interruptible(&tbuf->reader_mutex)) { | ||
36310 | + error = -ERESTARTSYS; | ||
36311 | + goto out; | ||
36312 | + } | ||
36313 | + | ||
36314 | + if (len > MAX_READ_LEN) | ||
36315 | + len = MAX_READ_LEN; | ||
36316 | + | ||
36317 | + mem = kmalloc(len, GFP_KERNEL); | ||
36318 | + if (!mem) { | ||
36319 | + error = -ENOMEM; | ||
36320 | + goto out_unlock; | ||
36321 | + } | ||
36322 | + | ||
36323 | + error = rb_get(&tbuf->buf, mem, len); | ||
36324 | + while (!error) { | ||
36325 | + set_current_state(TASK_INTERRUPTIBLE); | ||
36326 | + schedule_timeout(110); | ||
36327 | + if (signal_pending(current)) | ||
36328 | + error = -ERESTARTSYS; | ||
36329 | + else | ||
36330 | + error = rb_get(&tbuf->buf, mem, len); | ||
36331 | + } | ||
36332 | + | ||
36333 | + if (error > 0 && copy_to_user(to, mem, error)) | ||
36334 | + error = -EFAULT; | ||
36335 | + | ||
36336 | + kfree(mem); | ||
36337 | + out_unlock: | ||
36338 | + up(&tbuf->reader_mutex); | ||
36339 | + out: | ||
36340 | + return error; | ||
36341 | +} | ||
36342 | + | ||
36343 | +/* | ||
36344 | + * Enable redirection of printk() messages to the trace buffer. | ||
36345 | + * Defined in kernel/printk.c | ||
36346 | + */ | ||
36347 | +extern int trace_override; | ||
36348 | +extern int trace_recurse; | ||
36349 | + | ||
36350 | +/* | ||
36351 | + * log_open - open the global log message ring buffer. | ||
36352 | + */ | ||
36353 | +static int log_open(struct inode *in, struct file *filp) | ||
36354 | +{ | ||
36355 | + int error = -EINVAL; | ||
36356 | + trace_buffer_t* tbuf; | ||
36357 | + | ||
36358 | + tbuf = &log_buffer; | ||
36359 | + | ||
36360 | + if (down_interruptible(&tbuf->reader_mutex)) { | ||
36361 | + error = -ERESTARTSYS; | ||
36362 | + goto out; | ||
36363 | + } | ||
36364 | + | ||
36365 | + /* first open must allocate buffers */ | ||
36366 | + if (atomic_inc_return(&tbuf->reader_cnt) == 1) { | ||
36367 | + if ((error = rb_alloc_buf(&tbuf->buf, LITMUS_TRACE_BUF_SIZE))) | ||
36368 | + { | ||
36369 | + atomic_dec(&tbuf->reader_cnt); | ||
36370 | + goto out_unlock; | ||
36371 | + } | ||
36372 | + } | ||
36373 | + | ||
36374 | + error = 0; | ||
36375 | + filp->private_data = tbuf; | ||
36376 | + | ||
36377 | + printk(KERN_DEBUG | ||
36378 | + "sched_trace kfifo at 0x%p with buffer starting at: 0x%p\n", | ||
36379 | + tbuf->buf.kfifo, &((tbuf->buf.kfifo)->buffer)); | ||
36380 | + | ||
36381 | + /* override printk() */ | ||
36382 | + trace_override++; | ||
36383 | + | ||
36384 | + out_unlock: | ||
36385 | + up(&tbuf->reader_mutex); | ||
36386 | + out: | ||
36387 | + return error; | ||
36388 | +} | ||
36389 | + | ||
36390 | +static int log_release(struct inode *in, struct file *filp) | ||
36391 | +{ | ||
36392 | + int error = -EINVAL; | ||
36393 | + trace_buffer_t* tbuf = filp->private_data; | ||
36394 | + | ||
36395 | + BUG_ON(!filp->private_data); | ||
36396 | + | ||
36397 | + if (down_interruptible(&tbuf->reader_mutex)) { | ||
36398 | + error = -ERESTARTSYS; | ||
36399 | + goto out; | ||
36400 | + } | ||
36401 | + | ||
36402 | + /* last release must deallocate buffers */ | ||
36403 | + if (atomic_dec_return(&tbuf->reader_cnt) == 0) { | ||
36404 | + error = rb_free_buf(&tbuf->buf); | ||
36405 | + } | ||
36406 | + | ||
36407 | + /* release printk() overriding */ | ||
36408 | + trace_override--; | ||
36409 | + | ||
36410 | + printk(KERN_DEBUG "sched_trace kfifo released\n"); | ||
36411 | + | ||
36412 | + up(&tbuf->reader_mutex); | ||
36413 | + out: | ||
36414 | + return error; | ||
36415 | +} | ||
36416 | + | ||
36417 | +/* | ||
36418 | + * log_fops - The file operations for accessing the global LITMUS log message | ||
36419 | + * buffer. | ||
36420 | + * | ||
36421 | + * Except for opening the device file it uses the same operations as trace_fops. | ||
36422 | + */ | ||
36423 | +static struct file_operations log_fops = { | ||
36424 | + .owner = THIS_MODULE, | ||
36425 | + .open = log_open, | ||
36426 | + .release = log_release, | ||
36427 | + .read = log_read, | ||
36428 | +}; | ||
36429 | + | ||
36430 | +static struct miscdevice litmus_log_dev = { | ||
36431 | + .name = SCHED_TRACE_NAME, | ||
36432 | + .minor = MISC_DYNAMIC_MINOR, | ||
36433 | + .fops = &log_fops, | ||
36434 | +}; | ||
36435 | + | ||
36436 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
36437 | +void dump_trace_buffer(int max) | ||
36438 | +{ | ||
36439 | + char line[80]; | ||
36440 | + int len; | ||
36441 | + int count = 0; | ||
36442 | + | ||
36443 | + /* potential, but very unlikely, race... */ | ||
36444 | + trace_recurse = 1; | ||
36445 | + while ((max == 0 || count++ < max) && | ||
36446 | + (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) { | ||
36447 | + line[len] = '\0'; | ||
36448 | + printk("%s", line); | ||
36449 | + } | ||
36450 | + trace_recurse = 0; | ||
36451 | +} | ||
36452 | + | ||
36453 | +static void sysrq_dump_trace_buffer(int key, struct tty_struct *tty) | ||
36454 | +{ | ||
36455 | + dump_trace_buffer(100); | ||
36456 | +} | ||
36457 | + | ||
36458 | +static struct sysrq_key_op sysrq_dump_trace_buffer_op = { | ||
36459 | + .handler = sysrq_dump_trace_buffer, | ||
36460 | + .help_msg = "dump-trace-buffer(Y)", | ||
36461 | + .action_msg = "writing content of TRACE() buffer", | ||
36462 | +}; | ||
36463 | +#endif | ||
36464 | + | ||
36465 | +static int __init init_sched_trace(void) | ||
36466 | +{ | ||
36467 | + printk("Initializing TRACE() device\n"); | ||
36468 | + init_log_buffer(); | ||
36469 | + | ||
36470 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
36471 | + /* offer some debugging help */ | ||
36472 | + if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op)) | ||
36473 | + printk("Registered dump-trace-buffer(Y) magic sysrq.\n"); | ||
36474 | + else | ||
36475 | + printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); | ||
36476 | +#endif | ||
36477 | + | ||
36478 | + | ||
36479 | + return misc_register(&litmus_log_dev); | ||
36480 | +} | ||
36481 | + | ||
36482 | +static void __exit exit_sched_trace(void) | ||
36483 | +{ | ||
36484 | + misc_deregister(&litmus_log_dev); | ||
36485 | +} | ||
36486 | + | ||
36487 | +module_init(init_sched_trace); | ||
36488 | +module_exit(exit_sched_trace); | ||
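With this driver loaded, the TRACE() text stream is exposed through the misc device registered above; under udev the node typically appears as /dev/litmus/log (the exact path is an assumption based on SCHED_TRACE_NAME, not something stated in the patch). A minimal reader is a blocking read loop, equivalent to cat /dev/litmus/log:

	/* Userspace sketch; the device path is an assumption (see above). */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/dev/litmus/log", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/litmus/log");
			return 1;
		}
		/* log_read() sleeps in short intervals while the buffer is
		 * empty, so this loop simply runs until it is interrupted. */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t) n, stdout);

		close(fd);
		return 0;
	}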
36489 | diff --git a/litmus/srp.c b/litmus/srp.c | ||
36490 | new file mode 100644 | ||
36491 | index 0000000..71639b9 | ||
36492 | --- /dev/null | ||
36493 | +++ b/litmus/srp.c | ||
36494 | @@ -0,0 +1,318 @@ | ||
36495 | +/* ************************************************************************** */ | ||
36496 | +/* STACK RESOURCE POLICY */ | ||
36497 | +/* ************************************************************************** */ | ||
36498 | + | ||
36499 | +#include <asm/atomic.h> | ||
36500 | +#include <linux/wait.h> | ||
36501 | +#include <litmus/litmus.h> | ||
36502 | +#include <litmus/sched_plugin.h> | ||
36503 | + | ||
36504 | +#include <litmus/fdso.h> | ||
36505 | + | ||
36506 | +#include <litmus/trace.h> | ||
36507 | + | ||
36508 | + | ||
36509 | +#ifdef CONFIG_SRP | ||
36510 | + | ||
36511 | +struct srp_priority { | ||
36512 | + struct list_head list; | ||
36513 | + unsigned int period; | ||
36514 | + pid_t pid; | ||
36515 | +}; | ||
36516 | + | ||
36517 | +#define list2prio(l) list_entry(l, struct srp_priority, list) | ||
36518 | + | ||
36519 | +/* SRP task priority comparison function. Smaller periods have higher | ||
36520 | + * priority; ties are broken by PID. Special case: period == 0 <=> no priority. | ||
36521 | + */ | ||
36522 | +static int srp_higher_prio(struct srp_priority* first, | ||
36523 | + struct srp_priority* second) | ||
36524 | +{ | ||
36525 | + if (!first->period) | ||
36526 | + return 0; | ||
36527 | + else | ||
36528 | + return !second->period || | ||
36529 | + first->period < second->period || ( | ||
36530 | + first->period == second->period && | ||
36531 | + first->pid < second->pid); | ||
36532 | +} | ||
36533 | + | ||
36534 | +struct srp { | ||
36535 | + struct list_head ceiling; | ||
36536 | + wait_queue_head_t ceiling_blocked; | ||
36537 | +}; | ||
36538 | + | ||
36539 | + | ||
36540 | +atomic_t srp_objects_in_use = ATOMIC_INIT(0); | ||
36541 | + | ||
36542 | +DEFINE_PER_CPU(struct srp, srp); | ||
36543 | + | ||
36544 | + | ||
36545 | +/* Initialize SRP per-CPU ceiling structures at boot time. */ | ||
36546 | +static int __init srp_init(void) | ||
36547 | +{ | ||
36548 | + int i; | ||
36549 | + | ||
36550 | + printk("Initializing SRP per-CPU ceilings..."); | ||
36551 | + for (i = 0; i < NR_CPUS; i++) { | ||
36552 | + init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked); | ||
36553 | + INIT_LIST_HEAD(&per_cpu(srp, i).ceiling); | ||
36554 | + } | ||
36555 | + printk(" done!\n"); | ||
36556 | + | ||
36557 | + return 0; | ||
36558 | +} | ||
36559 | +module_init(srp_init); | ||
36560 | + | ||
36561 | + | ||
36562 | +#define system_ceiling(srp) list2prio(srp->ceiling.next) | ||
36563 | + | ||
36564 | + | ||
36565 | +#define UNDEF_SEM -2 | ||
36566 | + | ||
36567 | + | ||
36568 | +/* struct for uniprocessor SRP "semaphore" */ | ||
36569 | +struct srp_semaphore { | ||
36570 | + struct srp_priority ceiling; | ||
36571 | + struct task_struct* owner; | ||
36572 | + int cpu; /* cpu associated with this "semaphore" and resource */ | ||
36573 | +}; | ||
36574 | + | ||
36575 | +#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling) | ||
36576 | + | ||
36577 | +static int srp_exceeds_ceiling(struct task_struct* first, | ||
36578 | + struct srp* srp) | ||
36579 | +{ | ||
36580 | + return list_empty(&srp->ceiling) || | ||
36581 | + get_rt_period(first) < system_ceiling(srp)->period || | ||
36582 | + (get_rt_period(first) == system_ceiling(srp)->period && | ||
36583 | + first->pid < system_ceiling(srp)->pid) || | ||
36584 | + ceiling2sem(system_ceiling(srp))->owner == first; | ||
36585 | +} | ||
36586 | + | ||
36587 | +static void srp_add_prio(struct srp* srp, struct srp_priority* prio) | ||
36588 | +{ | ||
36589 | + struct list_head *pos; | ||
36590 | + if (in_list(&prio->list)) { | ||
36591 | + printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in " | ||
36592 | + "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio)); | ||
36593 | + return; | ||
36594 | + } | ||
36595 | + list_for_each(pos, &srp->ceiling) | ||
36596 | + if (unlikely(srp_higher_prio(prio, list2prio(pos)))) { | ||
36597 | + __list_add(&prio->list, pos->prev, pos); | ||
36598 | + return; | ||
36599 | + } | ||
36600 | + | ||
36601 | + list_add_tail(&prio->list, &srp->ceiling); | ||
36602 | +} | ||
36603 | + | ||
36604 | + | ||
36605 | +static void* create_srp_semaphore(void) | ||
36606 | +{ | ||
36607 | + struct srp_semaphore* sem; | ||
36608 | + | ||
36609 | + sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
36610 | + if (!sem) | ||
36611 | + return NULL; | ||
36612 | + | ||
36613 | + INIT_LIST_HEAD(&sem->ceiling.list); | ||
36614 | + sem->ceiling.period = 0; | ||
36615 | + sem->cpu = UNDEF_SEM; | ||
36616 | + sem->owner = NULL; | ||
36617 | + atomic_inc(&srp_objects_in_use); | ||
36618 | + return sem; | ||
36619 | +} | ||
36620 | + | ||
36621 | +static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
36622 | +{ | ||
36623 | + struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj; | ||
36624 | + int ret = 0; | ||
36625 | + struct task_struct* t = current; | ||
36626 | + struct srp_priority t_prio; | ||
36627 | + | ||
36628 | + TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu); | ||
36629 | + if (!srp_active()) | ||
36630 | + return -EBUSY; | ||
36631 | + | ||
36632 | + if (sem->cpu == UNDEF_SEM) | ||
36633 | + sem->cpu = get_partition(t); | ||
36634 | + else if (sem->cpu != get_partition(t)) | ||
36635 | + ret = -EPERM; | ||
36636 | + | ||
36637 | + if (ret == 0) { | ||
36638 | + t_prio.period = get_rt_period(t); | ||
36639 | + t_prio.pid = t->pid; | ||
36640 | + if (srp_higher_prio(&t_prio, &sem->ceiling)) { | ||
36641 | + sem->ceiling.period = t_prio.period; | ||
36642 | + sem->ceiling.pid = t_prio.pid; | ||
36643 | + } | ||
36644 | + } | ||
36645 | + | ||
36646 | + return ret; | ||
36647 | +} | ||
36648 | + | ||
36649 | +static void destroy_srp_semaphore(void* sem) | ||
36650 | +{ | ||
36651 | + /* XXX invariants */ | ||
36652 | + atomic_dec(&srp_objects_in_use); | ||
36653 | + kfree(sem); | ||
36654 | +} | ||
36655 | + | ||
36656 | +struct fdso_ops srp_sem_ops = { | ||
36657 | + .create = create_srp_semaphore, | ||
36658 | + .open = open_srp_semaphore, | ||
36659 | + .destroy = destroy_srp_semaphore | ||
36660 | +}; | ||
36661 | + | ||
36662 | + | ||
36663 | +static void do_srp_down(struct srp_semaphore* sem) | ||
36664 | +{ | ||
36665 | + /* Update ceiling. */ | ||
36666 | + srp_add_prio(&__get_cpu_var(srp), &sem->ceiling); | ||
36667 | + WARN_ON(sem->owner != NULL); | ||
36668 | + sem->owner = current; | ||
36669 | + TRACE_CUR("acquired srp 0x%p\n", sem); | ||
36670 | +} | ||
36671 | + | ||
36672 | +static void do_srp_up(struct srp_semaphore* sem) | ||
36673 | +{ | ||
36674 | + /* Determine new system priority ceiling for this CPU. */ | ||
36675 | + WARN_ON(!in_list(&sem->ceiling.list)); | ||
36676 | + if (in_list(&sem->ceiling.list)) | ||
36677 | + list_del(&sem->ceiling.list); | ||
36678 | + | ||
36679 | + sem->owner = NULL; | ||
36680 | + | ||
36681 | + /* Wake tasks on this CPU, if they exceed current ceiling. */ | ||
36682 | + TRACE_CUR("released srp 0x%p\n", sem); | ||
36683 | + wake_up_all(&__get_cpu_var(srp).ceiling_blocked); | ||
36684 | +} | ||
36685 | + | ||
36686 | +/* Adjust the system-wide priority ceiling when the resource is claimed. */ | ||
36687 | +asmlinkage long sys_srp_down(int sem_od) | ||
36688 | +{ | ||
36689 | + int cpu; | ||
36690 | + int ret = -EINVAL; | ||
36691 | + struct srp_semaphore* sem; | ||
36692 | + | ||
36693 | +	/* Disabling preemption is sufficient protection, since | ||
36694 | +	 * SRP is strictly per-CPU and we don't interfere with any | ||
36695 | +	 * interrupt handlers. | ||
36696 | +	 */ | ||
36697 | + preempt_disable(); | ||
36698 | + TS_SRP_DOWN_START; | ||
36699 | + | ||
36700 | + cpu = smp_processor_id(); | ||
36701 | + sem = lookup_srp_sem(sem_od); | ||
36702 | + if (sem && sem->cpu == cpu) { | ||
36703 | + do_srp_down(sem); | ||
36704 | + ret = 0; | ||
36705 | + } | ||
36706 | + | ||
36707 | + TS_SRP_DOWN_END; | ||
36708 | + preempt_enable(); | ||
36709 | + return ret; | ||
36710 | +} | ||
36711 | + | ||
36712 | +/* Adjust the system-wide priority ceiling when the resource is freed. */ | ||
36713 | +asmlinkage long sys_srp_up(int sem_od) | ||
36714 | +{ | ||
36715 | + int cpu; | ||
36716 | + int ret = -EINVAL; | ||
36717 | + struct srp_semaphore* sem; | ||
36718 | + | ||
36719 | + preempt_disable(); | ||
36720 | + TS_SRP_UP_START; | ||
36721 | + | ||
36722 | + cpu = smp_processor_id(); | ||
36723 | + sem = lookup_srp_sem(sem_od); | ||
36724 | + | ||
36725 | + if (sem && sem->cpu == cpu) { | ||
36726 | + do_srp_up(sem); | ||
36727 | + ret = 0; | ||
36728 | + } | ||
36729 | + | ||
36730 | + TS_SRP_UP_END; | ||
36731 | + preempt_enable(); | ||
36732 | + return ret; | ||
36733 | +} | ||
36734 | + | ||
36735 | +static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
36736 | + void *key) | ||
36737 | +{ | ||
36738 | + int cpu = smp_processor_id(); | ||
36739 | + struct task_struct *tsk = wait->private; | ||
36740 | + if (cpu != get_partition(tsk)) | ||
36741 | +		TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n", | ||
36742 | + get_partition(tsk)); | ||
36743 | + else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
36744 | + return default_wake_function(wait, mode, sync, key); | ||
36745 | + return 0; | ||
36746 | +} | ||
36747 | + | ||
36748 | + | ||
36749 | + | ||
36750 | +static void do_ceiling_block(struct task_struct *tsk) | ||
36751 | +{ | ||
36752 | + wait_queue_t wait = { | ||
36753 | + .private = tsk, | ||
36754 | + .func = srp_wake_up, | ||
36755 | + .task_list = {NULL, NULL} | ||
36756 | + }; | ||
36757 | + | ||
36758 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
36759 | + add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
36760 | + tsk->rt_param.srp_non_recurse = 1; | ||
36761 | + preempt_enable_no_resched(); | ||
36762 | + schedule(); | ||
36763 | + preempt_disable(); | ||
36764 | + tsk->rt_param.srp_non_recurse = 0; | ||
36765 | + remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
36766 | +} | ||
36767 | + | ||
36768 | +/* Wait until the current task's priority exceeds the system-wide priority | ||
36769 | + * ceiling. | ||
36770 | +void srp_ceiling_block(void) | ||
36771 | +{ | ||
36772 | + struct task_struct *tsk = current; | ||
36773 | + | ||
36774 | +	/* Only applies to real-time tasks; the branch hint favors the RT case. */ | ||
36775 | + if (unlikely(!is_realtime(tsk))) | ||
36776 | + return; | ||
36777 | + | ||
36778 | + /* Avoid recursive ceiling blocking. */ | ||
36779 | + if (unlikely(tsk->rt_param.srp_non_recurse)) | ||
36780 | + return; | ||
36781 | + | ||
36782 | + /* Bail out early if there aren't any SRP resources around. */ | ||
36783 | + if (likely(!atomic_read(&srp_objects_in_use))) | ||
36784 | + return; | ||
36785 | + | ||
36786 | + preempt_disable(); | ||
36787 | + if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) { | ||
36788 | + TRACE_CUR("is priority ceiling blocked.\n"); | ||
36789 | + while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
36790 | + do_ceiling_block(tsk); | ||
36791 | + TRACE_CUR("finally exceeds system ceiling.\n"); | ||
36792 | + } else | ||
36793 | + TRACE_CUR("is not priority ceiling blocked\n"); | ||
36794 | + preempt_enable(); | ||
36795 | +} | ||
36796 | + | ||
36797 | + | ||
36798 | +#else | ||
36799 | + | ||
36800 | +asmlinkage long sys_srp_down(int sem_od) | ||
36801 | +{ | ||
36802 | + return -ENOSYS; | ||
36803 | +} | ||
36804 | + | ||
36805 | +asmlinkage long sys_srp_up(int sem_od) | ||
36806 | +{ | ||
36807 | + return -ENOSYS; | ||
36808 | +} | ||
36809 | + | ||
36810 | +struct fdso_ops srp_sem_ops = {}; | ||
36811 | + | ||
36812 | +#endif | ||
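srp_higher_prio() and srp_exceeds_ceiling() above encode the usual SRP rule: a shorter period means higher priority, PIDs break ties, and a period of zero means "no priority". A standalone userspace sketch of the comparison (illustrative only) makes the corner cases explicit:

	/* Standalone sketch of the SRP priority comparison; illustrative only. */
	#include <assert.h>
	#include <stdio.h>

	struct prio { unsigned period; int pid; };

	static int higher_prio(const struct prio *a, const struct prio *b)
	{
		if (!a->period)
			return 0;                     /* "no priority" never wins */
		return !b->period ||
		       a->period <  b->period ||
		       (a->period == b->period && a->pid < b->pid);
	}

	int main(void)
	{
		struct prio t1 = { 10, 100 }, t2 = { 20, 200 }, none = { 0, 300 };

		assert( higher_prio(&t1, &t2));       /* shorter period wins      */
		assert(!higher_prio(&t2, &t1));
		assert( higher_prio(&t2, &none));     /* anything beats "no prio" */
		assert(!higher_prio(&none, &t1));
		printf("SRP priority comparison behaves as expected\n");
		return 0;
	}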
36813 | diff --git a/litmus/sync.c b/litmus/sync.c | ||
36814 | new file mode 100644 | ||
36815 | index 0000000..bf75fde | ||
36816 | --- /dev/null | ||
36817 | +++ b/litmus/sync.c | ||
36818 | @@ -0,0 +1,104 @@ | ||
36819 | +/* litmus/sync.c - Support for synchronous and asynchronous task system releases. | ||
36820 | + * | ||
36821 | + * | ||
36822 | + */ | ||
36823 | + | ||
36824 | +#include <asm/atomic.h> | ||
36825 | +#include <asm/uaccess.h> | ||
36826 | +#include <linux/spinlock.h> | ||
36827 | +#include <linux/list.h> | ||
36828 | +#include <linux/sched.h> | ||
36829 | +#include <linux/completion.h> | ||
36830 | + | ||
36831 | +#include <litmus/litmus.h> | ||
36832 | +#include <litmus/sched_plugin.h> | ||
36833 | +#include <litmus/jobs.h> | ||
36834 | + | ||
36835 | +#include <litmus/sched_trace.h> | ||
36836 | + | ||
36837 | +static DECLARE_COMPLETION(ts_release); | ||
36838 | + | ||
36839 | +static long do_wait_for_ts_release(void) | ||
36840 | +{ | ||
36841 | + long ret = 0; | ||
36842 | + | ||
36843 | + /* If the interruption races with a release, the completion object | ||
36844 | + * may have a non-zero counter. To avoid this problem, this should | ||
36845 | + * be replaced by wait_for_completion(). | ||
36846 | + * | ||
36847 | + * For debugging purposes, this is interruptible for now. | ||
36848 | + */ | ||
36849 | + ret = wait_for_completion_interruptible(&ts_release); | ||
36850 | + | ||
36851 | + return ret; | ||
36852 | +} | ||
36853 | + | ||
36854 | +int count_tasks_waiting_for_release(void) | ||
36855 | +{ | ||
36856 | + unsigned long flags; | ||
36857 | + int task_count = 0; | ||
36858 | + struct list_head *pos; | ||
36859 | + | ||
36860 | + spin_lock_irqsave(&ts_release.wait.lock, flags); | ||
36861 | + list_for_each(pos, &ts_release.wait.task_list) { | ||
36862 | + task_count++; | ||
36863 | + } | ||
36864 | + spin_unlock_irqrestore(&ts_release.wait.lock, flags); | ||
36865 | + | ||
36866 | + return task_count; | ||
36867 | +} | ||
36868 | + | ||
36869 | +static long do_release_ts(lt_t start) | ||
36870 | +{ | ||
36871 | + int task_count = 0; | ||
36872 | + unsigned long flags; | ||
36873 | + struct list_head *pos; | ||
36874 | + struct task_struct *t; | ||
36875 | + | ||
36876 | + | ||
36877 | + spin_lock_irqsave(&ts_release.wait.lock, flags); | ||
36878 | + TRACE("<<<<<< synchronous task system release >>>>>>\n"); | ||
36879 | + | ||
36880 | + sched_trace_sys_release(&start); | ||
36881 | + list_for_each(pos, &ts_release.wait.task_list) { | ||
36882 | + t = (struct task_struct*) list_entry(pos, | ||
36883 | + struct __wait_queue, | ||
36884 | + task_list)->private; | ||
36885 | + task_count++; | ||
36886 | + litmus->release_at(t, start + t->rt_param.task_params.phase); | ||
36887 | + sched_trace_task_release(t); | ||
36888 | + } | ||
36889 | + | ||
36890 | + spin_unlock_irqrestore(&ts_release.wait.lock, flags); | ||
36891 | + | ||
36892 | + complete_n(&ts_release, task_count); | ||
36893 | + | ||
36894 | + return task_count; | ||
36895 | +} | ||
36896 | + | ||
36897 | + | ||
36898 | +asmlinkage long sys_wait_for_ts_release(void) | ||
36899 | +{ | ||
36900 | + long ret = -EPERM; | ||
36901 | + struct task_struct *t = current; | ||
36902 | + | ||
36903 | + if (is_realtime(t)) | ||
36904 | + ret = do_wait_for_ts_release(); | ||
36905 | + | ||
36906 | + return ret; | ||
36907 | +} | ||
36908 | + | ||
36909 | + | ||
36910 | +asmlinkage long sys_release_ts(lt_t __user *__delay) | ||
36911 | +{ | ||
36912 | + long ret; | ||
36913 | + lt_t delay; | ||
36914 | + | ||
36915 | + /* FIXME: check capabilities... */ | ||
36916 | + | ||
36917 | + ret = copy_from_user(&delay, __delay, sizeof(delay)); | ||
36918 | + if (ret == 0) | ||
36919 | + ret = do_release_ts(litmus_clock() + delay); | ||
36920 | + | ||
36921 | + return ret; | ||
36922 | +} | ||
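The intended usage pattern is that each task of a task system blocks in sys_wait_for_ts_release() before its first job, and a separate launcher invokes sys_release_ts() once, so that all first releases line up at a common start time plus each task's phase. The pthread sketch below is a rough userspace analogue of that rendezvous, not the LITMUS API; the names and phases are illustrative (build with -lpthread):

	/* Userspace analogue of a synchronous task system release; illustrative only. */
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	#define NTASKS 3

	static pthread_barrier_t release_barrier;
	static struct timespec start;
	static const long phase_ms[NTASKS] = { 0, 10, 25 };

	static void *task(void *arg)
	{
		int id = (int) (long) arg;

		pthread_barrier_wait(&release_barrier);  /* "wait_for_ts_release" */
		/* Each task's first release is start + its phase. */
		printf("task %d releases at start + %ld ms\n", id, phase_ms[id]);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NTASKS];
		int i;

		pthread_barrier_init(&release_barrier, NULL, NTASKS + 1);
		for (i = 0; i < NTASKS; i++)
			pthread_create(&tid[i], NULL, task, (void *) (long) i);

		clock_gettime(CLOCK_MONOTONIC, &start);  /* common release time */
		pthread_barrier_wait(&release_barrier);  /* "release_ts"        */

		for (i = 0; i < NTASKS; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}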
36923 | diff --git a/litmus/trace.c b/litmus/trace.c | ||
36924 | new file mode 100644 | ||
36925 | index 0000000..4403769 | ||
36926 | --- /dev/null | ||
36927 | +++ b/litmus/trace.c | ||
36928 | @@ -0,0 +1,103 @@ | ||
36929 | +#include <linux/module.h> | ||
36930 | + | ||
36931 | +#include <litmus/ftdev.h> | ||
36932 | +#include <litmus/litmus.h> | ||
36933 | +#include <litmus/trace.h> | ||
36934 | + | ||
36935 | +/******************************************************************************/ | ||
36936 | +/* Allocation */ | ||
36937 | +/******************************************************************************/ | ||
36938 | + | ||
36939 | +static struct ftdev overhead_dev; | ||
36940 | + | ||
36941 | +#define trace_ts_buf overhead_dev.minor[0].buf | ||
36942 | + | ||
36943 | +static unsigned int ts_seq_no = 0; | ||
36944 | + | ||
36945 | +static inline void __save_timestamp_cpu(unsigned long event, | ||
36946 | + uint8_t type, uint8_t cpu) | ||
36947 | +{ | ||
36948 | + unsigned int seq_no; | ||
36949 | + struct timestamp *ts; | ||
36950 | + seq_no = fetch_and_inc((int *) &ts_seq_no); | ||
36951 | + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | ||
36952 | + ts->event = event; | ||
36953 | + ts->timestamp = ft_timestamp(); | ||
36954 | + ts->seq_no = seq_no; | ||
36955 | + ts->cpu = cpu; | ||
36956 | + ts->task_type = type; | ||
36957 | + ft_buffer_finish_write(trace_ts_buf, ts); | ||
36958 | + } | ||
36959 | +} | ||
36960 | + | ||
36961 | +static inline void __save_timestamp(unsigned long event, | ||
36962 | + uint8_t type) | ||
36963 | +{ | ||
36964 | + __save_timestamp_cpu(event, type, raw_smp_processor_id()); | ||
36965 | +} | ||
36966 | + | ||
36967 | +feather_callback void save_timestamp(unsigned long event) | ||
36968 | +{ | ||
36969 | + __save_timestamp(event, TSK_UNKNOWN); | ||
36970 | +} | ||
36971 | + | ||
36972 | +feather_callback void save_timestamp_def(unsigned long event, | ||
36973 | + unsigned long type) | ||
36974 | +{ | ||
36975 | + __save_timestamp(event, (uint8_t) type); | ||
36976 | +} | ||
36977 | + | ||
36978 | +feather_callback void save_timestamp_task(unsigned long event, | ||
36979 | + unsigned long t_ptr) | ||
36980 | +{ | ||
36981 | + int rt = is_realtime((struct task_struct *) t_ptr); | ||
36982 | + __save_timestamp(event, rt ? TSK_RT : TSK_BE); | ||
36983 | +} | ||
36984 | + | ||
36985 | +feather_callback void save_timestamp_cpu(unsigned long event, | ||
36986 | + unsigned long cpu) | ||
36987 | +{ | ||
36988 | + __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); | ||
36989 | +} | ||
36990 | + | ||
36991 | +/******************************************************************************/ | ||
36992 | +/* DEVICE FILE DRIVER */ | ||
36993 | +/******************************************************************************/ | ||
36994 | + | ||
36995 | +/* | ||
36996 | + * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) | ||
36997 | + * and we might not get as much | ||
36998 | + */ | ||
36999 | +#define NO_TIMESTAMPS (2 << 11) | ||
37000 | + | ||
37001 | +/* set MAJOR to 0 to have it dynamically assigned */ | ||
37002 | +#define FT_TRACE_MAJOR 252 | ||
37003 | + | ||
37004 | +static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
37005 | +{ | ||
37006 | + unsigned int count = NO_TIMESTAMPS; | ||
37007 | + while (count && !trace_ts_buf) { | ||
37008 | + printk("time stamp buffer: trying to allocate %u time stamps.\n", count); | ||
37009 | + ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); | ||
37010 | + count /= 2; | ||
37011 | + } | ||
37012 | + return ftdev->minor[idx].buf ? 0 : -ENOMEM; | ||
37013 | +} | ||
37014 | + | ||
37015 | +static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
37016 | +{ | ||
37017 | + free_ft_buffer(ftdev->minor[idx].buf); | ||
37018 | + ftdev->minor[idx].buf = NULL; | ||
37019 | +} | ||
37020 | + | ||
37021 | +static int __init init_ft_overhead_trace(void) | ||
37022 | +{ | ||
37023 | + printk("Initializing Feather-Trace overhead tracing device.\n"); | ||
37024 | + ftdev_init(&overhead_dev, THIS_MODULE); | ||
37025 | + overhead_dev.minor_cnt = 1; /* only one buffer */ | ||
37026 | + overhead_dev.alloc = alloc_timestamp_buffer; | ||
37027 | + overhead_dev.free = free_timestamp_buffer; | ||
37028 | + return register_ftdev(&overhead_dev, "ft_trace", FT_TRACE_MAJOR); | ||
37029 | +} | ||
37030 | + | ||
37031 | +module_init(init_ft_overhead_trace); | ||
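alloc_timestamp_buffer() above copes with large contiguous allocations by halving the requested record count until the allocation succeeds or the count reaches zero, trading capacity for reliability under memory pressure. The same back-off strategy as a standalone userspace sketch (illustrative only):

	/* Standalone sketch of the halving allocation back-off; illustrative only. */
	#include <stdio.h>
	#include <stdlib.h>

	struct timestamp_rec { unsigned long long when; unsigned event; };

	static void *alloc_halving(unsigned int *count, size_t rec_size)
	{
		void *buf = NULL;

		while (*count && !buf) {
			printf("trying to allocate %u records (%zu bytes)\n",
			       *count, *count * rec_size);
			buf = malloc(*count * rec_size);
			if (!buf)
				*count /= 2;   /* back off and retry with half */
		}
		return buf;
	}

	int main(void)
	{
		unsigned int count = 2 << 11;  /* same initial request as above */
		void *buf = alloc_halving(&count, sizeof(struct timestamp_rec));

		if (!buf) {
			fprintf(stderr, "no memory for timestamp buffer\n");
			return 1;
		}
		printf("got buffer for %u records\n", count);
		free(buf);
		return 0;
	}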
37032 | diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c | ||
37033 | index 1491260..bf706f8 100644 | ||
37034 | --- a/net/ax25/ax25_out.c | ||
37035 | +++ b/net/ax25/ax25_out.c | ||
37036 | @@ -92,12 +92,6 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax2 | ||
37037 | #endif | ||
37038 | } | ||
37039 | |||
37040 | - /* | ||
37041 | - * There is one ref for the state machine; a caller needs | ||
37042 | - * one more to put it back, just like with the existing one. | ||
37043 | - */ | ||
37044 | - ax25_cb_hold(ax25); | ||
37045 | - | ||
37046 | ax25_cb_add(ax25); | ||
37047 | |||
37048 | ax25->state = AX25_STATE_1; | ||
37049 | diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c | ||
37050 | index 0b7f262..bd1c654 100644 | ||
37051 | --- a/net/bridge/netfilter/ebtables.c | ||
37052 | +++ b/net/bridge/netfilter/ebtables.c | ||
37053 | @@ -1406,9 +1406,6 @@ static int do_ebt_set_ctl(struct sock *sk, | ||
37054 | { | ||
37055 | int ret; | ||
37056 | |||
37057 | - if (!capable(CAP_NET_ADMIN)) | ||
37058 | - return -EPERM; | ||
37059 | - | ||
37060 | switch(cmd) { | ||
37061 | case EBT_SO_SET_ENTRIES: | ||
37062 | ret = do_replace(sock_net(sk), user, len); | ||
37063 | @@ -1428,9 +1425,6 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | ||
37064 | struct ebt_replace tmp; | ||
37065 | struct ebt_table *t; | ||
37066 | |||
37067 | - if (!capable(CAP_NET_ADMIN)) | ||
37068 | - return -EPERM; | ||
37069 | - | ||
37070 | if (copy_from_user(&tmp, user, sizeof(tmp))) | ||
37071 | return -EFAULT; | ||
37072 | |||
37073 | diff --git a/net/core/dev.c b/net/core/dev.c | ||
37074 | index 584046e..fe10551 100644 | ||
37075 | --- a/net/core/dev.c | ||
37076 | +++ b/net/core/dev.c | ||
37077 | @@ -4860,11 +4860,6 @@ int register_netdevice(struct net_device *dev) | ||
37078 | rollback_registered(dev); | ||
37079 | dev->reg_state = NETREG_UNREGISTERED; | ||
37080 | } | ||
37081 | - /* | ||
37082 | - * Prevent userspace races by waiting until the network | ||
37083 | - * device is fully setup before sending notifications. | ||
37084 | - */ | ||
37085 | - rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
37086 | |||
37087 | out: | ||
37088 | return ret; | ||
37089 | @@ -5403,12 +5398,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | ||
37090 | /* Notify protocols, that a new device appeared. */ | ||
37091 | call_netdevice_notifiers(NETDEV_REGISTER, dev); | ||
37092 | |||
37093 | - /* | ||
37094 | - * Prevent userspace races by waiting until the network | ||
37095 | - * device is fully setup before sending notifications. | ||
37096 | - */ | ||
37097 | - rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
37098 | - | ||
37099 | synchronize_net(); | ||
37100 | err = 0; | ||
37101 | out: | ||
37102 | diff --git a/net/core/dst.c b/net/core/dst.c | ||
37103 | index cb1b348..57bc4d5 100644 | ||
37104 | --- a/net/core/dst.c | ||
37105 | +++ b/net/core/dst.c | ||
37106 | @@ -17,7 +17,6 @@ | ||
37107 | #include <linux/string.h> | ||
37108 | #include <linux/types.h> | ||
37109 | #include <net/net_namespace.h> | ||
37110 | -#include <linux/sched.h> | ||
37111 | |||
37112 | #include <net/dst.h> | ||
37113 | |||
37114 | @@ -80,7 +79,6 @@ loop: | ||
37115 | while ((dst = next) != NULL) { | ||
37116 | next = dst->next; | ||
37117 | prefetch(&next->next); | ||
37118 | - cond_resched(); | ||
37119 | if (likely(atomic_read(&dst->__refcnt))) { | ||
37120 | last->next = dst; | ||
37121 | last = dst; | ||
37122 | diff --git a/net/core/pktgen.c b/net/core/pktgen.c | ||
37123 | index 6a993b1..6e79e96 100644 | ||
37124 | --- a/net/core/pktgen.c | ||
37125 | +++ b/net/core/pktgen.c | ||
37126 | @@ -3516,7 +3516,6 @@ static int pktgen_thread_worker(void *arg) | ||
37127 | wait_event_interruptible_timeout(t->queue, | ||
37128 | t->control != 0, | ||
37129 | HZ/10); | ||
37130 | - try_to_freeze(); | ||
37131 | continue; | ||
37132 | } | ||
37133 | |||
37134 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c | ||
37135 | index d4fd895..eb42873 100644 | ||
37136 | --- a/net/core/rtnetlink.c | ||
37137 | +++ b/net/core/rtnetlink.c | ||
37138 | @@ -1334,11 +1334,13 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | ||
37139 | case NETDEV_UNREGISTER: | ||
37140 | rtmsg_ifinfo(RTM_DELLINK, dev, ~0U); | ||
37141 | break; | ||
37142 | + case NETDEV_REGISTER: | ||
37143 | + rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U); | ||
37144 | + break; | ||
37145 | case NETDEV_UP: | ||
37146 | case NETDEV_DOWN: | ||
37147 | rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING); | ||
37148 | break; | ||
37149 | - case NETDEV_REGISTER: | ||
37150 | case NETDEV_CHANGE: | ||
37151 | case NETDEV_GOING_DOWN: | ||
37152 | break; | ||
37153 | diff --git a/net/core/sock.c b/net/core/sock.c | ||
37154 | index 6605e75..7626b6a 100644 | ||
37155 | --- a/net/core/sock.c | ||
37156 | +++ b/net/core/sock.c | ||
37157 | @@ -1181,10 +1181,6 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) | ||
37158 | |||
37159 | if (newsk->sk_prot->sockets_allocated) | ||
37160 | percpu_counter_inc(newsk->sk_prot->sockets_allocated); | ||
37161 | - | ||
37162 | - if (sock_flag(newsk, SOCK_TIMESTAMP) || | ||
37163 | - sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) | ||
37164 | - net_enable_timestamp(); | ||
37165 | } | ||
37166 | out: | ||
37167 | return newsk; | ||
37168 | diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c | ||
37169 | index 0030e73..5df2f6a 100644 | ||
37170 | --- a/net/ipv4/devinet.c | ||
37171 | +++ b/net/ipv4/devinet.c | ||
37172 | @@ -1450,7 +1450,6 @@ static struct devinet_sysctl_table { | ||
37173 | DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"), | ||
37174 | DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, | ||
37175 | "accept_source_route"), | ||
37176 | - DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"), | ||
37177 | DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), | ||
37178 | DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), | ||
37179 | DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"), | ||
37180 | diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c | ||
37181 | index 29391ee..aa00398 100644 | ||
37182 | --- a/net/ipv4/fib_frontend.c | ||
37183 | +++ b/net/ipv4/fib_frontend.c | ||
37184 | @@ -251,8 +251,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, | ||
37185 | if (in_dev) { | ||
37186 | no_addr = in_dev->ifa_list == NULL; | ||
37187 | rpf = IN_DEV_RPFILTER(in_dev); | ||
37188 | - if (mark && !IN_DEV_SRC_VMARK(in_dev)) | ||
37189 | - fl.mark = 0; | ||
37190 | } | ||
37191 | rcu_read_unlock(); | ||
37192 | |||
37193 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c | ||
37194 | index 4d50daa..f989518 100644 | ||
37195 | --- a/net/ipv4/ip_output.c | ||
37196 | +++ b/net/ipv4/ip_output.c | ||
37197 | @@ -501,8 +501,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | ||
37198 | if (skb->sk) { | ||
37199 | frag->sk = skb->sk; | ||
37200 | frag->destructor = sock_wfree; | ||
37201 | + truesizes += frag->truesize; | ||
37202 | } | ||
37203 | - truesizes += frag->truesize; | ||
37204 | } | ||
37205 | |||
37206 | /* Everything is OK. Generate! */ | ||
37207 | diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c | ||
37208 | index 98442f3..27774c9 100644 | ||
37209 | --- a/net/ipv4/netfilter/arp_tables.c | ||
37210 | +++ b/net/ipv4/netfilter/arp_tables.c | ||
37211 | @@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) | ||
37212 | if (t && !IS_ERR(t)) { | ||
37213 | struct arpt_getinfo info; | ||
37214 | const struct xt_table_info *private = t->private; | ||
37215 | -#ifdef CONFIG_COMPAT | ||
37216 | - struct xt_table_info tmp; | ||
37217 | |||
37218 | +#ifdef CONFIG_COMPAT | ||
37219 | if (compat) { | ||
37220 | + struct xt_table_info tmp; | ||
37221 | ret = compat_table_info(private, &tmp); | ||
37222 | xt_compat_flush_offsets(NFPROTO_ARP); | ||
37223 | private = &tmp; | ||
37224 | diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c | ||
37225 | index 62aff31..cde755d 100644 | ||
37226 | --- a/net/ipv4/netfilter/ip_tables.c | ||
37227 | +++ b/net/ipv4/netfilter/ip_tables.c | ||
37228 | @@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) | ||
37229 | if (t && !IS_ERR(t)) { | ||
37230 | struct ipt_getinfo info; | ||
37231 | const struct xt_table_info *private = t->private; | ||
37232 | -#ifdef CONFIG_COMPAT | ||
37233 | - struct xt_table_info tmp; | ||
37234 | |||
37235 | +#ifdef CONFIG_COMPAT | ||
37236 | if (compat) { | ||
37237 | + struct xt_table_info tmp; | ||
37238 | ret = compat_table_info(private, &tmp); | ||
37239 | xt_compat_flush_offsets(AF_INET); | ||
37240 | private = &tmp; | ||
37241 | diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | ||
37242 | index 1032a15..aa95bb8 100644 | ||
37243 | --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | ||
37244 | +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | ||
37245 | @@ -213,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] = { | ||
37246 | { | ||
37247 | .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS, | ||
37248 | .procname = "ip_conntrack_buckets", | ||
37249 | - .data = &init_net.ct.htable_size, | ||
37250 | + .data = &nf_conntrack_htable_size, | ||
37251 | .maxlen = sizeof(unsigned int), | ||
37252 | .mode = 0444, | ||
37253 | .proc_handler = proc_dointvec, | ||
37254 | diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | ||
37255 | index 2fb7b76..8668a3d 100644 | ||
37256 | --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | ||
37257 | +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | ||
37258 | @@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) | ||
37259 | struct hlist_nulls_node *n; | ||
37260 | |||
37261 | for (st->bucket = 0; | ||
37262 | - st->bucket < net->ct.htable_size; | ||
37263 | + st->bucket < nf_conntrack_htable_size; | ||
37264 | st->bucket++) { | ||
37265 | n = rcu_dereference(net->ct.hash[st->bucket].first); | ||
37266 | if (!is_a_nulls(n)) | ||
37267 | @@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, | ||
37268 | head = rcu_dereference(head->next); | ||
37269 | while (is_a_nulls(head)) { | ||
37270 | if (likely(get_nulls_value(head) == st->bucket)) { | ||
37271 | - if (++st->bucket >= net->ct.htable_size) | ||
37272 | + if (++st->bucket >= nf_conntrack_htable_size) | ||
37273 | return NULL; | ||
37274 | } | ||
37275 | head = rcu_dereference(net->ct.hash[st->bucket].first); | ||
37276 | diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c | ||
37277 | index 331ead3..fa2d6b6 100644 | ||
37278 | --- a/net/ipv4/netfilter/nf_defrag_ipv4.c | ||
37279 | +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c | ||
37280 | @@ -14,7 +14,6 @@ | ||
37281 | #include <net/route.h> | ||
37282 | #include <net/ip.h> | ||
37283 | |||
37284 | -#include <linux/netfilter_bridge.h> | ||
37285 | #include <linux/netfilter_ipv4.h> | ||
37286 | #include <net/netfilter/ipv4/nf_defrag_ipv4.h> | ||
37287 | |||
37288 | @@ -35,20 +34,6 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) | ||
37289 | return err; | ||
37290 | } | ||
37291 | |||
37292 | -static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, | ||
37293 | - struct sk_buff *skb) | ||
37294 | -{ | ||
37295 | -#ifdef CONFIG_BRIDGE_NETFILTER | ||
37296 | - if (skb->nf_bridge && | ||
37297 | - skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) | ||
37298 | - return IP_DEFRAG_CONNTRACK_BRIDGE_IN; | ||
37299 | -#endif | ||
37300 | - if (hooknum == NF_INET_PRE_ROUTING) | ||
37301 | - return IP_DEFRAG_CONNTRACK_IN; | ||
37302 | - else | ||
37303 | - return IP_DEFRAG_CONNTRACK_OUT; | ||
37304 | -} | ||
37305 | - | ||
37306 | static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, | ||
37307 | struct sk_buff *skb, | ||
37308 | const struct net_device *in, | ||
37309 | @@ -65,8 +50,10 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum, | ||
37310 | #endif | ||
37311 | /* Gather fragments. */ | ||
37312 | if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { | ||
37313 | - enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb); | ||
37314 | - if (nf_ct_ipv4_gather_frags(skb, user)) | ||
37315 | + if (nf_ct_ipv4_gather_frags(skb, | ||
37316 | + hooknum == NF_INET_PRE_ROUTING ? | ||
37317 | + IP_DEFRAG_CONNTRACK_IN : | ||
37318 | + IP_DEFRAG_CONNTRACK_OUT)) | ||
37319 | return NF_STOLEN; | ||
37320 | } | ||
37321 | return NF_ACCEPT; | ||
37322 | diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c | ||
37323 | index 26066a2..fe1a644 100644 | ||
37324 | --- a/net/ipv4/netfilter/nf_nat_core.c | ||
37325 | +++ b/net/ipv4/netfilter/nf_nat_core.c | ||
37326 | @@ -35,6 +35,9 @@ static DEFINE_SPINLOCK(nf_nat_lock); | ||
37327 | |||
37328 | static struct nf_conntrack_l3proto *l3proto __read_mostly; | ||
37329 | |||
37330 | +/* Calculated at init based on memory size */ | ||
37331 | +static unsigned int nf_nat_htable_size __read_mostly; | ||
37332 | + | ||
37333 | #define MAX_IP_NAT_PROTO 256 | ||
37334 | static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO] | ||
37335 | __read_mostly; | ||
37336 | @@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put); | ||
37337 | |||
37338 | /* We keep an extra hash for each conntrack, for fast searching. */ | ||
37339 | static inline unsigned int | ||
37340 | -hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) | ||
37341 | +hash_by_src(const struct nf_conntrack_tuple *tuple) | ||
37342 | { | ||
37343 | unsigned int hash; | ||
37344 | |||
37345 | @@ -77,7 +80,7 @@ hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple) | ||
37346 | hash = jhash_3words((__force u32)tuple->src.u3.ip, | ||
37347 | (__force u32)tuple->src.u.all, | ||
37348 | tuple->dst.protonum, 0); | ||
37349 | - return ((u64)hash * net->ipv4.nat_htable_size) >> 32; | ||
37350 | + return ((u64)hash * nf_nat_htable_size) >> 32; | ||
37351 | } | ||
37352 | |||
37353 | /* Is this tuple already taken? (not by us) */ | ||
37354 | @@ -144,7 +147,7 @@ find_appropriate_src(struct net *net, | ||
37355 | struct nf_conntrack_tuple *result, | ||
37356 | const struct nf_nat_range *range) | ||
37357 | { | ||
37358 | - unsigned int h = hash_by_src(net, tuple); | ||
37359 | + unsigned int h = hash_by_src(tuple); | ||
37360 | const struct nf_conn_nat *nat; | ||
37361 | const struct nf_conn *ct; | ||
37362 | const struct hlist_node *n; | ||
37363 | @@ -327,7 +330,7 @@ nf_nat_setup_info(struct nf_conn *ct, | ||
37364 | if (have_to_hash) { | ||
37365 | unsigned int srchash; | ||
37366 | |||
37367 | - srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
37368 | + srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
37369 | spin_lock_bh(&nf_nat_lock); | ||
37370 | /* nf_conntrack_alter_reply might re-allocate exntension aera */ | ||
37371 | nat = nfct_nat(ct); | ||
37372 | @@ -676,10 +679,8 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct, | ||
37373 | |||
37374 | static int __net_init nf_nat_net_init(struct net *net) | ||
37375 | { | ||
37376 | - /* Leave them the same for the moment. */ | ||
37377 | - net->ipv4.nat_htable_size = net->ct.htable_size; | ||
37378 | - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, | ||
37379 | - &net->ipv4.nat_vmalloced, 0); | ||
37380 | + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, | ||
37381 | + &net->ipv4.nat_vmalloced, 0); | ||
37382 | if (!net->ipv4.nat_bysource) | ||
37383 | return -ENOMEM; | ||
37384 | return 0; | ||
37385 | @@ -702,7 +703,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) | ||
37386 | nf_ct_iterate_cleanup(net, &clean_nat, NULL); | ||
37387 | synchronize_rcu(); | ||
37388 | nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, | ||
37389 | - net->ipv4.nat_htable_size); | ||
37390 | + nf_nat_htable_size); | ||
37391 | } | ||
37392 | |||
37393 | static struct pernet_operations nf_nat_net_ops = { | ||
37394 | @@ -723,6 +724,9 @@ static int __init nf_nat_init(void) | ||
37395 | return ret; | ||
37396 | } | ||
37397 | |||
37398 | + /* Leave them the same for the moment. */ | ||
37399 | + nf_nat_htable_size = nf_conntrack_htable_size; | ||
37400 | + | ||
37401 | ret = register_pernet_subsys(&nf_nat_net_ops); | ||
37402 | if (ret < 0) | ||
37403 | goto cleanup_extend; | ||
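The nf_nat_core.c hunk above drops the per-namespace NAT hash size and goes back to a single global nf_nat_htable_size, but the bucket computation itself is unchanged: a 32-bit jhash of the source tuple is spread over the table with a multiply-shift, ((u64)hash * size) >> 32, instead of a modulo. A minimal userspace sketch of that mapping (helper name and sample values are illustrative, not taken from the kernel):

#include <stdint.h>
#include <stdio.h>

/* Maps a 32-bit hash onto [0, size) buckets without a division,
 * mirroring the ((u64)hash * nf_nat_htable_size) >> 32 in the hunk. */
static uint32_t bucket_of(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t htable_size = 16384;   /* placeholder; the real value is sized at init */
	uint32_t sample[] = { 0x00000000u, 0x7fffffffu, 0xdeadbeefu, 0xffffffffu };

	for (unsigned i = 0; i < sizeof(sample) / sizeof(sample[0]); i++)
		printf("hash %08x -> bucket %u\n",
		       sample[i], bucket_of(sample[i], htable_size));
	return 0;
}

Because the multiply-shift only needs the table size at the call site, moving that size from net->ipv4.nat_htable_size to a module-wide global does not change which bucket a given tuple lands in for the same size and seed.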
37404 | diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c | ||
37405 | index 4bac362..df159ff 100644 | ||
37406 | --- a/net/ipv6/exthdrs.c | ||
37407 | +++ b/net/ipv6/exthdrs.c | ||
37408 | @@ -559,11 +559,6 @@ static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb) | ||
37409 | return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev); | ||
37410 | } | ||
37411 | |||
37412 | -static inline struct net *ipv6_skb_net(struct sk_buff *skb) | ||
37413 | -{ | ||
37414 | - return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev); | ||
37415 | -} | ||
37416 | - | ||
37417 | /* Router Alert as of RFC 2711 */ | ||
37418 | |||
37419 | static int ipv6_hop_ra(struct sk_buff *skb, int optoff) | ||
37420 | @@ -585,8 +580,8 @@ static int ipv6_hop_ra(struct sk_buff *skb, int optoff) | ||
37421 | static int ipv6_hop_jumbo(struct sk_buff *skb, int optoff) | ||
37422 | { | ||
37423 | const unsigned char *nh = skb_network_header(skb); | ||
37424 | - struct net *net = ipv6_skb_net(skb); | ||
37425 | u32 pkt_len; | ||
37426 | + struct net *net = dev_net(skb_dst(skb)->dev); | ||
37427 | |||
37428 | if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { | ||
37429 | LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", | ||
37430 | diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c | ||
37431 | index 1de56fd..cc9f8ef 100644 | ||
37432 | --- a/net/ipv6/netfilter/ip6_tables.c | ||
37433 | +++ b/net/ipv6/netfilter/ip6_tables.c | ||
37434 | @@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat) | ||
37435 | if (t && !IS_ERR(t)) { | ||
37436 | struct ip6t_getinfo info; | ||
37437 | const struct xt_table_info *private = t->private; | ||
37438 | -#ifdef CONFIG_COMPAT | ||
37439 | - struct xt_table_info tmp; | ||
37440 | |||
37441 | +#ifdef CONFIG_COMPAT | ||
37442 | if (compat) { | ||
37443 | + struct xt_table_info tmp; | ||
37444 | ret = compat_table_info(private, &tmp); | ||
37445 | xt_compat_flush_offsets(AF_INET6); | ||
37446 | private = &tmp; | ||
37447 | diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | ||
37448 | index 0956eba..5f2ec20 100644 | ||
37449 | --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | ||
37450 | +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | ||
37451 | @@ -20,7 +20,6 @@ | ||
37452 | #include <net/ipv6.h> | ||
37453 | #include <net/inet_frag.h> | ||
37454 | |||
37455 | -#include <linux/netfilter_bridge.h> | ||
37456 | #include <linux/netfilter_ipv6.h> | ||
37457 | #include <net/netfilter/nf_conntrack.h> | ||
37458 | #include <net/netfilter/nf_conntrack_helper.h> | ||
37459 | @@ -188,21 +187,6 @@ out: | ||
37460 | return nf_conntrack_confirm(skb); | ||
37461 | } | ||
37462 | |||
37463 | -static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, | ||
37464 | - struct sk_buff *skb) | ||
37465 | -{ | ||
37466 | -#ifdef CONFIG_BRIDGE_NETFILTER | ||
37467 | - if (skb->nf_bridge && | ||
37468 | - skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING) | ||
37469 | - return IP6_DEFRAG_CONNTRACK_BRIDGE_IN; | ||
37470 | -#endif | ||
37471 | - if (hooknum == NF_INET_PRE_ROUTING) | ||
37472 | - return IP6_DEFRAG_CONNTRACK_IN; | ||
37473 | - else | ||
37474 | - return IP6_DEFRAG_CONNTRACK_OUT; | ||
37475 | - | ||
37476 | -} | ||
37477 | - | ||
37478 | static unsigned int ipv6_defrag(unsigned int hooknum, | ||
37479 | struct sk_buff *skb, | ||
37480 | const struct net_device *in, | ||
37481 | @@ -215,7 +199,8 @@ static unsigned int ipv6_defrag(unsigned int hooknum, | ||
37482 | if (skb->nfct) | ||
37483 | return NF_ACCEPT; | ||
37484 | |||
37485 | - reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb)); | ||
37486 | + reasm = nf_ct_frag6_gather(skb); | ||
37487 | + | ||
37488 | /* queued */ | ||
37489 | if (reasm == NULL) | ||
37490 | return NF_STOLEN; | ||
37491 | diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
37492 | index 4b6a539..f3aba25 100644 | ||
37493 | --- a/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
37494 | +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | ||
37495 | @@ -170,14 +170,13 @@ out: | ||
37496 | /* Creation primitives. */ | ||
37497 | |||
37498 | static __inline__ struct nf_ct_frag6_queue * | ||
37499 | -fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) | ||
37500 | +fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst) | ||
37501 | { | ||
37502 | struct inet_frag_queue *q; | ||
37503 | struct ip6_create_arg arg; | ||
37504 | unsigned int hash; | ||
37505 | |||
37506 | arg.id = id; | ||
37507 | - arg.user = user; | ||
37508 | arg.src = src; | ||
37509 | arg.dst = dst; | ||
37510 | |||
37511 | @@ -562,7 +561,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) | ||
37512 | return 0; | ||
37513 | } | ||
37514 | |||
37515 | -struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) | ||
37516 | +struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb) | ||
37517 | { | ||
37518 | struct sk_buff *clone; | ||
37519 | struct net_device *dev = skb->dev; | ||
37520 | @@ -608,7 +607,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) | ||
37521 | if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) | ||
37522 | nf_ct_frag6_evictor(); | ||
37523 | |||
37524 | - fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); | ||
37525 | + fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr); | ||
37526 | if (fq == NULL) { | ||
37527 | pr_debug("Can't find and can't create new queue\n"); | ||
37528 | goto ret_orig; | ||
37529 | diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c | ||
37530 | index 4d18699..da5bd0e 100644 | ||
37531 | --- a/net/ipv6/reassembly.c | ||
37532 | +++ b/net/ipv6/reassembly.c | ||
37533 | @@ -72,7 +72,6 @@ struct frag_queue | ||
37534 | struct inet_frag_queue q; | ||
37535 | |||
37536 | __be32 id; /* fragment id */ | ||
37537 | - u32 user; | ||
37538 | struct in6_addr saddr; | ||
37539 | struct in6_addr daddr; | ||
37540 | |||
37541 | @@ -142,7 +141,7 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a) | ||
37542 | struct ip6_create_arg *arg = a; | ||
37543 | |||
37544 | fq = container_of(q, struct frag_queue, q); | ||
37545 | - return (fq->id == arg->id && fq->user == arg->user && | ||
37546 | + return (fq->id == arg->id && | ||
37547 | ipv6_addr_equal(&fq->saddr, arg->src) && | ||
37548 | ipv6_addr_equal(&fq->daddr, arg->dst)); | ||
37549 | } | ||
37550 | @@ -164,7 +163,6 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a) | ||
37551 | struct ip6_create_arg *arg = a; | ||
37552 | |||
37553 | fq->id = arg->id; | ||
37554 | - fq->user = arg->user; | ||
37555 | ipv6_addr_copy(&fq->saddr, arg->src); | ||
37556 | ipv6_addr_copy(&fq->daddr, arg->dst); | ||
37557 | } | ||
37558 | @@ -246,7 +244,6 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst, | ||
37559 | unsigned int hash; | ||
37560 | |||
37561 | arg.id = id; | ||
37562 | - arg.user = IP6_DEFRAG_LOCAL_DELIVER; | ||
37563 | arg.src = src; | ||
37564 | arg.dst = dst; | ||
37565 | |||
37566 | diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c | ||
37567 | index fe2d3f8..7b5131b 100644 | ||
37568 | --- a/net/mac80211/cfg.c | ||
37569 | +++ b/net/mac80211/cfg.c | ||
37570 | @@ -338,8 +338,7 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | ||
37571 | sinfo->rx_packets = sta->rx_packets; | ||
37572 | sinfo->tx_packets = sta->tx_packets; | ||
37573 | |||
37574 | - if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || | ||
37575 | - (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { | ||
37576 | + if (sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) { | ||
37577 | sinfo->filled |= STATION_INFO_SIGNAL; | ||
37578 | sinfo->signal = (s8)sta->last_signal; | ||
37579 | } | ||
37580 | @@ -1306,9 +1305,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | ||
37581 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | ||
37582 | struct ieee80211_conf *conf = &local->hw.conf; | ||
37583 | |||
37584 | - if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
37585 | - return -EOPNOTSUPP; | ||
37586 | - | ||
37587 | if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) | ||
37588 | return -EOPNOTSUPP; | ||
37589 | |||
37590 | diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h | ||
37591 | index d87645e..37b9051 100644 | ||
37592 | --- a/net/mac80211/driver-trace.h | ||
37593 | +++ b/net/mac80211/driver-trace.h | ||
37594 | @@ -655,7 +655,7 @@ TRACE_EVENT(drv_ampdu_action, | ||
37595 | __entry->ret = ret; | ||
37596 | __entry->action = action; | ||
37597 | __entry->tid = tid; | ||
37598 | - __entry->ssn = ssn ? *ssn : 0; | ||
37599 | + __entry->ssn = *ssn; | ||
37600 | ), | ||
37601 | |||
37602 | TP_printk( | ||
37603 | diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c | ||
37604 | index 07600a6..f1362f3 100644 | ||
37605 | --- a/net/mac80211/ibss.c | ||
37606 | +++ b/net/mac80211/ibss.c | ||
37607 | @@ -455,10 +455,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) | ||
37608 | |||
37609 | ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT); | ||
37610 | |||
37611 | - if (time_before(jiffies, ifibss->last_scan_completed + | ||
37612 | - IEEE80211_IBSS_MERGE_INTERVAL)) | ||
37613 | - return; | ||
37614 | - | ||
37615 | if (ieee80211_sta_active_ibss(sdata)) | ||
37616 | return; | ||
37617 | |||
37618 | @@ -643,7 +639,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, | ||
37619 | } | ||
37620 | if (pos[1] != 0 && | ||
37621 | (pos[1] != ifibss->ssid_len || | ||
37622 | - memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { | ||
37623 | + !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { | ||
37624 | /* Ignore ProbeReq for foreign SSID */ | ||
37625 | return; | ||
37626 | } | ||
37627 | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h | ||
37628 | index 5a46164..10d316e 100644 | ||
37629 | --- a/net/mac80211/ieee80211_i.h | ||
37630 | +++ b/net/mac80211/ieee80211_i.h | ||
37631 | @@ -808,7 +808,6 @@ struct ieee80211_local { | ||
37632 | unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */ | ||
37633 | |||
37634 | bool pspolling; | ||
37635 | - bool scan_ps_enabled; | ||
37636 | /* | ||
37637 | * PS can only be enabled when we have exactly one managed | ||
37638 | * interface (and monitors) in PS, this then points there. | ||
37639 | diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c | ||
37640 | index 079c500..b8295cb 100644 | ||
37641 | --- a/net/mac80211/iface.c | ||
37642 | +++ b/net/mac80211/iface.c | ||
37643 | @@ -15,14 +15,12 @@ | ||
37644 | #include <linux/netdevice.h> | ||
37645 | #include <linux/rtnetlink.h> | ||
37646 | #include <net/mac80211.h> | ||
37647 | -#include <net/ieee80211_radiotap.h> | ||
37648 | #include "ieee80211_i.h" | ||
37649 | #include "sta_info.h" | ||
37650 | #include "debugfs_netdev.h" | ||
37651 | #include "mesh.h" | ||
37652 | #include "led.h" | ||
37653 | #include "driver-ops.h" | ||
37654 | -#include "wme.h" | ||
37655 | |||
37656 | /** | ||
37657 | * DOC: Interface list locking | ||
37658 | @@ -644,12 +642,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) | ||
37659 | WARN_ON(flushed); | ||
37660 | } | ||
37661 | |||
37662 | -static u16 ieee80211_netdev_select_queue(struct net_device *dev, | ||
37663 | - struct sk_buff *skb) | ||
37664 | -{ | ||
37665 | - return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); | ||
37666 | -} | ||
37667 | - | ||
37668 | static const struct net_device_ops ieee80211_dataif_ops = { | ||
37669 | .ndo_open = ieee80211_open, | ||
37670 | .ndo_stop = ieee80211_stop, | ||
37671 | @@ -658,35 +650,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { | ||
37672 | .ndo_set_multicast_list = ieee80211_set_multicast_list, | ||
37673 | .ndo_change_mtu = ieee80211_change_mtu, | ||
37674 | .ndo_set_mac_address = eth_mac_addr, | ||
37675 | - .ndo_select_queue = ieee80211_netdev_select_queue, | ||
37676 | }; | ||
37677 | |||
37678 | -static u16 ieee80211_monitor_select_queue(struct net_device *dev, | ||
37679 | - struct sk_buff *skb) | ||
37680 | -{ | ||
37681 | - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
37682 | - struct ieee80211_local *local = sdata->local; | ||
37683 | - struct ieee80211_hdr *hdr; | ||
37684 | - struct ieee80211_radiotap_header *rtap = (void *)skb->data; | ||
37685 | - | ||
37686 | - if (local->hw.queues < 4) | ||
37687 | - return 0; | ||
37688 | - | ||
37689 | - if (skb->len < 4 || | ||
37690 | - skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) | ||
37691 | - return 0; /* doesn't matter, frame will be dropped */ | ||
37692 | - | ||
37693 | - hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); | ||
37694 | - | ||
37695 | - if (!ieee80211_is_data(hdr->frame_control)) { | ||
37696 | - skb->priority = 7; | ||
37697 | - return ieee802_1d_to_ac[skb->priority]; | ||
37698 | - } | ||
37699 | - | ||
37700 | - skb->priority = 0; | ||
37701 | - return ieee80211_downgrade_queue(local, skb); | ||
37702 | -} | ||
37703 | - | ||
37704 | static const struct net_device_ops ieee80211_monitorif_ops = { | ||
37705 | .ndo_open = ieee80211_open, | ||
37706 | .ndo_stop = ieee80211_stop, | ||
37707 | @@ -695,7 +660,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { | ||
37708 | .ndo_set_multicast_list = ieee80211_set_multicast_list, | ||
37709 | .ndo_change_mtu = ieee80211_change_mtu, | ||
37710 | .ndo_set_mac_address = eth_mac_addr, | ||
37711 | - .ndo_select_queue = ieee80211_monitor_select_queue, | ||
37712 | }; | ||
37713 | |||
37714 | static void ieee80211_if_setup(struct net_device *dev) | ||
37715 | @@ -804,8 +768,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | ||
37716 | |||
37717 | ASSERT_RTNL(); | ||
37718 | |||
37719 | - ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, | ||
37720 | - name, ieee80211_if_setup, local->hw.queues); | ||
37721 | + ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, | ||
37722 | + name, ieee80211_if_setup); | ||
37723 | if (!ndev) | ||
37724 | return -ENOMEM; | ||
37725 | dev_net_set(ndev, wiphy_net(local->hw.wiphy)); | ||
37726 | diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h | ||
37727 | index 010ff2f..dd1c193 100644 | ||
37728 | --- a/net/mac80211/mesh.h | ||
37729 | +++ b/net/mac80211/mesh.h | ||
37730 | @@ -186,9 +186,8 @@ struct mesh_rmc { | ||
37731 | */ | ||
37732 | #define MESH_PREQ_MIN_INT 10 | ||
37733 | #define MESH_DIAM_TRAVERSAL_TIME 50 | ||
37734 | -/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before | ||
37735 | - * timing out. This way it will remain ACTIVE and no data frames will be | ||
37736 | - * unnecesarily held in the pending queue. | ||
37737 | +/* Paths will be refreshed if they are closer than PATH_REFRESH_TIME to their | ||
37738 | + * expiration | ||
37739 | */ | ||
37740 | #define MESH_PATH_REFRESH_TIME 1000 | ||
37741 | #define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME) | ||
37742 | diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c | ||
37743 | index 93c49fc..29b82e9 100644 | ||
37744 | --- a/net/mac80211/mesh_hwmp.c | ||
37745 | +++ b/net/mac80211/mesh_hwmp.c | ||
37746 | @@ -813,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, | ||
37747 | } | ||
37748 | |||
37749 | if (mpath->flags & MESH_PATH_ACTIVE) { | ||
37750 | - if (time_after(jiffies, mpath->exp_time - | ||
37751 | + if (time_after(jiffies, mpath->exp_time + | ||
37752 | msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) | ||
37753 | && !memcmp(sdata->dev->dev_addr, hdr->addr4, | ||
37754 | ETH_ALEN) | ||
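The mesh.h and mesh_hwmp.c hunks above revert the path-refresh check to time_after(jiffies, mpath->exp_time + refresh) along with the older comment. With the minus form a path gets refreshed shortly before it expires, so it stays ACTIVE; with the plus form nothing happens until well after expiry. A small userspace model of the kernel's wrap-safe time_after() comparison, with made-up tick values, illustrates the difference (assumptions: plain C stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long jiffies_t;

/* Same wrap-safe idea as the kernel's time_after(a, b): true if a is later. */
static bool time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = 1000, exp_time = 1100, refresh = 250;

	/* exp_time - refresh: refresh kicks in before the path expires. */
	printf("minus form: %d\n", time_after(now, exp_time - refresh));
	/* exp_time + refresh: no refresh until well past expiry. */
	printf("plus form:  %d\n", time_after(now, exp_time + refresh));
	return 0;
}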
37755 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c | ||
37756 | index 6cae295..dc5049d 100644 | ||
37757 | --- a/net/mac80211/mlme.c | ||
37758 | +++ b/net/mac80211/mlme.c | ||
37759 | @@ -904,14 +904,6 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | ||
37760 | sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | | ||
37761 | IEEE80211_STA_BEACON_POLL); | ||
37762 | |||
37763 | - /* | ||
37764 | - * Always handle WMM once after association regardless | ||
37765 | - * of the first value the AP uses. Setting -1 here has | ||
37766 | - * that effect because the AP values is an unsigned | ||
37767 | - * 4-bit value. | ||
37768 | - */ | ||
37769 | - sdata->u.mgd.wmm_last_param_set = -1; | ||
37770 | - | ||
37771 | ieee80211_led_assoc(local, 1); | ||
37772 | |||
37773 | sdata->vif.bss_conf.assoc = 1; | ||
37774 | @@ -1953,9 +1945,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
37775 | rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); | ||
37776 | break; | ||
37777 | case IEEE80211_STYPE_ACTION: | ||
37778 | - if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) | ||
37779 | - break; | ||
37780 | - | ||
37781 | + /* XXX: differentiate, can only happen for CSA now! */ | ||
37782 | ieee80211_sta_process_chanswitch(sdata, | ||
37783 | &mgmt->u.action.u.chan_switch.sw_elem, | ||
37784 | ifmgd->associated); | ||
37785 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c | ||
37786 | index 16c6cdc..7170bf4 100644 | ||
37787 | --- a/net/mac80211/rx.c | ||
37788 | +++ b/net/mac80211/rx.c | ||
37789 | @@ -1514,6 +1514,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | ||
37790 | mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata); | ||
37791 | } else { | ||
37792 | spin_lock_bh(&mppath->state_lock); | ||
37793 | + mppath->exp_time = jiffies; | ||
37794 | if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0) | ||
37795 | memcpy(mppath->mpp, hdr->addr4, ETH_ALEN); | ||
37796 | spin_unlock_bh(&mppath->state_lock); | ||
37797 | @@ -1548,9 +1549,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | ||
37798 | memset(info, 0, sizeof(*info)); | ||
37799 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
37800 | info->control.vif = &rx->sdata->vif; | ||
37801 | - skb_set_queue_mapping(skb, | ||
37802 | - ieee80211_select_queue(rx->sdata, fwd_skb)); | ||
37803 | - ieee80211_set_qos_hdr(local, skb); | ||
37804 | + ieee80211_select_queue(local, fwd_skb); | ||
37805 | if (is_multicast_ether_addr(fwd_hdr->addr1)) | ||
37806 | IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, | ||
37807 | fwded_mcast); | ||
37808 | @@ -1810,10 +1809,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | ||
37809 | } | ||
37810 | break; | ||
37811 | default: | ||
37812 | - /* do not process rejected action frames */ | ||
37813 | - if (mgmt->u.action.category & 0x80) | ||
37814 | - return RX_DROP_MONITOR; | ||
37815 | - | ||
37816 | return RX_CONTINUE; | ||
37817 | } | ||
37818 | |||
37819 | diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c | ||
37820 | index 1a41909..71e10ca 100644 | ||
37821 | --- a/net/mac80211/scan.c | ||
37822 | +++ b/net/mac80211/scan.c | ||
37823 | @@ -196,8 +196,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | ||
37824 | static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) | ||
37825 | { | ||
37826 | struct ieee80211_local *local = sdata->local; | ||
37827 | - | ||
37828 | - local->scan_ps_enabled = false; | ||
37829 | + bool ps = false; | ||
37830 | |||
37831 | /* FIXME: what to do when local->pspolling is true? */ | ||
37832 | |||
37833 | @@ -205,13 +204,12 @@ static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata) | ||
37834 | cancel_work_sync(&local->dynamic_ps_enable_work); | ||
37835 | |||
37836 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | ||
37837 | - local->scan_ps_enabled = true; | ||
37838 | + ps = true; | ||
37839 | local->hw.conf.flags &= ~IEEE80211_CONF_PS; | ||
37840 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | ||
37841 | } | ||
37842 | |||
37843 | - if (!(local->scan_ps_enabled) || | ||
37844 | - !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) | ||
37845 | + if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)) | ||
37846 | /* | ||
37847 | * If power save was enabled, no need to send a nullfunc | ||
37848 | * frame because AP knows that we are sleeping. But if the | ||
37849 | @@ -232,7 +230,7 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) | ||
37850 | |||
37851 | if (!local->ps_sdata) | ||
37852 | ieee80211_send_nullfunc(local, sdata, 0); | ||
37853 | - else if (local->scan_ps_enabled) { | ||
37854 | + else { | ||
37855 | /* | ||
37856 | * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware | ||
37857 | * will send a nullfunc frame with the powersave bit set | ||
37858 | @@ -248,16 +246,6 @@ static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata) | ||
37859 | */ | ||
37860 | local->hw.conf.flags |= IEEE80211_CONF_PS; | ||
37861 | ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); | ||
37862 | - } else if (local->hw.conf.dynamic_ps_timeout > 0) { | ||
37863 | - /* | ||
37864 | - * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer | ||
37865 | - * had been running before leaving the operating channel, | ||
37866 | - * restart the timer now and send a nullfunc frame to inform | ||
37867 | - * the AP that we are awake. | ||
37868 | - */ | ||
37869 | - ieee80211_send_nullfunc(local, sdata, 0); | ||
37870 | - mod_timer(&local->dynamic_ps_timer, jiffies + | ||
37871 | - msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); | ||
37872 | } | ||
37873 | } | ||
37874 | |||
37875 | @@ -276,14 +264,10 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) | ||
37876 | |||
37877 | mutex_lock(&local->scan_mtx); | ||
37878 | |||
37879 | - /* | ||
37880 | - * It's ok to abort a not-yet-running scan (that | ||
37881 | - * we have one at all will be verified by checking | ||
37882 | - * local->scan_req next), but not to complete it | ||
37883 | - * successfully. | ||
37884 | - */ | ||
37885 | - if (WARN_ON(!local->scanning && !aborted)) | ||
37886 | - aborted = true; | ||
37887 | + if (WARN_ON(!local->scanning)) { | ||
37888 | + mutex_unlock(&local->scan_mtx); | ||
37889 | + return; | ||
37890 | + } | ||
37891 | |||
37892 | if (WARN_ON(!local->scan_req)) { | ||
37893 | mutex_unlock(&local->scan_mtx); | ||
37894 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c | ||
37895 | index 441f68e..eaa4118 100644 | ||
37896 | --- a/net/mac80211/tx.c | ||
37897 | +++ b/net/mac80211/tx.c | ||
37898 | @@ -1401,7 +1401,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | ||
37899 | |||
37900 | if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && | ||
37901 | local->hw.conf.dynamic_ps_timeout > 0 && | ||
37902 | - !local->quiescing && | ||
37903 | !(local->scanning) && local->ps_sdata) { | ||
37904 | if (local->hw.conf.flags & IEEE80211_CONF_PS) { | ||
37905 | ieee80211_stop_queues_by_reason(&local->hw, | ||
37906 | @@ -1482,7 +1481,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, | ||
37907 | return; | ||
37908 | } | ||
37909 | |||
37910 | - ieee80211_set_qos_hdr(local, skb); | ||
37911 | + ieee80211_select_queue(local, skb); | ||
37912 | ieee80211_tx(sdata, skb, false); | ||
37913 | dev_put(sdata->dev); | ||
37914 | } | ||
37915 | @@ -2226,9 +2225,6 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, | ||
37916 | if (!encrypt) | ||
37917 | info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
37918 | |||
37919 | - /* send all internal mgmt frames on VO */ | ||
37920 | - skb_set_queue_mapping(skb, 0); | ||
37921 | - | ||
37922 | /* | ||
37923 | * The other path calling ieee80211_xmit is from the tasklet, | ||
37924 | * and while we can handle concurrent transmissions locking | ||
37925 | diff --git a/net/mac80211/util.c b/net/mac80211/util.c | ||
37926 | index 553cffe..e6c08da 100644 | ||
37927 | --- a/net/mac80211/util.c | ||
37928 | +++ b/net/mac80211/util.c | ||
37929 | @@ -269,7 +269,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | ||
37930 | enum queue_stop_reason reason) | ||
37931 | { | ||
37932 | struct ieee80211_local *local = hw_to_local(hw); | ||
37933 | - struct ieee80211_sub_if_data *sdata; | ||
37934 | |||
37935 | if (WARN_ON(queue >= hw->queues)) | ||
37936 | return; | ||
37937 | @@ -282,11 +281,6 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, | ||
37938 | |||
37939 | if (!skb_queue_empty(&local->pending[queue])) | ||
37940 | tasklet_schedule(&local->tx_pending_tasklet); | ||
37941 | - | ||
37942 | - rcu_read_lock(); | ||
37943 | - list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
37944 | - netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
37945 | - rcu_read_unlock(); | ||
37946 | } | ||
37947 | |||
37948 | void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, | ||
37949 | @@ -311,17 +305,11 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue, | ||
37950 | enum queue_stop_reason reason) | ||
37951 | { | ||
37952 | struct ieee80211_local *local = hw_to_local(hw); | ||
37953 | - struct ieee80211_sub_if_data *sdata; | ||
37954 | |||
37955 | if (WARN_ON(queue >= hw->queues)) | ||
37956 | return; | ||
37957 | |||
37958 | __set_bit(reason, &local->queue_stop_reasons[queue]); | ||
37959 | - | ||
37960 | - rcu_read_lock(); | ||
37961 | - list_for_each_entry_rcu(sdata, &local->interfaces, list) | ||
37962 | - netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue)); | ||
37963 | - rcu_read_unlock(); | ||
37964 | } | ||
37965 | |||
37966 | void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, | ||
37967 | @@ -591,7 +579,7 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, | ||
37968 | if (elen > left) | ||
37969 | break; | ||
37970 | |||
37971 | - if (calc_crc && id < 64 && (filter & (1ULL << id))) | ||
37972 | + if (calc_crc && id < 64 && (filter & BIT(id))) | ||
37973 | crc = crc32_be(crc, pos - 2, elen + 2); | ||
37974 | |||
37975 | switch (id) { | ||
37976 | @@ -1043,19 +1031,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | ||
37977 | |||
37978 | /* restart hardware */ | ||
37979 | if (local->open_count) { | ||
37980 | - /* | ||
37981 | - * Upon resume hardware can sometimes be goofy due to | ||
37982 | - * various platform / driver / bus issues, so restarting | ||
37983 | - * the device may at times not work immediately. Propagate | ||
37984 | - * the error. | ||
37985 | - */ | ||
37986 | res = drv_start(local); | ||
37987 | - if (res) { | ||
37988 | - WARN(local->suspended, "Harware became unavailable " | ||
37989 | - "upon resume. This is could be a software issue" | ||
37990 | - "prior to suspend or a harware issue\n"); | ||
37991 | - return res; | ||
37992 | - } | ||
37993 | |||
37994 | ieee80211_led_radio(local, true); | ||
37995 | } | ||
37996 | diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c | ||
37997 | index 6d32ebf..b19b769 100644 | ||
37998 | --- a/net/mac80211/wme.c | ||
37999 | +++ b/net/mac80211/wme.c | ||
38000 | @@ -44,62 +44,22 @@ static int wme_downgrade_ac(struct sk_buff *skb) | ||
38001 | } | ||
38002 | |||
38003 | |||
38004 | -/* Indicate which queue to use. */ | ||
38005 | -u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | ||
38006 | - struct sk_buff *skb) | ||
38007 | +/* Indicate which queue to use. */ | ||
38008 | +static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb) | ||
38009 | { | ||
38010 | - struct ieee80211_local *local = sdata->local; | ||
38011 | - struct sta_info *sta = NULL; | ||
38012 | - u32 sta_flags = 0; | ||
38013 | - const u8 *ra = NULL; | ||
38014 | - bool qos = false; | ||
38015 | + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
38016 | |||
38017 | - if (local->hw.queues < 4 || skb->len < 6) { | ||
38018 | - skb->priority = 0; /* required for correct WPA/11i MIC */ | ||
38019 | - return min_t(u16, local->hw.queues - 1, | ||
38020 | - ieee802_1d_to_ac[skb->priority]); | ||
38021 | - } | ||
38022 | - | ||
38023 | - rcu_read_lock(); | ||
38024 | - switch (sdata->vif.type) { | ||
38025 | - case NL80211_IFTYPE_AP_VLAN: | ||
38026 | - case NL80211_IFTYPE_AP: | ||
38027 | - ra = skb->data; | ||
38028 | - break; | ||
38029 | - case NL80211_IFTYPE_WDS: | ||
38030 | - ra = sdata->u.wds.remote_addr; | ||
38031 | - break; | ||
38032 | -#ifdef CONFIG_MAC80211_MESH | ||
38033 | - case NL80211_IFTYPE_MESH_POINT: | ||
38034 | - /* | ||
38035 | - * XXX: This is clearly broken ... but already was before, | ||
38036 | - * because ieee80211_fill_mesh_addresses() would clear A1 | ||
38037 | - * except for multicast addresses. | ||
38038 | - */ | ||
38039 | - break; | ||
38040 | -#endif | ||
38041 | - case NL80211_IFTYPE_STATION: | ||
38042 | - ra = sdata->u.mgd.bssid; | ||
38043 | - break; | ||
38044 | - case NL80211_IFTYPE_ADHOC: | ||
38045 | - ra = skb->data; | ||
38046 | - break; | ||
38047 | - default: | ||
38048 | - break; | ||
38049 | + if (!ieee80211_is_data(hdr->frame_control)) { | ||
38050 | + /* management frames go on AC_VO queue, but are sent | ||
38051 | + * without QoS control fields */ | ||
38052 | + return 0; | ||
38053 | } | ||
38054 | |||
38055 | - if (!sta && ra && !is_multicast_ether_addr(ra)) { | ||
38056 | - sta = sta_info_get(local, ra); | ||
38057 | - if (sta) | ||
38058 | - sta_flags = get_sta_flags(sta); | ||
38059 | + if (0 /* injected */) { | ||
38060 | + /* use AC from radiotap */ | ||
38061 | } | ||
38062 | |||
38063 | - if (sta_flags & WLAN_STA_WME) | ||
38064 | - qos = true; | ||
38065 | - | ||
38066 | - rcu_read_unlock(); | ||
38067 | - | ||
38068 | - if (!qos) { | ||
38069 | + if (!ieee80211_is_data_qos(hdr->frame_control)) { | ||
38070 | skb->priority = 0; /* required for correct WPA/11i MIC */ | ||
38071 | return ieee802_1d_to_ac[skb->priority]; | ||
38072 | } | ||
38073 | @@ -108,12 +68,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | ||
38074 | * data frame has */ | ||
38075 | skb->priority = cfg80211_classify8021d(skb); | ||
38076 | |||
38077 | - return ieee80211_downgrade_queue(local, skb); | ||
38078 | -} | ||
38079 | - | ||
38080 | -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, | ||
38081 | - struct sk_buff *skb) | ||
38082 | -{ | ||
38083 | /* in case we are a client verify acm is not set for this ac */ | ||
38084 | while (unlikely(local->wmm_acm & BIT(skb->priority))) { | ||
38085 | if (wme_downgrade_ac(skb)) { | ||
38086 | @@ -131,17 +85,24 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local, | ||
38087 | return ieee802_1d_to_ac[skb->priority]; | ||
38088 | } | ||
38089 | |||
38090 | -void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) | ||
38091 | +void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb) | ||
38092 | { | ||
38093 | - struct ieee80211_hdr *hdr = (void *)skb->data; | ||
38094 | - | ||
38095 | - /* Fill in the QoS header if there is one. */ | ||
38096 | + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
38097 | + u16 queue; | ||
38098 | + u8 tid; | ||
38099 | + | ||
38100 | + queue = classify80211(local, skb); | ||
38101 | + if (unlikely(queue >= local->hw.queues)) | ||
38102 | + queue = local->hw.queues - 1; | ||
38103 | + | ||
38104 | + /* | ||
38105 | + * Now we know the 1d priority, fill in the QoS header if | ||
38106 | + * there is one (and we haven't done this before). | ||
38107 | + */ | ||
38108 | if (ieee80211_is_data_qos(hdr->frame_control)) { | ||
38109 | u8 *p = ieee80211_get_qos_ctl(hdr); | ||
38110 | - u8 ack_policy = 0, tid; | ||
38111 | - | ||
38112 | + u8 ack_policy = 0; | ||
38113 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | ||
38114 | - | ||
38115 | if (unlikely(local->wifi_wme_noack_test)) | ||
38116 | ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << | ||
38117 | QOS_CONTROL_ACK_POLICY_SHIFT; | ||
38118 | @@ -149,4 +110,6 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb) | ||
38119 | *p++ = ack_policy | tid; | ||
38120 | *p = 0; | ||
38121 | } | ||
38122 | + | ||
38123 | + skb_set_queue_mapping(skb, queue); | ||
38124 | } | ||
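The wme.c hunk folds queue selection back into a single ieee80211_select_queue(local, skb): classify80211() derives an access category from the 802.1D tag in skb->priority via ieee802_1d_to_ac[], downgrading while the AP requires admission control (ACM) for that tag, and the QoS control field is then filled from the resulting tag. A self-contained sketch of that classify-and-downgrade step (table values and helper names follow the usual 802.1D-to-AC convention and are assumptions here, not copied from mac80211):

#include <stdbool.h>
#include <stdio.h>

enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };   /* queue 0 = highest */

static const int ieee802_1d_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

/* Downgrade one step: VO -> VI -> BE -> BK; returns false once at BK. */
static bool wme_downgrade_ac(int *prio)
{
	switch (ieee802_1d_to_ac[*prio]) {
	case AC_VO: *prio = 5; return true;
	case AC_VI: *prio = 3; return true;
	case AC_BE: *prio = 2; return true;
	default:    return false;
	}
}

/* acm_bits is a bit field of 802.1D tags the AP has marked as ACM. */
static int select_ac(int prio, unsigned acm_bits)
{
	while (acm_bits & (1u << prio))
		if (!wme_downgrade_ac(&prio))
			break;              /* everything ACM'd: stay on BK */
	return ieee802_1d_to_ac[prio];
}

int main(void)
{
	/* Example: ACM required on VO (tags 6,7) -> voice traffic drops to VI. */
	unsigned acm = (1u << 6) | (1u << 7);
	printf("tag 6 -> AC %d\n", select_ac(6, acm));
	return 0;
}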
38125 | diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h | ||
38126 | index 6053b1c..d4fd87c 100644 | ||
38127 | --- a/net/mac80211/wme.h | ||
38128 | +++ b/net/mac80211/wme.h | ||
38129 | @@ -20,11 +20,7 @@ | ||
38130 | |||
38131 | extern const int ieee802_1d_to_ac[8]; | ||
38132 | |||
38133 | -u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, | ||
38134 | - struct sk_buff *skb); | ||
38135 | -void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb); | ||
38136 | -u16 ieee80211_downgrade_queue(struct ieee80211_local *local, | ||
38137 | - struct sk_buff *skb); | ||
38138 | - | ||
38139 | +void ieee80211_select_queue(struct ieee80211_local *local, | ||
38140 | + struct sk_buff *skb); | ||
38141 | |||
38142 | #endif /* _WME_H */ | ||
38143 | diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c | ||
38144 | index 02b2610..446e9bd 100644 | ||
38145 | --- a/net/netfilter/ipvs/ip_vs_ctl.c | ||
38146 | +++ b/net/netfilter/ipvs/ip_vs_ctl.c | ||
38147 | @@ -2714,8 +2714,6 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc, | ||
38148 | if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr)))) | ||
38149 | return -EINVAL; | ||
38150 | |||
38151 | - memset(usvc, 0, sizeof(*usvc)); | ||
38152 | - | ||
38153 | usvc->af = nla_get_u16(nla_af); | ||
38154 | #ifdef CONFIG_IP_VS_IPV6 | ||
38155 | if (usvc->af != AF_INET && usvc->af != AF_INET6) | ||
38156 | @@ -2903,8 +2901,6 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest, | ||
38157 | if (!(nla_addr && nla_port)) | ||
38158 | return -EINVAL; | ||
38159 | |||
38160 | - memset(udest, 0, sizeof(*udest)); | ||
38161 | - | ||
38162 | nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr)); | ||
38163 | udest->port = nla_get_u16(nla_port); | ||
38164 | |||
38165 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c | ||
38166 | index 1374179..b9168c1 100644 | ||
38167 | --- a/net/netfilter/nf_conntrack_core.c | ||
38168 | +++ b/net/netfilter/nf_conntrack_core.c | ||
38169 | @@ -30,7 +30,6 @@ | ||
38170 | #include <linux/netdevice.h> | ||
38171 | #include <linux/socket.h> | ||
38172 | #include <linux/mm.h> | ||
38173 | -#include <linux/nsproxy.h> | ||
38174 | #include <linux/rculist_nulls.h> | ||
38175 | |||
38176 | #include <net/netfilter/nf_conntrack.h> | ||
38177 | @@ -64,6 +63,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max); | ||
38178 | struct nf_conn nf_conntrack_untracked __read_mostly; | ||
38179 | EXPORT_SYMBOL_GPL(nf_conntrack_untracked); | ||
38180 | |||
38181 | +static struct kmem_cache *nf_conntrack_cachep __read_mostly; | ||
38182 | + | ||
38183 | static int nf_conntrack_hash_rnd_initted; | ||
38184 | static unsigned int nf_conntrack_hash_rnd; | ||
38185 | |||
38186 | @@ -85,10 +86,9 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, | ||
38187 | return ((u64)h * size) >> 32; | ||
38188 | } | ||
38189 | |||
38190 | -static inline u_int32_t hash_conntrack(const struct net *net, | ||
38191 | - const struct nf_conntrack_tuple *tuple) | ||
38192 | +static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) | ||
38193 | { | ||
38194 | - return __hash_conntrack(tuple, net->ct.htable_size, | ||
38195 | + return __hash_conntrack(tuple, nf_conntrack_htable_size, | ||
38196 | nf_conntrack_hash_rnd); | ||
38197 | } | ||
38198 | |||
38199 | @@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple) | ||
38200 | { | ||
38201 | struct nf_conntrack_tuple_hash *h; | ||
38202 | struct hlist_nulls_node *n; | ||
38203 | - unsigned int hash = hash_conntrack(net, tuple); | ||
38204 | + unsigned int hash = hash_conntrack(tuple); | ||
38205 | |||
38206 | /* Disable BHs the entire time since we normally need to disable them | ||
38207 | * at least once for the stats anyway. | ||
38208 | @@ -366,11 +366,10 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct, | ||
38209 | |||
38210 | void nf_conntrack_hash_insert(struct nf_conn *ct) | ||
38211 | { | ||
38212 | - struct net *net = nf_ct_net(ct); | ||
38213 | unsigned int hash, repl_hash; | ||
38214 | |||
38215 | - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
38216 | - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
38217 | + hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
38218 | + repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
38219 | |||
38220 | __nf_conntrack_hash_insert(ct, hash, repl_hash); | ||
38221 | } | ||
38222 | @@ -398,8 +397,8 @@ __nf_conntrack_confirm(struct sk_buff *skb) | ||
38223 | if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) | ||
38224 | return NF_ACCEPT; | ||
38225 | |||
38226 | - hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
38227 | - repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
38228 | + hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); | ||
38229 | + repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); | ||
38230 | |||
38231 | /* We're not in hash table, and we refuse to set up related | ||
38232 | connections for unconfirmed conns. But packet copies and | ||
38233 | @@ -469,7 +468,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | ||
38234 | struct net *net = nf_ct_net(ignored_conntrack); | ||
38235 | struct nf_conntrack_tuple_hash *h; | ||
38236 | struct hlist_nulls_node *n; | ||
38237 | - unsigned int hash = hash_conntrack(net, tuple); | ||
38238 | + unsigned int hash = hash_conntrack(tuple); | ||
38239 | |||
38240 | /* Disable BHs the entire time since we need to disable them at | ||
38241 | * least once for the stats anyway. | ||
38242 | @@ -504,7 +503,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) | ||
38243 | int dropped = 0; | ||
38244 | |||
38245 | rcu_read_lock(); | ||
38246 | - for (i = 0; i < net->ct.htable_size; i++) { | ||
38247 | + for (i = 0; i < nf_conntrack_htable_size; i++) { | ||
38248 | hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], | ||
38249 | hnnode) { | ||
38250 | tmp = nf_ct_tuplehash_to_ctrack(h); | ||
38251 | @@ -518,8 +517,7 @@ static noinline int early_drop(struct net *net, unsigned int hash) | ||
38252 | ct = NULL; | ||
38253 | if (ct || cnt >= NF_CT_EVICTION_RANGE) | ||
38254 | break; | ||
38255 | - | ||
38256 | - hash = (hash + 1) % net->ct.htable_size; | ||
38257 | + hash = (hash + 1) % nf_conntrack_htable_size; | ||
38258 | } | ||
38259 | rcu_read_unlock(); | ||
38260 | |||
38261 | @@ -553,7 +551,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, | ||
38262 | |||
38263 | if (nf_conntrack_max && | ||
38264 | unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { | ||
38265 | - unsigned int hash = hash_conntrack(net, orig); | ||
38266 | + unsigned int hash = hash_conntrack(orig); | ||
38267 | if (!early_drop(net, hash)) { | ||
38268 | atomic_dec(&net->ct.count); | ||
38269 | if (net_ratelimit()) | ||
38270 | @@ -568,7 +566,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, | ||
38271 | * Do not use kmem_cache_zalloc(), as this cache uses | ||
38272 | * SLAB_DESTROY_BY_RCU. | ||
38273 | */ | ||
38274 | - ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp); | ||
38275 | + ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); | ||
38276 | if (ct == NULL) { | ||
38277 | pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); | ||
38278 | atomic_dec(&net->ct.count); | ||
38279 | @@ -607,7 +605,7 @@ void nf_conntrack_free(struct nf_conn *ct) | ||
38280 | nf_ct_ext_destroy(ct); | ||
38281 | atomic_dec(&net->ct.count); | ||
38282 | nf_ct_ext_free(ct); | ||
38283 | - kmem_cache_free(net->ct.nf_conntrack_cachep, ct); | ||
38284 | + kmem_cache_free(nf_conntrack_cachep, ct); | ||
38285 | } | ||
38286 | EXPORT_SYMBOL_GPL(nf_conntrack_free); | ||
38287 | |||
38288 | @@ -1010,7 +1008,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), | ||
38289 | struct hlist_nulls_node *n; | ||
38290 | |||
38291 | spin_lock_bh(&nf_conntrack_lock); | ||
38292 | - for (; *bucket < net->ct.htable_size; (*bucket)++) { | ||
38293 | + for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { | ||
38294 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { | ||
38295 | ct = nf_ct_tuplehash_to_ctrack(h); | ||
38296 | if (iter(ct, data)) | ||
38297 | @@ -1109,12 +1107,9 @@ static void nf_ct_release_dying_list(struct net *net) | ||
38298 | |||
38299 | static void nf_conntrack_cleanup_init_net(void) | ||
38300 | { | ||
38301 | - /* wait until all references to nf_conntrack_untracked are dropped */ | ||
38302 | - while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) | ||
38303 | - schedule(); | ||
38304 | - | ||
38305 | nf_conntrack_helper_fini(); | ||
38306 | nf_conntrack_proto_fini(); | ||
38307 | + kmem_cache_destroy(nf_conntrack_cachep); | ||
38308 | } | ||
38309 | |||
38310 | static void nf_conntrack_cleanup_net(struct net *net) | ||
38311 | @@ -1126,14 +1121,15 @@ static void nf_conntrack_cleanup_net(struct net *net) | ||
38312 | schedule(); | ||
38313 | goto i_see_dead_people; | ||
38314 | } | ||
38315 | + /* wait until all references to nf_conntrack_untracked are dropped */ | ||
38316 | + while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1) | ||
38317 | + schedule(); | ||
38318 | |||
38319 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, | ||
38320 | - net->ct.htable_size); | ||
38321 | + nf_conntrack_htable_size); | ||
38322 | nf_conntrack_ecache_fini(net); | ||
38323 | nf_conntrack_acct_fini(net); | ||
38324 | nf_conntrack_expect_fini(net); | ||
38325 | - kmem_cache_destroy(net->ct.nf_conntrack_cachep); | ||
38326 | - kfree(net->ct.slabname); | ||
38327 | free_percpu(net->ct.stat); | ||
38328 | } | ||
38329 | |||
38330 | @@ -1188,12 +1184,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) | ||
38331 | { | ||
38332 | int i, bucket, vmalloced, old_vmalloced; | ||
38333 | unsigned int hashsize, old_size; | ||
38334 | + int rnd; | ||
38335 | struct hlist_nulls_head *hash, *old_hash; | ||
38336 | struct nf_conntrack_tuple_hash *h; | ||
38337 | |||
38338 | - if (current->nsproxy->net_ns != &init_net) | ||
38339 | - return -EOPNOTSUPP; | ||
38340 | - | ||
38341 | /* On boot, we can set this without any fancy locking. */ | ||
38342 | if (!nf_conntrack_htable_size) | ||
38343 | return param_set_uint(val, kp); | ||
38344 | @@ -1206,29 +1200,33 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) | ||
38345 | if (!hash) | ||
38346 | return -ENOMEM; | ||
38347 | |||
38348 | + /* We have to rehash for the new table anyway, so we also can | ||
38349 | + * use a new random seed */ | ||
38350 | + get_random_bytes(&rnd, sizeof(rnd)); | ||
38351 | + | ||
38352 | /* Lookups in the old hash might happen in parallel, which means we | ||
38353 | * might get false negatives during connection lookup. New connections | ||
38354 | * created because of a false negative won't make it into the hash | ||
38355 | * though since that required taking the lock. | ||
38356 | */ | ||
38357 | spin_lock_bh(&nf_conntrack_lock); | ||
38358 | - for (i = 0; i < init_net.ct.htable_size; i++) { | ||
38359 | + for (i = 0; i < nf_conntrack_htable_size; i++) { | ||
38360 | while (!hlist_nulls_empty(&init_net.ct.hash[i])) { | ||
38361 | h = hlist_nulls_entry(init_net.ct.hash[i].first, | ||
38362 | struct nf_conntrack_tuple_hash, hnnode); | ||
38363 | hlist_nulls_del_rcu(&h->hnnode); | ||
38364 | - bucket = __hash_conntrack(&h->tuple, hashsize, | ||
38365 | - nf_conntrack_hash_rnd); | ||
38366 | + bucket = __hash_conntrack(&h->tuple, hashsize, rnd); | ||
38367 | hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); | ||
38368 | } | ||
38369 | } | ||
38370 | - old_size = init_net.ct.htable_size; | ||
38371 | + old_size = nf_conntrack_htable_size; | ||
38372 | old_vmalloced = init_net.ct.hash_vmalloc; | ||
38373 | old_hash = init_net.ct.hash; | ||
38374 | |||
38375 | - init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; | ||
38376 | + nf_conntrack_htable_size = hashsize; | ||
38377 | init_net.ct.hash_vmalloc = vmalloced; | ||
38378 | init_net.ct.hash = hash; | ||
38379 | + nf_conntrack_hash_rnd = rnd; | ||
38380 | spin_unlock_bh(&nf_conntrack_lock); | ||
38381 | |||
38382 | nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); | ||
38383 | @@ -1267,6 +1265,15 @@ static int nf_conntrack_init_init_net(void) | ||
38384 | NF_CONNTRACK_VERSION, nf_conntrack_htable_size, | ||
38385 | nf_conntrack_max); | ||
38386 | |||
38387 | + nf_conntrack_cachep = kmem_cache_create("nf_conntrack", | ||
38388 | + sizeof(struct nf_conn), | ||
38389 | + 0, SLAB_DESTROY_BY_RCU, NULL); | ||
38390 | + if (!nf_conntrack_cachep) { | ||
38391 | + printk(KERN_ERR "Unable to create nf_conn slab cache\n"); | ||
38392 | + ret = -ENOMEM; | ||
38393 | + goto err_cache; | ||
38394 | + } | ||
38395 | + | ||
38396 | ret = nf_conntrack_proto_init(); | ||
38397 | if (ret < 0) | ||
38398 | goto err_proto; | ||
38399 | @@ -1275,19 +1282,13 @@ static int nf_conntrack_init_init_net(void) | ||
38400 | if (ret < 0) | ||
38401 | goto err_helper; | ||
38402 | |||
38403 | - /* Set up fake conntrack: to never be deleted, not in any hashes */ | ||
38404 | -#ifdef CONFIG_NET_NS | ||
38405 | - nf_conntrack_untracked.ct_net = &init_net; | ||
38406 | -#endif | ||
38407 | - atomic_set(&nf_conntrack_untracked.ct_general.use, 1); | ||
38408 | - /* - and look it like as a confirmed connection */ | ||
38409 | - set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); | ||
38410 | - | ||
38411 | return 0; | ||
38412 | |||
38413 | err_helper: | ||
38414 | nf_conntrack_proto_fini(); | ||
38415 | err_proto: | ||
38416 | + kmem_cache_destroy(nf_conntrack_cachep); | ||
38417 | +err_cache: | ||
38418 | return ret; | ||
38419 | } | ||
38420 | |||
38421 | @@ -1309,24 +1310,7 @@ static int nf_conntrack_init_net(struct net *net) | ||
38422 | ret = -ENOMEM; | ||
38423 | goto err_stat; | ||
38424 | } | ||
38425 | - | ||
38426 | - net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net); | ||
38427 | - if (!net->ct.slabname) { | ||
38428 | - ret = -ENOMEM; | ||
38429 | - goto err_slabname; | ||
38430 | - } | ||
38431 | - | ||
38432 | - net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname, | ||
38433 | - sizeof(struct nf_conn), 0, | ||
38434 | - SLAB_DESTROY_BY_RCU, NULL); | ||
38435 | - if (!net->ct.nf_conntrack_cachep) { | ||
38436 | - printk(KERN_ERR "Unable to create nf_conn slab cache\n"); | ||
38437 | - ret = -ENOMEM; | ||
38438 | - goto err_cache; | ||
38439 | - } | ||
38440 | - | ||
38441 | - net->ct.htable_size = nf_conntrack_htable_size; | ||
38442 | - net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, | ||
38443 | + net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, | ||
38444 | &net->ct.hash_vmalloc, 1); | ||
38445 | if (!net->ct.hash) { | ||
38446 | ret = -ENOMEM; | ||
38447 | @@ -1343,6 +1327,15 @@ static int nf_conntrack_init_net(struct net *net) | ||
38448 | if (ret < 0) | ||
38449 | goto err_ecache; | ||
38450 | |||
38451 | + /* Set up fake conntrack: | ||
38452 | + - to never be deleted, not in any hashes */ | ||
38453 | +#ifdef CONFIG_NET_NS | ||
38454 | + nf_conntrack_untracked.ct_net = &init_net; | ||
38455 | +#endif | ||
38456 | + atomic_set(&nf_conntrack_untracked.ct_general.use, 1); | ||
38457 | + /* - and look it like as a confirmed connection */ | ||
38458 | + set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); | ||
38459 | + | ||
38460 | return 0; | ||
38461 | |||
38462 | err_ecache: | ||
38463 | @@ -1351,12 +1344,8 @@ err_acct: | ||
38464 | nf_conntrack_expect_fini(net); | ||
38465 | err_expect: | ||
38466 | nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, | ||
38467 | - net->ct.htable_size); | ||
38468 | + nf_conntrack_htable_size); | ||
38469 | err_hash: | ||
38470 | - kmem_cache_destroy(net->ct.nf_conntrack_cachep); | ||
38471 | -err_cache: | ||
38472 | - kfree(net->ct.slabname); | ||
38473 | -err_slabname: | ||
38474 | free_percpu(net->ct.stat); | ||
38475 | err_stat: | ||
38476 | return ret; | ||
38477 | diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c | ||
38478 | index e73eb04..2032dfe 100644 | ||
38479 | --- a/net/netfilter/nf_conntrack_expect.c | ||
38480 | +++ b/net/netfilter/nf_conntrack_expect.c | ||
38481 | @@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net) | ||
38482 | #endif /* CONFIG_PROC_FS */ | ||
38483 | } | ||
38484 | |||
38485 | -module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400); | ||
38486 | +module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); | ||
38487 | |||
38488 | int nf_conntrack_expect_init(struct net *net) | ||
38489 | { | ||
38490 | @@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net) | ||
38491 | |||
38492 | if (net_eq(net, &init_net)) { | ||
38493 | if (!nf_ct_expect_hsize) { | ||
38494 | - nf_ct_expect_hsize = net->ct.htable_size / 256; | ||
38495 | + nf_ct_expect_hsize = nf_conntrack_htable_size / 256; | ||
38496 | if (!nf_ct_expect_hsize) | ||
38497 | nf_ct_expect_hsize = 1; | ||
38498 | } | ||
38499 | diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c | ||
38500 | index 7dfd469..5509dd1 100644 | ||
38501 | --- a/net/netfilter/nf_conntrack_ftp.c | ||
38502 | +++ b/net/netfilter/nf_conntrack_ftp.c | ||
38503 | @@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq, | ||
38504 | struct nf_ct_ftp_master *info, int dir, | ||
38505 | struct sk_buff *skb) | ||
38506 | { | ||
38507 | - unsigned int i, oldest; | ||
38508 | + unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; | ||
38509 | |||
38510 | /* Look for oldest: if we find exact match, we're done. */ | ||
38511 | for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { | ||
38512 | if (info->seq_aft_nl[dir][i] == nl_seq) | ||
38513 | return; | ||
38514 | + | ||
38515 | + if (oldest == info->seq_aft_nl_num[dir] || | ||
38516 | + before(info->seq_aft_nl[dir][i], | ||
38517 | + info->seq_aft_nl[dir][oldest])) | ||
38518 | + oldest = i; | ||
38519 | } | ||
38520 | |||
38521 | if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { | ||
38522 | info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; | ||
38523 | - } else { | ||
38524 | - if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) | ||
38525 | - oldest = 0; | ||
38526 | - else | ||
38527 | - oldest = 1; | ||
38528 | - | ||
38529 | - if (after(nl_seq, info->seq_aft_nl[dir][oldest])) | ||
38530 | - info->seq_aft_nl[dir][oldest] = nl_seq; | ||
38531 | + } else if (oldest != NUM_SEQ_TO_REMEMBER && | ||
38532 | + after(nl_seq, info->seq_aft_nl[dir][oldest])) { | ||
38533 | + info->seq_aft_nl[dir][oldest] = nl_seq; | ||
38534 | } | ||
38535 | } | ||
38536 | |||
38537 | diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c | ||
38538 | index 4b1a56b..65c2a7b 100644 | ||
38539 | --- a/net/netfilter/nf_conntrack_helper.c | ||
38540 | +++ b/net/netfilter/nf_conntrack_helper.c | ||
38541 | @@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, | ||
38542 | /* Get rid of expecteds, set helpers to NULL. */ | ||
38543 | hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) | ||
38544 | unhelp(h, me); | ||
38545 | - for (i = 0; i < net->ct.htable_size; i++) { | ||
38546 | + for (i = 0; i < nf_conntrack_htable_size; i++) { | ||
38547 | hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) | ||
38548 | unhelp(h, me); | ||
38549 | } | ||
38550 | diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c | ||
38551 | index d521718..59d8064 100644 | ||
38552 | --- a/net/netfilter/nf_conntrack_netlink.c | ||
38553 | +++ b/net/netfilter/nf_conntrack_netlink.c | ||
38554 | @@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | ||
38555 | |||
38556 | rcu_read_lock(); | ||
38557 | last = (struct nf_conn *)cb->args[1]; | ||
38558 | - for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) { | ||
38559 | + for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { | ||
38560 | restart: | ||
38561 | hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], | ||
38562 | hnnode) { | ||
38563 | diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c | ||
38564 | index 1a84bf6..1935153 100644 | ||
38565 | --- a/net/netfilter/nf_conntrack_standalone.c | ||
38566 | +++ b/net/netfilter/nf_conntrack_standalone.c | ||
38567 | @@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) | ||
38568 | struct hlist_nulls_node *n; | ||
38569 | |||
38570 | for (st->bucket = 0; | ||
38571 | - st->bucket < net->ct.htable_size; | ||
38572 | + st->bucket < nf_conntrack_htable_size; | ||
38573 | st->bucket++) { | ||
38574 | n = rcu_dereference(net->ct.hash[st->bucket].first); | ||
38575 | if (!is_a_nulls(n)) | ||
38576 | @@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq, | ||
38577 | head = rcu_dereference(head->next); | ||
38578 | while (is_a_nulls(head)) { | ||
38579 | if (likely(get_nulls_value(head) == st->bucket)) { | ||
38580 | - if (++st->bucket >= net->ct.htable_size) | ||
38581 | + if (++st->bucket >= nf_conntrack_htable_size) | ||
38582 | return NULL; | ||
38583 | } | ||
38584 | head = rcu_dereference(net->ct.hash[st->bucket].first); | ||
38585 | @@ -358,7 +358,7 @@ static ctl_table nf_ct_sysctl_table[] = { | ||
38586 | { | ||
38587 | .ctl_name = NET_NF_CONNTRACK_BUCKETS, | ||
38588 | .procname = "nf_conntrack_buckets", | ||
38589 | - .data = &init_net.ct.htable_size, | ||
38590 | + .data = &nf_conntrack_htable_size, | ||
38591 | .maxlen = sizeof(unsigned int), | ||
38592 | .mode = 0444, | ||
38593 | .proc_handler = proc_dointvec, | ||
38594 | @@ -429,7 +429,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) | ||
38595 | goto out_kmemdup; | ||
38596 | |||
38597 | table[1].data = &net->ct.count; | ||
38598 | - table[2].data = &net->ct.htable_size; | ||
38599 | table[3].data = &net->ct.sysctl_checksum; | ||
38600 | table[4].data = &net->ct.sysctl_log_invalid; | ||
38601 | |||
38602 | diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c | ||
38603 | index ae66305..6dc4652 100644 | ||
38604 | --- a/net/netfilter/xt_conntrack.c | ||
38605 | +++ b/net/netfilter/xt_conntrack.c | ||
38606 | @@ -113,8 +113,7 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info, | ||
38607 | } | ||
38608 | |||
38609 | static bool | ||
38610 | -conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | ||
38611 | - u16 state_mask, u16 status_mask) | ||
38612 | +conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par) | ||
38613 | { | ||
38614 | const struct xt_conntrack_mtinfo2 *info = par->matchinfo; | ||
38615 | enum ip_conntrack_info ctinfo; | ||
38616 | @@ -137,7 +136,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | ||
38617 | if (test_bit(IPS_DST_NAT_BIT, &ct->status)) | ||
38618 | statebit |= XT_CONNTRACK_STATE_DNAT; | ||
38619 | } | ||
38620 | - if (!!(state_mask & statebit) ^ | ||
38621 | + if (!!(info->state_mask & statebit) ^ | ||
38622 | !(info->invert_flags & XT_CONNTRACK_STATE)) | ||
38623 | return false; | ||
38624 | } | ||
38625 | @@ -173,7 +172,7 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | ||
38626 | return false; | ||
38627 | |||
38628 | if ((info->match_flags & XT_CONNTRACK_STATUS) && | ||
38629 | - (!!(status_mask & ct->status) ^ | ||
38630 | + (!!(info->status_mask & ct->status) ^ | ||
38631 | !(info->invert_flags & XT_CONNTRACK_STATUS))) | ||
38632 | return false; | ||
38633 | |||
38634 | @@ -193,17 +192,11 @@ conntrack_mt(const struct sk_buff *skb, const struct xt_match_param *par, | ||
38635 | static bool | ||
38636 | conntrack_mt_v1(const struct sk_buff *skb, const struct xt_match_param *par) | ||
38637 | { | ||
38638 | - const struct xt_conntrack_mtinfo1 *info = par->matchinfo; | ||
38639 | + const struct xt_conntrack_mtinfo2 *const *info = par->matchinfo; | ||
38640 | + struct xt_match_param newpar = *par; | ||
38641 | |||
38642 | - return conntrack_mt(skb, par, info->state_mask, info->status_mask); | ||
38643 | -} | ||
38644 | - | ||
38645 | -static bool | ||
38646 | -conntrack_mt_v2(const struct sk_buff *skb, const struct xt_match_param *par) | ||
38647 | -{ | ||
38648 | - const struct xt_conntrack_mtinfo2 *info = par->matchinfo; | ||
38649 | - | ||
38650 | - return conntrack_mt(skb, par, info->state_mask, info->status_mask); | ||
38651 | + newpar.matchinfo = *info; | ||
38652 | + return conntrack_mt(skb, &newpar); | ||
38653 | } | ||
38654 | |||
38655 | static bool conntrack_mt_check(const struct xt_mtchk_param *par) | ||
38656 | @@ -216,11 +209,45 @@ static bool conntrack_mt_check(const struct xt_mtchk_param *par) | ||
38657 | return true; | ||
38658 | } | ||
38659 | |||
38660 | +static bool conntrack_mt_check_v1(const struct xt_mtchk_param *par) | ||
38661 | +{ | ||
38662 | + struct xt_conntrack_mtinfo1 *info = par->matchinfo; | ||
38663 | + struct xt_conntrack_mtinfo2 *up; | ||
38664 | + int ret = conntrack_mt_check(par); | ||
38665 | + | ||
38666 | + if (ret < 0) | ||
38667 | + return ret; | ||
38668 | + | ||
38669 | + up = kmalloc(sizeof(*up), GFP_KERNEL); | ||
38670 | + if (up == NULL) { | ||
38671 | + nf_ct_l3proto_module_put(par->family); | ||
38672 | + return -ENOMEM; | ||
38673 | + } | ||
38674 | + | ||
38675 | + /* | ||
38676 | + * The strategy here is to minimize the overhead of v1 matching, | ||
38677 | + * by prebuilding a v2 struct and putting the pointer into the | ||
38678 | + * v1 dataspace. | ||
38679 | + */ | ||
38680 | + memcpy(up, info, offsetof(typeof(*info), state_mask)); | ||
38681 | + up->state_mask = info->state_mask; | ||
38682 | + up->status_mask = info->status_mask; | ||
38683 | + *(void **)info = up; | ||
38684 | + return true; | ||
38685 | +} | ||
38686 | + | ||
38687 | static void conntrack_mt_destroy(const struct xt_mtdtor_param *par) | ||
38688 | { | ||
38689 | nf_ct_l3proto_module_put(par->family); | ||
38690 | } | ||
38691 | |||
38692 | +static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par) | ||
38693 | +{ | ||
38694 | + struct xt_conntrack_mtinfo2 **info = par->matchinfo; | ||
38695 | + kfree(*info); | ||
38696 | + conntrack_mt_destroy(par); | ||
38697 | +} | ||
38698 | + | ||
38699 | static struct xt_match conntrack_mt_reg[] __read_mostly = { | ||
38700 | { | ||
38701 | .name = "conntrack", | ||
38702 | @@ -228,8 +255,8 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { | ||
38703 | .family = NFPROTO_UNSPEC, | ||
38704 | .matchsize = sizeof(struct xt_conntrack_mtinfo1), | ||
38705 | .match = conntrack_mt_v1, | ||
38706 | - .checkentry = conntrack_mt_check, | ||
38707 | - .destroy = conntrack_mt_destroy, | ||
38708 | + .checkentry = conntrack_mt_check_v1, | ||
38709 | + .destroy = conntrack_mt_destroy_v1, | ||
38710 | .me = THIS_MODULE, | ||
38711 | }, | ||
38712 | { | ||
38713 | @@ -237,7 +264,7 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = { | ||
38714 | .revision = 2, | ||
38715 | .family = NFPROTO_UNSPEC, | ||
38716 | .matchsize = sizeof(struct xt_conntrack_mtinfo2), | ||
38717 | - .match = conntrack_mt_v2, | ||
38718 | + .match = conntrack_mt, | ||
38719 | .checkentry = conntrack_mt_check, | ||
38720 | .destroy = conntrack_mt_destroy, | ||
38721 | .me = THIS_MODULE, | ||
38722 | diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c | ||
38723 | index 850ffc0..4eb1ac9 100644 | ||
38724 | --- a/net/netrom/nr_route.c | ||
38725 | +++ b/net/netrom/nr_route.c | ||
38726 | @@ -842,13 +842,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) | ||
38727 | dptr = skb_push(skb, 1); | ||
38728 | *dptr = AX25_P_NETROM; | ||
38729 | |||
38730 | - ax25s = nr_neigh->ax25; | ||
38731 | - nr_neigh->ax25 = ax25_send_frame(skb, 256, | ||
38732 | - (ax25_address *)dev->dev_addr, | ||
38733 | - &nr_neigh->callsign, | ||
38734 | - nr_neigh->digipeat, nr_neigh->dev); | ||
38735 | - if (ax25s) | ||
38736 | + ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); | ||
38737 | + if (nr_neigh->ax25 && ax25s) { | ||
38738 | + /* We were already holding this ax25_cb */ | ||
38739 | ax25_cb_put(ax25s); | ||
38740 | + } | ||
38741 | + nr_neigh->ax25 = ax25s; | ||
38742 | |||
38743 | dev_put(dev); | ||
38744 | ret = (nr_neigh->ax25 != NULL); | ||
38745 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c | ||
38746 | index 41866eb..f2d116a 100644 | ||
38747 | --- a/net/packet/af_packet.c | ||
38748 | +++ b/net/packet/af_packet.c | ||
38749 | @@ -1028,20 +1028,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | ||
38750 | |||
38751 | status = TP_STATUS_SEND_REQUEST; | ||
38752 | err = dev_queue_xmit(skb); | ||
38753 | - if (unlikely(err > 0)) { | ||
38754 | - err = net_xmit_errno(err); | ||
38755 | - if (err && __packet_get_status(po, ph) == | ||
38756 | - TP_STATUS_AVAILABLE) { | ||
38757 | - /* skb was destructed already */ | ||
38758 | - skb = NULL; | ||
38759 | - goto out_status; | ||
38760 | - } | ||
38761 | - /* | ||
38762 | - * skb was dropped but not destructed yet; | ||
38763 | - * let's treat it like congestion or err < 0 | ||
38764 | - */ | ||
38765 | - err = 0; | ||
38766 | - } | ||
38767 | + if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0)) | ||
38768 | + goto out_xmit; | ||
38769 | packet_increment_head(&po->tx_ring); | ||
38770 | len_sum += tp_len; | ||
38771 | } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT)) | ||
38772 | @@ -1051,6 +1039,9 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) | ||
38773 | err = len_sum; | ||
38774 | goto out_put; | ||
38775 | |||
38776 | +out_xmit: | ||
38777 | + skb->destructor = sock_wfree; | ||
38778 | + atomic_dec(&po->tx_ring.pending); | ||
38779 | out_status: | ||
38780 | __packet_set_status(po, ph, status); | ||
38781 | kfree_skb(skb); | ||
38782 | diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c | ||
38783 | index 5ef5f69..bd86a63 100644 | ||
38784 | --- a/net/rose/rose_link.c | ||
38785 | +++ b/net/rose/rose_link.c | ||
38786 | @@ -101,17 +101,13 @@ static void rose_t0timer_expiry(unsigned long param) | ||
38787 | static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) | ||
38788 | { | ||
38789 | ax25_address *rose_call; | ||
38790 | - ax25_cb *ax25s; | ||
38791 | |||
38792 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) | ||
38793 | rose_call = (ax25_address *)neigh->dev->dev_addr; | ||
38794 | else | ||
38795 | rose_call = &rose_callsign; | ||
38796 | |||
38797 | - ax25s = neigh->ax25; | ||
38798 | neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); | ||
38799 | - if (ax25s) | ||
38800 | - ax25_cb_put(ax25s); | ||
38801 | |||
38802 | return (neigh->ax25 != NULL); | ||
38803 | } | ||
38804 | @@ -124,17 +120,13 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) | ||
38805 | static int rose_link_up(struct rose_neigh *neigh) | ||
38806 | { | ||
38807 | ax25_address *rose_call; | ||
38808 | - ax25_cb *ax25s; | ||
38809 | |||
38810 | if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) | ||
38811 | rose_call = (ax25_address *)neigh->dev->dev_addr; | ||
38812 | else | ||
38813 | rose_call = &rose_callsign; | ||
38814 | |||
38815 | - ax25s = neigh->ax25; | ||
38816 | neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); | ||
38817 | - if (ax25s) | ||
38818 | - ax25_cb_put(ax25s); | ||
38819 | |||
38820 | return (neigh->ax25 != NULL); | ||
38821 | } | ||
38822 | diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c | ||
38823 | index 08230fa..f3e2198 100644 | ||
38824 | --- a/net/rose/rose_route.c | ||
38825 | +++ b/net/rose/rose_route.c | ||
38826 | @@ -234,8 +234,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | ||
38827 | |||
38828 | if ((s = rose_neigh_list) == rose_neigh) { | ||
38829 | rose_neigh_list = rose_neigh->next; | ||
38830 | - if (rose_neigh->ax25) | ||
38831 | - ax25_cb_put(rose_neigh->ax25); | ||
38832 | kfree(rose_neigh->digipeat); | ||
38833 | kfree(rose_neigh); | ||
38834 | return; | ||
38835 | @@ -244,8 +242,6 @@ static void rose_remove_neigh(struct rose_neigh *rose_neigh) | ||
38836 | while (s != NULL && s->next != NULL) { | ||
38837 | if (s->next == rose_neigh) { | ||
38838 | s->next = rose_neigh->next; | ||
38839 | - if (rose_neigh->ax25) | ||
38840 | - ax25_cb_put(rose_neigh->ax25); | ||
38841 | kfree(rose_neigh->digipeat); | ||
38842 | kfree(rose_neigh); | ||
38843 | return; | ||
38844 | @@ -814,7 +810,6 @@ void rose_link_failed(ax25_cb *ax25, int reason) | ||
38845 | |||
38846 | if (rose_neigh != NULL) { | ||
38847 | rose_neigh->ax25 = NULL; | ||
38848 | - ax25_cb_put(ax25); | ||
38849 | |||
38850 | rose_del_route_by_neigh(rose_neigh); | ||
38851 | rose_kill_by_neigh(rose_neigh); | ||
38852 | diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c | ||
38853 | index 9c5a19d..fc6a43c 100644 | ||
38854 | --- a/net/sunrpc/auth_gss/auth_gss.c | ||
38855 | +++ b/net/sunrpc/auth_gss/auth_gss.c | ||
38856 | @@ -485,7 +485,7 @@ gss_refresh_upcall(struct rpc_task *task) | ||
38857 | dprintk("RPC: %5u gss_refresh_upcall for uid %u\n", task->tk_pid, | ||
38858 | cred->cr_uid); | ||
38859 | gss_msg = gss_setup_upcall(task->tk_client, gss_auth, cred); | ||
38860 | - if (PTR_ERR(gss_msg) == -EAGAIN) { | ||
38861 | + if (IS_ERR(gss_msg) == -EAGAIN) { | ||
38862 | /* XXX: warning on the first, under the assumption we | ||
38863 | * shouldn't normally hit this case on a refresh. */ | ||
38864 | warn_gssd(); | ||
38865 | @@ -644,22 +644,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | ||
38866 | p = gss_fill_context(p, end, ctx, gss_msg->auth->mech); | ||
38867 | if (IS_ERR(p)) { | ||
38868 | err = PTR_ERR(p); | ||
38869 | - switch (err) { | ||
38870 | - case -EACCES: | ||
38871 | - gss_msg->msg.errno = err; | ||
38872 | - err = mlen; | ||
38873 | - break; | ||
38874 | - case -EFAULT: | ||
38875 | - case -ENOMEM: | ||
38876 | - case -EINVAL: | ||
38877 | - case -ENOSYS: | ||
38878 | - gss_msg->msg.errno = -EAGAIN; | ||
38879 | - break; | ||
38880 | - default: | ||
38881 | - printk(KERN_CRIT "%s: bad return from " | ||
38882 | - "gss_fill_context: %ld\n", __func__, err); | ||
38883 | - BUG(); | ||
38884 | - } | ||
38885 | + gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES; | ||
38886 | goto err_release_msg; | ||
38887 | } | ||
38888 | gss_msg->ctx = gss_get_ctx(ctx); | ||
38889 | diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c | ||
38890 | index 2deb0ed..ef45eba 100644 | ||
38891 | --- a/net/sunrpc/auth_gss/gss_krb5_mech.c | ||
38892 | +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | ||
38893 | @@ -131,10 +131,8 @@ gss_import_sec_context_kerberos(const void *p, | ||
38894 | struct krb5_ctx *ctx; | ||
38895 | int tmp; | ||
38896 | |||
38897 | - if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) { | ||
38898 | - p = ERR_PTR(-ENOMEM); | ||
38899 | + if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) | ||
38900 | goto out_err; | ||
38901 | - } | ||
38902 | |||
38903 | p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); | ||
38904 | if (IS_ERR(p)) | ||
38905 | diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c | ||
38906 | index 76e4c6f..6efbb0c 100644 | ||
38907 | --- a/net/sunrpc/auth_gss/gss_mech_switch.c | ||
38908 | +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | ||
38909 | @@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize, | ||
38910 | struct gss_ctx **ctx_id) | ||
38911 | { | ||
38912 | if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL))) | ||
38913 | - return -ENOMEM; | ||
38914 | + return GSS_S_FAILURE; | ||
38915 | (*ctx_id)->mech_type = gss_mech_get(mech); | ||
38916 | |||
38917 | return mech->gm_ops | ||
38918 | diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c | ||
38919 | index 0266cca..df124f7 100644 | ||
38920 | --- a/net/sunrpc/svc_xprt.c | ||
38921 | +++ b/net/sunrpc/svc_xprt.c | ||
38922 | @@ -711,8 +711,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | ||
38923 | spin_unlock_bh(&pool->sp_lock); | ||
38924 | |||
38925 | len = 0; | ||
38926 | - if (test_bit(XPT_LISTENER, &xprt->xpt_flags) && | ||
38927 | - !test_bit(XPT_CLOSE, &xprt->xpt_flags)) { | ||
38928 | + if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { | ||
38929 | struct svc_xprt *newxpt; | ||
38930 | newxpt = xprt->xpt_ops->xpo_accept(xprt); | ||
38931 | if (newxpt) { | ||
38932 | diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c | ||
38933 | index 0d86248..0a6b7a0 100644 | ||
38934 | --- a/net/wireless/mlme.c | ||
38935 | +++ b/net/wireless/mlme.c | ||
38936 | @@ -94,18 +94,7 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len) | ||
38937 | } | ||
38938 | } | ||
38939 | |||
38940 | - /* | ||
38941 | - * We might be coming here because the driver reported | ||
38942 | - * a successful association at the same time as the | ||
38943 | - * user requested a deauth. In that case, we will have | ||
38944 | - * removed the BSS from the auth_bsses list due to the | ||
38945 | - * deauth request when the assoc response makes it. If | ||
38946 | - * the two code paths acquire the lock the other way | ||
38947 | - * around, that's just the standard situation of a | ||
38948 | - * deauth being requested while connected. | ||
38949 | - */ | ||
38950 | - if (!bss) | ||
38951 | - goto out; | ||
38952 | + WARN_ON(!bss); | ||
38953 | } else if (wdev->conn) { | ||
38954 | cfg80211_sme_failed_assoc(wdev); | ||
38955 | need_connect_result = false; | ||
38956 | diff --git a/net/wireless/reg.c b/net/wireless/reg.c | ||
38957 | index efd24a7..f256dff 100644 | ||
38958 | --- a/net/wireless/reg.c | ||
38959 | +++ b/net/wireless/reg.c | ||
38960 | @@ -1714,7 +1714,7 @@ int regulatory_hint_user(const char *alpha2) | ||
38961 | request->wiphy_idx = WIPHY_IDX_STALE; | ||
38962 | request->alpha2[0] = alpha2[0]; | ||
38963 | request->alpha2[1] = alpha2[1]; | ||
38964 | - request->initiator = NL80211_REGDOM_SET_BY_USER; | ||
38965 | + request->initiator = NL80211_REGDOM_SET_BY_USER, | ||
38966 | |||
38967 | queue_regulatory_request(request); | ||
38968 | |||
38969 | diff --git a/net/wireless/sme.c b/net/wireless/sme.c | ||
38970 | index b2930e3..9f0b280 100644 | ||
38971 | --- a/net/wireless/sme.c | ||
38972 | +++ b/net/wireless/sme.c | ||
38973 | @@ -655,7 +655,6 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie, | ||
38974 | memset(&wrqu, 0, sizeof(wrqu)); | ||
38975 | wrqu.ap_addr.sa_family = ARPHRD_ETHER; | ||
38976 | wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); | ||
38977 | - wdev->wext.connect.ssid_len = 0; | ||
38978 | #endif | ||
38979 | } | ||
38980 | |||
@@ -1,595 +1,497 @@ | |||
1 | <?xml version="1.0" encoding="utf-8" ?> | 1 | |
2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> | 2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> |
3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> | 3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> |
4 | <head> | 4 | <head> |
5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> | 5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> |
6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> | 6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> |
7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> | 7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> |
8 | <title>LITMUS RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems</title> | 8 | <title>LITMUS RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems</title> |
9 | </head> | 9 | </head> |
10 | <body> | 10 | <body> |
11 | <div class="logobox"> | 11 | <div class="logobox"> |
12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> | 12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> |
13 | <p class="authors"> | 13 | <p class="authors"> |
14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & | 14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & |
15 | Students</a>, | 15 | Students</a>, |
16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> | 16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> |
17 | </p> | 17 | </p> |
18 | 18 | ||
19 | </div> | 19 | </div> |
20 | 20 | ||
21 | <div class="nav"> | 21 | <div class="nav"> |
22 | <p> | 22 | <p> |
23 | <a href="#about">about</a> - | 23 | <a href="#about">about</a> - |
24 | <a href="#support">support</a> - | 24 | <a href="#support">support</a> - |
25 | <a href="#collaborators">collaborators</a> - | 25 | <a href="#collaborators">collaborators</a> - |
26 | <a href="#publications">publications</a> - | 26 | <a href="#publications">publications</a> - |
27 | <a href="#download">download</a> - | 27 | <a href="#download">download</a> - |
28 | <a href="#install">installation</a> - | 28 | <a href="#install">installation</a> - |
29 | <a href="#doc">documentation</a> | 29 | <a href="#doc">documentation</a> |
30 | </p> | 30 | </p> |
31 | </div> | 31 | </div> |
32 | 32 | ||
33 | <h2 id="about">About</h2> | 33 | <h2 id="about">About</h2> |
34 | <div class="box"> | 34 | <div class="box"> |
35 | <p class="nomargin"> | 35 | <p class="nomargin"> |
36 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux | 36 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux |
37 | kernel with focus on multiprocessor real-time scheduling and | 37 | kernel with focus on multiprocessor real-time scheduling and |
38 | synchronization. The Linux kernel is modified | 38 | synchronization. The Linux kernel is modified |
39 | to support the sporadic task | 39 | to support the sporadic task |
40 | model and modular scheduler plugins. Both partitioned and global scheduling | 40 | model and modular scheduler plugins. Both partitioned and global scheduling |
41 | are supported. | 41 | are supported.
42 | </p> | 42 | </p> |
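In practice, the sporadic task model described above is what user-space programs declare through liblitmus before entering real-time mode. The sketch below is only an illustration, not code from this release: the calls init_litmus(), sporadic_global(), task_mode(), and sleep_next_period(), the millisecond units, and the header name litmus.h are assumptions based on liblitmus documentation of roughly this era, so verify them against the library headers you actually download.

    #include <litmus.h>              /* assumed liblitmus header */

    #define WCET_MS   10             /* per-job worst-case execution time (assumed ms units) */
    #define PERIOD_MS 100            /* minimum separation between job releases (assumed ms units) */

    static int job(void)
    {
            /* one job's worth of work goes here */
            return 0;                /* returning nonzero stops the loop below */
    }

    int main(void)
    {
            init_litmus();                        /* assumed: initialize liblitmus state */
            sporadic_global(WCET_MS, PERIOD_MS);  /* assumed: declare sporadic parameters for a globally scheduled task */
            task_mode(LITMUS_RT_TASK);            /* switch this process from best-effort to real-time mode */

            while (!job())
                    sleep_next_period();          /* suspend until the next job release */

            task_mode(BACKGROUND_TASK);           /* return to normal Linux scheduling */
            return 0;
    }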
43 | <h3>Goals</h3> | 43 | <h3>Goals</h3> |
44 | <p class="notopmargin"> | 44 | <p class="notopmargin"> |
45 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. | 45 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. |
46 | </p> | 46 | </p> |
47 | <h3>Non-Goals</h3> | 47 | <h3>Non-Goals</h3> |
48 | <p class="notopmargin"> | 48 | <p class="notopmargin"> |
49 | LITMUS<sup>RT</sup> is not a production-quality system, and we have currently no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX-compliance is not a goal; the LITMUS<sup>RT</sup>-API offers alternate system call interfaces. While we aim to follow Linux-coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux. | 49 | LITMUS<sup>RT</sup> is not a production-quality system, and we have currently no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX-compliance is not a goal; the LITMUS<sup>RT</sup>-API offers alternate system call interfaces. While we aim to follow Linux-coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux.
50 | </p> | 50 | </p> |
51 | <h3>Current Version</h3> | 51 | <h3>Current Version</h3> |
52 | <p class="notopmargin"> | 52 | <p class="notopmargin"> |
53 | The current version of LITMUS<sup>RT</sup> is <strong>2008.3</strong> and is based on Linux 2.6.24. | 53 | The current version of LITMUS<sup>RT</sup> is <strong>2010.1</strong> and is based on Linux 2.6.32. |
54 | It was released on 09/24/2009 and includes plugins for the following | 54 | It was released on 05/12/2010 and includes plugins for the following |
55 | scheduling policies: | 55 | scheduling policies: |
56 | </p> | 56 | </p> |
57 | <ul> | 57 | <ul> |
58 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> | 58 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> |
59 | <li> Global EDF with synchronization support (GSN-EDF)</li> | 59 | <li> Global EDF with synchronization support (GSN-EDF)</li> |
60 | <li> Clustered EDF (C-EDF) </li> | 60 | <li> Clustered EDF (C-EDF) </li> |
61 | <li> PFAIR (both staggered and aligned quanta are supported)</li> | 61 | <li> PFAIR (both staggered and aligned quanta are supported)</li> |
62 | </ul> | 62 | </ul> |
63 | <p> | 63 | <p> |
64 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. | 64 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. |
65 | </p> | 65 | </p> |
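Exactly one of the plugins listed above is active at a time. According to the LITMUS<sup>RT</sup> documentation (an assumption, since this page does not spell it out), the active plugin is selected by writing its name to /proc/litmus/active_plugin as root while no real-time tasks exist; liblitmus also ships a small setsched helper for the same purpose. A minimal C sketch:

    #include <stdio.h>

    int main(void)
    {
            /* Assumed interface: /proc/litmus/active_plugin accepts a plugin name. */
            FILE *f = fopen("/proc/litmus/active_plugin", "w");
            if (!f) {
                    perror("fopen /proc/litmus/active_plugin");
                    return 1;
            }
            /* Plugin names match the list above, e.g. PSN-EDF, GSN-EDF, C-EDF, PFAIR. */
            fputs("GSN-EDF", f);
            if (fclose(f) != 0) {
                    perror("fclose");
                    return 1;
            }
            return 0;
    }

Reading the same /proc file back should report the currently active plugin (again per the documentation rather than this page).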
66 | <p>Earlier versions (2007.1 — 2007.3), which are based on Linux 2.6.20 | 66 | <p>Earlier versions (2008.1 &mdash; 2008.3), based on Linux 2.6.24, and (2007.1 — 2007.3), based on Linux 2.6.20,
67 | and support additional scheduling policies, are discussed | 67 | which support additional scheduling policies, are discussed on separate pages dedicated to the <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008</a> and <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007</a> series, respectively.
68 | on a separate page dedicated to the <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | 68 | </p> |
69 | </p> | 69 | <p class="nobottommargin"> |
70 | <p class="nobottommargin"> | 70 | The first version of LITMUS<sup>RT</sup>, which was implemented in Spring 2006, |
71 | The first version of LITMUS<sup>RT</sup>, which was implemented in Spring 2006, | 71 | is based on Linux 2.6.9. |
72 | is based on Linux 2.6.9. | 72 | </p> |
73 | </p> | 73 | <h3>Development Plans</h3>
74 | <h3>Development Plans</h3> | 74 | <p class="nomargin">There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details.</p>
75 | <p class="nomargin"> | 75 | </div> |
76 | Re-basing to the then-current Linux kernel version is scheduled for Spring 2010. There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details. | 76 | <h2 id="support">Support</h2> |
77 | </p> | 77 | <div class="box"> |
78 | </div> | 78 | <p class="nomargin"> |
79 | 79 | The LITMUS<sup>RT</sup> development effort is being supported by grants from, SUN Corp., | |
80 | <h2 id="support">Support</h2> | 80 | Intel Corp., IBM Corp., The National Science Foundation (grant CCR 0615197), and The U.S. |
81 | <div class="box"> | 81 | Army Research Office (grant W911NF-06-1-0425). |
82 | <p class="nomargin"> | 82 | </p> |
83 | The LITMUS<sup>RT</sup> development effort is being supported by grants from, SUN Corp., | 83 | </div> |
84 | Intel Corp., IBM Corp., The National Science Foundation (grant CCR 0615197), and The U.S. | 84 | |
85 | Army Research Office (grant W911NF-06-1-0425). | 85 | <h2 id="collaborators">Collaborators</h2> |
86 | </p> | 86 | <div class="box"> |
87 | </div> | 87 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a |
88 | 88 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. | |
89 | <h2 id="collaborators">Collaborators</h2> | 89 | </p> |
90 | <div class="box"> | 90 | <p> |
91 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a | 91 | The implementation effort is carried out by students of the |
92 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. | 92 | <a href="http://www.cs.unc.edu/~anderson/real-time/">Real-Time Systems |
93 | </p> | 93 | Group</a> at the <a href="http://www.unc.edu">University of North Carolina |
94 | <p> | 94 | at Chapel Hill</a>: |
95 | The implementation effort is carried out by students of the | 95 | </p> |
96 | <a href="http://www.cs.unc.edu/~anderson/real-time/">Real-Time Systems | 96 | <ul> |
97 | Group</a> at the <a href="http://www.unc.edu">University of North Carolina | 97 | <li> |
98 | at Chapel Hill</a>: | 98 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (current maintainer) |
99 | </p> | 99 | </li> |
100 | <ul> | 100 | <li> <a href="http://www.sprg.uniroma2.it/home/bastoni/">Andrea Bastoni</a> (Visiting researcher from University of Rome "Tor Vergata") |
101 | <li> | 101 | </li> |
102 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (current maintainer) | 102 | </ul> |
103 | </li> | 103 | <p class="nobottommargin"> |
104 | <li> <a href="http://www.cs.unc.edu/~jmc/">John M. Calandrino</a> <em>(graduated July 2009)</em> | 104 | (Additional collaborators for <a href="litmus2008.html#collaborators">LITMUS<sup>RT</sup> 2008 series</a> and <a href="litmus2007.html#collaborators">LITMUS<sup>RT</sup> 2007 series</a> contributed to earlier versions of LITMUS<sup>RT</sup>.) |
105 | </li> | 105 | </p> |
106 | </ul> | 106 | </div> |
107 | <p class="nobottommargin"> | 107 | |
108 | (<a href="litmus2007.html#collaborators">Additional collaborators</a> contributed to earlier versions of LITMUS<sup>RT</sup>.) | 108 | |
109 | </p> | 109 | <h2 id="publications">Publications</h2> |
110 | </div> | 110 | <div class="box"> |
111 | 111 | <!-- | |
112 | 112 | <ol class="nomargin"> | |
113 | <h2 id="publications">Publications</h2> | 113 | <li><p> |
114 | <div class="box"> | 114 | B. Brandenburg and J. Anderson, |
115 | 115 | “On the Implementation of Global Real-Time | |
116 | <ol class="nomargin"> | 116 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. |
117 | <li><p> | 117 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.ps">Postscript</a>. |
118 | B. Brandenburg and J. Anderson, | 118 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. |
119 | “On the Implementation of Global Real-Time | 119 | Longer version with all graphs: |
120 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. | 120 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.ps">Postscript</a>. |
121 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.ps">Postscript</a>. | 121 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. |
122 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. | 122 | </p> |
123 | Longer version with all graphs: | 123 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). |
124 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.ps">Postscript</a>. | 124 | </p> |
125 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. | 125 | <ul> |
126 | </p> | 126 | <li> |
127 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). | 127 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> |
128 | </p> | 128 | </li> |
129 | <ul> | 129 | </ul> |
130 | <li> | 130 | |
131 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> | 131 | </li> |
132 | </li> | 132 | <li> |
133 | </ul> | 133 | <p> |
134 | 134 | B. Brandenburg and J. Anderson | |
135 | </li> | 135 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, |
136 | <li> | 136 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. |
137 | <p> | 137 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. |
138 | B. Brandenburg and J. Anderson | 138 | Long version with blocking terms: |
139 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, | 139 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. |
140 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. | 140 | </p> |
141 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. | 141 | </li> |
142 | Long version with blocking terms: | 142 | |
143 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. | 143 | <li> |
144 | </p> | 144 | <p> |
145 | </li> | 145 | J. Calandrino and J. Anderson |
146 | 146 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, | |
147 | <li> | 147 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. |
148 | <p> | 148 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. |
149 | J. Calandrino and J. Anderson | 149 | </p> |
150 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, | 150 | </li> |
151 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. | 151 | |
152 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. | 152 | <li> |
153 | </p> | 153 | <p> |
154 | </li> | 154 | M. Mollison, B. Brandenburg, and J. Anderson |
155 | 155 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, | |
156 | <li> | 156 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. |
157 | <p> | 157 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. |
158 | M. Mollison, B. Brandenburg, and J. Anderson | 158 | </p> |
159 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, | 159 | </li> |
160 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. | 160 | |
161 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. | 161 | <li> |
162 | </p> | 162 | <p> |
163 | </li> | 163 | B. Brandenburg and J. Anderson, |
164 | 164 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, | |
165 | <li> | 165 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. |
166 | <p> | 166 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. |
167 | B. Brandenburg and J. Anderson, | 167 | </p> |
168 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, | 168 | </li> |
169 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. | 169 | |
170 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. | 170 | <li> |
171 | </p> | 171 | <p> |
172 | </li> | 172 | B. Brandenburg, J. Calandrino, and J. Anderson, |
173 | 173 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, | |
174 | <li> | 174 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, |
175 | <p> | 175 | pp. 157-169, December 2008. |
176 | B. Brandenburg, J. Calandrino, and J. Anderson, | 176 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. |
177 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, | 177 | </p> |
178 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, | 178 | </li> |
179 | pp. 157-169, December 2008. | 179 | |
180 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. | 180 | <li> |
181 | </p> | 181 | <p> |
182 | </li> | 182 | B. Brandenburg and J. Anderson, |
183 | 183 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, | |
184 | <li> | 184 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, |
185 | <p> | 185 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. |
186 | B. Brandenburg and J. Anderson, | 186 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. |
187 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, | 187 | </p> |
188 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, | 188 | <p><strong>Note:</strong> The work described in this paper took part in a branch that is currently not part of |
189 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. | 189 | the main distribution. For reference, we provide the branch as a separate download: |
190 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. | 190 | </p> |
191 | </p> | 191 | <ul> |
192 | <p><strong>Note:</strong> The work described in this paper took part in a branch that is currently not part of | 192 | <li> |
193 | the main distribution. For reference, we provide the branch as a separate download: | 193 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> |
194 | </p> | 194 | </li> |
195 | <ul> | 195 | <li> |
196 | <li> | 196 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> |
197 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> | 197 | </li> |
198 | </li> | 198 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a> |
199 | <li> | 199 | </li> |
200 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> | 200 | </ul> |
201 | </li> | 201 | <p>Please don't use this version for active development. If you are interested in this work, it would be best |
202 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a> | 202 | to first port the desired features to a current version of LTIMUS<sup>RT</sup> and merge them into the main distribution. |
203 | </li> | 203 | </p> |
204 | </ul> | 204 | |
205 | <p>Please don't use this version for active development. If you are interested in this work, it would be best | 205 | </li> |
206 | to first port the desired features to a current version of LTIMUS<sup>RT</sup> and merge them into the main distribution. | 206 | |
207 | </p> | 207 | <li> |
208 | 208 | <p> | |
209 | </li> | 209 | A. Block, B. Brandenburg, J. Anderson, |
210 | 210 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, | |
211 | <li> | 211 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. |
212 | <p> | 212 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. |
213 | A. Block, B. Brandenburg, J. Anderson, | 213 | </p> |
214 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, | 214 | </li> |
215 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. | 215 | |
216 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. | 216 | <li> |
217 | </p> | 217 | <p> |
218 | </li> | 218 | B. Brandenburg, J. Calandrino, A. Block, |
219 | 219 | H. Leontyev, and J. Anderson, “Real-Time Synchronization | |
220 | <li> | 220 | on Multiprocessors: To Block or Not to Block, to Suspend or |
221 | <p> | 221 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded |
222 | B. Brandenburg, J. Calandrino, A. Block, | 222 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. |
223 | H. Leontyev, and J. Anderson, “Real-Time Synchronization | 223 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. |
224 | on Multiprocessors: To Block or Not to Block, to Suspend or | 224 | </p> |
225 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded | 225 | <p> |
226 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | 226 | Extended version, including all graphs: |
227 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. | 227 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.ps">Postscript</a>, |
228 | </p> | 228 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. |
229 | <p> | 229 | </p> |
230 | Extended version, including all graphs: | 230 | </li> |
231 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.ps">Postscript</a>, | 231 | |
232 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. | 232 | <li> |
233 | </p> | 233 | <p> |
234 | </li> | 234 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, |
235 | 235 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th | |
236 | <li> | 236 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. |
237 | <p> | 237 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.ps">Postscript</a>. |
238 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, | 238 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. |
239 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th | 239 | </p> |
240 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. | 240 | </li> |
241 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.ps">Postscript</a>. | 241 | |
242 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. | 242 | <li> |
243 | </p> | 243 | <p> |
244 | </li> | 244 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks |
245 | 245 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro | |
246 | <li> | 246 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. |
247 | <p> | 247 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.ps">Postscript</a>, |
248 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks | 248 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. |
249 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro | 249 | </p> |
250 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. | 250 | </li> |
251 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.ps">Postscript</a>, | 251 | |
252 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. | 252 | |
253 | </p> | 253 | <li> |
254 | </li> | 254 | <p> |
255 | 255 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, | |
256 | 256 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time | |
257 | <li> | 257 | Multiprocessor Schedulers ", <cite>Proceedings of the 27th IEEE Real-Time Systems |
258 | <p> | 258 | Symposium</cite>, pp. 111-123, December 2006. |
259 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, | 259 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.ps">Postscript</a>, |
260 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time | 260 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. |
261 | Multiprocessor Schedulers ", <cite>Proceedings of the 27th IEEE Real-Time Systems | 261 | </p> |
262 | Symposium</cite>, pp. 111-123, December 2006. | 262 | </li> |
263 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.ps">Postscript</a>, | 263 | |
264 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. | 264 | </ol> --> |
265 | </p> | 265 | </div> |
266 | </li> | 266 | |
267 | 267 | <h2 id="download">Download</h2> | |
268 | </ol> | 268 | <div class="box"> |
269 | </div> | 269 | <p class="notopmargin"> |
270 | 270 | The source code of LITMUS<sup>RT</sup> is made available as open source | |
271 | <h2 id="download">Download</h2> | 271 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU |
272 | <div class="box"> | 272 | General Public License (GPL)</a>. |
273 | <p class="notopmargin"> | 273 | </p> |
274 | The source code of LITMUS<sup>RT</sup> is made available as open source | 274 | <p> |
275 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU | 275 | The current release of LITMUS<sup>RT</sup> is 2010.1. |
276 | General Public License (GPL)</a>. | 276 | It consists of our Linux kernel modifications in the form of |
277 | </p> | 277 | a patch against Linux 2.6.32 and |
278 | <p> | 278 | |
279 | The current release of LITMUS<sup>RT</sup> is 2008.3. | 279 | <span class="src">liblitmus</span>, the user-space API for real-time |
280 | It consists of our Linux kernel modifications in the form of | 280 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools |
281 | a patch against Linux 2.6.24 and | 281 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feather-trace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch).
282 | 282 | </p> | |
283 | <span class="src">liblitmus</span>, the user-space API for real-time | 283 | |
284 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools | 284 | <h3 class="relname">LITMUS<sup>RT</sup> 2010.1</h3> |
285 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feather-trace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch). | 285 | <div class="release"> |
286 | </p> | 286 | <p> |
287 | 287 | Based on Linux 2.6.32. Released in May 2010. | |
288 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.3</h3> | 288 | |
289 | <div class="release"> | 289 | </p> |
290 | <p> | 290 | <h4>Files:</h4> |
291 | Based on Linux 2.6.24. Released in September 2009. | 291 | <ul> |
292 | 292 | <li> | |
293 | </p> | 293 | <a href="download/2010.1/litmus-rt-2010.1.patch">litmus-rt-2010.1.patch</a> |
294 | <h4>Files:</h4> | 294 | </li> |
295 | <ul> | 295 | <li> |
296 | <li> | 296 | <a href="download/2010.1/liblitmus-2010.1.tgz">liblitmus-2010.1.tgz</a> |
297 | <a href="download/2008.3/litmus-rt-2008.3.patch">litmus-rt-2008.3.patch</a> | 297 | </li> |
298 | </li> | 298 | |
299 | <li> | 299 | <li><a href="download/2010.1/SHA256SUMS">SHA256 check sums</a> |
300 | <a href="download/2008.3/liblitmus-2008.3.tgz">liblitmus-2008.3.tgz</a> | 300 | </li> |
301 | </li> | 301 | </ul> |
302 | <li> | 302 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.3):</h4> TO BE UPDATED |
303 | <a href="download/2008.3/ft_tools-2008.3.tgz">ft_tools-2008.3.tgz</a> | 303 | <ul> |
304 | </li> | 304 | <li> |
305 | 305 | Changed codebase from Linux 2.6.24 to Linux 2.6.32 | |
306 | <li><a href="download/2008.3/SHA256SUMS">SHA256 check sums</a> | 306 | </li> |
307 | </li> | 307 | <li>Several bugfixes.</li> |
308 | </ul> | 308 | </div> |
309 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.2):</h4> | 309 | |
310 | <ul> | 310 | <p> |
311 | <li> | 311 | Please note that the current implementation is a <em>prototype</em> with |
312 | <code>sys_null_call()</code>, a dummy system call that simplifies determining system call overheads. | 312 | certain limitations. Most notably, it is not secure in a multiuser context, |
313 | </li> | 313 | <em>i.e.</em>, real-time system calls do not require superuser |
314 | <li> | 314 | privileges. |
315 | Support for starting timers on remote CPUs via <code>hrtimer_start_on()</code>. | 315 | </p> |
316 | </li> | 316 | |
317 | <li> | 317 | <p class="nobottommargin"> |
318 | Support for dedicated release handling in GSN-EDF and a corresponding <code>/proc</code> interface. | 318 | |
319 | </li> | 319 | Older releases: <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008 series</a>, <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. |
320 | <li> | 320 | </p> |
321 | Support for IPI latency tracing. | 321 | |
322 | </li> | 322 | </div> |
323 | <li>Several bugfixes.</li> | 323 | |
324 | <li>Switched to <a href="http://www.scons.org/">scons</a> as build system in libraries.</li> | 324 | |
325 | <li>Support for cross compiling the libraries on x86-64 systems to i386 binaries (specify <code>ARCH=i386</code> in your environment).</li> | 325 | |
326 | </ul> | 326 | <h2 id="install">Installation</h2> |
327 | <p> | 327 | <div class="box"> |
328 | Please consult the <a href="doc/changes.html">Change Log</a> for further details. | 328 | <p class="notopmargin"> |
329 | </p> | 329 | The current release of LITMUS<sup>RT</sup> consists of an |
330 | </div> | 330 | extension of the Linux kernel that adds support for the sporadic task |
331 | 331 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | |
332 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.2</h3> | 332 | well as a user-space library that provides the LITMUS<sup>RT</sup> |
333 | <div class="release"> | 333 | real-time API. Note that the current implementation only works on the |
334 | <p> | 334 | Intel x86-32 and sparc64 architectures. |
335 | Based on Linux 2.6.24. Released in December 2008. | 335 | </p> |
336 | 336 | <h3>Patching the Kernel</h3> | |
337 | </p> | 337 | <p class="notopmargin"> |
338 | <h4>Files:</h4> | 338 | The extension to the Linux kernel is released as a patch against Linux |
339 | <ul> | 339 | 2.6.32. To install the LITMUS<sup>RT</sup> kernel, first <a |
340 | <li> | 340 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.32.tar.bz2">download the Linux |
341 | <a href="download/2008.2/litmus-rt-2008.2.patch">litmus-rt-2008.2.patch</a> | 341 | kernel 2.6.32</a> and untar it in a directory of your choice (hereafter |
342 | </li> | 342 | referred to as <span class="src">$DIR</span>). Second, apply the |
343 | <li> | 343 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) |
344 | <a href="download/2008.2/liblitmus-2008.2.tgz">liblitmus-2008.2.tgz</a> | 344 | and configure, compile, and install the kernel as usual. The patch is <span |
345 | </li> | 345 | class="src">-p1</span> applicable. |
346 | <li> | 346 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and |
347 | <a href="download/2008.2/ft_tools-2008.2.tgz">ft_tools-2008.2.tgz</a> | 347 | compiled with the following commands: |
348 | </li> | 348 | </p> |
349 | 349 | <pre class="shell"> | |
350 | <li><a href="download/2008.2/SHA256SUMS">SHA256 check sums</a> | 350 | cd $DIR |
351 | </li> | 351 | # get Linux 2.6.32 |
352 | </ul> | 352 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.32.tar.bz2 |
353 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.1):</h4> | 353 | tar xjf linux-2.6.32.tar.bz2 |
354 | <ul> | 354 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.1/litmus-rt-2010.1.patch |
355 | <li>PFAIR implementation can now recover from missed tick interrupts. | 355 | mv linux-2.6.32 litmus2010 |
356 | </li> | 356 | # apply the LITMUS RT patch |
357 | <li>A bug in the PFAIR prioritization function was corrected. | 357 | cd litmus2010 |
358 | </li> | 358 | patch -p1 < ../litmus-rt-2010.1.patch |
359 | <li>Support for synchronous task system releases in the EDF-based schedulers was fixed. | 359 | # create a working kernel configuration |
360 | </li> | 360 | # - select HZ=1000 |
361 | <li><span class="src">sched_trace()</span> support was re-implemented based on Feather-Trace. | 361 | # - enable in-kernel preemptions |
362 | </li> | 362 | # - disable NO_HZ |
363 | <li>Added the tool <span class="src">showst</span> to liblitmus, which can convert <span class="src">sched_trace()</span> binary data to a human-readable format. | 363 | # - don't use power management options like frequency scaling |
364 | </li> | 364 | # - disable support for group scheduling |
365 | <li> | 365 | make menuconfig |
366 | Assorted bug fixes. | 366 | # compile the kernel |
367 | </li> | 367 | make bzImage |
368 | </ul> | 368 | make modules |
369 | </div> | 369 | # proceed to install kernel, build initrd, etc. |
370 | 370 | ... | |
371 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.1</h3> | 371 | </pre> |
372 | <div class="release"> | 372 | <p> |
373 | <p> | 373 | When configuring the kernel, note that there is a menu (at the very end of the list) |
374 | Based on Linux 2.6.24. Released in July 2008. | 374 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, we provide sample <a href="download/2010.1/32bit-config">32-bit </a> and <a href="download/2010.1/64bit-config">64-bit </a> configurations that are known to work under QEMU</a>. |
375 | </p> | 375 | </p> |
376 | <h4>Files:</h4> | 376 | |
377 | <ul> | 377 | <h3>Libraries</h3> |
378 | <li> | 378 | <p class="notopmargin"> |
379 | <a href="download/2008.1/litmus-rt-2008.1.patch">litmus-rt-2008.1.patch</a> | 379 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, |
380 | </li> | 380 | depends on the LITMUS<sup>RT</sup> kernel kernel and provides its own build system (based on <a href="http://www.scons.org/">scons</a>). |
381 | <li> | 381 | In order to compile <span class="src">liblitmus</span>, you need to adjust the |
382 | <a href="download/2008.1/liblitmus-2008.1.tgz">liblitmus-2008.1.tgz</a> | 382 | variable <span class="src">LITMUS_KERNEL</span> in the <span class="src">SConstruct</span> file to point to your |
383 | </li> | 383 | copy of the kernel. |
384 | <li><a href="download/2008.1/SHA256SUMS">SHA256 check sums</a> | 384 | </p> |
385 | </li> | 385 | <pre class="shell"> |
386 | </ul> | 386 | cd $DIR |
387 | 387 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.1/liblitmus-2010.1.tgz | |
388 | <h4>Major changes (since LITMUS<sup>RT</sup> 2007.3):</h4> | 388 | tar xzf liblitmus-2010.1.tgz |
389 | <ul> | 389 | cd liblitmus |
390 | <li>LITMUS<sup>RT</sup> was ported to Linux 2.6.24. | 390 | # change LITMUS_KERNEL in SConstruct to point to the kernel source |
391 | </li> | 391 | scons |
392 | <li>LITMUS<sup>RT</sup> was ported to <span style="src">sparc64</span>. | 392 | </pre> |
393 | </li> | 393 | <p class="nobottommargin"> |
394 | <li>LITMUS<sup>RT</sup> is now a proper scheduling class (<span class="src">SCHED_LITMUS</span>). | 394 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> |
395 | </li> | 395 | real-time API as provided by <span class="src">liblitmus</span>. |
396 | <li> | 396 | </p> |
397 | LITMUS<sup>RT</sup> queues are now based on mergeable heaps. | 397 | |
398 | </li> | 398 | </div> |
399 | <li>Support for multi-threaded real-time tasks. | 399 | |
400 | </li> | 400 | |
401 | <li>Scheduler plugins can be selected at runtime; no reboot required. | 401 | <h2 id="doc">Documentation</h2> |
402 | </li> | 402 | <div class="box"> |
403 | <li> | 403 | |
404 | Many bug fixes. | 404 | <p class="notopmargin"> |
405 | </li> | 405 | Unfortunately, most of the documentation has yet to be written. To get an overview of |
406 | </ul> | 406 | the architecture of the kernel extension, we recommend reading the paper |
407 | </div> | 407 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: |
408 | 408 | A Status Report”</a>. | |
409 | <p> | 409 | </p> |
410 | Please note that the current implementation is a <em>prototype</em> with | 410 | <h3>Real-Time Scheduling Policies</h3> |
411 | certain limitations. Most notably, it is not secure in a multiuser context, | 411 | <p class="qa"> |
412 | <em>i.e.</em>, real-time system calls do not require superuser | 412 | The kernel contains the following real-time scheduling policy implementations: |
413 | privileges. | 413 | </p> |
414 | </p> | 414 | <ul> |
415 | 415 | <li> | |
416 | <p class="nobottommargin"> | 416 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, |
417 | Older releases: <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | 417 | </li> |
418 | </p> | 418 | <li> |
419 | 419 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | |
420 | </div> | 420 | FMLP, |
421 | 421 | </li> | |
422 | 422 | <li> | |
423 | 423 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | |
424 | <h2 id="install">Installation</h2> | 424 | FMLP, |
425 | <div class="box"> | 425 | </li> |
426 | <p class="notopmargin"> | 426 | <li> |
427 | The current release of LITMUS<sup>RT</sup> consists of an | 427 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and |
428 | extension of the Linux kernel that adds support for the sporadic task | 428 | </li> |
429 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | 429 | <li> |
430 | well as a user-space library that provides the LITMUS<sup>RT</sup> | 430 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. |
431 | real-time API. Note that the current implementation only works on the | 431 | </li> |
432 | Intel x86-32 and sparc64 architectures. | 432 | </ul> |
433 | </p> | 433 | <p> |
434 | <h3>Patching the Kernel</h3> | 434 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. |
435 | <p class="notopmargin"> | 435 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display |
436 | The extension to the Linux kernel is released as a patch against Linux | 436 | the name of the currently active policy. |
437 | 2.6.24. To install the LITMUS<sup>RT</sup> kernel, first <a | 437 | </p> |
438 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2">download the Linux | 438 | <h3>Changing the Active Policy</h3> |
439 | kernel 2.6.24</a> and untar it in a directory of your choice (hereafter | 439 | <p class="qa"> |
440 | referred to as <span class="src">$DIR</span>). Second, apply the | 440 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) |
441 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) | 441 | to select a new plugin at run time. |
442 | and configure, compile, and install the kernel as usual. The patch is <span | 442 | </p> |
443 | class="src">-p1</span> applicable. | 443 | <div class="screenshot"> |
444 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and | 444 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> |
445 | compiled with the following commands: | 445 | </div> |
446 | </p> | 446 | <p> |
447 | <pre class="shell"> | 447 | Only root can change the active policy, and only when there are no real-time tasks present. |
448 | cd $DIR | 448 | </p> |
449 | # get Linux 2.6.24 | 449 | <p> |
450 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 | 450 | If you do not have the <span class="src">dialog</span> utility installed, then you can still used <span class="src">setsched</span> by passing the desired scheduling policy as a commandline parameter, <em>e.g.</em> type <span class="src"> setsched PFAIR </span> to activate the PFAIR plugin. |
451 | tar xjf linux-2.6.24.tar.bz2 | 451 | </p> |
452 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.3/litmus-rt-2008.3.patch | 452 | <h3>Writing Real-Time Tasks</h3> |
453 | mv linux-2.6.24 litmus2008 | 453 | <p class="qa"> |
454 | # apply the LITMUS RT patch | 454 | The user space library that provides the LITMUS<sup>RT</sup> API, |
455 | cd litmus2008 | 455 | <span class="src">liblitmus</span>, contains two example real-time tasks |
456 | patch -p1 < ../litmus-rt-2008.3.patch | 456 | (<span class="src">base_task.c</span> and |
457 | # create a working kernel configuration | 457 | <span class="src">base_mt_task.c</span>) |
458 | # - select HZ=1000 | 458 | that both illustrate how to use the API and provide a skeleton for real-time |
459 | # - enable in-kernel preemptions | 459 | task development. To get started with development, please take a look these example |
460 | # - disable NO_HZ | 460 | programs. |
461 | # - don't use power management options like frequency scaling | 461 | </p> |
462 | # - disable support for group scheduling | 462 | <h3>Tracing Overheads and Scheduling Decisions</h3> |
463 | make menuconfig | 463 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. |
464 | # compile the kernel | 464 | </p> |
465 | make bzImage | 465 | <p class="nobottommargin"> |
466 | make modules | 466 | Please contact <span class="src">bbb[AT]cs.unc.edu</span> if you have any |
467 | # proceed to install kernel, build initrd, etc. | 467 | questions. |
468 | ... | 468 | </p> |
469 | </pre> | 469 | |
470 | <p> | 470 | |
471 | When configuring the kernel, note that there is a menu (at the very end of the list) | 471 | </div> |
472 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, <a href="download/2008.3/qemu-config">we provide a configuration that is known to work under QEMU</a>. | 472 | |
473 | </p> | 473 | <h2 id="credits">Credits</h2> |
474 | 474 | <div class="box"> | |
475 | <h3>Libraries</h3> | 475 | <div style="float: right;"> |
476 | <p class="notopmargin"> | 476 | <a href="http://validator.w3.org/check?uri=referer"><img |
477 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, | 477 | src="http://www.w3.org/Icons/valid-xhtml10" |
478 | depends on the LITMUS<sup>RT</sup> kernel kernel and provides its own build system (based on <a href="http://www.scons.org/">scons</a>). | 478 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> |
479 | In order to compile <span class="src">liblitmus</span>, you need to adjust the | 479 | </div> |
480 | variable <span class="src">LITMUS_KERNEL</span> in the <span class="src">SConstruct</span> file to point to your | 480 | |
481 | copy of the kernel. | 481 | <p class="nomargin"> |
482 | </p> | 482 | Linux is a registered trademark of Linus Torvalds. <br /> The |
483 | <pre class="shell"> | 483 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> |
484 | cd $DIR | 484 | Web design by Björn Brandenburg. |
485 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.3/liblitmus-2008.3.tgz | 485 | </p> |
486 | tar xzf liblitmus-2008.3.tgz | 486 | |
487 | cd liblitmus | 487 | |
488 | # change LITMUS_KERNEL in SConstruct to point to the kernel source | 488 | </div> |
489 | scons | 489 | |
490 | </pre> | 490 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> |
491 | <p class="nobottommargin"> | 491 | </script> |
492 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> | 492 | <script type="text/javascript"> |
493 | real-time API as provided by <span class="src">liblitmus</span>. | 493 | _uacct = "UA-3184628-1"; |
494 | </p> | 494 | urchinTracker(); |
495 | 495 | </script> | |
496 | </div> | 496 | </body> |
497 | 497 | </html> | |
498 | |||
499 | <h2 id="doc">Documentation</h2> | ||
500 | <div class="box"> | ||
501 | |||
502 | <p class="notopmargin"> | ||
503 | Unfortunately, most of the documentation has yet to be written. To get an overview of | ||
504 | the architecture of the kernel extension, we recommend reading the paper | ||
505 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: | ||
506 | A Status Report”</a>. | ||
507 | </p> | ||
508 | <h3>Real-Time Scheduling Policies</h3> | ||
509 | <p class="qa"> | ||
510 | The kernel contains the following real-time scheduling policy implementations: | ||
511 | </p> | ||
512 | <ul> | ||
513 | <li> | ||
514 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, | ||
515 | </li> | ||
516 | <li> | ||
517 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | ||
518 | FMLP, | ||
519 | </li> | ||
520 | <li> | ||
521 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | ||
522 | FMLP, | ||
523 | </li> | ||
524 | <li> | ||
525 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and | ||
526 | </li> | ||
527 | <li> | ||
528 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. | ||
529 | </li> | ||
530 | </ul> | ||
531 | <p> | ||
532 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. | ||
533 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display | ||
534 | the name of the currently active policy. | ||
535 | </p> | ||
536 | <h3>Changing the Active Policy</h3> | ||
537 | <p class="qa"> | ||
538 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) | ||
539 | to select a new plugin at run time. | ||
540 | </p> | ||
541 | <div class="screenshot"> | ||
542 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> | ||
543 | </div> | ||
544 | <p> | ||
545 | Only root can change the active policy, and only when there are no real-time tasks present. | ||
546 | </p> | ||
547 | <p> | ||
548 | If you do not have the <span class="src">dialog</span> utility installed, then you can still use <span class="src">setsched</span> by passing the desired scheduling policy as a command-line parameter, <em>e.g.</em>, type <span class="src">setsched PFAIR</span> to activate the PFAIR plugin. | ||
549 | </p> | ||
550 | <h3>Writing Real-Time Tasks</h3> | ||
551 | <p class="qa"> | ||
552 | The user space library that provides the LITMUS<sup>RT</sup> API, | ||
553 | <span class="src">liblitmus</span>, contains two example real-time tasks | ||
554 | (<span class="src">base_task.c</span> and | ||
555 | <span class="src">base_mt_task.c</span>) | ||
556 | that both illustrate how to use the API and provide a skeleton for real-time | ||
557 | task development. To get started with development, please take a look at these example | ||
558 | programs. | ||
559 | </p> | ||
560 | <h3>Tracing Overheads and Scheduling Decisions</h3> | ||
561 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. | ||
562 | </p> | ||
563 | <p class="nobottommargin"> | ||
564 | Please contact <span class="src">bbb[AT]cs.unc.edu</span> if you have any | ||
565 | questions. | ||
566 | </p> | ||
567 | |||
568 | |||
569 | </div> | ||
570 | |||
571 | <h2 id="credits">Credits</h2> | ||
572 | <div class="box"> | ||
573 | <div style="float: right;"> | ||
574 | <a href="http://validator.w3.org/check?uri=referer"><img | ||
575 | src="http://www.w3.org/Icons/valid-xhtml10" | ||
576 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> | ||
577 | </div> | ||
578 | |||
579 | <p class="nomargin"> | ||
580 | Linux is a registered trademark of Linus Torvalds. <br /> The | ||
581 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> | ||
582 | Web design by Björn Brandenburg. | ||
583 | </p> | ||
584 | |||
585 | |||
586 | </div> | ||
587 | |||
588 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> | ||
589 | </script> | ||
590 | <script type="text/javascript"> | ||
591 | _uacct = "UA-3184628-1"; | ||
592 | urchinTracker(); | ||
593 | </script> | ||
594 | </body> | ||
595 | </html> | ||
diff --git a/litmus2008.html b/litmus2008.html new file mode 100644 index 0000000..14b908a --- /dev/null +++ b/litmus2008.html | |||
@@ -0,0 +1,605 @@ | |||
1 | <?xml version="1.0" encoding="utf-8" ?> | ||
2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> | ||
3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> | ||
4 | <head> | ||
5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> | ||
6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> | ||
7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> | ||
8 | <title>LITMUS RT 2008</title> | ||
9 | </head> | ||
10 | <body> | ||
11 | <div class="logobox"> | ||
12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> | ||
13 | <p class="authors"> | ||
14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & | ||
15 | Students</a>, | ||
16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> | ||
17 | </p> | ||
18 | |||
19 | </div> | ||
20 | |||
21 | <div class="alertbox"> | ||
22 | <p class="nomargin"> | ||
23 | <em><b>NOTE:</b> This web page discusses an older version of | ||
24 | LITMUS<sup>RT</sup>. Please use the | ||
25 | <a href="index.html">current version</a> unless you | ||
26 | have specific interest in the 2008 series. | ||
27 | </em> | ||
28 | </p> | ||
29 | </div> | ||
30 | |||
31 | <div class="nav"> | ||
32 | <p> | ||
33 | <a href="#about">about</a> - | ||
34 | <a href="#support">support</a> - | ||
35 | <a href="#collaborators">collaborators</a> - | ||
36 | <a href="#publications">publications</a> - | ||
37 | <a href="#download">download</a> - | ||
38 | <a href="#install">installation</a> - | ||
39 | <a href="#doc">documentation</a> | ||
40 | </p> | ||
41 | </div> | ||
42 | |||
43 | <h2 id="about">About</h2> | ||
44 | <div class="box"> | ||
45 | <p class="nomargin"> | ||
46 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux | ||
47 | kernel with a focus on multiprocessor real-time scheduling and | ||
48 | synchronization. The Linux kernel is modified | ||
49 | to support the sporadic task | ||
50 | model and modular scheduler plugins. Both partitioned and global scheduling | ||
51 | are supported. | ||
52 | </p> | ||
53 | <h3>Goals</h3> | ||
54 | <p class="notopmargin"> | ||
55 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. | ||
56 | </p> | ||
57 | <h3>Non-Goals</h3> | ||
58 | <p class="notopmargin"> | ||
59 | LITMUS<sup>RT</sup> is not a production-quality system, and we currently have no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX-compliance is not a goal; the LITMUS<sup>RT</sup>-API offers alternate system call interfaces. While we aim to follow Linux-coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux. | ||
60 | </p> | ||
61 | <h3>Current Version</h3> | ||
62 | <p class="notopmargin"> | ||
63 | The current version of LITMUS<sup>RT</sup> is <strong>2008.3</strong> and is based on Linux 2.6.24. | ||
64 | It was released on 09/24/2009 and includes plugins for the following | ||
65 | scheduling policies: | ||
66 | </p> | ||
67 | <ul> | ||
68 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> | ||
69 | <li> Global EDF with synchronization support (GSN-EDF)</li> | ||
70 | <li> Clustered EDF (C-EDF) </li> | ||
71 | <li> PFAIR (both staggered and aligned quanta are supported)</li> | ||
72 | </ul> | ||
73 | <p> | ||
74 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. | ||
75 | </p> | ||
76 | <p>Earlier versions (2007.1 — 2007.3), which are based on Linux 2.6.20 | ||
77 | and support additional scheduling policies, are discussed | ||
78 | on a separate page dedicated to the <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | ||
79 | </p> | ||
80 | <p class="nobottommargin"> | ||
81 | The first version of LITMUS<sup>RT</sup>, which was implemented in Spring 2006, | ||
82 | is based on Linux 2.6.9. | ||
83 | </p> | ||
84 | <h3>Development Plans</h3> | ||
85 | <p class="nomargin"> | ||
86 | Re-basing to the then-current Linux kernel version is scheduled for Spring 2010. There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details. | ||
87 | </p> | ||
88 | </div> | ||
89 | |||
90 | <h2 id="support">Support</h2> | ||
91 | <div class="box"> | ||
92 | <p class="nomargin"> | ||
93 | The LITMUS<sup>RT</sup> development effort is being supported by grants from SUN Corp., | ||
94 | Intel Corp., IBM Corp., The National Science Foundation (grant CCR 0615197), and The U.S. | ||
95 | Army Research Office (grant W911NF-06-1-0425). | ||
96 | </p> | ||
97 | </div> | ||
98 | |||
99 | <h2 id="collaborators">Collaborators</h2> | ||
100 | <div class="box"> | ||
101 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a | ||
102 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. | ||
103 | </p> | ||
104 | <p> | ||
105 | The implementation effort is carried out by students of the | ||
106 | <a href="http://www.cs.unc.edu/~anderson/real-time/">Real-Time Systems | ||
107 | Group</a> at the <a href="http://www.unc.edu">University of North Carolina | ||
108 | at Chapel Hill</a>: | ||
109 | </p> | ||
110 | <ul> | ||
111 | <li> | ||
112 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (current maintainer) | ||
113 | </li> | ||
114 | <li> <a href="http://www.cs.unc.edu/~jmc/">John M. Calandrino</a> <em>(graduated July 2009)</em> | ||
115 | </li> | ||
116 | </ul> | ||
117 | <p class="nobottommargin"> | ||
118 | (<a href="litmus2007.html#collaborators">Additional collaborators</a> contributed to earlier versions of LITMUS<sup>RT</sup>.) | ||
119 | </p> | ||
120 | </div> | ||
121 | |||
122 | |||
123 | <h2 id="publications">Publications</h2> | ||
124 | <div class="box"> | ||
125 | |||
126 | <ol class="nomargin"> | ||
127 | <li><p> | ||
128 | B. Brandenburg and J. Anderson, | ||
129 | “On the Implementation of Global Real-Time | ||
130 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. | ||
131 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.ps">Postscript</a>. | ||
132 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. | ||
133 | Longer version with all graphs: | ||
134 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.ps">Postscript</a>. | ||
135 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. | ||
136 | </p> | ||
137 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). | ||
138 | </p> | ||
139 | <ul> | ||
140 | <li> | ||
141 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> | ||
142 | </li> | ||
143 | </ul> | ||
144 | |||
145 | </li> | ||
146 | <li> | ||
147 | <p> | ||
148 | B. Brandenburg and J. Anderson, | ||
149 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, | ||
150 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. | ||
151 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. | ||
152 | Long version with blocking terms: | ||
153 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. | ||
154 | </p> | ||
155 | </li> | ||
156 | |||
157 | <li> | ||
158 | <p> | ||
159 | J. Calandrino and J. Anderson, | ||
160 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, | ||
161 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. | ||
162 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. | ||
163 | </p> | ||
164 | </li> | ||
165 | |||
166 | <li> | ||
167 | <p> | ||
168 | M. Mollison, B. Brandenburg, and J. Anderson, | ||
169 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, | ||
170 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. | ||
171 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. | ||
172 | </p> | ||
173 | </li> | ||
174 | |||
175 | <li> | ||
176 | <p> | ||
177 | B. Brandenburg and J. Anderson, | ||
178 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, | ||
179 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. | ||
180 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. | ||
181 | </p> | ||
182 | </li> | ||
183 | |||
184 | <li> | ||
185 | <p> | ||
186 | B. Brandenburg, J. Calandrino, and J. Anderson, | ||
187 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, | ||
188 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, | ||
189 | pp. 157-169, December 2008. | ||
190 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. | ||
191 | </p> | ||
192 | </li> | ||
193 | |||
194 | <li> | ||
195 | <p> | ||
196 | B. Brandenburg and J. Anderson, | ||
197 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, | ||
198 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, | ||
199 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. | ||
200 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. | ||
201 | </p> | ||
202 | <p><strong>Note:</strong> The work described in this paper took part in a branch that is currently not part of | ||
203 | the main distribution. For reference, we provide the branch as a separate download: | ||
204 | </p> | ||
205 | <ul> | ||
206 | <li> | ||
207 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> | ||
208 | </li> | ||
209 | <li> | ||
210 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> | ||
211 | </li> | ||
212 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a> | ||
213 | </li> | ||
214 | </ul> | ||
215 | <p>Please don't use this version for active development. If you are interested in this work, it would be best | ||
216 | to first port the desired features to a current version of LITMUS<sup>RT</sup> and merge them into the main distribution. | ||
217 | </p> | ||
218 | |||
219 | </li> | ||
220 | |||
221 | <li> | ||
222 | <p> | ||
223 | A. Block, B. Brandenburg, J. Anderson, | ||
224 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, | ||
225 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. | ||
226 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. | ||
227 | </p> | ||
228 | </li> | ||
229 | |||
230 | <li> | ||
231 | <p> | ||
232 | B. Brandenburg, J. Calandrino, A. Block, | ||
233 | H. Leontyev, and J. Anderson, “Real-Time Synchronization | ||
234 | on Multiprocessors: To Block or Not to Block, to Suspend or | ||
235 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded | ||
236 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | ||
237 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.ps">Postscript</a>. <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. | ||
238 | </p> | ||
239 | <p> | ||
240 | Extended version, including all graphs: | ||
241 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.ps">Postscript</a>, | ||
242 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. | ||
243 | </p> | ||
244 | </li> | ||
245 | |||
246 | <li> | ||
247 | <p> | ||
248 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, | ||
249 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th | ||
250 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. | ||
251 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.ps">Postscript</a>. | ||
252 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. | ||
253 | </p> | ||
254 | </li> | ||
255 | |||
256 | <li> | ||
257 | <p> | ||
258 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks | ||
259 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro | ||
260 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. | ||
261 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.ps">Postscript</a>, | ||
262 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. | ||
263 | </p> | ||
264 | </li> | ||
265 | |||
266 | |||
267 | <li> | ||
268 | <p> | ||
269 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, | ||
270 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time | ||
271 | Multiprocessor Schedulers", <cite>Proceedings of the 27th IEEE Real-Time Systems | ||
272 | Symposium</cite>, pp. 111-123, December 2006. | ||
273 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.ps">Postscript</a>, | ||
274 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. | ||
275 | </p> | ||
276 | </li> | ||
277 | |||
278 | </ol> | ||
279 | </div> | ||
280 | |||
281 | <h2 id="download">Download</h2> | ||
282 | <div class="box"> | ||
283 | <p class="notopmargin"> | ||
284 | The source code of LITMUS<sup>RT</sup> is made available as open source | ||
285 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU | ||
286 | General Public License (GPL)</a>. | ||
287 | </p> | ||
288 | <p> | ||
289 | The current release of LITMUS<sup>RT</sup> is 2008.3. | ||
290 | It consists of our Linux kernel modifications in the form of | ||
291 | a patch against Linux 2.6.24 and | ||
292 | |||
293 | <span class="src">liblitmus</span>, the user-space API for real-time | ||
294 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools | ||
295 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feather-trace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch). | ||
296 | </p> | ||
297 | |||
298 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.3</h3> | ||
299 | <div class="release"> | ||
300 | <p> | ||
301 | Based on Linux 2.6.24. Released in September 2009. | ||
302 | |||
303 | </p> | ||
304 | <h4>Files:</h4> | ||
305 | <ul> | ||
306 | <li> | ||
307 | <a href="download/2008.3/litmus-rt-2008.3.patch">litmus-rt-2008.3.patch</a> | ||
308 | </li> | ||
309 | <li> | ||
310 | <a href="download/2008.3/liblitmus-2008.3.tgz">liblitmus-2008.3.tgz</a> | ||
311 | </li> | ||
312 | <li> | ||
313 | <a href="download/2008.3/ft_tools-2008.3.tgz">ft_tools-2008.3.tgz</a> | ||
314 | </li> | ||
315 | |||
316 | <li><a href="download/2008.3/SHA256SUMS">SHA256 check sums</a> | ||
317 | </li> | ||
318 | </ul> | ||
319 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.2):</h4> | ||
320 | <ul> | ||
321 | <li> | ||
322 | <code>sys_null_call()</code>, a dummy system call that simplifies determining system call overheads. | ||
323 | </li> | ||
324 | <li> | ||
325 | Support for starting timers on remote CPUs via <code>hrtimer_start_on()</code>. | ||
326 | </li> | ||
327 | <li> | ||
328 | Support for dedicated release handling in GSN-EDF and a corresponding <code>/proc</code> interface. | ||
329 | </li> | ||
330 | <li> | ||
331 | Support for IPI latency tracing. | ||
332 | </li> | ||
333 | <li>Several bugfixes.</li> | ||
334 | <li>Switched to <a href="http://www.scons.org/">scons</a> as build system in libraries.</li> | ||
335 | <li>Support for cross compiling the libraries on x86-64 systems to i386 binaries (specify <code>ARCH=i386</code> in your environment).</li> | ||
336 | </ul> | ||
337 | <p> | ||
338 | Please consult the <a href="doc/changes.html">Change Log</a> for further details. | ||
339 | </p> | ||
340 | </div> | ||
341 | |||
342 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.2</h3> | ||
343 | <div class="release"> | ||
344 | <p> | ||
345 | Based on Linux 2.6.24. Released in December 2008. | ||
346 | |||
347 | </p> | ||
348 | <h4>Files:</h4> | ||
349 | <ul> | ||
350 | <li> | ||
351 | <a href="download/2008.2/litmus-rt-2008.2.patch">litmus-rt-2008.2.patch</a> | ||
352 | </li> | ||
353 | <li> | ||
354 | <a href="download/2008.2/liblitmus-2008.2.tgz">liblitmus-2008.2.tgz</a> | ||
355 | </li> | ||
356 | <li> | ||
357 | <a href="download/2008.2/ft_tools-2008.2.tgz">ft_tools-2008.2.tgz</a> | ||
358 | </li> | ||
359 | |||
360 | <li><a href="download/2008.2/SHA256SUMS">SHA256 check sums</a> | ||
361 | </li> | ||
362 | </ul> | ||
363 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.1):</h4> | ||
364 | <ul> | ||
365 | <li>PFAIR implementation can now recover from missed tick interrupts. | ||
366 | </li> | ||
367 | <li>A bug in the PFAIR prioritization function was corrected. | ||
368 | </li> | ||
369 | <li>Support for synchronous task system releases in the EDF-based schedulers was fixed. | ||
370 | </li> | ||
371 | <li><span class="src">sched_trace()</span> support was re-implemented based on Feather-Trace. | ||
372 | </li> | ||
373 | <li>Added the tool <span class="src">showst</span> to liblitmus, which can convert <span class="src">sched_trace()</span> binary data to a human-readable format. | ||
374 | </li> | ||
375 | <li> | ||
376 | Assorted bug fixes. | ||
377 | </li> | ||
378 | </ul> | ||
379 | </div> | ||
380 | |||
381 | <h3 class="relname">LITMUS<sup>RT</sup> 2008.1</h3> | ||
382 | <div class="release"> | ||
383 | <p> | ||
384 | Based on Linux 2.6.24. Released in July 2008. | ||
385 | </p> | ||
386 | <h4>Files:</h4> | ||
387 | <ul> | ||
388 | <li> | ||
389 | <a href="download/2008.1/litmus-rt-2008.1.patch">litmus-rt-2008.1.patch</a> | ||
390 | </li> | ||
391 | <li> | ||
392 | <a href="download/2008.1/liblitmus-2008.1.tgz">liblitmus-2008.1.tgz</a> | ||
393 | </li> | ||
394 | <li><a href="download/2008.1/SHA256SUMS">SHA256 check sums</a> | ||
395 | </li> | ||
396 | </ul> | ||
397 | |||
398 | <h4>Major changes (since LITMUS<sup>RT</sup> 2007.3):</h4> | ||
399 | <ul> | ||
400 | <li>LITMUS<sup>RT</sup> was ported to Linux 2.6.24. | ||
401 | </li> | ||
402 | <li>LITMUS<sup>RT</sup> was ported to <span class="src">sparc64</span>. | ||
403 | </li> | ||
404 | <li>LITMUS<sup>RT</sup> is now a proper scheduling class (<span class="src">SCHED_LITMUS</span>). | ||
405 | </li> | ||
406 | <li> | ||
407 | LITMUS<sup>RT</sup> queues are now based on mergeable heaps. | ||
408 | </li> | ||
409 | <li>Support for multi-threaded real-time tasks. | ||
410 | </li> | ||
411 | <li>Scheduler plugins can be selected at runtime; no reboot required. | ||
412 | </li> | ||
413 | <li> | ||
414 | Many bug fixes. | ||
415 | </li> | ||
416 | </ul> | ||
417 | </div> | ||
418 | |||
419 | <p> | ||
420 | Please note that the current implementation is a <em>prototype</em> with | ||
421 | certain limitations. Most notably, it is not secure in a multiuser context, | ||
422 | <em>i.e.</em>, real-time system calls do not require superuser | ||
423 | privileges. | ||
424 | </p> | ||
425 | |||
426 | <p class="nobottommargin"> | ||
427 | Older releases: <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | ||
428 | </p> | ||
429 | |||
430 | </div> | ||
431 | |||
432 | |||
433 | |||
434 | <h2 id="install">Installation</h2> | ||
435 | <div class="box"> | ||
436 | <p class="notopmargin"> | ||
437 | The current release of LITMUS<sup>RT</sup> consists of an | ||
438 | extension of the Linux kernel that adds support for the sporadic task | ||
439 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | ||
440 | well as a user-space library that provides the LITMUS<sup>RT</sup> | ||
441 | real-time API. Note that the current implementation only works on the | ||
442 | Intel x86-32 and sparc64 architectures. | ||
443 | </p> | ||
444 | <h3>Patching the Kernel</h3> | ||
445 | <p class="notopmargin"> | ||
446 | The extension to the Linux kernel is released as a patch against Linux | ||
447 | 2.6.24. To install the LITMUS<sup>RT</sup> kernel, first <a | ||
448 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2">download the Linux | ||
449 | kernel 2.6.24</a> and untar it in a directory of your choice (hereafter | ||
450 | referred to as <span class="src">$DIR</span>). Second, apply the | ||
451 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) | ||
452 | and configure, compile, and install the kernel as usual. The patch is <span | ||
453 | class="src">-p1</span> applicable. | ||
454 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and | ||
455 | compiled with the following commands: | ||
456 | </p> | ||
457 | <pre class="shell"> | ||
458 | cd $DIR | ||
459 | # get Linux 2.6.24 | ||
460 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.24.tar.bz2 | ||
461 | tar xjf linux-2.6.24.tar.bz2 | ||
462 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.3/litmus-rt-2008.3.patch | ||
463 | mv linux-2.6.24 litmus2008 | ||
464 | # apply the LITMUS RT patch | ||
465 | cd litmus2008 | ||
466 | patch -p1 < ../litmus-rt-2008.3.patch | ||
467 | # create a working kernel configuration | ||
468 | # - select HZ=1000 | ||
469 | # - enable in-kernel preemptions | ||
470 | # - disable NO_HZ | ||
471 | # - don't use power management options like frequency scaling | ||
472 | # - disable support for group scheduling | ||
473 | make menuconfig | ||
474 | # compile the kernel | ||
475 | make bzImage | ||
476 | make modules | ||
477 | # proceed to install kernel, build initrd, etc. | ||
478 | ... | ||
479 | </pre> | ||
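<p>
The downloaded files can optionally be verified against the published
<a href="download/2008.3/SHA256SUMS">SHA256 check sums</a>. A minimal sketch, assuming the
checksum file lists the patch under its plain file name:
</p>
<pre class="shell">
cd $DIR
wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.3/SHA256SUMS
# check only the file(s) actually downloaded
grep litmus-rt-2008.3.patch SHA256SUMS | sha256sum -c -
</pre>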
480 | <p> | ||
481 | When configuring the kernel, note that there is a menu (at the very end of the list) | ||
482 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, <a href="download/2008.3/qemu-config">we provide a configuration that is known to work under QEMU</a>. | ||
483 | </p> | ||
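<p>
As a rough guide, the configuration hints given in the comments above correspond to
<span class="src">.config</span> entries like the following (a sketch only; exact option
names and the group scheduling option vary between kernel versions, so always verify the
generated <span class="src">.config</span>):
</p>
<pre class="shell">
CONFIG_HZ_1000=y
CONFIG_HZ=1000
CONFIG_PREEMPT=y
# CONFIG_NO_HZ is not set
# CONFIG_CPU_FREQ is not set
# (additionally, disable the group scheduling option under "General setup")
</pre>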
484 | |||
485 | <h3>Libraries</h3> | ||
486 | <p class="notopmargin"> | ||
487 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, | ||
488 | depends on the LITMUS<sup>RT</sup> kernel and provides its own build system (based on <a href="http://www.scons.org/">scons</a>). | ||
489 | In order to compile <span class="src">liblitmus</span>, you need to adjust the | ||
490 | variable <span class="src">LITMUS_KERNEL</span> in the <span class="src">SConstruct</span> file to point to your | ||
491 | copy of the kernel. | ||
492 | </p> | ||
493 | <pre class="shell"> | ||
494 | cd $DIR | ||
495 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2008.3/liblitmus-2008.3.tgz | ||
496 | tar xzf liblitmus-2008.3.tgz | ||
497 | cd liblitmus | ||
498 | # change LITMUS_KERNEL in SConstruct to point to the kernel source | ||
499 | scons | ||
500 | </pre> | ||
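<p>
If preferred, the <span class="src">LITMUS_KERNEL</span> adjustment can be scripted instead of
editing <span class="src">SConstruct</span> by hand. A sketch, assuming the variable is assigned
on a single line and that the patched kernel tree lives in <span class="src">$DIR/litmus2008</span>:
</p>
<pre class="shell">
cd $DIR/liblitmus
# point the build system at the patched kernel tree
sed -i "s|^LITMUS_KERNEL.*|LITMUS_KERNEL = '$DIR/litmus2008'|" SConstruct
scons
</pre>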
501 | <p class="nobottommargin"> | ||
502 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> | ||
503 | real-time API as provided by <span class="src">liblitmus</span>. | ||
504 | </p> | ||
505 | |||
506 | </div> | ||
507 | |||
508 | |||
509 | <h2 id="doc">Documentation</h2> | ||
510 | <div class="box"> | ||
511 | |||
512 | <p class="notopmargin"> | ||
513 | Unfortunately, most of the documentation has yet to be written. To get an overview of | ||
514 | the architecture of the kernel extension, we recommend reading the paper | ||
515 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: | ||
516 | A Status Report”</a>. | ||
517 | </p> | ||
518 | <h3>Real-Time Scheduling Policies</h3> | ||
519 | <p class="qa"> | ||
520 | The kernel contains the following real-time scheduling policy implementations: | ||
521 | </p> | ||
522 | <ul> | ||
523 | <li> | ||
524 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, | ||
525 | </li> | ||
526 | <li> | ||
527 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | ||
528 | FMLP, | ||
529 | </li> | ||
530 | <li> | ||
531 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | ||
532 | FMLP, | ||
533 | </li> | ||
534 | <li> | ||
535 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and | ||
536 | </li> | ||
537 | <li> | ||
538 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. | ||
539 | </li> | ||
540 | </ul> | ||
541 | <p> | ||
542 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. | ||
543 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display | ||
544 | the name of the currently active policy. | ||
545 | </p> | ||
546 | <h3>Changing the Active Policy</h3> | ||
547 | <p class="qa"> | ||
548 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) | ||
549 | to select a new plugin at run time. | ||
550 | </p> | ||
551 | <div class="screenshot"> | ||
552 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> | ||
553 | </div> | ||
554 | <p> | ||
555 | Only root can change the active policy, and only when there are no real-time tasks present. | ||
556 | </p> | ||
557 | <p> | ||
558 | If you do not have the <span class="src">dialog</span> utility installed, then you can still use <span class="src">setsched</span> by passing the desired scheduling policy as a command-line parameter, <em>e.g.</em>, type <span class="src">setsched PFAIR</span> to activate the PFAIR plugin. | ||
559 | </p> | ||
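<p>
For example, switching from the default "Linux" policy to GSN-EDF and back might look as
follows (run as root, with no real-time tasks present):
</p>
<pre class="shell">
showsched          # print the name of the currently active plugin
setsched GSN-EDF   # activate the GSN-EDF plugin
showsched          # should now report GSN-EDF
setsched Linux     # switch back to the placeholder policy
</pre>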
560 | <h3>Writing Real-Time Tasks</h3> | ||
561 | <p class="qa"> | ||
562 | The user space library that provides the LITMUS<sup>RT</sup> API, | ||
563 | <span class="src">liblitmus</span>, contains two example real-time tasks | ||
564 | (<span class="src">base_task.c</span> and | ||
565 | <span class="src">base_mt_task.c</span>) | ||
566 | that both illustrate how to use the API and provide a skeleton for real-time | ||
567 | task development. To get started with development, please take a look at these example | ||
568 | programs. | ||
569 | </p> | ||
570 | <h3>Tracing Overheads and Scheduling Decisions</h3> | ||
571 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. | ||
572 | </p> | ||
573 | <p class="nobottommargin"> | ||
574 | Please contact <span class="src">bbb[AT]cs.unc.edu</span> if you have any | ||
575 | questions. | ||
576 | </p> | ||
577 | |||
578 | |||
579 | </div> | ||
580 | |||
581 | <h2 id="credits">Credits</h2> | ||
582 | <div class="box"> | ||
583 | <div style="float: right;"> | ||
584 | <a href="http://validator.w3.org/check?uri=referer"><img | ||
585 | src="http://www.w3.org/Icons/valid-xhtml10" | ||
586 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> | ||
587 | </div> | ||
588 | |||
589 | <p class="nomargin"> | ||
590 | Linux is a registered trademark of Linus Torvalds. <br /> The | ||
591 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> | ||
592 | Web design by Björn Brandenburg. | ||
593 | </p> | ||
594 | |||
595 | |||
596 | </div> | ||
597 | |||
598 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> | ||
599 | </script> | ||
600 | <script type="text/javascript"> | ||
601 | _uacct = "UA-3184628-1"; | ||
602 | urchinTracker(); | ||
603 | </script> | ||
604 | </body> | ||
605 | </html> | ||